Migration pull 2018-09-26

This supersedes Juan's pull from the 13th
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJbq7zSAAoJEAUWMx68W/3nrSAP/jU3MAg5/Cx+AMbNjQntTllN
 0kd1lJPNOW9IBmoKKwu+mJ0SRHRi5kghs5NXuGBYTMcpVoZk0I0Vf8/koq+Yn+2g
 lbZw4Nv7a8h0aOoXo6lNa1u/VIRMCwgRvLWzP3HVIjlVf1Uup+45zTMynt6QnnMi
 w42ctSJXVl5asNp11od8BUJCSZ4C9OI2Uu6Z54F3q2q3GdCEKH0wKkxD19WSBimf
 /j82TSXGctdJGrwWCqEh2yapTG0cYeaYPCTx6Nb8mc/+mqR/gdvquM8plIHTVqEP
 0eBFl/rZp1gnPqN+TIpTBqngPiIO1XezQvg/vXQThbnUaaaz1axnAAefXeXabz/W
 /JPWZdDue5MX2MTtD5uoz/9RKQNfOWwCB+phTDJreqkdSNjeQmrIxItDXksPaD8n
 diNVJd0Erg377E3mt3wn2mJH4PscwJtTk5s8dhLECAqypybqwGRMqKpomXKfodQj
 /bIjjQpsqV0NyCNCcKSWOrTAnZl7KDohUL4KTPi49CgLMTO+J6YqSsGrhoVyU541
 m9uV8xfKLBfX2ebOqEvpu9gty2t21yVFXNHDenJA5sdiF63LvJ5MvD6Hi2zCuf3S
 tRrjSCqioao4BFhGjuQRc20Plsv8YHpfKLy/+w/SN8QF0Gmv49e/WSWcFuE1GuN8
 loI3NEahqd0VrpW/x/HD
 =JP00
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20180926a' into staging

Migration pull 2018-09-26

This supersedes Juan's pull from the 13th

# gpg: Signature made Wed 26 Sep 2018 18:07:30 BST
# gpg:                using RSA key 0516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>"
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A  9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20180926a:
  migration/ram.c: Avoid taking address of fields in packed MultiFDInit_t struct
  migration: fix the compression code
  migration: fix QEMUFile leak
  tests/migration: Speed up the test on ppc64
  migration: cleanup in error paths in loadvm
  migration/postcopy: Clear have_listen_thread
  tests/migration: Add migration-test header file
  tests/migration: Support cross compilation in generating boot header file
  tests/migration: Convert x86 boot block compilation script into Makefile
  migration: use save_page_use_compression in flush_compressed_data
  migration: show the statistics of compression
  migration: do not flush_compressed_data at the end of iteration
  Add a hint message to loadvm and exits on failure
  migration: handle the error condition properly
  migration: fix calculating xbzrle_counters.cache_miss_rate
  migration/rdma: Fix uninitialised rdma_return_path

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell 2018-09-28 17:07:23 +01:00
commit 042938f46e
15 changed files with 255 additions and 98 deletions

hmp.c

@@ -271,6 +271,19 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
info->xbzrle_cache->overflow);
}
if (info->has_compression) {
monitor_printf(mon, "compression pages: %" PRIu64 " pages\n",
info->compression->pages);
monitor_printf(mon, "compression busy: %" PRIu64 "\n",
info->compression->busy);
monitor_printf(mon, "compression busy rate: %0.2f\n",
info->compression->busy_rate);
monitor_printf(mon, "compressed size: %" PRIu64 "\n",
info->compression->compressed_size);
monitor_printf(mon, "compression rate: %0.2f\n",
info->compression->compression_rate);
}
if (info->has_cpu_throttle_percentage) {
monitor_printf(mon, "cpu throttle percentage: %" PRIu64 "\n",
info->cpu_throttle_percentage);
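
For illustration, with compression enabled the block added above would print extra lines like the following in 'info migrate'; only the labels come from the format strings, the numbers here are made up:

    compression pages: 196800 pages
    compression busy: 4000
    compression busy rate: 0.04
    compressed size: 403856640
    compression rate: 2.00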

migration/migration.c

@@ -758,6 +758,18 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
info->xbzrle_cache->overflow = xbzrle_counters.overflow;
}
if (migrate_use_compression()) {
info->has_compression = true;
info->compression = g_malloc0(sizeof(*info->compression));
info->compression->pages = compression_counters.pages;
info->compression->busy = compression_counters.busy;
info->compression->busy_rate = compression_counters.busy_rate;
info->compression->compressed_size =
compression_counters.compressed_size;
info->compression->compression_rate =
compression_counters.compression_rate;
}
if (cpu_throttle_active()) {
info->has_cpu_throttle_percentage = true;
info->cpu_throttle_percentage = cpu_throttle_get_percentage();
@@ -2268,7 +2280,10 @@ out:
*/
if (postcopy_pause_return_path_thread(ms)) {
/* Reload rp, reset the rest */
rp = ms->rp_state.from_dst_file;
if (rp != ms->rp_state.from_dst_file) {
qemu_fclose(rp);
rp = ms->rp_state.from_dst_file;
}
ms->rp_state.error = false;
goto retry;
}

migration/ram.c

@@ -301,10 +301,19 @@ struct RAMState {
uint64_t num_dirty_pages_period;
/* xbzrle misses since the beginning of the period */
uint64_t xbzrle_cache_miss_prev;
/* number of iterations at the beginning of period */
uint64_t iterations_prev;
/* Iterations since start */
uint64_t iterations;
/* compression statistics since the beginning of the period */
/* number of times that no free thread was available to compress data */
uint64_t compress_thread_busy_prev;
/* amount bytes after compression */
uint64_t compressed_size_prev;
/* amount of compressed pages */
uint64_t compress_pages_prev;
/* total handled target pages at the beginning of period */
uint64_t target_page_count_prev;
/* total handled target pages since start */
uint64_t target_page_count;
/* number of dirty bits in the bitmap */
uint64_t migration_dirty_pages;
/* protects modification of the bitmap */
@@ -338,6 +347,8 @@ struct PageSearchStatus {
};
typedef struct PageSearchStatus PageSearchStatus;
CompressionStats compression_counters;
struct CompressParam {
bool done;
bool quit;
@@ -420,28 +431,14 @@ static void *do_data_compress(void *opaque)
return NULL;
}
static inline void terminate_compression_threads(void)
{
int idx, thread_count;
thread_count = migrate_compress_threads();
for (idx = 0; idx < thread_count; idx++) {
qemu_mutex_lock(&comp_param[idx].mutex);
comp_param[idx].quit = true;
qemu_cond_signal(&comp_param[idx].cond);
qemu_mutex_unlock(&comp_param[idx].mutex);
}
}
static void compress_threads_save_cleanup(void)
{
int i, thread_count;
if (!migrate_use_compression()) {
if (!migrate_use_compression() || !comp_param) {
return;
}
terminate_compression_threads();
thread_count = migrate_compress_threads();
for (i = 0; i < thread_count; i++) {
/*
@@ -451,6 +448,12 @@ static void compress_threads_save_cleanup(void)
if (!comp_param[i].file) {
break;
}
qemu_mutex_lock(&comp_param[i].mutex);
comp_param[i].quit = true;
qemu_cond_signal(&comp_param[i].cond);
qemu_mutex_unlock(&comp_param[i].mutex);
qemu_thread_join(compress_threads + i);
qemu_mutex_destroy(&comp_param[i].mutex);
qemu_cond_destroy(&comp_param[i].cond);
@@ -648,8 +651,8 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
return -1;
}
be32_to_cpus(&msg.magic);
be32_to_cpus(&msg.version);
msg.magic = be32_to_cpu(msg.magic);
msg.version = be32_to_cpu(msg.version);
if (msg.magic != MULTIFD_MAGIC) {
error_setg(errp, "multifd: received packet magic %x "
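
For illustration, a standalone C sketch (not QEMU code) of the by-value byte-swap pattern adopted above and in the packet hunks below: the packed struct stands in for MultiFDInit_t and ntohl() for be32_to_cpu(); the point is that nothing takes the address of a packed field, which the removed be32_to_cpus() calls did and which is unsafe on strict-alignment hosts.

    #include <arpa/inet.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct __attribute__((packed)) init_packet {
        uint32_t magic;
        uint32_t version;
    };

    int main(void)
    {
        struct init_packet msg = { .magic = htonl(0x11223344),
                                   .version = htonl(1) };

        /*
         * Convert by value: load the possibly-unaligned field, swap it,
         * store it back.  No pointer to a packed member is ever formed.
         */
        msg.magic = ntohl(msg.magic);
        msg.version = ntohl(msg.version);

        printf("magic 0x%" PRIx32 " version %" PRIu32 "\n",
               msg.magic, msg.version);
        return 0;
    }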
@@ -734,7 +737,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
RAMBlock *block;
int i;
be32_to_cpus(&packet->magic);
packet->magic = be32_to_cpu(packet->magic);
if (packet->magic != MULTIFD_MAGIC) {
error_setg(errp, "multifd: received packet "
"magic %x and expected magic %x",
@@ -742,7 +745,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
return -1;
}
be32_to_cpus(&packet->version);
packet->version = be32_to_cpu(packet->version);
if (packet->version != MULTIFD_VERSION) {
error_setg(errp, "multifd: received packet "
"version %d and expected version %d",
@@ -752,7 +755,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
p->flags = be32_to_cpu(packet->flags);
be32_to_cpus(&packet->size);
packet->size = be32_to_cpu(packet->size);
if (packet->size > migrate_multifd_page_count()) {
error_setg(errp, "multifd: received packet "
"with size %d and expected maximum size %d",
@@ -1592,21 +1595,42 @@ uint64_t ram_pagesize_summary(void)
static void migration_update_rates(RAMState *rs, int64_t end_time)
{
uint64_t iter_count = rs->iterations - rs->iterations_prev;
uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
double compressed_size;
/* calculate period counters */
ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
/ (end_time - rs->time_last_bitmap_sync);
if (!iter_count) {
if (!page_count) {
return;
}
if (migrate_use_xbzrle()) {
xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
rs->xbzrle_cache_miss_prev) / iter_count;
rs->xbzrle_cache_miss_prev) / page_count;
rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
}
if (migrate_use_compression()) {
compression_counters.busy_rate = (double)(compression_counters.busy -
rs->compress_thread_busy_prev) / page_count;
rs->compress_thread_busy_prev = compression_counters.busy;
compressed_size = compression_counters.compressed_size -
rs->compressed_size_prev;
if (compressed_size) {
double uncompressed_size = (compression_counters.pages -
rs->compress_pages_prev) * TARGET_PAGE_SIZE;
/* Compression-Ratio = Uncompressed-size / Compressed-size */
compression_counters.compression_rate =
uncompressed_size / compressed_size;
rs->compress_pages_prev = compression_counters.pages;
rs->compressed_size_prev = compression_counters.compressed_size;
}
}
}
static void migration_bitmap_sync(RAMState *rs)
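
For illustration, a standalone sketch (not QEMU code) of the per-period arithmetic above, with made-up numbers; 4096 stands in for TARGET_PAGE_SIZE:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const double page_size = 4096;           /* stands in for TARGET_PAGE_SIZE */
        uint64_t page_count  = 100000;           /* target pages handled this period */
        uint64_t busy_delta  = 3000;             /* no-free-thread events this period */
        uint64_t pages_delta = 80000;            /* pages compressed this period */
        uint64_t size_delta  = 40 * 1024 * 1024; /* compressed bytes this period */

        double busy_rate = (double)busy_delta / page_count;              /* 0.03  */
        double compression_rate = pages_delta * page_size / size_delta;  /* ~7.81 */

        printf("busy rate %.2f, compression rate %.2f\n",
               busy_rate, compression_rate);
        return 0;
    }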
@@ -1662,7 +1686,7 @@ static void migration_bitmap_sync(RAMState *rs)
migration_update_rates(rs, end_time);
rs->iterations_prev = rs->iterations;
rs->target_page_count_prev = rs->target_page_count;
/* reset period counters */
rs->time_last_bitmap_sync = end_time;
@@ -1888,17 +1912,25 @@ exit:
static void
update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
{
ram_counters.transferred += bytes_xmit;
if (param->zero_page) {
ram_counters.duplicate++;
return;
}
ram_counters.transferred += bytes_xmit;
/* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
compression_counters.compressed_size += bytes_xmit - 8;
compression_counters.pages++;
}
static bool save_page_use_compression(RAMState *rs);
static void flush_compressed_data(RAMState *rs)
{
int idx, len, thread_count;
if (!migrate_use_compression()) {
if (!save_page_use_compression(rs)) {
return;
}
thread_count = migrate_compress_threads();
@@ -1996,17 +2028,22 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
pss->page = 0;
pss->block = QLIST_NEXT_RCU(pss->block, next);
if (!pss->block) {
/*
* If memory migration starts over, we will meet a dirtied page
* which may still exist in the compression threads' ring, so we
* should flush the compressed data to make sure the new page
* is not overwritten by the old one in the destination.
*
* Also If xbzrle is on, stop using the data compression at this
* point. In theory, xbzrle can do better than compression.
*/
flush_compressed_data(rs);
/* Hit the end of the list */
pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
/* Flag that we've looped */
pss->complete_round = true;
rs->ram_bulk_stage = false;
if (migrate_use_xbzrle()) {
/* If xbzrle is on, stop using the data compression at this
* point. In theory, xbzrle can do better than compression.
*/
flush_compressed_data(rs);
}
}
/* Didn't find anything this time, but try again on the new block */
*again = true;
@@ -2259,6 +2296,7 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
return true;
}
compression_counters.busy++;
return false;
}
@@ -2372,7 +2410,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
*
* Called within an RCU critical section.
*
* Returns the number of pages written where zero means no dirty pages
* Returns the number of pages written where zero means no dirty pages,
* or negative on error
*
* @rs: current RAM state
* @last_stage: if we are at the completion stage
@@ -3196,7 +3235,13 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
done = 1;
break;
}
rs->iterations++;
if (pages < 0) {
qemu_file_set_error(f, pages);
break;
}
rs->target_page_count += pages;
/* we want to check in the 1st loop, just in case it was the 1st time
and we had to sync the dirty bitmap.
@@ -3212,7 +3257,6 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
}
i++;
}
flush_compressed_data(rs);
rcu_read_unlock();
/*
@@ -3238,7 +3282,7 @@ out:
/**
* ram_save_complete: function called to send the remaining amount of ram
*
* Returns zero to indicate success
* Returns zero to indicate success or negative on error
*
* Called with iothread lock
*
@@ -3249,6 +3293,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
{
RAMState **temp = opaque;
RAMState *rs = *temp;
int ret = 0;
rcu_read_lock();
@@ -3269,6 +3314,10 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
if (pages == 0) {
break;
}
if (pages < 0) {
ret = pages;
break;
}
}
flush_compressed_data(rs);
@@ -3280,7 +3329,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);
return 0;
return ret;
}
static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,

migration/ram.h

@@ -36,6 +36,7 @@
extern MigrationStats ram_counters;
extern XBZRLECacheStats xbzrle_counters;
extern CompressionStats compression_counters;
int xbzrle_cache_resize(int64_t new_size, Error **errp);
uint64_t ram_bytes_remaining(void);

migration/rdma.c

@@ -4012,7 +4012,7 @@ static void rdma_accept_incoming_migration(void *opaque)
void rdma_start_incoming_migration(const char *host_port, Error **errp)
{
int ret;
RDMAContext *rdma, *rdma_return_path;
RDMAContext *rdma, *rdma_return_path = NULL;
Error *local_err = NULL;
trace_rdma_start_incoming_migration();

migration/savevm.c

@@ -1679,6 +1679,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
qemu_loadvm_state_cleanup();
rcu_unregister_thread();
mis->have_listen_thread = false;
return NULL;
}
@@ -2078,7 +2079,9 @@ qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
/* Find savevm section */
se = find_se(idstr, instance_id);
if (se == NULL) {
error_report("Unknown savevm section or instance '%s' %d",
error_report("Unknown savevm section or instance '%s' %d. "
"Make sure that your current VM setup matches your "
"saved VM setup, including any hotplugged devices",
idstr, instance_id);
return -EINVAL;
}
@@ -2330,11 +2333,13 @@ int qemu_loadvm_state(QEMUFile *f)
if (migrate_get_current()->send_configuration) {
if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
error_report("Configuration section missing");
qemu_loadvm_state_cleanup();
return -EINVAL;
}
ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
if (ret) {
qemu_loadvm_state_cleanup();
return ret;
}
}

qapi/migration.json

@@ -75,6 +75,27 @@
'cache-miss': 'int', 'cache-miss-rate': 'number',
'overflow': 'int' } }
##
# @CompressionStats:
#
# Detailed migration compression statistics
#
# @pages: amount of pages compressed and transferred to the target VM
#
# @busy: count of times that no free thread was available to compress data
#
# @busy-rate: rate of thread busy
#
# @compressed-size: amount of bytes after compression
#
# @compression-rate: rate of compressed size
#
# Since: 3.1
##
{ 'struct': 'CompressionStats',
'data': {'pages': 'int', 'busy': 'int', 'busy-rate': 'number',
'compressed-size': 'int', 'compression-rate': 'number' } }
##
# @MigrationStatus:
#
@@ -172,6 +193,8 @@
# only present when the postcopy-blocktime migration capability
# is enabled. (Since 3.0)
#
# @compression: migration compression statistics, only returned if compression
# feature is on and status is 'active' or 'completed' (Since 3.1)
#
# Since: 0.14.0
##
@@ -186,7 +209,8 @@
'*cpu-throttle-percentage': 'int',
'*error-desc': 'str',
'*postcopy-blocktime' : 'uint32',
'*postcopy-vcpu-blocktime': ['uint32']} }
'*postcopy-vcpu-blocktime': ['uint32'],
'*compression': 'CompressionStats'} }
##
# @query-migrate:
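
For illustration only, a query-migrate reply carrying the new optional member might look roughly like this; the field names follow the schema above, the values are invented and the other MigrationInfo members are omitted:

    { "return": { "status": "active",
                  "compression": { "pages": 196800,
                                   "busy": 4000,
                                   "busy-rate": 0.04,
                                   "compressed-size": 403856640,
                                   "compression-rate": 2.00 } } }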

tests/migration-test.c

@@ -21,11 +21,13 @@
#include "chardev/char.h"
#include "sysemu/sysemu.h"
#include "migration/migration-test.h"
/* TODO actually test the results and get rid of this */
#define qtest_qmp_discard_response(...) qobject_unref(qtest_qmp(__VA_ARGS__))
const unsigned start_address = 1024 * 1024;
const unsigned end_address = 100 * 1024 * 1024;
unsigned start_address;
unsigned end_address;
bool got_stop;
static bool uffd_feature_thread_id;
@@ -80,10 +82,10 @@ static bool ufd_version_check(void)
static const char *tmpfs;
/* A simple PC boot sector that modifies memory (1-100MB) quickly
* outputting a 'B' every so often if it's still running.
/* The boot file modifies memory area in [start_address, end_address)
* repeatedly. It outputs a 'B' at a fixed rate while it's still running.
*/
#include "tests/migration/x86-a-b-bootblock.h"
#include "tests/migration/i386/a-b-bootblock.h"
static void init_bootfile_x86(const char *bootpath)
{
@@ -270,11 +272,11 @@ static void wait_for_migration_pass(QTestState *who)
static void check_guests_ram(QTestState *who)
{
/* Our ASM test will have been incrementing one byte from each page from
* 1MB to <100MB in order.
* This gives us a constraint that any page's byte should be equal or less
* than the previous pages byte (mod 256); and they should all be equal
* except for one transition at the point where we meet the incrementer.
* (We're running this with the guest stopped).
* start_address to < end_address in order. This gives us a constraint
* that any page's byte should be equal or less than the previous pages
* byte (mod 256); and they should all be equal except for one transition
* at the point where we meet the incrementer. (We're running this with
* the guest stopped).
*/
unsigned address;
uint8_t first_byte;
@@ -285,7 +287,8 @@ static void check_guests_ram(QTestState *who)
qtest_memread(who, start_address, &first_byte, 1);
last_byte = first_byte;
for (address = start_address + 4096; address < end_address; address += 4096)
for (address = start_address + TEST_MEM_PAGE_SIZE; address < end_address;
address += TEST_MEM_PAGE_SIZE)
{
uint8_t b;
qtest_memread(who, address, &b, 1);
@@ -437,6 +440,8 @@ static int test_migrate_start(QTestState **from, QTestState **to,
" -drive file=%s,format=raw"
" -incoming %s",
accel, tmpfs, bootpath, uri);
start_address = X86_TEST_MEM_START;
end_address = X86_TEST_MEM_END;
} else if (strcmp(arch, "ppc64") == 0) {
cmd_src = g_strdup_printf("-machine accel=%s -m 256M -nodefaults"
" -name source,debug-threads=on"
@@ -451,6 +456,9 @@
" -serial file:%s/dest_serial"
" -incoming %s",
accel, tmpfs, uri);
start_address = PPC_TEST_MEM_START;
end_address = PPC_TEST_MEM_END;
} else {
g_assert_not_reached();
}
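
For illustration, a standalone C sketch (not the test code itself) of the constraint that check_guests_ram() verifies on one sampled byte per page, as described in the comment above:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Bytes may only stay equal or drop by exactly one (mod 256), and the
     * drop may happen at most once: the point where the guest's incrementer
     * was interrupted. */
    bool ram_pattern_ok(const uint8_t *byte_per_page, size_t pages)
    {
        bool hit_edge = false;

        for (size_t i = 1; i < pages; i++) {
            uint8_t prev = byte_per_page[i - 1];
            uint8_t cur = byte_per_page[i];

            if (cur == prev) {
                continue;                     /* same pass of the incrementer */
            }
            if ((uint8_t)(cur + 1) == prev && !hit_edge) {
                hit_edge = true;              /* the one allowed transition */
                continue;
            }
            return false;                     /* anything else is corruption */
        }
        return true;
    }

    int main(void)
    {
        /* eight hypothetical pages; the incrementer got through the first three */
        const uint8_t sample[] = { 7, 7, 7, 6, 6, 6, 6, 6 };
        return ram_pattern_ok(sample, sizeof(sample)) ? 0 : 1;
    }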

tests/migration/Makefile

@@ -0,0 +1,35 @@
#
# Copyright (c) 2018 Red Hat, Inc. and/or its affiliates
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
TARGET_LIST = i386
SRC_PATH = ../..
override define __note
/* This file is automatically generated from the assembly file in
* tests/migration/$@. Edit that file and then run "make all"
* inside tests/migration to update, and then remember to send both
* the header and the assembler differences in your patch submission.
*/
endef
export __note
find-arch-cross-cc = $(lastword $(shell grep -h "CROSS_CC_GUEST=" $(wildcard $(SRC_PATH)/$(patsubst i386,*86*,$(1))-softmmu/config-target.mak) /dev/null))
parse-cross-prefix = $(subst gcc,,$(patsubst cc,gcc,$(patsubst CROSS_CC_GUEST="%",%,$(call find-arch-cross-cc,$(1)))))
gen-cross-prefix = $(patsubst %-,CROSS_PREFIX=%-,$(call parse-cross-prefix,$(1)))
.PHONY: all $(TARGET_LIST)
all: $(TARGET_LIST)
$(TARGET_LIST):
$(MAKE) -C $@ $(call gen-cross-prefix,$@)
clean:
for target in $(TARGET_LIST); do \
$(MAKE) -C $$target clean; \
done
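
For illustration, assuming an i386-softmmu build directory that was configured with a cross compiler, the helper functions above would expand roughly as follows; the compiler name is hypothetical:

    # Hypothetical expansion for the i386 target:
    #   $(SRC_PATH)/i386-softmmu/config-target.mak contains
    #       CROSS_CC_GUEST="i686-linux-gnu-gcc"
    #   find-arch-cross-cc  -> CROSS_CC_GUEST="i686-linux-gnu-gcc"
    #   parse-cross-prefix  -> i686-linux-gnu-
    #   gen-cross-prefix    -> CROSS_PREFIX=i686-linux-gnu-
    # so the "i386" target runs: $(MAKE) -C i386 CROSS_PREFIX=i686-linux-gnu-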

tests/migration/i386/Makefile

@@ -0,0 +1,22 @@
# To specify cross compiler prefix, use CROSS_PREFIX=
# $ make CROSS_PREFIX=x86_64-linux-gnu-
.PHONY: all clean
all: a-b-bootblock.h
a-b-bootblock.h: x86.bootsect
echo "$$__note" > header.tmp
xxd -i $< | sed -e 's/.*int.*//' >> header.tmp
mv header.tmp $@
x86.bootsect: x86.boot
dd if=$< of=$@ bs=256 count=2 skip=124
x86.boot: x86.o
$(CROSS_PREFIX)objcopy -O binary $< $@
x86.o: a-b-bootblock.S
$(CROSS_PREFIX)gcc -m32 -march=i486 -c $< -o $@
clean:
@rm -rf *.boot *.o *.bootsect

tests/migration/i386/a-b-bootblock.S

@@ -3,10 +3,6 @@
# range.
# Outputs an initial 'A' on serial followed by repeated 'B's
#
# run tests/migration/rebuild-x86-bootblock.sh
# to regenerate the hex, and remember to include both the .h and .s
# in any patches.
#
# Copyright (c) 2016 Red Hat, Inc. and/or its affiliates
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.

tests/migration/i386/a-b-bootblock.h

@@ -1,7 +1,7 @@
/* This file is automatically generated from
* tests/migration/x86-a-b-bootblock.s, edit that and then run
* tests/migration/rebuild-x86-bootblock.sh to update,
* and then remember to send both in your patch submission.
/* This file is automatically generated from the assembly file in
* tests/migration/i386. Edit that file and then run "make all"
* inside tests/migration to update, and then remember to send both
* the header and the assembler differences in your patch submission.
*/
unsigned char x86_bootsect[] = {
0xfa, 0x0f, 0x01, 0x16, 0x74, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00,

tests/migration/migration-test.h

@@ -0,0 +1,21 @@
/*
* Copyright (c) 2018 Red Hat, Inc. and/or its affiliates
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef _TEST_MIGRATION_H_
#define _TEST_MIGRATION_H_
/* Common */
#define TEST_MEM_PAGE_SIZE 4096
/* x86 */
#define X86_TEST_MEM_START (1 * 1024 * 1024)
#define X86_TEST_MEM_END (100 * 1024 * 1024)
/* PPC */
#define PPC_TEST_MEM_START (1 * 1024 * 1024)
#define PPC_TEST_MEM_END (100 * 1024 * 1024)
#endif /* _TEST_MIGRATION_H_ */

tests/migration/rebuild-x86-bootblock.sh

@@ -1,33 +0,0 @@
#!/bin/sh
# Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
# Author: dgilbert@redhat.com
ASMFILE=$PWD/tests/migration/x86-a-b-bootblock.s
HEADER=$PWD/tests/migration/x86-a-b-bootblock.h
if [ ! -e "$ASMFILE" ]
then
echo "Couldn't find $ASMFILE" >&2
exit 1
fi
ASM_WORK_DIR=$(mktemp -d --tmpdir X86BB.XXXXXX)
cd "$ASM_WORK_DIR" &&
as --32 -march=i486 "$ASMFILE" -o x86.o &&
objcopy -O binary x86.o x86.boot &&
dd if=x86.boot of=x86.bootsect bs=256 count=2 skip=124 &&
xxd -i x86.bootsect |
sed -e 's/.*int.*//' > x86.hex &&
cat - x86.hex <<HERE > "$HEADER"
/* This file is automatically generated from
* tests/migration/x86-a-b-bootblock.s, edit that and then run
* tests/migration/rebuild-x86-bootblock.sh to update,
* and then remember to send both in your patch submission.
*/
HERE
rm x86.hex x86.bootsect x86.boot x86.o
cd .. && rmdir "$ASM_WORK_DIR"

vl.c

@@ -4530,6 +4530,7 @@ int main(int argc, char **argv, char **envp)
if (load_snapshot(loadvm, &local_err) < 0) {
error_report_err(local_err);
autostart = 0;
exit(1);
}
}