Mirror of https://github.com/qemu/qemu.git (synced 2024-11-23 10:53:37 +08:00)
Migration pull request
Merge tag 'migration-20241009-pull-request' of https://gitlab.com/peterx/qemu into staging

Migration pull request

- Ani's patch to complete the memory API on coalesced IO / eventfd notifies
- Fabiano's Coverity fix on using pstrcpy() over strncpy()
- Dave's series on removing/deprecating zero-blocks and uffd cleanups
- Juraj's one more fix on multifd/cancel test where it can fail when
  cancellation happens too slow on src
- Dave's one more remove deadcode patch in iova-tree.c
- Yuan's build fix for multifd qpl compressor

# -----BEGIN PGP SIGNATURE-----
#
# iIgEABYKADAWIQS5GE3CDMRX2s990ak7X8zN86vXBgUCZwZ6CBIccGV0ZXJ4QHJl
# ZGhhdC5jb20ACgkQO1/MzfOr1wa3ZwD9HiAN9m7WOfZxXKOVIIwhOjUNTw0FiFeO
# HMxp8A2jeYsBAK+d5lYGX1V2FtQ152YiOJQzRW31MkdAOishJzcHCXgO
# =gBW0
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 09 Oct 2024 13:41:44 BST
# gpg:                using EDDSA key B9184DC20CC457DACF7DD1A93B5FCCCDF3ABD706
# gpg:                issuer "peterx@redhat.com"
# gpg: Good signature from "Peter Xu <xzpeter@gmail.com>" [marginal]
# gpg:                 aka "Peter Xu <peterx@redhat.com>" [marginal]
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg:          It is not certain that the signature belongs to the owner.
# Primary key fingerprint: B918 4DC2 0CC4 57DA CF7D D1A9 3B5F CCCD F3AB D706

* tag 'migration-20241009-pull-request' of https://gitlab.com/peterx/qemu:
  migration/multifd: fix build error when qpl compression is enabled
  util/iova-tree: Remove deadcode
  tests/migration-test: Wait for cancellation sooner in multifd cancel
  util/userfaultfd: Remove unused uffd_poll_events
  migration/postcopy: Use uffd helpers
  util/userfaultfd: Return -errno on error
  migration: Remove unused socket_send_channel_create_sync
  migration: Deprecate zero-blocks capability
  migration: Remove unused migrate_zero_blocks
  migration: Remove migrate_cap_set
  migration/multifd: Ensure packet->ramblock is null-terminated
  memory: notify hypervisor of all eventfds during listener (de)registration

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in: commit 05adb38839
@@ -473,3 +473,9 @@ usage of providing a file descriptor to a plain file has been
 deprecated in favor of explicitly using the ``file:`` URI with the
 file descriptor being passed as an ``fdset``. Refer to the ``add-fd``
 command documentation for details on the ``fdset`` usage.
+
+``zero-blocks`` capability (since 9.2)
+''''''''''''''''''''''''''''''''''''''
+
+The ``zero-blocks`` capability was part of the block migration which
+doesn't exist anymore since it was removed in QEMU v9.1.
@@ -111,31 +111,6 @@ const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map);
  */
 const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map);
 
-/**
- * iova_tree_find_address:
- *
- * @tree: the iova tree to search from
- * @iova: the iova address to find
- *
- * Similar to iova_tree_find(), but it tries to find mapping with
- * range iova=iova & size=0.
- *
- * Return: same as iova_tree_find().
- */
-const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova);
-
-/**
- * iova_tree_foreach:
- *
- * @tree: the iova tree to iterate on
- * @iterator: the iterator for the mappings, return true to stop
- *
- * Iterate over the iova tree.
- *
- * Return: 1 if found any overlap, 0 if not, <0 if error.
- */
-void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator);
-
 /**
  * iova_tree_alloc_map:
  *
@@ -39,7 +39,6 @@ int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr,
 int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake);
 int uffd_wakeup(int uffd_fd, void *addr, uint64_t length);
 int uffd_read_events(int uffd_fd, struct uffd_msg *msgs, int count);
-bool uffd_poll_events(int uffd_fd, int tmo);
 
 #endif /* CONFIG_LINUX */
 
@@ -17,6 +17,7 @@
 #include "multifd.h"
 #include "options.h"
 #include "qapi/error.h"
+#include "qemu/cutils.h"
 #include "qemu/error-report.h"
 #include "trace.h"
 
@@ -201,7 +202,8 @@ void multifd_ram_fill_packet(MultiFDSendParams *p)
     packet->zero_pages = cpu_to_be32(zero_num);
 
     if (pages->block) {
-        strncpy(packet->ramblock, pages->block->idstr, 256);
+        pstrcpy(packet->ramblock, sizeof(packet->ramblock),
+                pages->block->idstr);
     }
 
     for (int i = 0; i < pages->num; i++) {
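
Background on the fix above: strncpy() does not NUL-terminate the destination when the source string fills the buffer, which is the defect Coverity flagged; QEMU's pstrcpy() (from cutils) always terminates, truncating if needed. A standalone sketch of the difference, where pstrcpy_like() is a stand-in with the same contract, not QEMU's implementation:

#include <stdio.h>
#include <string.h>

/* Stand-in with the same contract as QEMU's pstrcpy(): always
 * NUL-terminates the destination, truncating the source if needed. */
static void pstrcpy_like(char *buf, int buf_size, const char *str)
{
    if (buf_size <= 0) {
        return;
    }
    strncpy(buf, str, buf_size - 1);
    buf[buf_size - 1] = '\0';
}

int main(void)
{
    char a[8], b[8];
    const char *src = "0123456789";  /* longer than the buffers */

    strncpy(a, src, sizeof(a));      /* fills all 8 bytes, no '\0' written */
    pstrcpy_like(b, sizeof(b), src); /* "0123456", always terminated */

    printf("b = \"%s\"\n", b);       /* reading a as a string would overrun */
    return 0;
}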
@@ -389,7 +389,7 @@ static void multifd_qpl_compress_pages_slow_path(MultiFDSendParams *p)
 {
     QplData *qpl = p->compress_data;
     MultiFDPages_t *pages = &p->data->u.ram;
-    uint32_t size = p->page_size;
+    uint32_t size = multifd_ram_page_size();
     qpl_job *job = qpl->sw_job;
     uint8_t *zbuf = qpl->zbuf;
     uint8_t *buf;
@@ -420,7 +420,7 @@ static void multifd_qpl_compress_pages(MultiFDSendParams *p)
 {
     QplData *qpl = p->compress_data;
     MultiFDPages_t *pages = &p->data->u.ram;
-    uint32_t size = p->page_size;
+    uint32_t size = multifd_ram_page_size();
     QplHwJob *hw_job;
     uint8_t *buf;
     uint8_t *zbuf;
@@ -560,7 +560,7 @@ static int multifd_qpl_decompress_pages_slow_path(MultiFDRecvParams *p,
                                                   Error **errp)
 {
     QplData *qpl = p->compress_data;
-    uint32_t size = p->page_size;
+    uint32_t size = multifd_ram_page_size();
     qpl_job *job = qpl->sw_job;
     uint8_t *zbuf = qpl->zbuf;
     uint8_t *addr;
@@ -598,7 +598,7 @@ static int multifd_qpl_decompress_pages_slow_path(MultiFDRecvParams *p,
 static int multifd_qpl_decompress_pages(MultiFDRecvParams *p, Error **errp)
 {
     QplData *qpl = p->compress_data;
-    uint32_t size = p->page_size;
+    uint32_t size = multifd_ram_page_size();
     uint8_t *zbuf = qpl->zbuf;
     uint8_t *addr;
     uint32_t len;
@@ -677,7 +677,7 @@ static int multifd_qpl_recv(MultiFDRecvParams *p, Error **errp)
     }
     for (int i = 0; i < p->normal_num; i++) {
         qpl->zlen[i] = be32_to_cpu(qpl->zlen[i]);
-        assert(qpl->zlen[i] <= p->page_size);
+        assert(qpl->zlen[i] <= multifd_ram_page_size());
         zbuf_len += qpl->zlen[i];
     }
 
@@ -339,13 +339,6 @@ bool migrate_xbzrle(void)
     return s->capabilities[MIGRATION_CAPABILITY_XBZRLE];
 }
 
-bool migrate_zero_blocks(void)
-{
-    MigrationState *s = migrate_get_current();
-
-    return s->capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
-}
-
 bool migrate_zero_copy_send(void)
 {
     MigrationState *s = migrate_get_current();
@@ -457,6 +450,10 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
     ERRP_GUARD();
     MigrationIncomingState *mis = migration_incoming_get_current();
 
+    if (new_caps[MIGRATION_CAPABILITY_ZERO_BLOCKS]) {
+        warn_report("zero-blocks capability is deprecated");
+    }
+
 #ifndef CONFIG_REPLICATION
     if (new_caps[MIGRATION_CAPABILITY_X_COLO]) {
         error_setg(errp, "QEMU compiled without replication module"
@@ -605,26 +602,6 @@ bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp)
     return true;
 }
 
-bool migrate_cap_set(int cap, bool value, Error **errp)
-{
-    MigrationState *s = migrate_get_current();
-    bool new_caps[MIGRATION_CAPABILITY__MAX];
-
-    if (migration_is_running()) {
-        error_setg(errp, "There's a migration process in progress");
-        return false;
-    }
-
-    memcpy(new_caps, s->capabilities, sizeof(new_caps));
-    new_caps[cap] = value;
-
-    if (!migrate_caps_check(s->capabilities, new_caps, errp)) {
-        return false;
-    }
-    s->capabilities[cap] = value;
-    return true;
-}
-
 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
 {
     MigrationCapabilityStatusList *head = NULL, **tail = &head;
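
The removed helper followed a copy-validate-commit pattern: build a candidate capability array, run it through migrate_caps_check(), and only commit to the live state if validation passes. A minimal standalone sketch of that pattern (CAP_MAX, caps_check() and the validation rule are illustrative, not QEMU's):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define CAP_MAX 8

static bool current[CAP_MAX];

/* Illustrative validator: reject enabling caps 0 and 1 together. */
static bool caps_check(const bool *new_caps)
{
    return !(new_caps[0] && new_caps[1]);
}

static bool cap_set(int cap, bool value)
{
    bool new_caps[CAP_MAX];

    /* copy the live state into a scratch array */
    memcpy(new_caps, current, sizeof(new_caps));
    new_caps[cap] = value;

    /* validate: on failure the live state stays untouched */
    if (!caps_check(new_caps)) {
        return false;
    }

    /* commit only after validation succeeded */
    current[cap] = value;
    return true;
}

int main(void)
{
    printf("%d %d\n", cap_set(0, true), cap_set(1, true)); /* prints 1 0 */
    return 0;
}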
@@ -40,7 +40,6 @@ bool migrate_release_ram(void);
 bool migrate_return_path(void);
 bool migrate_validate_uuid(void);
 bool migrate_xbzrle(void);
-bool migrate_zero_blocks(void);
 bool migrate_zero_copy_send(void);
 
 /*
@@ -58,7 +57,6 @@ bool migrate_tls(void);
 /* capabilities helpers */
 
 bool migrate_caps_check(bool *old_caps, bool *new_caps, Error **errp);
-bool migrate_cap_set(int cap, bool value, Error **errp);
 
 /* parameters */
 
@@ -746,18 +746,10 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
                          RAMBlock *rb)
 {
     size_t pagesize = qemu_ram_pagesize(rb);
-    struct uffdio_range range;
-    int ret;
     trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
-    range.start = ROUND_DOWN(client_addr, pagesize);
-    range.len = pagesize;
-    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
-    if (ret) {
-        error_report("%s: Failed to wake: %zx in %s (%s)",
-                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
-                     strerror(errno));
-    }
-    return ret;
+    return uffd_wakeup(pcfd->fd,
+                       (void *)(uintptr_t)ROUND_DOWN(client_addr, pagesize),
+                       pagesize);
 }
 
 static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
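
The rewritten call still page-aligns the faulting address with ROUND_DOWN() before waking, since UFFDIO_WAKE operates on whole pages. For power-of-two page sizes, rounding down is a single mask; a minimal sketch (not QEMU's ROUND_DOWN macro, which also handles type promotion):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Align addr down to a power-of-two page size. */
static uintptr_t round_down(uintptr_t addr, uintptr_t pagesize)
{
    assert((pagesize & (pagesize - 1)) == 0); /* must be a power of two */
    return addr & ~(pagesize - 1);
}

int main(void)
{
    /* 0x1234 with 4 KiB pages -> 0x1000 */
    printf("0x%lx\n", (unsigned long)round_down(0x1234, 4096));
    return 0;
}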
@@ -1275,18 +1267,10 @@ static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr,
     int ret;
 
     if (from_addr) {
-        struct uffdio_copy copy_struct;
-        copy_struct.dst = (uint64_t)(uintptr_t)host_addr;
-        copy_struct.src = (uint64_t)(uintptr_t)from_addr;
-        copy_struct.len = pagesize;
-        copy_struct.mode = 0;
-        ret = ioctl(userfault_fd, UFFDIO_COPY, &copy_struct);
+        ret = uffd_copy_page(userfault_fd, host_addr, from_addr, pagesize,
+                             false);
     } else {
-        struct uffdio_zeropage zero_struct;
-        zero_struct.range.start = (uint64_t)(uintptr_t)host_addr;
-        zero_struct.range.len = pagesize;
-        zero_struct.mode = 0;
-        ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct);
+        ret = uffd_zero_page(userfault_fd, host_addr, pagesize, false);
     }
     if (!ret) {
         qemu_mutex_lock(&mis->page_request_mutex);
@@ -1343,18 +1327,16 @@ int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
                         RAMBlock *rb)
 {
     size_t pagesize = qemu_ram_pagesize(rb);
+    int e;
 
     /* copy also acks to the kernel waking the stalled thread up
      * TODO: We can inhibit that ack and only do it if it was requested
      * which would be slightly cheaper, but we'd have to be careful
      * of the order of updating our page state.
      */
-    if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) {
-        int e = errno;
-        error_report("%s: %s copy host: %p from: %p (size: %zd)",
-                     __func__, strerror(e), host, from, pagesize);
-
-        return -e;
+    e = qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb);
+    if (e) {
+        return e;
     }
 
     trace_postcopy_place_page(host);
@@ -1376,12 +1358,10 @@ int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
      * but it's not available for everything (e.g. hugetlbpages)
      */
     if (qemu_ram_is_uf_zeroable(rb)) {
-        if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) {
-            int e = errno;
-            error_report("%s: %s zero host: %p",
-                         __func__, strerror(e), host);
-
-            return -e;
+        int e;
+        e = qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb);
+        if (e) {
+            return e;
         }
         return postcopy_notify_shared_wake(rb,
                                            qemu_ram_block_host_offset(rb,
@@ -42,24 +42,6 @@ void socket_send_channel_create(QIOTaskFunc f, void *data)
                                      f, data, NULL, NULL);
 }
 
-QIOChannel *socket_send_channel_create_sync(Error **errp)
-{
-    QIOChannelSocket *sioc = qio_channel_socket_new();
-
-    if (!outgoing_args.saddr) {
-        object_unref(OBJECT(sioc));
-        error_setg(errp, "Initial sock address not set!");
-        return NULL;
-    }
-
-    if (qio_channel_socket_connect_sync(sioc, outgoing_args.saddr, errp) < 0) {
-        object_unref(OBJECT(sioc));
-        return NULL;
-    }
-
-    return QIO_CHANNEL(sioc);
-}
-
 struct SocketConnectData {
     MigrationState *s;
     char *hostname;
@@ -22,7 +22,6 @@
 #include "qemu/sockets.h"
 
 void socket_send_channel_create(QIOTaskFunc f, void *data);
-QIOChannel *socket_send_channel_create_sync(Error **errp);
 
 void socket_start_incoming_migration(SocketAddress *saddr, Error **errp);
 
@@ -479,11 +479,14 @@
 # Features:
 #
 # @unstable: Members @x-colo and @x-ignore-shared are experimental.
+# @deprecated: Member @zero-blocks is deprecated as being part of
+#     block migration which was already removed.
 #
 # Since: 1.2
 ##
 { 'enum': 'MigrationCapability',
-  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks',
+  'data': ['xbzrle', 'rdma-pin-all', 'auto-converge',
+           { 'name': 'zero-blocks', 'features': [ 'deprecated' ] },
            'events', 'postcopy-ram',
            { 'name': 'x-colo', 'features': [ 'unstable' ] },
            'release-ram',
@@ -941,6 +941,38 @@ static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
     }
 }
 
+static void
+flat_range_coalesced_io_notify_listener_add_del(FlatRange *fr,
+                                                MemoryRegionSection *mrs,
+                                                MemoryListener *listener,
+                                                AddressSpace *as, bool add)
+{
+    CoalescedMemoryRange *cmr;
+    MemoryRegion *mr = fr->mr;
+    AddrRange tmp;
+
+    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
+        tmp = addrrange_shift(cmr->addr,
+                              int128_sub(fr->addr.start,
+                                         int128_make64(fr->offset_in_region)));
+
+        if (!addrrange_intersects(tmp, fr->addr)) {
+            return;
+        }
+        tmp = addrrange_intersection(tmp, fr->addr);
+
+        if (add && listener->coalesced_io_add) {
+            listener->coalesced_io_add(listener, mrs,
+                                       int128_get64(tmp.start),
+                                       int128_get64(tmp.size));
+        } else if (!add && listener->coalesced_io_del) {
+            listener->coalesced_io_del(listener, mrs,
+                                       int128_get64(tmp.start),
+                                       int128_get64(tmp.size));
+        }
+    }
+}
+
 static void address_space_update_topology_pass(AddressSpace *as,
                                                const FlatView *old_view,
                                                const FlatView *new_view,
@@ -3015,8 +3047,10 @@ void memory_global_dirty_log_stop(unsigned int flags)
 static void listener_add_address_space(MemoryListener *listener,
                                        AddressSpace *as)
 {
+    unsigned i;
     FlatView *view;
     FlatRange *fr;
+    MemoryRegionIoeventfd *fd;
 
     if (listener->begin) {
         listener->begin(listener);
@@ -3041,10 +3075,34 @@ static void listener_add_address_space(MemoryListener *listener,
         if (listener->region_add) {
             listener->region_add(listener, &section);
         }
+
+        /* send coalesced io add notifications */
+        flat_range_coalesced_io_notify_listener_add_del(fr, &section,
+                                                        listener, as, true);
+
         if (fr->dirty_log_mask && listener->log_start) {
             listener->log_start(listener, &section, 0, fr->dirty_log_mask);
         }
     }
+
+    /*
+     * register all eventfds for this address space for the newly registered
+     * listener.
+     */
+    for (i = 0; i < as->ioeventfd_nb; i++) {
+        fd = &as->ioeventfds[i];
+        MemoryRegionSection section = (MemoryRegionSection) {
+            .fv = view,
+            .offset_within_address_space = int128_get64(fd->addr.start),
+            .size = fd->addr.size,
+        };
+
+        if (listener->eventfd_add) {
+            listener->eventfd_add(listener, &section,
+                                  fd->match_data, fd->data, fd->e);
+        }
+    }
+
     if (listener->commit) {
         listener->commit(listener);
     }
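
The point of the added loop is that a listener registered after eventfds already exist must be told about all of them up front, not only about future changes. A standalone sketch of that replay-on-register pattern (the Listener and Registry types here are illustrative, not QEMU's MemoryListener API):

#include <stdio.h>

typedef struct Listener {
    void (*eventfd_add)(struct Listener *l, int fd);
} Listener;

#define MAX_FDS 4
#define MAX_LISTENERS 4

typedef struct Registry {
    int fds[MAX_FDS];
    int nfds;
    Listener *listeners[MAX_LISTENERS];
    int nlisteners;
} Registry;

static void registry_add_listener(Registry *r, Listener *l)
{
    r->listeners[r->nlisteners++] = l;
    /* Replay existing state so the new listener does not miss
     * eventfds registered before it attached. */
    for (int i = 0; i < r->nfds; i++) {
        if (l->eventfd_add) {
            l->eventfd_add(l, r->fds[i]);
        }
    }
}

static void print_add(Listener *l, int fd)
{
    (void)l;
    printf("eventfd_add: %d\n", fd);
}

int main(void)
{
    Registry r = { .fds = {10, 11}, .nfds = 2 };
    Listener l = { .eventfd_add = print_add };

    registry_add_listener(&r, &l); /* prints 10 and 11 immediately */
    return 0;
}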
@@ -3054,8 +3112,10 @@ static void listener_add_address_space(MemoryListener *listener,
 static void listener_del_address_space(MemoryListener *listener,
                                        AddressSpace *as)
 {
+    unsigned i;
     FlatView *view;
     FlatRange *fr;
+    MemoryRegionIoeventfd *fd;
 
     if (listener->begin) {
         listener->begin(listener);
@@ -3067,10 +3127,33 @@ static void listener_del_address_space(MemoryListener *listener,
         if (fr->dirty_log_mask && listener->log_stop) {
             listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
         }
+
+        /* send coalesced io del notifications */
+        flat_range_coalesced_io_notify_listener_add_del(fr, &section,
+                                                        listener, as, false);
         if (listener->region_del) {
             listener->region_del(listener, &section);
         }
     }
+
+    /*
+     * de-register all eventfds for this address space for the current
+     * listener.
+     */
+    for (i = 0; i < as->ioeventfd_nb; i++) {
+        fd = &as->ioeventfds[i];
+        MemoryRegionSection section = (MemoryRegionSection) {
+            .fv = view,
+            .offset_within_address_space = int128_get64(fd->addr.start),
+            .size = fd->addr.size,
+        };
+
+        if (listener->eventfd_del) {
+            listener->eventfd_del(listener, &section,
+                                  fd->match_data, fd->data, fd->e);
+        }
+    }
+
     if (listener->commit) {
         listener->commit(listener);
     }
@@ -3267,6 +3267,16 @@ static void test_multifd_tcp_cancel(void)
     qtest_wait_qemu(to);
     qtest_quit(to);
 
+    /*
+     * Ensure the source QEMU finishes its cancellation process before we
+     * proceed with the setup of the next migration. The test_migrate_start()
+     * function and others might want to interact with the source in a way that
+     * is not possible while the migration is not canceled properly. For
+     * example, setting migration capabilities when the migration is still
+     * running leads to an error.
+     */
+    wait_for_migration_status(from, "cancelled", NULL);
+
     args = (MigrateStart){
         .only_target = true,
     };
@@ -3282,8 +3292,6 @@ static void test_multifd_tcp_cancel(void)
     /* Start incoming migration from the 1st socket */
     migrate_incoming_qmp(to2, "tcp:127.0.0.1:0", "{}");
 
-    wait_for_migration_status(from, "cancelled", NULL);
-
     migrate_ensure_non_converge(from);
 
     migrate_qmp(from, to2, NULL, NULL, "{}");
@@ -115,13 +115,6 @@ const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map)
     return args.result;
 }
 
-const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova)
-{
-    const DMAMap map = { .iova = iova, .size = 0 };
-
-    return iova_tree_find(tree, &map);
-}
-
 static inline void iova_tree_insert_internal(GTree *gtree, DMAMap *range)
 {
     /* Key and value are sharing the same range data */
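
The removed wrapper shows the idiom any remaining caller could use directly: a point lookup is just a range lookup with size 0. A standalone sketch with an illustrative DMAMap-like struct and linear search (QEMU's real tree is a GTree keyed by range):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Map {
    uint64_t iova;
    uint64_t size; /* size 0 queries a single address */
} Map;

/* Does query m overlap entry e? (inclusive-end convention) */
static int overlaps(const Map *e, const Map *m)
{
    return m->iova <= e->iova + e->size && e->iova <= m->iova + m->size;
}

static const Map *find(const Map *entries, size_t n, const Map *m)
{
    for (size_t i = 0; i < n; i++) {
        if (overlaps(&entries[i], m)) {
            return &entries[i];
        }
    }
    return NULL;
}

int main(void)
{
    const Map entries[] = { { .iova = 0x1000, .size = 0xfff } };
    /* Point query: equivalent of the removed iova_tree_find_address() */
    const Map key = { .iova = 0x1800, .size = 0 };

    printf("%s\n", find(entries, 1, &key) ? "found" : "not found");
    return 0;
}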
@@ -148,22 +141,6 @@ int iova_tree_insert(IOVATree *tree, const DMAMap *map)
     return IOVA_OK;
 }
 
-static gboolean iova_tree_traverse(gpointer key, gpointer value,
-                                   gpointer data)
-{
-    iova_tree_iterator iterator = data;
-    DMAMap *map = key;
-
-    g_assert(key == value);
-
-    return iterator(map);
-}
-
-void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator)
-{
-    g_tree_foreach(tree->tree, iova_tree_traverse, iterator);
-}
-
 void iova_tree_remove(IOVATree *tree, DMAMap map)
 {
     const DMAMap *overlap;
@@ -240,7 +240,7 @@ int uffd_change_protection(int uffd_fd, void *addr, uint64_t length,
  * Copy range of source pages to the destination to resolve
  * missing page fault somewhere in the destination range.
  *
- * Returns 0 on success, negative value in case of an error
+ * Returns 0 on success, -errno in case of an error
  *
  * @uffd_fd: UFFD file descriptor
  * @dst_addr: destination base address
@@ -259,10 +259,11 @@ int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr,
     uffd_copy.mode = dont_wake ? UFFDIO_COPY_MODE_DONTWAKE : 0;
 
     if (ioctl(uffd_fd, UFFDIO_COPY, &uffd_copy)) {
+        int e = errno;
         error_report("uffd_copy_page() failed: dst_addr=%p src_addr=%p length=%" PRIu64
                      " mode=%" PRIx64 " errno=%i", dst_addr, src_addr,
-                     length, (uint64_t) uffd_copy.mode, errno);
-        return -1;
+                     length, (uint64_t) uffd_copy.mode, e);
+        return -e;
     }
 
     return 0;
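
The -errno convention used above lets callers propagate the exact failure code without a separate errno read, at the cost of snapshotting errno before any intervening call (such as error_report()) that could clobber it. A standalone sketch of the same pattern around a plain syscall:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Returns 0 on success, -errno in case of an error. */
static int close_checked(int fd)
{
    if (close(fd) != 0) {
        int e = errno; /* snapshot before any clobbering call */
        fprintf(stderr, "close(%d) failed: %s\n", fd, strerror(e));
        return -e;
    }
    return 0;
}

int main(void)
{
    int ret = close_checked(-1); /* invalid fd -> -EBADF */
    printf("ret = %d (%s)\n", ret, strerror(-ret));
    return 0;
}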
@@ -273,7 +274,7 @@ int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr,
  *
  * Fill range pages with zeroes to resolve missing page fault within the range.
  *
- * Returns 0 on success, negative value in case of an error
+ * Returns 0 on success, -errno in case of an error
  *
  * @uffd_fd: UFFD file descriptor
  * @addr: base address
@@ -289,10 +290,11 @@ int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake)
     uffd_zeropage.mode = dont_wake ? UFFDIO_ZEROPAGE_MODE_DONTWAKE : 0;
 
     if (ioctl(uffd_fd, UFFDIO_ZEROPAGE, &uffd_zeropage)) {
+        int e = errno;
         error_report("uffd_zero_page() failed: addr=%p length=%" PRIu64
                      " mode=%" PRIx64 " errno=%i", addr, length,
-                     (uint64_t) uffd_zeropage.mode, errno);
-        return -1;
+                     (uint64_t) uffd_zeropage.mode, e);
+        return -e;
     }
 
     return 0;
@@ -306,7 +308,7 @@ int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake)
  * via UFFD-IO IOCTLs with MODE_DONTWAKE flag set, then after that all waits
  * for the whole memory range are satisfied in a single call to uffd_wakeup().
  *
- * Returns 0 on success, negative value in case of an error
+ * Returns 0 on success, -errno in case of an error
 *
 * @uffd_fd: UFFD file descriptor
 * @addr: base address
@@ -320,9 +322,10 @@ int uffd_wakeup(int uffd_fd, void *addr, uint64_t length)
     uffd_range.len = length;
 
     if (ioctl(uffd_fd, UFFDIO_WAKE, &uffd_range)) {
+        int e = errno;
         error_report("uffd_wakeup() failed: addr=%p length=%" PRIu64 " errno=%i",
-                     addr, length, errno);
-        return -1;
+                     addr, length, e);
+        return -e;
     }
 
     return 0;
@@ -355,31 +358,3 @@ int uffd_read_events(int uffd_fd, struct uffd_msg *msgs, int count)
 
     return (int) (res / sizeof(struct uffd_msg));
 }
-
-/**
- * uffd_poll_events: poll UFFD file descriptor for read
- *
- * Returns true if events are available for read, false otherwise
- *
- * @uffd_fd: UFFD file descriptor
- * @tmo: timeout value
- */
-bool uffd_poll_events(int uffd_fd, int tmo)
-{
-    int res;
-    struct pollfd poll_fd = { .fd = uffd_fd, .events = POLLIN, .revents = 0 };
-
-    do {
-        res = poll(&poll_fd, 1, tmo);
-    } while (res < 0 && errno == EINTR);
-
-    if (res == 0) {
-        return false;
-    }
-    if (res < 0) {
-        error_report("uffd_poll_events() failed: errno=%i", errno);
-        return false;
-    }
-
-    return (poll_fd.revents & POLLIN) != 0;
-}