vfio/common: Abort migration if dirty log start/stop/sync fails

If VFIO dirty pages log start/stop/sync fails during migration, migration
should be aborted, as pages dirtied by VFIO devices might not be reported
properly.

This is not the case today: in such a scenario, only an error is printed.

Fix it by aborting migration in the above scenario.

Fixes: 758b96b61d ("vfio/migrate: Move switch of dirty tracking into vfio_memory_listener")
Fixes: b6dd6504e3 ("vfio: Add vfio_listener_log_sync to mark dirty pages")
Fixes: 9e7b0442f2 ("vfio: Add ioctl to get dirty pages bitmap during dma unmap")
Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Link: https://lore.kernel.org/r/20230307125450.62409-4-joao.m.martins@oracle.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
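
The abort mechanism is indirect: a MemoryListener callback cannot return failure, so the VFIO listener latches the error on the outgoing migration stream with qemu_file_set_error(); the migration core later sees the stored error through qemu_file_get_error() and fails the migration instead of completing with a possibly incomplete dirty bitmap. Below is a minimal sketch of that pattern; the helper name vfio_report_dirty_log_error() is hypothetical (the patch itself adds vfio_set_migration_error(), shown in the diff), and the snippet is illustrative rather than code to apply.

    /*
     * Illustrative sketch only: mirrors the vfio_set_migration_error()
     * helper added by this patch. The function name is hypothetical and
     * the real failure handling lives in QEMU's migration core.
     */
    #include "qemu/osdep.h"
    #include "qemu/lockable.h"
    #include "migration/migration.h"
    #include "migration/misc.h"
    #include "migration/qemu-file.h"

    static void vfio_report_dirty_log_error(int err)
    {
        MigrationState *ms = migrate_get_current();

        /* Only a migration that is being set up or is active has a stream. */
        if (migration_is_setup_or_active(ms->state)) {
            WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
                if (ms->to_dst_file) {
                    /*
                     * Latch the error on the outgoing stream; the migration
                     * core's qemu_file_get_error() check will notice it and
                     * fail the migration rather than complete with pages
                     * dirtied by VFIO devices possibly left unreported.
                     */
                    qemu_file_set_error(ms->to_dst_file, err);
                }
            }
        }
    }
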
Author: Avihai Horon, 2023-03-07 12:54:38 +00:00 (committed by Alex Williamson)
parent db9b829b15
commit 236e0a45f5

@@ -42,6 +42,7 @@
 #include "migration/migration.h"
 #include "migration/misc.h"
 #include "migration/blocker.h"
+#include "migration/qemu-file.h"
 #include "sysemu/tpm.h"
 
 VFIOGroupList vfio_group_list =
@@ -390,6 +391,19 @@ void vfio_unblock_multiple_devices_migration(void)
     multiple_devices_migration_blocker = NULL;
 }
 
+static void vfio_set_migration_error(int err)
+{
+    MigrationState *ms = migrate_get_current();
+
+    if (migration_is_setup_or_active(ms->state)) {
+        WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
+            if (ms->to_dst_file) {
+                qemu_file_set_error(ms->to_dst_file, err);
+            }
+        }
+    }
+}
+
 static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
 {
     VFIOGroup *group;
@@ -680,6 +694,7 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     if (iotlb->target_as != &address_space_memory) {
         error_report("Wrong target AS \"%s\", only system memory is allowed",
                      iotlb->target_as->name ? iotlb->target_as->name : "none");
+        vfio_set_migration_error(-EINVAL);
         return;
     }
 
@@ -714,6 +729,7 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
                          "0x%"HWADDR_PRIx") = %d (%s)",
                          container, iova,
                          iotlb->addr_mask + 1, ret, strerror(-ret));
+            vfio_set_migration_error(ret);
         }
     }
 out:
@@ -1259,7 +1275,7 @@ static void vfio_listener_region_del(MemoryListener *listener,
     }
 }
 
-static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
+static int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
 {
     int ret;
     struct vfio_iommu_type1_dirty_bitmap dirty = {
@@ -1267,7 +1283,7 @@ static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
     };
 
     if (!container->dirty_pages_supported) {
-        return;
+        return 0;
     }
 
     if (start) {
@@ -1278,23 +1294,34 @@ static void vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
 
     ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
     if (ret) {
+        ret = -errno;
         error_report("Failed to set dirty tracking flag 0x%x errno: %d",
                      dirty.flags, errno);
     }
+
+    return ret;
 }
 
 static void vfio_listener_log_global_start(MemoryListener *listener)
 {
     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+    int ret;
 
-    vfio_set_dirty_page_tracking(container, true);
+    ret = vfio_set_dirty_page_tracking(container, true);
+    if (ret) {
+        vfio_set_migration_error(ret);
+    }
 }
 
 static void vfio_listener_log_global_stop(MemoryListener *listener)
 {
     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+    int ret;
 
-    vfio_set_dirty_page_tracking(container, false);
+    ret = vfio_set_dirty_page_tracking(container, false);
+    if (ret) {
+        vfio_set_migration_error(ret);
+    }
 }
 
 static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
@@ -1370,19 +1397,18 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     VFIOContainer *container = giommu->container;
     hwaddr iova = iotlb->iova + giommu->iommu_offset;
     ram_addr_t translated_addr;
+    int ret = -EINVAL;
 
     trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
 
     if (iotlb->target_as != &address_space_memory) {
         error_report("Wrong target AS \"%s\", only system memory is allowed",
                      iotlb->target_as->name ? iotlb->target_as->name : "none");
-        return;
+        goto out;
     }
 
     rcu_read_lock();
     if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
-        int ret;
-
         ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
                                     translated_addr);
         if (ret) {
@@ -1393,6 +1419,11 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
         }
     }
     rcu_read_unlock();
+
+out:
+    if (ret) {
+        vfio_set_migration_error(ret);
+    }
 }
 
 static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section,
@@ -1485,13 +1516,19 @@ static void vfio_listener_log_sync(MemoryListener *listener,
                                    MemoryRegionSection *section)
 {
     VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+    int ret;
 
     if (vfio_listener_skipped_section(section)) {
         return;
     }
 
     if (vfio_devices_all_dirty_tracking(container)) {
-        vfio_sync_dirty_bitmap(container, section);
+        ret = vfio_sync_dirty_bitmap(container, section);
+        if (ret) {
+            error_report("vfio: Failed to sync dirty bitmap, err: %d (%s)", ret,
+                         strerror(-ret));
+            vfio_set_migration_error(ret);
+        }
     }
 }