Mirror of https://github.com/qemu/qemu.git
Support individual region unmap in libvhost-user
When the VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS protocol feature is enabled, QEMU transmits memory regions to remove individually on memory hot-unplug, using the new VHOST_USER_REM_MEM_REG message. With this change, vhost-user backends built with libvhost-user can unmap individual memory regions when they receive a VHOST_USER_REM_MEM_REG message.

Since QEMU only sends VHOST_USER_REM_MEM_REG messages when the VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS feature has been negotiated, and support for that feature has not yet been added in libvhost-user, this new functionality is not yet used.

Signed-off-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
Message-Id: <1588533678-23450-10-git-send-email-raphael.norwitz@nutanix.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
parent ec94c8e621
commit 875b9fd97b
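For context before the diff, here is a minimal sketch of the region descriptor that VHOST_USER_ADD_MEM_REG and VHOST_USER_REM_MEM_REG carry in their payload. The field names match the ones vu_rem_mem_reg() below reads via vmsg->payload.memreg.region; this is shown for reference and is not itself part of the diff:

/* Sketch of libvhost-user's region descriptor, for reference. Field
 * names match those dereferenced in vu_rem_mem_reg() below. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr; /* GPA where the region starts */
    uint64_t memory_size;     /* region size in bytes */
    uint64_t userspace_addr;  /* QEMU virtual address (the qva field) */
    uint64_t mmap_offset;     /* offset of the region in the mmap'd fd */
} VhostUserMemoryRegion;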
contrib/libvhost-user/libvhost-user.c
@@ -139,6 +139,7 @@ vu_request_to_string(unsigned int req)
         REQ(VHOST_USER_VRING_KICK),
         REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
         REQ(VHOST_USER_ADD_MEM_REG),
+        REQ(VHOST_USER_REM_MEM_REG),
         REQ(VHOST_USER_MAX),
     };
 #undef REQ
@@ -763,6 +764,66 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
     }
 }
 
+static inline bool reg_equal(VuDevRegion *vudev_reg,
+                             VhostUserMemoryRegion *msg_reg)
+{
+    if (vudev_reg->gpa == msg_reg->guest_phys_addr &&
+        vudev_reg->qva == msg_reg->userspace_addr &&
+        vudev_reg->size == msg_reg->memory_size) {
+        return true;
+    }
+
+    return false;
+}
+
+static bool
+vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
+    int i, j;
+    bool found = false;
+    VuDevRegion shadow_regions[VHOST_MEMORY_MAX_NREGIONS] = {};
+    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
+
+    DPRINT("Removing region:\n");
+    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
+           msg_region->guest_phys_addr);
+    DPRINT("    memory_size:     0x%016"PRIx64"\n",
+           msg_region->memory_size);
+    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
+           msg_region->userspace_addr);
+    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
+           msg_region->mmap_offset);
+
+    for (i = 0, j = 0; i < dev->nregions; i++) {
+        if (!reg_equal(&dev->regions[i], msg_region)) {
+            shadow_regions[j].gpa = dev->regions[i].gpa;
+            shadow_regions[j].size = dev->regions[i].size;
+            shadow_regions[j].qva = dev->regions[i].qva;
+            shadow_regions[j].mmap_offset = dev->regions[i].mmap_offset;
+            j++;
+        } else {
+            found = true;
+            VuDevRegion *r = &dev->regions[i];
+            void *m = (void *) (uintptr_t) r->mmap_addr;
+
+            if (m) {
+                munmap(m, r->size + r->mmap_offset);
+            }
+        }
+    }
+
+    if (found) {
+        memcpy(dev->regions, shadow_regions,
+               sizeof(VuDevRegion) * VHOST_MEMORY_MAX_NREGIONS);
+        DPRINT("Successfully removed a region\n");
+        dev->nregions--;
+        vmsg_set_reply_u64(vmsg, 0);
+    } else {
+        vu_panic(dev, "Specified region not found\n");
+    }
+
+    return true;
+}
+
 static bool
 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 {
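One detail worth calling out in vu_rem_mem_reg() above: munmap() is passed r->size + r->mmap_offset because libvhost-user maps each region's fd from file offset 0, so the live mapping covers the mmap_offset prefix as well as the region itself. The sketch below illustrates the resulting address arithmetic; region_gpa_to_va() is a hypothetical name, simplified from the per-region logic of the library's vu_gpa_to_va(), not code from this commit:

/* Hypothetical, simplified per-region GPA-to-VA translation. Because the
 * mapping starts at file offset 0, usable bytes begin mmap_offset into
 * it, and the full mapping is size + mmap_offset bytes long, which is
 * exactly the length vu_rem_mem_reg() hands to munmap(). */
static void *
region_gpa_to_va(VuDevRegion *r, uint64_t guest_addr)
{
    if (guest_addr < r->gpa || guest_addr >= r->gpa + r->size) {
        return NULL; /* address not backed by this region */
    }
    return (void *)(uintptr_t)(r->mmap_addr + r->mmap_offset +
                               (guest_addr - r->gpa));
}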
@@ -1771,6 +1832,8 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
         return vu_handle_get_max_memslots(dev, vmsg);
     case VHOST_USER_ADD_MEM_REG:
         return vu_add_mem_reg(dev, vmsg);
+    case VHOST_USER_REM_MEM_REG:
+        return vu_rem_mem_reg(dev, vmsg);
     default:
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Unhandled request: %d", vmsg->request);
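The return value matters in this dispatch hunk: a handler returning true from vu_process_message() asks the dispatch loop to send vmsg back to the front-end as a reply, which is how the u64 0 set by vmsg_set_reply_u64() in vu_rem_mem_reg() reaches QEMU. A condensed sketch of that flow, with a hypothetical helper name (see vu_dispatch() in libvhost-user.c for the real loop):

/* Hypothetical condensation of the dispatch loop: */
if (vu_process_message(dev, &vmsg)) {
    /* The handler requested a reply; vmsg now holds the response
     * payload, e.g. the u64 0 set by vmsg_set_reply_u64(). */
    send_reply(dev->sock, &vmsg); /* hypothetical helper */
}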
contrib/libvhost-user/libvhost-user.h
@@ -99,6 +99,7 @@ typedef enum VhostUserRequest {
     VHOST_USER_VRING_KICK = 35,
     VHOST_USER_GET_MAX_MEM_SLOTS = 36,
     VHOST_USER_ADD_MEM_REG = 37,
+    VHOST_USER_REM_MEM_REG = 38,
     VHOST_USER_MAX
 } VhostUserRequest;
 