vfio/type1: massage unmap iteration

Modify the iteration in vfio_dma_do_unmap so it does not depend on deletion
of each dma entry.  Add a variant of vfio_find_dma that returns the entry
with the lowest iova in the search range to initialize the iteration.  No
externally visible change, but this behavior is needed in the subsequent
update-vaddr patch.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
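
For illustration, the helper added below (vfio_find_dma_first_node) descends
the rb-tree the way a range query on any binary search tree does: record the
current node as a candidate whenever it ends past the range start, then keep
searching left for a lower-iova match. A minimal userspace sketch of the same
descend rule on a plain (unbalanced) BST follows; struct node, first_node(),
and the toy values are hypothetical stand-ins for vfio_dma and the rb-tree,
not the kernel code:

#include <stdio.h>
#include <stddef.h>

struct node {
	unsigned long iova, size;
	struct node *left, *right;
};

/* Lowest-iova entry overlapping [start, start + size), or NULL. */
static struct node *first_node(struct node *n,
			       unsigned long start, unsigned long size)
{
	struct node *res = NULL;

	while (n) {
		if (start < n->iova + n->size) {
			res = n;	/* ends past start: candidate */
			if (start >= n->iova)
				break;	/* start falls inside it: lowest match */
			n = n->left;	/* a lower-iova match may exist */
		} else {
			n = n->right;	/* entirely below start */
		}
	}
	/* the lowest candidate may still begin at or past the range end */
	if (res && size && res->iova >= start + size)
		res = NULL;
	return res;
}

int main(void)
{
	/* mappings at 0x1000, 0x3000, 0x5000, each 0x1000 long */
	struct node a = { 0x1000, 0x1000, NULL, NULL };
	struct node c = { 0x5000, 0x1000, NULL, NULL };
	struct node b = { 0x3000, 0x1000, &a, &c };
	struct node *n = first_node(&b, 0x2000, 0x4000);

	printf("first match: 0x%lx\n", n ? n->iova : 0UL);	/* 0x3000 */
	return 0;
}

Recording the candidate and continuing left, rather than stopping at the
first overlap, is what yields the lowest iova in the range, so the caller
can then walk strictly forward with rb_next().
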
@@ -173,6 +173,31 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
 	return NULL;
 }
 
+static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
+						dma_addr_t start, size_t size)
+{
+	struct rb_node *res = NULL;
+	struct rb_node *node = iommu->dma_list.rb_node;
+	struct vfio_dma *dma_res = NULL;
+
+	while (node) {
+		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
+
+		if (start < dma->iova + dma->size) {
+			res = node;
+			dma_res = dma;
+			if (start >= dma->iova)
+				break;
+			node = node->rb_left;
+		} else {
+			node = node->rb_right;
+		}
+	}
+	if (res && size && dma_res->iova >= start + size)
+		res = NULL;
+	return res;
+}
+
 static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
 {
 	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
@@ -1079,6 +1104,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 	dma_addr_t iova = unmap->iova;
 	unsigned long size = unmap->size;
 	bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL;
+	struct rb_node *n;
 
 	mutex_lock(&iommu->lock);
 
@@ -1149,7 +1175,13 @@ again:
 	}
 
 	ret = 0;
-	while ((dma = vfio_find_dma(iommu, iova, size))) {
+	n = vfio_find_dma_first_node(iommu, iova, size);
+
+	while (n) {
+		dma = rb_entry(n, struct vfio_dma, node);
+		if (dma->iova >= iova + size)
+			break;
+
 		if (!iommu->v2 && iova > dma->iova)
 			break;
 		/*
@@ -1194,6 +1226,7 @@ again:
 		}
 
 		unmapped += dma->size;
+		n = rb_next(n);
 		vfio_remove_dma(iommu, dma);
 	}
 
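
The rb_next(n) call before vfio_remove_dma() is what decouples the loop from
deletion: the successor is captured while the current node is still linked.
Below is a self-contained sketch of the same advance-before-delete pattern on
a sorted singly linked list; struct range, first_in_range(), unmap_range(),
and the values in main() are hypothetical, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned long iova, size;
	struct range *next;	/* stand-in for the rb-tree linkage */
};

/* First (lowest-iova) entry overlapping [start, start + size). */
static struct range *first_in_range(struct range *head,
				    unsigned long start, unsigned long size)
{
	for (; head; head = head->next)
		if (start < head->iova + head->size &&
		    head->iova < start + size)
			return head;
	return NULL;
}

static void unmap_range(struct range **headp,
			unsigned long start, unsigned long size)
{
	struct range *r = first_in_range(*headp, start, size);

	while (r && r->iova < start + size) {
		/* capture the successor first, like rb_next(n) in the
		 * patch, so freeing r no longer drives the iteration */
		struct range *next = r->next;
		struct range **pp = headp;

		while (*pp != r)
			pp = &(*pp)->next;
		*pp = next;	/* unlink, like vfio_remove_dma() */
		free(r);
		r = next;
	}
}

int main(void)
{
	struct range *head = NULL, **pp = &head;
	unsigned long iovas[] = { 0x1000, 0x3000, 0x5000 };

	for (int i = 0; i < 3; i++) {	/* three 0x1000-sized mappings */
		struct range *r = calloc(1, sizeof(*r));
		r->iova = iovas[i];
		r->size = 0x1000;
		*pp = r;
		pp = &r->next;
	}

	unmap_range(&head, 0x3000, 0x3000);	/* removes 0x3000 and 0x5000 */

	while (head) {				/* prints only 0x1000 */
		struct range *next = head->next;
		printf("remaining: 0x%lx\n", head->iova);
		free(head);
		head = next;
	}
	return 0;
}

The old form re-ran vfio_find_dma() from the tree root on every pass and
terminated only because each matching entry was removed; per the commit
message, the follow-on update-vaddr patch needs to walk matching entries
without removing them, which this shape permits.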