vfio/type1: Adopt fast IOTLB flush interface when unmap IOVAs
VFIO IOMMU type1 currently unmaps IOVA pages synchronously, which requires an IOTLB flush for every unmapping. This results in large IOTLB flushing overhead when a pass-through device has a large number of mapped IOVAs. This can be avoided by using the new IOTLB flushing interface.

Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
[aw - use LIST_HEAD]
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
commit 6bd06f5a48
parent c698ca5278
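For background (this is not part of the patch): the fast interface splits the old all-in-one iommu_unmap() into three steps — iommu_unmap_fast() tears down page-table entries without flushing, iommu_tlb_range_add() queues the affected range, and iommu_tlb_sync() issues a single flush for everything queued. Below is a minimal sketch of that pattern against the 4.16-era IOMMU API; struct iova_range and the unmap_ranges_one_flush() helper are hypothetical, invented here for illustration.

#include <linux/iommu.h>
#include <linux/types.h>

/*
 * Illustrative sketch only, not part of the patch: unmap several IOVA
 * ranges but pay for a single IOTLB flush.  'struct iova_range' and
 * this helper are hypothetical.
 */
struct iova_range {
	unsigned long iova;
	size_t len;
};

static size_t unmap_ranges_one_flush(struct iommu_domain *domain,
				     const struct iova_range *ranges, int nr)
{
	size_t total = 0;
	int i;

	for (i = 0; i < nr; i++) {
		/* Tear down page-table entries; no IOTLB flush yet. */
		size_t unmapped = iommu_unmap_fast(domain, ranges[i].iova,
						   ranges[i].len);
		if (!unmapped)
			break;

		/* Queue the just-unmapped range for the deferred flush. */
		iommu_tlb_range_add(domain, ranges[i].iova, unmapped);
		total += unmapped;
	}

	/* One flush covers every queued range, instead of one per unmap. */
	iommu_tlb_sync(domain);
	return total;
}

The patch applies the same idea inside vfio_unmap_unpin(), with one extra wrinkle: pinned pages may only be unpinned after the flush completes, hence the vfio_regions list that carries each region to the sync point and the VFIO_IOMMU_TLB_SYNC_MAX cap that bounds how much bookkeeping can accumulate between flushes.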
drivers/vfio/vfio_iommu_type1.c

@@ -102,6 +102,13 @@ struct vfio_pfn {
 	atomic_t		ref_count;
 };
 
+struct vfio_regions {
+	struct list_head list;
+	dma_addr_t iova;
+	phys_addr_t phys;
+	size_t len;
+};
+
 #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
 					(!list_empty(&iommu->domain_list))
 
@@ -660,11 +667,102 @@ unpin_exit:
 	return i > npage ? npage : (i > 0 ? i : -EINVAL);
 }
 
+static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
+			    struct list_head *regions)
+{
+	long unlocked = 0;
+	struct vfio_regions *entry, *next;
+
+	iommu_tlb_sync(domain->domain);
+
+	list_for_each_entry_safe(entry, next, regions, list) {
+		unlocked += vfio_unpin_pages_remote(dma,
+						    entry->iova,
+						    entry->phys >> PAGE_SHIFT,
+						    entry->len >> PAGE_SHIFT,
+						    false);
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
+	cond_resched();
+
+	return unlocked;
+}
+
+/*
+ * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
+ * Therefore, when using IOTLB flush sync interface, VFIO need to keep track
+ * of these regions (currently using a list).
+ *
+ * This value specifies maximum number of regions for each IOTLB flush sync.
+ */
+#define VFIO_IOMMU_TLB_SYNC_MAX		512
+
+static size_t unmap_unpin_fast(struct vfio_domain *domain,
+			       struct vfio_dma *dma, dma_addr_t *iova,
+			       size_t len, phys_addr_t phys, long *unlocked,
+			       struct list_head *unmapped_list,
+			       int *unmapped_cnt)
+{
+	size_t unmapped = 0;
+	struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+	if (entry) {
+		unmapped = iommu_unmap_fast(domain->domain, *iova, len);
+
+		if (!unmapped) {
+			kfree(entry);
+		} else {
+			iommu_tlb_range_add(domain->domain, *iova, unmapped);
+			entry->iova = *iova;
+			entry->phys = phys;
+			entry->len = unmapped;
+			list_add_tail(&entry->list, unmapped_list);
+
+			*iova += unmapped;
+			(*unmapped_cnt)++;
+		}
+	}
+
+	/*
+	 * Sync if the number of fast-unmap regions hits the limit
+	 * or in case of errors.
+	 */
+	if (*unmapped_cnt >= VFIO_IOMMU_TLB_SYNC_MAX || !unmapped) {
+		*unlocked += vfio_sync_unpin(dma, domain,
+					     unmapped_list);
+		*unmapped_cnt = 0;
+	}
+
+	return unmapped;
+}
+
+static size_t unmap_unpin_slow(struct vfio_domain *domain,
+			       struct vfio_dma *dma, dma_addr_t *iova,
+			       size_t len, phys_addr_t phys,
+			       long *unlocked)
+{
+	size_t unmapped = iommu_unmap(domain->domain, *iova, len);
+
+	if (unmapped) {
+		*unlocked += vfio_unpin_pages_remote(dma, *iova,
+						     phys >> PAGE_SHIFT,
+						     unmapped >> PAGE_SHIFT,
+						     false);
+		*iova += unmapped;
+		cond_resched();
+	}
+	return unmapped;
+}
+
 static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 			     bool do_accounting)
 {
 	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
 	struct vfio_domain *domain, *d;
+	LIST_HEAD(unmapped_region_list);
+	int unmapped_region_cnt = 0;
 	long unlocked = 0;
 
 	if (!dma->size)
@@ -710,20 +808,26 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 			break;
 		}
 
-		unmapped = iommu_unmap(domain->domain, iova, len);
-		if (WARN_ON(!unmapped))
-			break;
-
-		unlocked += vfio_unpin_pages_remote(dma, iova,
-						    phys >> PAGE_SHIFT,
-						    unmapped >> PAGE_SHIFT,
-						    false);
-		iova += unmapped;
-
-		cond_resched();
+		/*
+		 * First, try to use fast unmap/unpin. In case of failure,
+		 * switch to slow unmap/unpin path.
+		 */
+		unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
+					    &unlocked, &unmapped_region_list,
+					    &unmapped_region_cnt);
+		if (!unmapped) {
+			unmapped = unmap_unpin_slow(domain, dma, &iova, len,
+						    phys, &unlocked);
+			if (WARN_ON(!unmapped))
+				break;
+		}
 	}
 
 	dma->iommu_mapped = false;
+
+	if (unmapped_region_cnt)
+		unlocked += vfio_sync_unpin(dma, domain, &unmapped_region_list);
+
 	if (do_accounting) {
 		vfio_lock_acct(dma->task, -unlocked, NULL);
 		return 0;