vfio/iommu_type1: rename vfio_group struct to vfio_iommu_group

A structure named vfio_group is already defined in the vfio module, so to
improve code readability and for simplicity, rename the vfio_group
structure in the vfio_iommu_type1 module to vfio_iommu_group.

Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Link: https://lore.kernel.org/r/20210608112841.51897-1-mgurtovoy@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
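
For context, a minimal sketch of the name clash this patch removes. The field
lists below are abridged for illustration; only the three type1 fields shown in
the hunks are taken from this diff, the rest is indicated with a placeholder
comment and should not be read as the full definitions.

/* Sketch only -- abridged field lists, illustrating the duplicate name. */

/* Core vfio module (vfio.c): the VFIO core's per-group bookkeeping. */
struct vfio_group {
	struct iommu_group	*iommu_group;
	/* ... container, device list, refcounting, etc. ... */
};

/* Type1 IOMMU backend (vfio_iommu_type1.c): its own, unrelated per-group
 * state, which this patch renames to struct vfio_iommu_group. */
struct vfio_iommu_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
	bool			mdev_group;	/* An mdev group */
};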
@@ -110,7 +110,7 @@ struct vfio_batch {
 	int			offset;		/* of next entry in pages */
 };
 
-struct vfio_group {
+struct vfio_iommu_group {
 	struct iommu_group	*iommu_group;
 	struct list_head	next;
 	bool			mdev_group;	/* An mdev group */
@@ -160,8 +160,9 @@ struct vfio_regions {
 static int put_pfn(unsigned long pfn, int prot);
 
-static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
-					       struct iommu_group *iommu_group);
+static struct vfio_iommu_group*
+vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
+			    struct iommu_group *iommu_group);
 
 /*
  * This code handles mapping and unmapping of user data buffers
@@ -836,7 +837,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 				      unsigned long *phys_pfn)
 {
 	struct vfio_iommu *iommu = iommu_data;
-	struct vfio_group *group;
+	struct vfio_iommu_group *group;
 	int i, j, ret;
 	unsigned long remote_vaddr;
 	struct vfio_dma *dma;
@@ -1875,10 +1876,10 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
 	__free_pages(pages, order);
 }
 
-static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
-					   struct iommu_group *iommu_group)
+static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain,
+						 struct iommu_group *iommu_group)
 {
-	struct vfio_group *g;
+	struct vfio_iommu_group *g;
 
 	list_for_each_entry(g, &domain->group_list, next) {
 		if (g->iommu_group == iommu_group)
@@ -1888,11 +1889,12 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
 	return NULL;
 }
 
-static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
-					       struct iommu_group *iommu_group)
+static struct vfio_iommu_group*
+vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
+			    struct iommu_group *iommu_group)
 {
 	struct vfio_domain *domain;
-	struct vfio_group *group = NULL;
+	struct vfio_iommu_group *group = NULL;
 
 	list_for_each_entry(domain, &iommu->domain_list, next) {
 		group = find_iommu_group(domain, iommu_group);
@@ -1967,7 +1969,7 @@ static int vfio_mdev_detach_domain(struct device *dev, void *data)
 }
 
 static int vfio_iommu_attach_group(struct vfio_domain *domain,
-				   struct vfio_group *group)
+				   struct vfio_iommu_group *group)
 {
 	if (group->mdev_group)
 		return iommu_group_for_each_dev(group->iommu_group,
@@ -1978,7 +1980,7 @@ static int vfio_iommu_attach_group(struct vfio_domain *domain,
 }
 
 static void vfio_iommu_detach_group(struct vfio_domain *domain,
-				    struct vfio_group *group)
+				    struct vfio_iommu_group *group)
 {
 	if (group->mdev_group)
 		iommu_group_for_each_dev(group->iommu_group, domain->domain,
@@ -2242,7 +2244,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 					 struct iommu_group *iommu_group)
 {
 	struct vfio_iommu *iommu = iommu_data;
-	struct vfio_group *group;
+	struct vfio_iommu_group *group;
 	struct vfio_domain *domain, *d;
 	struct bus_type *bus = NULL;
 	int ret;
@@ -2518,7 +2520,7 @@ static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
 					   struct list_head *iova_copy)
 {
 	struct vfio_domain *d;
-	struct vfio_group *g;
+	struct vfio_iommu_group *g;
 	struct vfio_iova *node;
 	dma_addr_t start, end;
 	LIST_HEAD(resv_regions);
@@ -2560,7 +2562,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 {
 	struct vfio_iommu *iommu = iommu_data;
 	struct vfio_domain *domain;
-	struct vfio_group *group;
+	struct vfio_iommu_group *group;
 	bool update_dirty_scope = false;
 	LIST_HEAD(iova_copy);
@@ -2681,7 +2683,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 static void vfio_release_domain(struct vfio_domain *domain, bool external)
 {
-	struct vfio_group *group, *group_tmp;
+	struct vfio_iommu_group *group, *group_tmp;
 
 	list_for_each_entry_safe(group, group_tmp,
 				 &domain->group_list, next) {