mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-30 23:54:04 +08:00
iommu/sva: Restore SVA handle sharing
Prior to commit 092edaddb6
("iommu: Support mm PASID 1:n with sva domains") the code allowed a SVA handle to be bound multiple times to the same (mm, device) pair. This was alluded to in the kdoc comment, but we had understood this to be more a remark about allowing multiple devices, not a literal same-driver re-opening the same SVA. It turns out uacce and idxd were both relying on the core code to handle reference counting for same-device same-mm scenarios. As this looks hard to resolve in the drivers, bring it back to the core code. The new design has changed the meaning of the domain->users refcount to refer to the number of devices that are sharing that domain for the same mm. This is part of the design to lift the SVA domain de-duplication out of the drivers. Return the old behavior by explicitly de-duplicating the struct iommu_sva handle. The same (mm, device) will return the same handle pointer and the core code will handle tracking this. The last unbind of the handle will destroy it. Fixes: 092edaddb6
("iommu: Support mm PASID 1:n with sva domains") Reported-by: Zhangfei Gao <zhangfei.gao@linaro.org> Closes: https://lore.kernel.org/all/20240221110658.529-1-zhangfei.gao@linaro.org/ Tested-by: Zhangfei Gao <zhangfei.gao@linaro.org> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com> Link: https://lore.kernel.org/r/0-v1-9455fc497a6f+3b4-iommu_sva_sharing_jgg@nvidia.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
16b1b39126
commit
65d4418c50
@ -41,6 +41,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
|
||||
}
|
||||
iommu_mm->pasid = pasid;
|
||||
INIT_LIST_HEAD(&iommu_mm->sva_domains);
|
||||
INIT_LIST_HEAD(&iommu_mm->sva_handles);
|
||||
/*
|
||||
* Make sure the write to mm->iommu_mm is not reordered in front of
|
||||
* initialization to iommu_mm fields. If it does, readers may see a
|
||||
@ -82,6 +83,14 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
list_for_each_entry(handle, &mm->iommu_mm->sva_handles, handle_item) {
|
||||
if (handle->dev == dev) {
|
||||
refcount_inc(&handle->users);
|
||||
mutex_unlock(&iommu_sva_lock);
|
||||
return handle;
|
||||
}
|
||||
}
|
||||
|
||||
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
|
||||
if (!handle) {
|
||||
ret = -ENOMEM;
|
||||
@ -108,7 +117,9 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
|
||||
if (ret)
|
||||
goto out_free_domain;
|
||||
domain->users = 1;
|
||||
refcount_set(&handle->users, 1);
|
||||
list_add(&domain->next, &mm->iommu_mm->sva_domains);
|
||||
list_add(&handle->handle_item, &mm->iommu_mm->sva_handles);
|
||||
|
||||
out:
|
||||
mutex_unlock(&iommu_sva_lock);
|
||||
@ -141,6 +152,12 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
|
||||
struct device *dev = handle->dev;
|
||||
|
||||
mutex_lock(&iommu_sva_lock);
|
||||
if (!refcount_dec_and_test(&handle->users)) {
|
||||
mutex_unlock(&iommu_sva_lock);
|
||||
return;
|
||||
}
|
||||
list_del(&handle->handle_item);
|
||||
|
||||
iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
|
||||
if (--domain->users == 0) {
|
||||
list_del(&domain->next);
|
||||
|
@ -892,11 +892,14 @@ struct iommu_fwspec {
|
||||
/**
 * struct iommu_sva - handle to an (mm, device) SVA bind
 * @dev: the device this handle was created for
 * @domain: the SVA domain attached for this bind
 * @handle_item: entry in iommu_mm_data::sva_handles, used to de-duplicate
 *               repeated binds of the same (mm, device) pair
 * @users: number of iommu_sva_bind_device() calls sharing this handle;
 *         the last iommu_sva_unbind_device() destroys it
 */
struct iommu_sva {
	struct device *dev;
	struct iommu_domain *domain;
	struct list_head handle_item;
	refcount_t users;
};
|
||||
|
||||
/**
 * struct iommu_mm_data - per-mm IOMMU SVA state
 * @pasid: the PASID allocated for this mm
 * @sva_domains: list of SVA domains created for this mm (shared via
 *               domain->users across devices using the same mm)
 * @sva_handles: list of struct iommu_sva handles for this mm, searched
 *               at bind time so the same (mm, device) pair returns the
 *               same handle pointer
 */
struct iommu_mm_data {
	u32 pasid;
	struct list_head sva_domains;
	struct list_head sva_handles;
};
|
||||
|
||||
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
|
||||
|
Loading…
Reference in New Issue
Block a user