Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (last synced 2024-11-23 12:14:10 +08:00)
iommu: Convert simple drivers with DOMAIN_DMA to domain_alloc_paging()
These drivers are all trivially converted since the function is only called if the domain type is going to be IOMMU_DOMAIN_UNMANAGED/DMA. Tested-by: Heiko Stuebner <heiko@sntech.de> Tested-by: Steven Price <steven.price@arm.com> Tested-by: Marek Szyprowski <m.szyprowski@samsung.com> Tested-by: Nicolin Chen <nicolinc@nvidia.com> Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com> Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Tested-by: Yong Wu <yong.wu@mediatek.com> #For mtk_iommu.c Link: https://lore.kernel.org/r/23-v8-81230027b2fa+9d-iommu_all_defdom_jgg@nvidia.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
4601cd2d7c
commit
3529375e77
@@ -332,12 +332,10 @@ out_unlock:
 	return ret;
 }
 
-static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *qcom_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct qcom_iommu_domain *qcom_domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
-		return NULL;
 	/*
 	 * Allocate the domain and initialise some of its data structures.
 	 * We can't really do anything meaningful until we've added a
@@ -605,7 +603,7 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 static const struct iommu_ops qcom_iommu_ops = {
 	.identity_domain = &qcom_iommu_identity_domain,
 	.capable	= qcom_iommu_capable,
-	.domain_alloc	= qcom_iommu_domain_alloc,
+	.domain_alloc_paging = qcom_iommu_domain_alloc_paging,
 	.probe_device	= qcom_iommu_probe_device,
 	.device_group	= generic_device_group,
 	.of_xlate	= qcom_iommu_of_xlate,
@@ -887,7 +887,7 @@ static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
 				   DMA_TO_DEVICE);
 }
 
-static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct exynos_iommu_domain *domain;
 	dma_addr_t handle;
@@ -896,9 +896,6 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 	/* Check if correct PTE offsets are initialized */
 	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
 
-	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
@@ -1472,7 +1469,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
 
 static const struct iommu_ops exynos_iommu_ops = {
 	.identity_domain = &exynos_identity_domain,
-	.domain_alloc = exynos_iommu_domain_alloc,
+	.domain_alloc_paging = exynos_iommu_domain_alloc_paging,
 	.device_group = generic_device_group,
 	.probe_device = exynos_iommu_probe_device,
 	.release_device = exynos_iommu_release_device,
@@ -563,13 +563,10 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
  * IOMMU Operations
  */
 
-static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
+static struct iommu_domain *ipmmu_domain_alloc_paging(struct device *dev)
 {
 	struct ipmmu_vmsa_domain *domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
-		return NULL;
-
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
@@ -892,7 +889,7 @@ static struct iommu_group *ipmmu_find_group(struct device *dev)
 
 static const struct iommu_ops ipmmu_ops = {
 	.identity_domain = &ipmmu_iommu_identity_domain,
-	.domain_alloc = ipmmu_domain_alloc,
+	.domain_alloc_paging = ipmmu_domain_alloc_paging,
 	.probe_device = ipmmu_probe_device,
 	.release_device = ipmmu_release_device,
 	.probe_finalize = ipmmu_probe_finalize,
@@ -689,13 +689,10 @@ update_iova_region:
 	return 0;
 }
 
-static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct mtk_iommu_domain *dom;
 
-	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 	if (!dom)
 		return NULL;
@@ -1019,7 +1016,7 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
 
 static const struct iommu_ops mtk_iommu_ops = {
 	.identity_domain = &mtk_iommu_identity_domain,
-	.domain_alloc	= mtk_iommu_domain_alloc,
+	.domain_alloc_paging = mtk_iommu_domain_alloc_paging,
 	.probe_device	= mtk_iommu_probe_device,
 	.release_device	= mtk_iommu_release_device,
 	.device_group	= mtk_iommu_device_group,
@@ -1043,13 +1043,10 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	return ret;
 }
 
-static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct rk_iommu_domain *rk_domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
-		return NULL;
-
 	if (!dma_dev)
 		return NULL;
 
@@ -1172,7 +1169,7 @@ static int rk_iommu_of_xlate(struct device *dev,
 
 static const struct iommu_ops rk_iommu_ops = {
 	.identity_domain = &rk_identity_domain,
-	.domain_alloc = rk_iommu_domain_alloc,
+	.domain_alloc_paging = rk_iommu_domain_alloc_paging,
 	.probe_device = rk_iommu_probe_device,
 	.release_device = rk_iommu_release_device,
 	.device_group = rk_iommu_device_group,
@@ -134,13 +134,10 @@ sprd_iommu_pgt_size(struct iommu_domain *domain)
 		SPRD_IOMMU_PAGE_SHIFT) * sizeof(u32);
 }
 
-static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
+static struct iommu_domain *sprd_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct sprd_iommu_domain *dom;
 
-	if (domain_type != IOMMU_DOMAIN_DMA && domain_type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 	if (!dom)
 		return NULL;
@@ -421,7 +418,7 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 
 
 static const struct iommu_ops sprd_iommu_ops = {
-	.domain_alloc	= sprd_iommu_domain_alloc,
+	.domain_alloc_paging = sprd_iommu_domain_alloc_paging,
 	.probe_device	= sprd_iommu_probe_device,
 	.device_group	= sprd_iommu_device_group,
 	.of_xlate	= sprd_iommu_of_xlate,
@@ -667,14 +667,11 @@ static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
 		      sun50i_iova_get_page_offset(iova);
 }
 
-static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *
+sun50i_iommu_domain_alloc_paging(struct device *dev)
 {
 	struct sun50i_iommu_domain *sun50i_domain;
 
-	if (type != IOMMU_DOMAIN_DMA &&
-	    type != IOMMU_DOMAIN_UNMANAGED)
-		return NULL;
-
 	sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
 	if (!sun50i_domain)
 		return NULL;
@@ -840,7 +837,7 @@ static const struct iommu_ops sun50i_iommu_ops = {
 	.identity_domain = &sun50i_iommu_identity_domain,
 	.pgsize_bitmap	= SZ_4K,
 	.device_group	= sun50i_iommu_device_group,
-	.domain_alloc	= sun50i_iommu_domain_alloc,
+	.domain_alloc_paging = sun50i_iommu_domain_alloc_paging,
 	.of_xlate	= sun50i_iommu_of_xlate,
 	.probe_device	= sun50i_iommu_probe_device,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
@@ -272,13 +272,10 @@ static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
 	clear_bit(id, smmu->asids);
 }
 
-static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
+static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
 {
 	struct tegra_smmu_as *as;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
-		return NULL;
-
 	as = kzalloc(sizeof(*as), GFP_KERNEL);
 	if (!as)
 		return NULL;
@@ -991,7 +988,7 @@ static int tegra_smmu_def_domain_type(struct device *dev)
 static const struct iommu_ops tegra_smmu_ops = {
 	.identity_domain = &tegra_smmu_identity_domain,
 	.def_domain_type = &tegra_smmu_def_domain_type,
-	.domain_alloc = tegra_smmu_domain_alloc,
+	.domain_alloc_paging = tegra_smmu_domain_alloc_paging,
 	.probe_device = tegra_smmu_probe_device,
 	.device_group = tegra_smmu_device_group,
 	.of_xlate = tegra_smmu_of_xlate,
Loading…
Reference in New Issue
Block a user