b67483b3c4
It's somewhat hard to see, but arm64's arch_setup_dma_ops() should only ever call iommu_setup_dma_ops() after a successful iommu_probe_device(), which means there should be no harm in achieving the same order of operations by running it off the back of iommu_probe_device() itself. This then puts it in line with the x86 and s390 .probe_finalize bodges, letting us pull it all into the main flow properly. As a bonus this lets us fold in and de-scope the PCI workaround setup as well.

At this point we can also then pull the call up inside the group mutex, and avoid having to think about whether iommu_group_store_type() could theoretically race and free the domain if iommu_setup_dma_ops() ran just *before* iommu_device_use_default_domain() claims it...

Furthermore we replace one .probe_finalize call completely, since the only remaining implementations are now one which only needs to run once for the initial boot-time probe, and two which themselves render that path unreachable. This leaves us a big step closer to realistically being able to unpick the variety of different things that iommu_setup_dma_ops() has been muddling together, and further streamline iommu-dma into core API flows in future.

Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com> # For Intel IOMMU
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/bebea331c1d688b34d9862eefd5ede47503961b8.1713523152.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
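To make the described flow concrete, here is a minimal, hypothetical sketch (not the actual diff) of the shape this change gives the core probe path: iommu_setup_dma_ops() is invoked from the IOMMU core itself, while the group mutex is still held, rather than from arm64's arch_setup_dma_ops() or the x86/s390 .probe_finalize callbacks. The function and local names below are assumptions for illustration only.

static int __iommu_probe_device_sketch(struct device *dev)
{
	struct iommu_group *group;

	/* ... ops->probe_device() and group lookup/allocation happen here ... */
	group = dev->iommu_group;

	mutex_lock(&group->mutex);

	/* ... attach the device to its (default) domain, as before ... */

	/*
	 * Illustrative: set up the device's DMA API ops while group->mutex is
	 * still held, so a concurrent iommu_group_store_type() cannot free the
	 * domain between the attach above and this setup.
	 */
	iommu_setup_dma_ops(dev);

	mutex_unlock(&group->mutex);
	return 0;
}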
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014-2015 ARM Ltd.
 */
#ifndef __DMA_IOMMU_H
#define __DMA_IOMMU_H

#include <linux/iommu.h>

#ifdef CONFIG_IOMMU_DMA

void iommu_setup_dma_ops(struct device *dev);

int iommu_get_dma_cookie(struct iommu_domain *domain);
void iommu_put_dma_cookie(struct iommu_domain *domain);

int iommu_dma_init_fq(struct iommu_domain *domain);

void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);

extern bool iommu_dma_forcedac;

#else /* CONFIG_IOMMU_DMA */

static inline void iommu_setup_dma_ops(struct device *dev)
{
}

static inline int iommu_dma_init_fq(struct iommu_domain *domain)
{
	return -EINVAL;
}

static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	return -ENODEV;
}

static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}

static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
}

#endif /* CONFIG_IOMMU_DMA */
#endif /* __DMA_IOMMU_H */
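Usage note: because the !CONFIG_IOMMU_DMA stubs return plain error codes (-EINVAL/-ENODEV) rather than hiding the declarations, core code can call these helpers unconditionally and treat failure as "no IOMMU-DMA support". A hypothetical caller sketch (the function name is assumed for illustration):

static int example_enable_flush_queue(struct iommu_domain *domain)
{
	int ret = iommu_dma_init_fq(domain);

	if (ret)
		return ret;	/* no flush queue available: stay with strict invalidation */

	domain->type = IOMMU_DOMAIN_DMA_FQ;
	return 0;
}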