iommu/dma: Pass address limit rather than size to iommu_setup_dma_ops()
Passing a 64-bit address width to iommu_setup_dma_ops() is valid on
virtual platforms, but isn't currently possible. The overflow check in
iommu_dma_init_domain() prevents this even when @dma_base isn't 0. Pass
a limit address instead of a size, so callers don't have to fake a size
to work around the check.

The base and limit parameters are being phased out, because:
* they are redundant for x86 callers. dma-iommu already reserves the
  first page, and the upper limit is already in domain->geometry.
* they can now be obtained from dev->dma_range_map on Arm.
But removing them on Arm isn't completely straightforward so is left
for future work. As an intermediate step, simplify the x86 callers by
passing dummy limits.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/20210618152059.1194210-5-jean-philippe@linaro.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent 3cf485540e
commit ac6d704679
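To make the motivation concrete, here is a small standalone C sketch, not kernel code: the range_ok_* helpers and the ap_* values are made up for illustration. It mirrors the aperture check changed below in iommu_dma_init_domain(): with base 0, a full 64-bit address width would need size == 2^64, which wraps to 0 in a u64 and trips the old "base + size <= aperture_start" test, whereas the new inclusive-limit test accepts limit == U64_MAX.

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone sketch, not kernel code: compare a size-based aperture check
 * with a limit-based one.  A full 64-bit address width needs size == 2^64,
 * which wraps to 0 in a u64, so the size-based check rejects a perfectly
 * valid range; an inclusive limit has no such problem.
 */
static int range_ok_size(uint64_t base, uint64_t size,
			 uint64_t ap_start, uint64_t ap_end)
{
	return !(base > ap_end || base + size <= ap_start);	/* wraps for 2^64 */
}

static int range_ok_limit(uint64_t base, uint64_t limit,
			  uint64_t ap_start, uint64_t ap_end)
{
	return !(base > ap_end || limit < ap_start);
}

int main(void)
{
	uint64_t ap_start = 0x1000, ap_end = UINT64_MAX;

	/* A 64-bit width: base 0, size 2^64 has already wrapped to 0. */
	printf("size-based:  %d\n", range_ok_size(0, 0, ap_start, ap_end));		/* 0: rejected */
	printf("limit-based: %d\n", range_ok_limit(0, UINT64_MAX, ap_start, ap_end));	/* 1: accepted */
	return 0;
}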
arch/arm64/mm/dma-mapping.c
@@ -50,7 +50,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
 	dev->dma_coherent = coherent;
 	if (iommu)
-		iommu_setup_dma_ops(dev, dma_base, size);
+		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 
 #ifdef CONFIG_XEN
 	if (xen_swiotlb_detect())
drivers/iommu/amd/iommu.c
@@ -1713,7 +1713,7 @@ static void amd_iommu_probe_finalize(struct device *dev)
 	/* Domains are initialized for this device - have a look what we ended up with */
 	domain = iommu_get_domain_for_dev(dev);
 	if (domain->type == IOMMU_DOMAIN_DMA)
-		iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
+		iommu_setup_dma_ops(dev, 0, U64_MAX);
 	else
 		set_dma_ops(dev, NULL);
 }
drivers/iommu/dma-iommu.c
@@ -319,16 +319,16 @@ static bool dev_is_untrusted(struct device *dev)
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
  * @base: IOVA at which the mappable address space starts
- * @size: Size of IOVA space
+ * @limit: Last address of the IOVA space
  * @dev: Device the domain is being initialised for
  *
- * @base and @size should be exact multiples of IOMMU page granularity to
+ * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
  * avoid rounding surprises. If necessary, we reserve the page at address 0
  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
  * any change which could make prior IOVAs invalid will fail.
  */
 static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
-		u64 size, struct device *dev)
+		dma_addr_t limit, struct device *dev)
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	unsigned long order, base_pfn;
@@ -346,7 +346,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	/* Check the domain allows at least some access to the device... */
 	if (domain->geometry.force_aperture) {
 		if (base > domain->geometry.aperture_end ||
-		    base + size <= domain->geometry.aperture_start) {
+		    limit < domain->geometry.aperture_start) {
 			pr_warn("specified DMA range outside IOMMU capability\n");
 			return -EFAULT;
 		}
@@ -1308,7 +1308,7 @@ static const struct dma_map_ops iommu_dma_ops = {
  * The IOMMU core code allocates the default DMA domain, which the underlying
  * IOMMU driver needs to support via the dma-iommu layer.
  */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 
@@ -1320,7 +1320,7 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 	 * underlying IOMMU driver needs to support via the dma-iommu layer.
 	 */
 	if (domain->type == IOMMU_DOMAIN_DMA) {
-		if (iommu_dma_init_domain(domain, dma_base, size, dev))
+		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
 			goto out_err;
 		dev->dma_ops = &iommu_dma_ops;
 	}
drivers/iommu/intel/iommu.c
@@ -5165,13 +5165,10 @@ static void intel_iommu_release_device(struct device *dev)
 
 static void intel_iommu_probe_finalize(struct device *dev)
 {
-	dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 
 	if (domain && domain->type == IOMMU_DOMAIN_DMA)
-		iommu_setup_dma_ops(dev, base,
-				    __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
+		iommu_setup_dma_ops(dev, 0, U64_MAX);
 	else
 		set_dma_ops(dev, NULL);
 }
include/linux/dma-iommu.h
@@ -19,7 +19,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
 
 /* The DMA API isn't _quite_ the whole story, though... */
 /*
@@ -50,7 +50,7 @@ struct msi_msg;
 struct device;
 
 static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
-		u64 size)
+		u64 dma_limit)
 {
 }
 
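For illustration only, a hypothetical helper (not part of this patch; the name dma_range_to_limit is made up) shows how a caller that still holds a (base, size) pair, like the arm64 arch_setup_dma_ops() above, can derive the inclusive limit that iommu_setup_dma_ops() now expects.

#include <stdint.h>

/*
 * Hypothetical helper, not kernel code: derive the inclusive last address
 * from a (base, size) pair the way the arm64 caller does with
 * "dma_base + size - 1".  A size of 0 stands in here for "the whole
 * 64-bit space", which a size argument alone cannot express without
 * wrapping.
 */
static uint64_t dma_range_to_limit(uint64_t base, uint64_t size)
{
	if (size == 0 || base + size - 1 < base)	/* range would wrap past 2^64 */
		return UINT64_MAX;
	return base + size - 1;				/* inclusive last usable address */
}

The x86 callers skip this conversion entirely: passing the dummy pair (0, U64_MAX) is harmless because, as the commit message notes, dma-iommu already reserves the first page and the real upper bound is already described by domain->geometry.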