791c2b17fb
Per the reasoning in commit 4bf7fda4dc ("iommu/dma: Add config for
PCI SAC address trick") and its subsequent revert, this mechanism no
longer serves its original purpose, but now only works around broken
hardware/drivers in a way that is unfortunately too impactful to remove.
This does not, however, prevent us from solving the performance impact
which that workaround has on large-scale systems that don't need it.
Once the 32-bit IOVA space fills up and a workload starts allocating and
freeing on both sides of the boundary, the opportunistic SAC allocation
can then end up spending significant time hunting down scattered
fragments of free 32-bit space, or just reestablishing max32_alloc_size.
This can easily be exacerbated by a change in allocation pattern, such
as by changing the network MTU, which can increase pressure on the
32-bit space by leaving a large quantity of cached IOVAs which are now
the wrong size to be recycled, but also won't be freed since the
non-opportunistic allocations can still be satisfied from the whole
64-bit space without triggering the reclaim path.
However, in the context of a workaround where smaller DMA addresses
aren't simply a preference but a necessity, if we get to that point at
all then in fact it's already the endgame. The nature of the allocator
is currently such that the first IOVA we give to a device after the
32-bit space runs out will be the highest possible address for that
device, ever. If that works, then great, we know we can optimise for
speed by always allocating from the full range. And if it doesn't, then
the worst has already happened and any brokenness is now showing, so
there's little point in continuing to try to hide it.
To that end, implement a flag to refine the SAC business into a
per-device policy that can automatically get itself out of the way if
and when it stops being useful.
CC: Linus Torvalds <torvalds@linux-foundation.org>
CC: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: John Garry <john.g.garry@oracle.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Vasant Hegde <vasant.hegde@amd.com>
Tested-by: Jakub Kicinski <kuba@kernel.org>
Link: https://lore.kernel.org/r/b8502b115b915d2a3fabde367e099e39106686c8.1681392791.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
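
To make the self-disabling behaviour concrete, here is a minimal sketch of
how the policy can play out in the IOVA allocation path. The
pci_32bit_workaround flag is the one this patch adds (see the header below),
and DMA_BIT_MASK(), alloc_iova_fast(), dev_notice() and bits_per() are
existing kernel APIs; iovad, iova_len, shift and dma_limit stand in for
locals of the allocation helper in dma-iommu.c, so treat this as an
illustration rather than the exact upstream hunk:

	if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
		/* Opportunistically steer this device below 32 bits first */
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);
		if (iova)
			goto done;

		/*
		 * 32-bit space has run out: the next allocation would come
		 * from the top of the full range anyway, so any brokenness
		 * is about to show regardless. Stand down permanently
		 * rather than keep hunting for scattered 32-bit fragments.
		 */
		dev->iommu->pci_32bit_workaround = false;
		dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
	}

	/* Devices without the workaround always allocate from the full range */
	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
done:
	return (dma_addr_t)iova << shift;

The key design point is the one the message spells out: the first failure
to find 32-bit space is treated as the endgame, so the flag is cleared once
and never rechecked, keeping the fast path fast from then on.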
51 lines | 1.1 KiB | C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014-2015 ARM Ltd.
 */
#ifndef __DMA_IOMMU_H
#define __DMA_IOMMU_H

#include <linux/iommu.h>

#ifdef CONFIG_IOMMU_DMA

int iommu_get_dma_cookie(struct iommu_domain *domain);
void iommu_put_dma_cookie(struct iommu_domain *domain);

int iommu_dma_init_fq(struct iommu_domain *domain);

void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);

extern bool iommu_dma_forcedac;
static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
{
	dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
}

#else /* CONFIG_IOMMU_DMA */

static inline int iommu_dma_init_fq(struct iommu_domain *domain)
{
	return -EINVAL;
}

static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	return -ENODEV;
}

static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}

static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
}

static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
{
}

#endif /* CONFIG_IOMMU_DMA */
#endif /* __DMA_IOMMU_H */
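
For completeness, a sketch of the intended call site: core code never sets
the flag itself, so an IOMMU driver opts devices in from its probe path.
Everything named my_* below is a hypothetical placeholder rather than code
from this commit; dev_is_pci() is the usual helper from <linux/pci.h>, and
the empty stub above keeps such a call safe when CONFIG_IOMMU_DMA is
disabled:

#include <linux/pci.h>
#include "dma-iommu.h"

/* Hypothetical per-driver instance, for illustration only */
struct my_iommu {
	struct iommu_device iommu;
};
static struct my_iommu my_iommu;

static struct iommu_device *my_iommu_probe_device(struct device *dev)
{
	/* ... the driver's usual per-device setup ... */

	/*
	 * Opt PCI devices in to 32-bit-first IOVA allocation; the flag
	 * clears itself once the 32-bit space is exhausted.
	 */
	if (dev_is_pci(dev))
		iommu_dma_set_pci_32bit_workaround(dev);

	return &my_iommu.iommu;
}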