iommu/io-pgtable: Introduce explicit coherency
Once we remove the serialising spinlock, a potential race opens up for non-coherent IOMMUs whereby a caller of .map() can be sure that cache maintenance has been performed on their new PTE, but will have no guarantee that such maintenance for table entries above it has actually completed (e.g. if another CPU took an interrupt immediately after writing the table entry, but before initiating the DMA sync).

Handling this race safely will add some potentially non-trivial overhead to installing a table entry, which we would much rather avoid on coherent systems where it will be unnecessary, and where we are striving to minimise latency by removing the locking in the first place.

To that end, let's introduce an explicit notion of cache-coherency to io-pgtable, such that we will be able to avoid penalising IOMMUs which know enough to know when they are coherent.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
commit 81b3c25218
parent b9f1ef30ac
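The mechanism, visible throughout the diff below, is a new IO_PGTABLE_QUIRK_NO_DMA flag which coherent IOMMU drivers set in their io_pgtable_cfg; every dma_map/unmap/sync of a pagetable is then gated on it. A minimal sketch of that pattern, with a hypothetical helper name (the real sites are __arm_lpae_set_pte() and __arm_v7s_pte_sync() below):

/*
 * Sketch only: how a PTE writer gates cache maintenance on the quirk.
 * With IO_PGTABLE_QUIRK_NO_DMA set, the table walker is known to be
 * cache-coherent and the sync is skipped entirely; otherwise the
 * updated entry must be pushed out to memory as explicit DMA data.
 */
static void example_set_pte(u64 *ptep, u64 pte, struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_sync_single_for_device(cfg->iommu_dev,
					   (dma_addr_t)virt_to_phys(ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}

The (dma_addr_t)virt_to_phys() conversion stands in for the real __arm_lpae_dma_addr() helper, which does the same thing.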
drivers/iommu/arm-smmu-v3.c
@@ -1563,6 +1563,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 		.iommu_dev = smmu->dev,
 	};
 
+	if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
+		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
+
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops)
 		return -ENOMEM;

drivers/iommu/arm-smmu.c
@@ -1018,6 +1018,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		.iommu_dev = smmu->dev,
 	};
 
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
+
 	smmu_domain->smmu = smmu;
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops) {

drivers/iommu/io-pgtable-arm-v7s.c
@@ -187,7 +187,8 @@ static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl)
 static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
 				   struct arm_v7s_io_pgtable *data)
 {
-	struct device *dev = data->iop.cfg.iommu_dev;
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	struct device *dev = cfg->iommu_dev;
 	dma_addr_t dma;
 	size_t size = ARM_V7S_TABLE_SIZE(lvl);
 	void *table = NULL;
@@ -196,7 +197,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
 		table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
 	else if (lvl == 2)
 		table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
-	if (table && !selftest_running) {
+	if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
 		dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, dma))
 			goto out_free;
@@ -225,10 +226,11 @@ out_free:
 static void __arm_v7s_free_table(void *table, int lvl,
 				 struct arm_v7s_io_pgtable *data)
 {
-	struct device *dev = data->iop.cfg.iommu_dev;
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	struct device *dev = cfg->iommu_dev;
 	size_t size = ARM_V7S_TABLE_SIZE(lvl);
 
-	if (!selftest_running)
+	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
 		dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
 				 DMA_TO_DEVICE);
 	if (lvl == 1)
@@ -240,7 +242,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
 static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
 			       struct io_pgtable_cfg *cfg)
 {
-	if (selftest_running)
+	if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
 		return;
 
 	dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
@@ -657,7 +659,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
 	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
 			    IO_PGTABLE_QUIRK_NO_PERMS |
 			    IO_PGTABLE_QUIRK_TLBI_ON_MAP |
-			    IO_PGTABLE_QUIRK_ARM_MTK_4GB))
+			    IO_PGTABLE_QUIRK_ARM_MTK_4GB |
+			    IO_PGTABLE_QUIRK_NO_DMA))
 		return NULL;
 
 	/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
@@ -774,7 +777,7 @@ static int __init arm_v7s_do_selftests(void)
 		.tlb = &dummy_tlb_ops,
 		.oas = 32,
 		.ias = 32,
-		.quirks = IO_PGTABLE_QUIRK_ARM_NS,
+		.quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA,
 		.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
 	};
 	unsigned int iova, size, iova_start;

drivers/iommu/io-pgtable-arm.c
@@ -217,7 +217,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
 	if (!pages)
 		return NULL;
 
-	if (!selftest_running) {
+	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
 		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, dma))
 			goto out_free;
@@ -243,7 +243,7 @@ out_free:
 static void __arm_lpae_free_pages(void *pages, size_t size,
 				  struct io_pgtable_cfg *cfg)
 {
-	if (!selftest_running)
+	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
 		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 				 size, DMA_TO_DEVICE);
 	free_pages_exact(pages, size);
@@ -254,7 +254,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
 {
 	*ptep = pte;
 
-	if (!selftest_running)
+	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
 		dma_sync_single_for_device(cfg->iommu_dev,
 					   __arm_lpae_dma_addr(ptep),
 					   sizeof(pte), DMA_TO_DEVICE);
@@ -693,7 +693,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 	u64 reg;
 	struct arm_lpae_io_pgtable *data;
 
-	if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS)
+	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -782,7 +782,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 	struct arm_lpae_io_pgtable *data;
 
 	/* The NS quirk doesn't apply at stage 2 */
-	if (cfg->quirks)
+	if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
 		return NULL;
 
 	data = arm_lpae_alloc_pgtable(cfg);
@@ -1086,6 +1086,7 @@ static int __init arm_lpae_do_selftests(void)
 	struct io_pgtable_cfg cfg = {
 		.tlb = &dummy_tlb_ops,
 		.oas = 48,
+		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
 	};
 
 	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {

drivers/iommu/io-pgtable.h
@@ -65,11 +65,17 @@ struct io_pgtable_cfg {
 	 * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
 	 * when the SoC is in "4GB mode" and they can only access the high
	 * remap of DRAM (0x1_00000000 to 0x1_ffffffff).
+	 *
+	 * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever
+	 *	be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
+	 *	software-emulated IOMMU), such that pagetable updates need not
+	 *	be treated as explicit DMA data.
 	 */
 	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
 	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
 	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
 	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
+	#define IO_PGTABLE_QUIRK_NO_DMA		BIT(4)
 	unsigned long quirks;
 	unsigned long pgsize_bitmap;
 	unsigned int ias;