
iommu/arm-smmu: Clean up DMA API usage

With the correct DMA API calls now integrated into the io-pgtable code,
let that handle the flushing of non-coherent page table updates.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
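The flushing that the io-pgtable code now takes care of boils down to streaming DMA API maintenance on the updated table memory. Below is a minimal sketch of that pattern, assuming the table was handed to the walker with dma_map_single() when it was allocated; the function and parameter names are illustrative only, not the actual io-pgtable implementation.

#include <linux/dma-mapping.h>

/*
 * Illustrative sketch (not the real io-pgtable code): once a table is
 * mapped for a non-coherent walker, a CPU update to its entries is made
 * visible by cleaning just the touched cache lines out to memory.
 */
static void example_sync_ptes(struct device *dev, dma_addr_t table_dma,
			      void *table_base, void *ptep, size_t size)
{
	/* Offset of the updated PTEs within the already-mapped table */
	unsigned long offset = ptep - table_base;

	dma_sync_single_for_device(dev, table_dma + offset, size,
				   DMA_TO_DEVICE);
}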
Authored by Robin Murphy on 2015-07-29 19:46:06 +01:00; committed by Will Deacon
parent f8d5496131
commit 2df7a25ce4

drivers/iommu/arm-smmu.c

@@ -611,24 +611,13 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
 
-	/* Ensure new page tables are visible to the hardware walker */
-	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+	/*
+	 * Ensure new page tables are visible to a coherent hardware walker.
+	 * The page table code deals with flushing for the non-coherent case.
+	 */
+	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
 		dsb(ishst);
-	} else {
-		/*
-		 * If the SMMU can't walk tables in the CPU caches, treat them
-		 * like non-coherent DMA since we need to flush the new entries
-		 * all the way out to memory. There's no possibility of
-		 * recursion here as the SMMU table walker will not be wired
-		 * through another SMMU.
-		 */
-		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-				DMA_TO_DEVICE);
-	}
 }
 
 static struct iommu_gather_ops arm_smmu_gather_ops = {
@@ -899,6 +888,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		.ias		= ias,
 		.oas		= oas,
 		.tlb		= &arm_smmu_gather_ops,
+		.iommu_dev	= smmu->dev,
 	};
 
 	smmu_domain->smmu = smmu;
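The new iommu_dev member of struct io_pgtable_cfg is what gives the page-table code a struct device to make those DMA API calls against. The sketch below shows how an allocator might use it, assuming freshly allocated tables are mapped for the walker; example_alloc_table() is hypothetical, and only the iommu_dev field itself comes from this series.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

#include "io-pgtable.h"	/* struct io_pgtable_cfg (private driver header) */

/* Hypothetical helper illustrating the intended use of cfg->iommu_dev */
static void *example_alloc_table(struct io_pgtable_cfg *cfg, size_t size,
				 gfp_t gfp, dma_addr_t *dma)
{
	struct device *dev = cfg->iommu_dev;	/* supplied by the SMMU driver */
	void *table = (void *)__get_free_pages(gfp, get_order(size));

	if (!table)
		return NULL;

	/* Map the table for the walker; later PTE updates are synced as above */
	*dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		free_pages((unsigned long)table, get_order(size));
		return NULL;
	}

	return table;
}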