ARM/dma-mapping: merge IOMMU ops
The dma_sync_* operations are now the only difference between the
coherent and non-coherent IOMMU ops. Some minor tweaks to make those
safe for coherent devices with minimal overhead, and we can condense
down to a single set of DMA ops.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Marc Zyngier <maz@kernel.org>
parent d563bccfa3
commit 4136ce90f0
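For context, the pattern being merged is easy to show outside the kernel:
keep one shared ops table and have its sync callbacks return early when the
device is coherent, instead of maintaining a second, sync-less table. Below
is a minimal user-space sketch of that pattern only; struct dev, struct
sync_ops, and cache_flush are illustrative mocks, not kernel APIs.

/*
 * Sketch: one ops table serves both coherent and non-coherent devices.
 * The coherency check is a cheap early return in the sync path, which is
 * what makes the separate "coherent" ops table redundant.
 */
#include <stdbool.h>
#include <stdio.h>

struct dev {
	const char *name;
	bool dma_coherent;	/* mirrors dev->dma_coherent */
};

static void cache_flush(struct dev *d)
{
	printf("%s: cache maintenance performed\n", d->name);
}

/* Single sync implementation, safe for both device classes. */
static void sync_for_device(struct dev *d)
{
	if (d->dma_coherent)	/* coherent: no maintenance needed */
		return;
	cache_flush(d);
}

struct sync_ops {
	void (*sync_for_device)(struct dev *d);
};

/* One shared table stands in for iommu_ops + iommu_coherent_ops. */
static const struct sync_ops merged_ops = {
	.sync_for_device = sync_for_device,
};

int main(void)
{
	struct dev coherent = { .name = "coherent-dev", .dma_coherent = true };
	struct dev noncoherent = { .name = "noncoherent-dev", .dma_coherent = false };

	merged_ops.sync_for_device(&coherent);		/* no-op */
	merged_ops.sync_for_device(&noncoherent);	/* flushes */
	return 0;
}

The same early return is what lets the real diff below defer the
phys_to_page(iommu_iova_to_phys(...)) lookup until after the coherency
check, so coherent devices never pay for the page-table walk.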
@@ -1341,6 +1341,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *s;
 	int i;
 
+	if (dev->dma_coherent)
+		return;
+
 	for_each_sg(sg, s, nents, i)
 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
 
@@ -1360,6 +1363,9 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 	struct scatterlist *s;
 	int i;
 
+	if (dev->dma_coherent)
+		return;
+
 	for_each_sg(sg, s, nents, i)
 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
@@ -1493,12 +1499,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
+	if (dev->dma_coherent || !iova)
 		return;
 
+	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
@@ -1507,12 +1514,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;
 
-	if (!iova)
+	if (dev->dma_coherent || !iova)
 		return;
 
+	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
@@ -1536,22 +1544,6 @@ static const struct dma_map_ops iommu_ops = {
 	.unmap_resource	= arm_iommu_unmap_resource,
 };
 
-static const struct dma_map_ops iommu_coherent_ops = {
-	.alloc		= arm_iommu_alloc_attrs,
-	.free		= arm_iommu_free_attrs,
-	.mmap		= arm_iommu_mmap_attrs,
-	.get_sgtable	= arm_iommu_get_sgtable,
-
-	.map_page	= arm_iommu_map_page,
-	.unmap_page	= arm_iommu_unmap_page,
-
-	.map_sg		= arm_iommu_map_sg,
-	.unmap_sg	= arm_iommu_unmap_sg,
-
-	.map_resource	= arm_iommu_map_resource,
-	.unmap_resource	= arm_iommu_unmap_resource,
-};
-
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1750,10 +1742,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		return;
 	}
 
-	if (coherent)
-		set_dma_ops(dev, &iommu_coherent_ops);
-	else
-		set_dma_ops(dev, &iommu_ops);
+	set_dma_ops(dev, &iommu_ops);
 }
 
 static void arm_teardown_iommu_dma_ops(struct device *dev)