arm64: Implement custom mmap functions for dma mapping
The current dma_ops do not specify an mmap function, so mapping falls back to the default implementation. There are at least two issues with using the default implementation:

1) The pgprot is always pgprot_noncached (strongly ordered) memory, even with coherent operations.
2) dma_common_mmap calls virt_to_page on the remapped non-coherent address, which leads to invalid memory being mapped.

Fix both of these issues by implementing a custom mmap function which correctly accounts for remapped addresses and sets vm_page_prot appropriately.

Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
[catalin.marinas@arm.com: replaced "arm64_" with "__" prefix for consistency]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 31b1e940c5
commit 6e8d7968e9
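For context, the fallback path the commit message refers to looks roughly like the sketch below. This is a simplified, non-verbatim reconstruction of the generic mmap fallback behaviour of this era; the function name fallback_mmap_sketch is ours, and the body keeps only the two problematic steps:

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    /*
     * Simplified sketch (not verbatim kernel code) of the generic mmap
     * fallback, kept only to illustrate the two issues named above.
     */
    static int fallback_mmap_sketch(struct device *dev,
    				struct vm_area_struct *vma,
    				void *cpu_addr, size_t size)
    {
    	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

    	/*
    	 * Issue 1: the mapping is always made strongly ordered, even
    	 * for buffers allocated by the coherent ops that could stay
    	 * cacheable.
    	 */
    	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

    	/*
    	 * Issue 2: virt_to_page() is only valid for linear-map
    	 * addresses. The non-coherent allocator returns a remapped
    	 * (vmap'd) address, so the pfn computed here is bogus and the
    	 * wrong memory gets mapped into userspace.
    	 */
    	return remap_pfn_range(vma, vma->vm_start,
    			       page_to_pfn(virt_to_page(cpu_addr)),
    			       nr_pages << PAGE_SHIFT, vma->vm_page_prot);
    }

The patch below fixes both points: the new __dma_common_mmap() derives the pfn from the DMA address via dma_to_phys() instead of virt_to_page(), and the page protection is chosen per-callback rather than forced to pgprot_noncached().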
@@ -221,9 +221,52 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
 			       sg->length, dir);
 }
 
+/* vma->vm_page_prot must be set appropriately before calling this function */
+static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	int ret = -ENXIO;
+	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
+					PAGE_SHIFT;
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
+	unsigned long off = vma->vm_pgoff;
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      vma->vm_end - vma->vm_start,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
+static int __swiotlb_mmap_noncoherent(struct device *dev,
+		struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		struct dma_attrs *attrs)
+{
+	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
+	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+static int __swiotlb_mmap_coherent(struct device *dev,
+		struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		struct dma_attrs *attrs)
+{
+	/* Just use whatever page_prot attributes were specified */
+	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
 struct dma_map_ops noncoherent_swiotlb_dma_ops = {
 	.alloc = __dma_alloc_noncoherent,
 	.free = __dma_free_noncoherent,
+	.mmap = __swiotlb_mmap_noncoherent,
 	.map_page = __swiotlb_map_page,
 	.unmap_page = __swiotlb_unmap_page,
 	.map_sg = __swiotlb_map_sg_attrs,
@@ -240,6 +283,7 @@ EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
 struct dma_map_ops coherent_swiotlb_dma_ops = {
 	.alloc = __dma_alloc_coherent,
 	.free = __dma_free_coherent,
+	.mmap = __swiotlb_mmap_coherent,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
 	.map_sg = swiotlb_map_sg_attrs,
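For reference, these callbacks are not invoked directly; a driver typically reaches them through the dma_mmap_coherent() wrapper from its own file_operations mmap handler. A minimal sketch follows, with my_dev, my_buf and my_dma_handle as hypothetical placeholders for a device and a buffer previously obtained from dma_alloc_coherent():

    #include <linux/dma-mapping.h>
    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/module.h>

    static struct device *my_dev;		/* hypothetical device */
    static void *my_buf;			/* from a prior dma_alloc_coherent() */
    static dma_addr_t my_dma_handle;	/* DMA address of that buffer */

    /*
     * Hypothetical driver mmap handler: dma_mmap_coherent() dispatches
     * to the device's dma_map_ops->mmap, i.e. __swiotlb_mmap_coherent
     * or __swiotlb_mmap_noncoherent after this patch.
     */
    static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	return dma_mmap_coherent(my_dev, vma, my_buf, my_dma_handle,
    				 vma->vm_end - vma->vm_start);
    }

    static const struct file_operations my_fops = {
    	.owner = THIS_MODULE,
    	.mmap  = my_driver_mmap,
    };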