
dma-mapping: add a dma_mmap_pages helper

Add a helper to map memory allocated using dma_alloc_pages() into
a user address space, similar to the dma_mmap_attrs function for
coherent allocations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tomasz Figa <tfiga@chromium.org>
Tested-by: Ricardo Ribalda <ribalda@chromium.org>
commit eedb0b12d0
parent 1e28eed176
Author: Christoph Hellwig <hch@lst.de>
Date:   2021-01-28 14:53:22 +01:00

3 changed files with 25 additions and 0 deletions
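
As a usage sketch (not part of this commit): a driver that allocates its
buffer with dma_alloc_pages() can now forward its mmap file operation to the
new helper. Everything named my_* below is hypothetical:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-device state; page and buf_size come from the
 * dma_alloc_pages() call made elsewhere in the driver. */
struct my_dev {
	struct device *dev;
	struct page *page;		/* returned by dma_alloc_pages() */
	dma_addr_t dma_handle;
	size_t buf_size;
};

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *md = file->private_data;

	/* dev, size and page must match the dma_alloc_pages() call. */
	return dma_mmap_pages(md->dev, vma, md->buf_size, md->page);
}

Note that, as the implementation below shows, the helper remaps with
vma->vm_page_prot as-is; a driver that needs different protections must
adjust them before calling it.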

Documentation/core-api/dma-api.rst

@@ -563,6 +563,16 @@ Free a region of memory previously allocated using dma_alloc_pages().
 dev, size, dma_handle and dir must all be the same as those passed into
 dma_alloc_pages().  page must be the pointer returned by dma_alloc_pages().
 
+::
+
+	int
+	dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
+		       size_t size, struct page *page)
+
+Map an allocation returned from dma_alloc_pages() into a user address space.
+dev and size must be the same as those passed into dma_alloc_pages().
+page must be the pointer returned by dma_alloc_pages().
+
 ::
 
 	void *
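
Seen from the other side of the API, a userspace process reaches this helper
through whatever character device the driver exposes. A hedged illustration
(the /dev/mydev node is made up); offset mappings are allowed, and the
kernel/dma/mapping.c hunk below validates them against the allocation size:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	/* Placeholder node for a driver whose mmap handler calls
	 * dma_mmap_pages(). */
	int fd = open("/dev/mydev", O_RDWR);
	void *p;

	if (fd < 0)
		return 1;

	/* Map the second page of the allocation; requests that run past
	 * the end of the allocation fail with ENXIO. */
	p = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, psz);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	munmap(p, psz);
	close(fd);
	return 0;
}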

include/linux/dma-mapping.h

@@ -263,6 +263,8 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
 void dma_free_pages(struct device *dev, size_t size, struct page *page,
 		dma_addr_t dma_handle, enum dma_data_direction dir);
+int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
+		size_t size, struct page *page);
 
 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)

kernel/dma/mapping.c

@@ -517,6 +517,19 @@ void dma_free_pages(struct device *dev, size_t size, struct page *page,
 }
 EXPORT_SYMBOL_GPL(dma_free_pages);
 
+int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
+		size_t size, struct page *page)
+{
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
+		return -ENXIO;
+	return remap_pfn_range(vma, vma->vm_start,
+			page_to_pfn(page) + vma->vm_pgoff,
+			vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_pages);
+
 int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
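
The range check in dma_mmap_pages() admits partial and offset mappings while
rejecting any window that runs past the allocation. A standalone restatement
of the arithmetic with hypothetical numbers (4 KiB pages assumed purely for
illustration):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define EX_PAGE_SHIFT 12			/* assume 4 KiB pages */
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

/* Mirrors the helper's check: count is the allocation size in pages;
 * pgoff and nr_pages describe the requested VMA window. */
static bool mmap_range_ok(size_t size, unsigned long pgoff,
			  unsigned long nr_pages)
{
	unsigned long count = (size + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;

	return pgoff < count && nr_pages <= count - pgoff;
}

int main(void)
{
	/* Three-page allocation: pages 1..2 fit, pages 1..3 do not. */
	assert(mmap_range_ok(3 * EX_PAGE_SIZE, 1, 2));
	assert(!mmap_range_ok(3 * EX_PAGE_SIZE, 1, 3));
	return 0;
}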