dma-iommu: refactor iommu_dma_alloc_remap
Split out a new helper that only allocates a sg_table worth of memory
without mapping it into contiguous kernel address space.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tomasz Figa <tfiga@chromium.org>
Tested-by: Ricardo Ribalda <ribalda@chromium.org>
parent 7d5b5738d1
commit 8230ce9a4e
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -650,23 +650,12 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 	return pages;
 }
 
-/**
- * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
- * @dev: Device to allocate memory for. Must be a real device
- *	 attached to an iommu_dma_domain
- * @size: Size of buffer in bytes
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- * @prot: pgprot_t to use for the remapped mapping
- * @attrs: DMA attributes for this allocation
- *
- * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
+/*
+ * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
  * but an IOMMU which supports smaller pages might not map the whole thing.
- *
- * Return: Mapped virtual address, or NULL on failure.
  */
-static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
+		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
 		unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -676,11 +665,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
-	struct sg_table sgt;
 	dma_addr_t iova;
-	void *vaddr;
-
-	*dma_handle = DMA_MAPPING_ERROR;
 
 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
 	    iommu_deferred_attach(dev, domain))
@@ -707,34 +692,26 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	if (!iova)
 		goto out_free_pages;
 
-	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
+	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
 		goto out_free_iova;
 
 	if (!(ioprot & IOMMU_CACHE)) {
 		struct scatterlist *sg;
 		int i;
 
-		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
 			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
 
-	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+	if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
 			< size)
 		goto out_free_sg;
 
-	vaddr = dma_common_pages_remap(pages, size, prot,
-			__builtin_return_address(0));
-	if (!vaddr)
-		goto out_unmap;
+	sgt->sgl->dma_address = iova;
+	return pages;
 
-	*dma_handle = iova;
-	sg_free_table(&sgt);
-	return vaddr;
-
-out_unmap:
-	__iommu_dma_unmap(dev, iova, size);
 out_free_sg:
-	sg_free_table(&sgt);
+	sg_free_table(sgt);
 out_free_iova:
 	iommu_dma_free_iova(cookie, iova, size, NULL);
 out_free_pages:
@@ -742,6 +719,32 @@ out_free_pages:
 	return NULL;
 }
 
+static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+		unsigned long attrs)
+{
+	struct page **pages;
+	struct sg_table sgt;
+	void *vaddr;
+
+	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
+			attrs);
+	if (!pages)
+		return NULL;
+	*dma_handle = sgt.sgl->dma_address;
+	sg_free_table(&sgt);
+	vaddr = dma_common_pages_remap(pages, size, prot,
+			__builtin_return_address(0));
+	if (!vaddr)
+		goto out_unmap;
+	return vaddr;
+
+out_unmap:
+	__iommu_dma_unmap(dev, *dma_handle, size);
+	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
+	return NULL;
+}
+
 static void iommu_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {