Mirror of https://github.com/edk2-porting/linux-next.git
iommu/dma: Refactor iommu_dma_alloc
Shuffle around the self-contained atomic and non-contiguous cases to return early and get out of the way of the CMA case that we're about to work on next.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[hch: slight changes to the code flow]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent bcf4b9c4c2
commit 072bebc069
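The net effect of the two hunks below is to flatten the function's if/else-if/else chain into early returns: the blocking non-contiguous case and the atomic case each return as soon as they are handled, so the CMA (DMA_ATTR_FORCE_CONTIGUOUS) case falls out as straight-line code with goto-based error unwinding. As an orientation aid, here is a condensed outline of the resulting control flow, reconstructed from the diff — not verbatim kernel source; the unchanged atomic-path body, which the diff does not show, is elided:

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	/* Blocking and not forced contiguous: remap scattered pages. */
	if (gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);

	/* Atomic context: physically contiguous allocation, early return. */
	if (!gfpflags_allow_blocking(gfp)) {
		/* ... unchanged pool/page allocation, elided from the diff ... */
		return addr;
	}

	/* Only the CMA case remains, now as straight-line code with
	 * goto-based unwinding instead of nested if/else. */
	page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
					 get_order(size), gfp & __GFP_NOWARN);
	/* ... map, remap, zero; see the second hunk below ... */
}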
@@ -973,14 +973,19 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 {
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 	size_t iosize = size;
+	struct page *page;
 	void *addr;
 
 	size = PAGE_ALIGN(size);
 	gfp |= __GFP_ZERO;
 
+	if (gfpflags_allow_blocking(gfp) &&
+	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
+		return iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
+
 	if (!gfpflags_allow_blocking(gfp)) {
-		struct page *page;
 		/*
 		 * In atomic context we can't remap anything, so we'll only
 		 * get the virtually contiguous buffer we need by way of a
@@ -1002,39 +1007,34 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 				__free_pages(page, get_order(size));
 			else
 				dma_free_from_pool(addr, size);
 			addr = NULL;
 		}
-	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
-		struct page *page;
-
-		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-				get_order(size), gfp & __GFP_NOWARN);
-		if (!page)
-			return NULL;
-
-		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
-		if (*handle == DMA_MAPPING_ERROR) {
-			dma_release_from_contiguous(dev, page,
-					size >> PAGE_SHIFT);
-			return NULL;
-		}
-		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-				prot,
-				__builtin_return_address(0));
-		if (addr) {
-			if (!coherent)
-				arch_dma_prep_coherent(page, iosize);
-			memset(addr, 0, size);
-		} else {
-			__iommu_dma_unmap(dev, *handle, iosize);
-			dma_release_from_contiguous(dev, page,
-					size >> PAGE_SHIFT);
-		}
-	} else {
-		addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
+		return addr;
 	}
-	return addr;
+
+	page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+					 get_order(size), gfp & __GFP_NOWARN);
+	if (!page)
+		return NULL;
+
+	*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
+	if (*handle == DMA_MAPPING_ERROR)
+		goto out_free_pages;
+
+	addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
+			__builtin_return_address(0));
+	if (!addr)
+		goto out_unmap;
+
+	if (!coherent)
+		arch_dma_prep_coherent(page, iosize);
+	memset(addr, 0, size);
+	return addr;
+out_unmap:
+	__iommu_dma_unmap(dev, *handle, iosize);
+out_free_pages:
+	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	return NULL;
 }
 
 static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,