// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

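/*
 * Reserve a virtually contiguous kernel area and map the given pages into
 * it.  Returns the vm_struct on success, or NULL if no area could be
 * reserved or the mapping failed.
 */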
static struct vm_struct *__dma_common_pages_remap(struct page **pages,
			size_t size, unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	struct vm_struct *area;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}

/*
 * Unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}

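/*
 * Everything below implements the "atomic pool": a small region that is
 * allocated and remapped once at boot, so that dma_alloc_from_pool() can
 * satisfy coherent allocations from contexts that cannot sleep, where
 * the vmalloc-based remapping above is not usable.
 */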
#ifdef CONFIG_DMA_DIRECT_REMAP
static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

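/*
 * The default pool size can be overridden on the kernel command line,
 * e.g. "coherent_pool=2M"; memparse() accepts the usual K/M/G suffixes.
 */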
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

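/*
 * Carve out and remap the atomic pool at boot: the backing pages come
 * from the default CMA area when one is configured, otherwise from the
 * page allocator.  The memory is zeroed, flushed from the kernel alias
 * via arch_dma_prep_coherent(), remapped with the caller-supplied
 * pgprot, and handed to a genalloc pool with PAGE_SHIFT granularity.
 */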
int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot)
{
	unsigned int pool_size_order = get_order(atomic_pool_size);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	int ret;

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(gfp, pool_size_order);
	if (!page)
		goto out;

	memset(page_address(page), 0, atomic_pool_size);
	arch_dma_prep_coherent(page, atomic_pool_size);

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto free_page;

	addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP,
					   prot, __builtin_return_address(0));
	if (!addr)
		goto destroy_genpool;

	ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
				page_to_phys(page), atomic_pool_size, -1);
	if (ret)
		goto remove_mapping;
	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);

	pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
		atomic_pool_size / 1024);
	return 0;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

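/*
 * Check whether [start, start + size) lies entirely within the atomic pool.
 */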
bool dma_in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

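/*
 * Allocate a zeroed region from the atomic pool.  Safe to call from
 * non-sleeping contexts; returns NULL (with a warning) if the pool was
 * never initialised, or NULL without one if the pool is exhausted.  On
 * success *ret_page is set to the first backing page.
 */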
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = pfn_to_page(__phys_to_pfn(phys));
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

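/*
 * Return an allocation to the atomic pool.  Returns false when the
 * address does not belong to the pool, so callers can fall back to
 * their regular free path.
 */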
bool dma_free_from_pool(void *start, size_t size)
{
	if (!dma_in_atomic_pool(start, size))
		return false;
	gen_pool_free(atomic_pool, (unsigned long)start, size);
	return true;
}

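/*
 * Allocate DMA-coherent memory.  Non-blocking requests are served from
 * the pre-remapped atomic pool; blocking ones go through dma-direct and
 * get a fresh remapping with the pgprot chosen by arch_dma_mmap_pgprot().
 */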
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flags, unsigned long attrs)
{
	struct page *page = NULL;
	void *ret, *kaddr;

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(flags)) {
		ret = dma_alloc_from_pool(size, &page, flags);
		if (!ret)
			return NULL;
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return ret;
	}

	kaddr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
	if (!kaddr)
		return NULL;
	page = virt_to_page(kaddr);

	/* remove any dirty cache lines on the kernel alias */
	arch_dma_prep_coherent(page, size);

	/* create a coherent mapping */
	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
			arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
			__builtin_return_address(0));
	if (!ret)
		dma_direct_free_pages(dev, size, kaddr, *dma_handle, attrs);
	return ret;
}

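/*
 * Free memory obtained from arch_dma_alloc().  Pool allocations are
 * simply returned to the atomic pool; anything else has its remapping
 * torn down before the backing pages are released, with the kernel
 * linear address recovered from the dma_handle since vaddr is a
 * vmalloc address.
 */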
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
		void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));

		vunmap(vaddr);
		dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
	}
}

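/*
 * cpu_addr here is a remapped vmalloc address, so virt_to_page() cannot
 * be used on it; recover the PFN from the DMA address instead.
 */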
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_to_phys(dev, dma_addr));
}
#endif /* CONFIG_DMA_DIRECT_REMAP */