Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-16 00:34:20 +08:00)
8c8c5a4994
Although the device tree might contain a reserved-memory DT node dedicated as
the default CMA pool, users might want to change CMA's parameters using the
kernel command line for debugging purposes and whatnot. Honor this by
bypassing the reserved memory CMA setup, which will ultimately end up freeing
the memblock and allowing the command line CMA configuration routine to run.

Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Reviewed-by: Phil Elwell <phil@raspberrypi.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
345 lines
10 KiB
C
// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes __initconst =
			(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;

static int __init early_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
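
/*
 * Usage sketch (illustrative, not part of the original file): from the parser
 * above, the accepted command-line format is cma=<size>[@<base>[-<limit>]],
 * with values parsed by memparse() so the usual K/M/G suffixes apply, e.g.:
 *
 *	cma=64M			reserve 64 MiB anywhere below the arch limit
 *	cma=64M@0x20000000	reserve 64 MiB fixed at 0x20000000 (the implied
 *				limit of base + size marks the area as fixed)
 *	cma=64M@0x20000000-0x40000000
 *				reserve 64 MiB somewhere within that range
 */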

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
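
/*
 * Call-site sketch (illustrative, not part of the original file): architecture
 * early-init code is expected to invoke the reservation once memblock is up
 * and other early reservations are done, passing its DMA addressing limit
 * (0 means "no limit"):
 *
 *	void __init example_arch_mem_init(void)		// hypothetical helper
 *	{
 *		...
 *		dma_contiguous_reserve(arch_dma_limit);	// placeholder for the
 *							// arch's DMA limit
 *	}
 */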

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
				     "reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, bool no_warn)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}
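
/*
 * Usage sketch (illustrative, not part of the original file): callers are
 * expected to pair the two helpers above and pass the same page count to
 * both, e.g.:
 *
 *	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, count, get_order(size), false);
 *	if (page) {
 *		...		// map and use the buffer
 *		dma_release_from_contiguous(dev, page, count);
 *	}
 */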

/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev: Pointer to device for which the allocation is performed.
 * @size: Requested allocation size.
 * @gfp: Allocation flags.
 *
 * This function allocates a contiguous memory buffer for the specified
 * device. It first tries to use the device specific contiguous memory area
 * if available, or else the default global one; if neither can satisfy the
 * request, the caller falls back to an allocation of normal pages.
 *
 * Note that it bypasses one-page allocations from the global area, as the
 * addresses within one page are always contiguous, so there is no need to
 * waste CMA pages for that kind of allocation; it also helps reduce
 * fragmentation.
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
	size_t count = size >> PAGE_SHIFT;
	struct page *page = NULL;
	struct cma *cma = NULL;

	if (dev && dev->cma_area)
		cma = dev->cma_area;
	else if (count > 1)
		cma = dma_contiguous_default_area;

	/* CMA can be used only in the context which permits sleeping */
	if (cma && gfpflags_allow_blocking(gfp)) {
		size_t align = get_order(size);
		size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);

		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
	}

	return page;
}

/**
 * dma_free_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @page: Pointer to the allocated pages.
 * @size: Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous().
 * cma_release() returns false when the provided pages do not belong to a
 * contiguous area (and true otherwise), so this function falls back to
 * __free_pages() on a false return.
 */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	if (!cma_release(dev_get_cma_area(dev), page,
			 PAGE_ALIGN(size) >> PAGE_SHIFT))
		__free_pages(page, get_order(size));
}
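
/*
 * Usage sketch (illustrative, not part of the original file): when CMA cannot
 * satisfy a request, dma_alloc_contiguous() above returns NULL, so a caller
 * such as the dma-direct allocator falls back to normal pages itself and
 * relies on dma_free_contiguous() to free either kind:
 *
 *	struct page *page;
 *
 *	page = dma_alloc_contiguous(dev, size, GFP_KERNEL);
 *	if (!page)
 *		page = alloc_pages(GFP_KERNEL, get_order(size));  // fallback
 *	...
 *	dma_free_contiguous(dev, page, size);	// handles CMA and normal pages
 */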

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev_set_cma_area(dev, rmem->priv);
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev_set_cma_area(dev, NULL);
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init    = rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	phys_addr_t mask = align - 1;
	unsigned long node = rmem->fdt_node;
	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
	struct cma *cma;
	int err;

	if (size_cmdline != -1 && default_cma) {
		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
			rmem->name);
		return -EBUSY;
	}

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if ((rmem->base & mask) || (rmem->size & mask)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (default_cma)
		dma_contiguous_set_default(cma);

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
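
/*
 * Example binding (illustrative, following the generic reserved-memory
 * binding; not part of the original file): a default CMA pool is declared in
 * the device tree roughly like this, and is now skipped in favour of the
 * command-line parameters whenever cma= is also passed:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 */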
#endif