linux-next/drivers/base/dma-coherent.c
commit 20d7a35bc8 ("drivers: dma-coherent: use memset_io for DMA_MEMORY_IO mappings")
Author: Brian Starkey <brian.starkey@arm.com>
Use memset_io() for DMA_MEMORY_IO mappings which are mapped as I/O
memory, and regular memset() for DMA_MEMORY_MAP mappings.

This fixes the below alignment fault on arm64 for DMA_MEMORY_IO
mappings, where memset() uses the DC ZVA instruction which is invalid on
device memory.

   Unhandled fault: alignment fault (0x96000061) at 0xffffff8000380000
   Internal error: : 96000061 [#1] PREEMPT SMP
   Modules linked in: hdlcd(+) clk_scpi
   CPU: 4 PID: 1355 Comm: systemd-udevd Not tainted 4.4.0-rc1+ #5
   Hardware name: ARM Juno development board (r0) (DT)
   task: ffffffc9763eee00 ti: ffffffc9758c4000 task.ti: ffffffc9758c4000
   PC is at __efistub_memset+0x1ac/0x200
   LR is at dma_alloc_from_coherent+0xb0/0x120
   pc : [<ffffffc00030ff2c>] lr : [<ffffffc00042a918>] pstate: 400001c5
   sp : ffffffc9758c79a0
   x29: ffffffc9758c79a0 x28: ffffffc000635cd0
   x27: 0000000000000124 x26: ffffffc000119ef4
   x25: 0000000000010000 x24: 0000000000000140
   x23: ffffffc07e9ac3a8 x22: ffffffc9758c7a58
   x21: ffffffc9758c7a68 x20: 0000000000000004
   x19: ffffffc07e9ac380 x18: 0000000000000001
   x17: 0000007fae1bbba8 x16: ffffffc0001b2d1c
   x15: ffffffffffffffff x14: 0ffffffffffffffe
   x13: 0000000000000010 x12: ffffff800837ffff
   x11: ffffff800837ffff x10: 0000000040000000
   x9 : 0000000000000000 x8 : ffffff8000380000
   x7 : 0000000000000000 x6 : 000000000000003f
   x5 : 0000000000000040 x4 : 0000000000000000
   x3 : 0000000000000004 x2 : 000000000000ffc0
   x1 : 0000000000000000 x0 : ffffff8000380000

Signed-off-by: Brian Starkey <brian.starkey@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-03-22 15:36:02 -07:00


/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;     /* kernel mapping of the pool */
        dma_addr_t      device_base;    /* pool base as seen by the device */
        unsigned long   pfn_base;       /* first PFN of the backing memory */
        int             size;           /* pool size in pages */
        int             flags;          /* DMA_MEMORY_* flags */
        unsigned long   *bitmap;        /* allocation bitmap, one bit per page */
        spinlock_t      spinlock;       /* protects the bitmap */
};

static bool dma_init_coherent_memory(
        phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
        struct dma_coherent_mem **mem)
{
        struct dma_coherent_mem *dma_mem = NULL;
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;

        /*
         * DMA_MEMORY_MAP pools get a normal write-combined CPU mapping,
         * DMA_MEMORY_IO pools an I/O mapping.
         */
        if (flags & DMA_MEMORY_MAP)
                mem_base = memremap(phys_addr, size, MEMREMAP_WC);
        else
                mem_base = ioremap(phys_addr, size);
        if (!mem_base)
                goto out;

        dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dma_mem)
                goto out;
        dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dma_mem->bitmap)
                goto out;

        dma_mem->virt_base = mem_base;
        dma_mem->device_base = device_addr;
        dma_mem->pfn_base = PFN_DOWN(phys_addr);
        dma_mem->size = pages;
        dma_mem->flags = flags;
        spin_lock_init(&dma_mem->spinlock);

        *mem = dma_mem;
        return true;

out:
        kfree(dma_mem);
        if (mem_base) {
                if (flags & DMA_MEMORY_MAP)
                        memunmap(mem_base);
                else
                        iounmap(mem_base);
        }
        return false;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
        if (!mem)
                return;

        if (mem->flags & DMA_MEMORY_MAP)
                memunmap(mem->virt_base);
        else
                iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
                                      struct dma_coherent_mem *mem)
{
        if (dev->dma_mem)
                return -EBUSY;

        dev->dma_mem = mem;
        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        struct dma_coherent_mem *mem;

        if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
                                      &mem))
                return 0;

        if (dma_assign_coherent_memory(dev, mem) == 0)
                return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

        dma_release_coherent_memory(mem);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
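
/*
 * A minimal usage sketch, not part of this file: how a driver might declare
 * a device-local region as its coherent pool.  The bus address, size and
 * function name below are hypothetical.
 */
static int example_declare_sketch(struct device *dev)
{
        int rc;

        /* hypothetical: 64 KiB of device-local memory at bus address 0x90000000 */
        rc = dma_declare_coherent_memory(dev, 0x90000000, 0x90000000, SZ_64K,
                                         DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
        if (!rc)                /* 0 means the declaration failed */
                return -ENOMEM;
        return 0;
}
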
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dma_release_coherent_memory(mem);
        dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        unsigned long flags;
        int pos, err;

        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&mem->spinlock, flags);
        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        spin_unlock_irqrestore(&mem->spinlock, flags);

        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:        device from which we allocate memory
 * @size:       size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret:        This pointer will be filled with the virtual address
 *              to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                            dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem;
        int order = get_order(size);
        unsigned long flags;
        int pageno;

        if (!dev)
                return 0;
        mem = dev->dma_mem;
        if (!mem)
                return 0;

        *ret = NULL;
        spin_lock_irqsave(&mem->spinlock, flags);

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
        if (unlikely(pageno < 0))
                goto err;

        /*
         * Memory was found in the per-device area.
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        *ret = mem->virt_base + (pageno << PAGE_SHIFT);
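
        /*
         * Per the commit message above: DMA_MEMORY_IO pools were mapped
         * with ioremap(), so they must be cleared with memset_io(); a
         * plain memset() may use instructions (arm64's DC ZVA, for
         * example) that fault on device memory.
         */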
        if (mem->flags & DMA_MEMORY_MAP)
                memset(*ret, 0, size);
        else
                memset_io(*ret, 0, size);
        spin_unlock_irqrestore(&mem->spinlock, flags);

        return 1;

err:
        spin_unlock_irqrestore(&mem->spinlock, flags);
        /*
         * In the case where the allocation can not be satisfied from the
         * per-device area, try to fall back to generic memory if the
         * constraints allow it.
         */
        return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
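
/*
 * A minimal caller sketch, an assumption rather than code from this file:
 * the shape of a per-arch dma_alloc_coherent() that tries the per-device
 * pool first, as the kernel-doc above describes.
 */
static void *example_arch_alloc_sketch(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle)
{
        void *ret;

        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;     /* pool satisfied (or, if exclusive, refused) the request */

        /* otherwise fall back to a generic allocator; placeholder only */
        return NULL;
}
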
/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:   device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                unsigned long flags;

                spin_lock_irqsave(&mem->spinlock, flags);
                bitmap_release_region(mem->bitmap, page, order);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
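
/*
 * The matching free path, again a sketch under the same assumption: a
 * per-arch dma_free_coherent() gives the per-device pool first refusal.
 */
static void example_arch_free_sketch(struct device *dev, size_t size,
                                     void *cpu_addr)
{
        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;         /* the buffer came from the device pool */

        /* otherwise release via the generic path; placeholder only */
}
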
/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:   device from which the memory was allocated
 * @vma:   vm_area for the userspace memory
 * @vaddr: cpu address returned by dma_alloc_from_coherent
 * @size:  size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:   result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr + size <=
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
                int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
                int count = size >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned long pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
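
/*
 * A minimal caller sketch (hypothetical helper name): letting the per-device
 * pool claim an mmap request before any generic remapping path runs.
 */
static int example_mmap_sketch(struct device *dev, struct vm_area_struct *vma,
                               void *cpu_addr, size_t size)
{
        int ret = -ENXIO;

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;     /* the pool handled (or rejected) the mapping */

        /* generic remap path would go here; placeholder only */
        return ret;
}
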
/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
        struct dma_coherent_mem *mem = rmem->priv;

        if (!mem &&
            !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
                                      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
                                      &mem)) {
                pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                       &rmem->base, (unsigned long)rmem->size / SZ_1M);
                return -ENODEV;
        }
        rmem->priv = mem;
        dma_assign_coherent_memory(dev, mem);
        return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
{
        dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
        .device_init    = rmem_dma_device_init,
        .device_release = rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL))
                return -EINVAL;

#ifdef CONFIG_ARM
        if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
                pr_err("Reserved memory: regions without no-map are not yet supported\n");
                return -EINVAL;
        }
#endif

        rmem->ops = &rmem_dma_ops;
        pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
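
/*
 * A minimal consumer sketch, an assumption rather than code from this file:
 * a driver whose device-tree node references a "shared-dma-pool" reserved
 * region (via a memory-region phandle) hooks the pool up in probe with
 * of_reserved_mem_device_init(), which lands in rmem_dma_device_init() above.
 */
static int example_rmem_probe_sketch(struct device *dev)
{
        int rc = of_reserved_mem_device_init(dev);

        if (rc)
                return rc;      /* no usable reserved region for this device */

        /* dma_alloc_coherent() on this device now draws from the pool */
        return 0;
}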
#endif