linux/arch/arc/mm/dma.c
Krzysztof Kozlowski 00085f1efa dma-mapping: use unsigned long for dma_attrs
The dma-mapping core and the implementations do not change the DMA
attributes passed by pointer.  Thus the pointer can point to const data.
However, the attributes do not have to be wrapped in a struct bitfield at
all; a plain unsigned long will do fine:

1. It is simpler, both for reading the code and for setting attributes:
   instead of initializing local attributes on the stack and passing a
   pointer to them via dma_set_attr(), callers just OR the bits together
   (see the before/after sketch below the list).

2. It brings safety and const correctness, because the attributes are
   now passed by value.
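
For illustration, here is a minimal before/after sketch of point 1; the
device, size and the DMA_ATTR_WRITE_COMBINE flag are hypothetical examples,
any DMA_ATTR_* bit works the same way:

    /* before: struct dma_attrs built on the stack, passed by pointer */
    DEFINE_DMA_ATTRS(attrs);
    dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
    vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);

    /* after: the attributes are just bits in an unsigned long */
    vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
                            DMA_ATTR_WRITE_COMBINE);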

Semantic patches for this change (at least most of them):

    virtual patch
    virtual context

    @r@
    identifier f, attrs;

    @@
    f(...,
    - struct dma_attrs *attrs
    + unsigned long attrs
    , ...)
    {
    ...
    }

    @@
    identifier r.f;
    @@
    f(...,
    - NULL
    + 0
     )

and

    // Options: --all-includes
    virtual patch
    virtual context

    @r@
    identifier f, attrs;
    type t;

    @@
    t f(..., struct dma_attrs *attrs);

    @@
    identifier r.f;
    @@
    f(...,
    - NULL
    + 0
     )
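
At a call site, the conversion performed by the second patch therefore
amounts to the following (hypothetical driver call; dev, buf and len are
placeholders):

    /* before */
    dma_addr = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, NULL);

    /* after */
    dma_addr = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0);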

Link: http://lkml.kernel.org/r/1468399300-5399-2-git-send-email-k.kozlowski@samsung.com
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Acked-by: Mark Salter <msalter@redhat.com> [c6x]
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com> [cris]
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> [drm]
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Fabien Dessenne <fabien.dessenne@st.com> [bdisp]
Reviewed-by: Marek Szyprowski <m.szyprowski@samsung.com> [vb2-core]
Acked-by: David Vrabel <david.vrabel@citrix.com> [xen]
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> [xen swiotlb]
Acked-by: Richard Kuo <rkuo@codeaurora.org> [hexagon]
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> [s390]
Acked-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no> [avr32]
Acked-by: Vineet Gupta <vgupta@synopsys.com> [arc]
Acked-by: Robin Murphy <robin.murphy@arm.com> [arm64 and dma-iommu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-08-04 08:50:07 -04:00


/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

static void *arc_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        unsigned long order = get_order(size);
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;
        int need_coh = 1, need_kvaddr = 0;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        /*
         * IOC relies on all data (even coherent DMA data) being in cache
         * Thus allocate normal cached memory
         *
         * The gains with IOC are two pronged:
         *   -For streaming data, elides need for cache maintenance, saving
         *    cycles in flush code, and bus bandwidth as all the lines of a
         *    buffer need to be flushed out to memory
         *   -For coherent data, Read/Write to buffers terminate early in cache
         *    (vs. always going to memory - thus are faster)
         */
        if ((is_isa_arcv2() && ioc_exists) ||
            (attrs & DMA_ATTR_NON_CONSISTENT))
                need_coh = 0;

        /*
         * - A coherent buffer needs MMU mapping to enforce non-cachability
         * - A highmem page needs a virtual handle (hence MMU mapping)
         *   independent of cachability
         */
        if (PageHighMem(page) || need_coh)
                need_kvaddr = 1;

        /* This is linear addr (0x8000_0000 based) */
        paddr = page_to_phys(page);

        *dma_handle = plat_phys_to_dma(dev, paddr);

        /* This is kernel Virtual address (0x7000_0000 based) */
        if (need_kvaddr) {
                kvaddr = ioremap_nocache(paddr, size);
                if (kvaddr == NULL) {
                        __free_pages(page, order);
                        return NULL;
                }
        } else {
                kvaddr = (void *)(u32)paddr;
        }

        /*
         * Evict any existing L1 and/or L2 lines for the backing page
         * in case it was used earlier as a normal "cached" page.
         * Yeah this bit us - STAR 9000898266
         *
         * Although core does call flush_cache_vmap(), it gets kvaddr hence
         * can't be used to efficiently flush L1 and/or L2 which need paddr
         * Currently flush_cache_vmap nukes the L1 cache completely which
         * will be optimized as a separate commit
         */
        if (need_coh)
                dma_cache_wback_inv(paddr, size);

        return kvaddr;
}

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
        struct page *page = virt_to_page(paddr);
        int is_non_coh = 1;

        is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
                        (is_isa_arcv2() && ioc_exists);

        if (PageHighMem(page) || !is_non_coh)
                iounmap((void __force __iomem *)vaddr);

        __free_pages(page, get_order(size));
}

/*
 * streaming DMA Mapping API...
 * CPU accesses page via normal paddr, thus needs to be explicitly made
 * consistent before each use
 */

static void _dma_cache_sync(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_FROM_DEVICE:
                dma_cache_inv(paddr, size);
                break;
        case DMA_TO_DEVICE:
                dma_cache_wback(paddr, size);
                break;
        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(paddr, size);
                break;
        default:
                pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
        }
}

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t paddr = page_to_phys(page) + offset;

        _dma_cache_sync(paddr, size, dir);

        return plat_phys_to_dma(dev, paddr);
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
                                              s->length, dir);

        return nents;
}

static void arc_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
        _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}

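/*
 * Illustrative sketch only -- not part of the original arc/mm/dma.c.
 * A hypothetical streaming-DMA receive sequence as a driver would issue
 * it; the map and sync calls land in the routines above.
 */
static void example_streaming_rx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma;

        /* dma_map_single() ends up in arc_dma_map_page() */
        dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma))
                return;

        /* ... the device DMAs data into the buffer ... */

        /* invalidate stale cache lines before the CPU reads the fresh data */
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

        dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}
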
static void arc_dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sglist, int nelems,
                enum dma_data_direction dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i)
                _dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sglist, int nelems,
                enum dma_data_direction dir)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nelems, i)
                _dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
        /* Support 32 bit DMA mask exclusively */
        return dma_mask == DMA_BIT_MASK(32);
}

struct dma_map_ops arc_dma_ops = {
        .alloc                  = arc_dma_alloc,
        .free                   = arc_dma_free,
        .map_page               = arc_dma_map_page,
        .map_sg                 = arc_dma_map_sg,
        .sync_single_for_device = arc_dma_sync_single_for_device,
        .sync_single_for_cpu    = arc_dma_sync_single_for_cpu,
        .sync_sg_for_cpu        = arc_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = arc_dma_sync_sg_for_device,
        .dma_supported          = arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);

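/*
 * Illustrative sketch only -- not part of the original arc/mm/dma.c.
 * A hypothetical probe-time mask check: dma_set_mask_and_coherent()
 * consults dma_supported(), i.e. arc_dma_supported() above, which on
 * ARC accepts exactly a 32-bit mask.
 */
static int example_probe(struct device *dev)
{
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                return -EIO;

        return 0;
}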