linux-next/arch/frv/include/asm/dma-mapping.h

Commit 5fd381126e ("frv: Provide dummy dma_mmap_coherent() and dma_get_sgtable()")
by Geert Uytterhoeven:
frv/allmodconfig:

drivers/media/v4l2-core/videobuf2-dma-contig.c: In function ‘vb2_dc_mmap’:
drivers/media/v4l2-core/videobuf2-dma-contig.c:204: error: implicit declaration of function ‘dma_mmap_coherent’
drivers/media/v4l2-core/videobuf2-dma-contig.c: In function ‘vb2_dc_get_base_sgt’:
drivers/media/v4l2-core/videobuf2-dma-contig.c:387: error: implicit declaration of function ‘dma_get_sgtable’

For architectures using dma_map_ops, dma_mmap_coherent() and
dma_get_sgtable() are provided in <asm-generic/dma-mapping-common.h>.

Frv does not use dma_map_ops, hence it must implement these functions itself.
For now, use dummy implementations that simply return -EINVAL, until the API
has been finalized.

Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
2013-01-29 08:11:15 +01:00
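
For context, the failing videobuf2 code calls these helpers and propagates
their return value, so the -EINVAL stubs fix the build while making the
corresponding operations fail cleanly at run time. A minimal sketch of such a
caller follows; the function and parameter names are hypothetical (this is
not the actual vb2_dc_mmap() implementation) and a kernel build environment
is assumed:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/printk.h>

/*
 * Hypothetical caller modelled on the failing vb2_dc_mmap() path: it
 * propagates the return value of dma_mmap_coherent(), so with the frv
 * stub the mmap simply fails with -EINVAL at run time instead of
 * breaking the build.
 */
static int example_mmap(struct device *dev, struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        int ret = dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);

        if (ret)
                pr_err("example: remapping DMA memory failed: %d\n", ret);
        return ret;
}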


#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/device.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

/*
 * See Documentation/DMA-API.txt for the description of how the
 * following DMA API should work.
 */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent(d, s, v, h)

extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;

void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);

extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                                 enum dma_data_direction direction);

/* Unmapping is a no-op; only trap an obviously invalid direction. */
static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                      enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                      enum dma_data_direction direction);

static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
                  enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

extern
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
                        size_t size, enum dma_data_direction direction);

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
                    enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

/* Nothing to do when handing a buffer back to the CPU. */
static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                             enum dma_data_direction direction)
{
}

/* Flush the CPU's write buffers before the device accesses memory. */
static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                                enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                         enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                            enum dma_data_direction direction)
{
        flush_write_buffers();
}

/* dma_map_*() cannot fail here, so there is never an error to report. */
static inline
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static inline
int dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we
         * can't guarantee allocations that must be within a tighter
         * range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;
        return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;
        return 0;
}

static inline
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction)
{
        flush_write_buffers();
}

/* Not supported for now */
static inline int dma_mmap_coherent(struct device *dev,
                                    struct vm_area_struct *vma, void *cpu_addr,
                                    dma_addr_t dma_addr, size_t size)
{
        return -EINVAL;
}

static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                                  void *cpu_addr, dma_addr_t dma_addr,
                                  size_t size)
{
        return -EINVAL;
}

#endif /* _ASM_DMA_MAPPING_H */
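
Taken together, the header covers the usual streaming pattern: set the
device's DMA mask, map a buffer, hand the bus address to the hardware, then
unmap. A minimal sketch follows; example_tx() and its parameters are
hypothetical names, not taken from any real driver, and a kernel build
environment is assumed:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical driver fragment exercising the streaming API above.
 * On frv, dma_map_single() returns the bus address handed to the
 * device, dma_mapping_error() always reports success, and
 * dma_unmap_single() only sanity-checks the direction.
 */
static int example_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        if (dma_set_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;    /* mask rejected by dma_supported() */

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... point the device at 'handle' and start the transfer ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}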