Mirror of https://github.com/edk2-porting/linux-next.git
dma-mapping fixes for Linux 4.21-rc1

Merge tag 'dma-mapping-4.21-1' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "Fix various regressions introduced in this cycle:

   - fix dma-debug tracking for the map_page / map_single consolidation

   - properly stub out the DMA mapping symbols for !HAS_DMA builds to
     avoid link failures

   - fix AMD GART direct mappings

   - set up the dma address for DMA_ATTR_NO_KERNEL_MAPPING allocations
     made through the remap allocator"

* tag 'dma-mapping-4.21-1' of git://git.infradead.org/users/hch/dma-mapping:
  dma-direct: fix DMA_ATTR_NO_KERNEL_MAPPING for remapped allocations
  x86/amd_gart: fix unmapping of non-GART mappings
  dma-mapping: remove a few unused exports
  dma-mapping: properly stub out the DMA API for !CONFIG_HAS_DMA
  dma-mapping: remove dmam_{declare,release}_coherent_memory
  dma-mapping: implement dmam_alloc_coherent using dmam_alloc_attrs
  dma-mapping: implement dma_map_single_attrs using dma_map_page_attrs
This commit is contained in: commit e2b745f469
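For context on the map_single / map_page consolidation that runs through the diffs below: dma_map_single_attrs() is reimplemented as a thin wrapper that converts its virtual address into a page/offset pair and forwards to dma_map_page_attrs(), so both entry points share one implementation and one dma-debug entry type. A minimal sketch of the resulting call chain (a simplified restatement of the header change, not a verbatim copy):

	/* sketch_map_single() is hypothetical; the real helper is the
	 * dma_map_single_attrs() inline shown in the diff below. */
	static inline dma_addr_t sketch_map_single(struct device *dev, void *ptr,
			size_t size, enum dma_data_direction dir)
	{
		/* A virtually-contiguous buffer is just its backing page
		 * plus an offset, so the page-based path can serve both. */
		return dma_map_page_attrs(dev, virt_to_page(ptr),
					  offset_in_page(ptr), size, dir, 0);
	}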
Documentation/driver-model/devres.txt:

@@ -250,7 +250,6 @@ DMA
   dmaenginem_async_device_register()
   dmam_alloc_coherent()
   dmam_alloc_attrs()
-  dmam_declare_coherent_memory()
   dmam_free_coherent()
   dmam_pool_create()
   dmam_pool_destroy()
arch/x86/kernel/amd_gart_64.c:

@@ -256,7 +256,15 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	int npages;
 	int i;
 
-	if (dma_addr == DMA_MAPPING_ERROR ||
+	if (WARN_ON_ONCE(dma_addr == DMA_MAPPING_ERROR))
+		return;
+
+	/*
+	 * This driver will not always use a GART mapping, but might have
+	 * created a direct mapping instead. If that is the case there is
+	 * nothing to unmap here.
+	 */
+	if (dma_addr < iommu_bus_base ||
 	    dma_addr >= iommu_bus_base + iommu_size)
 		return;
 
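The old test above only bailed out for handles past the end of the GART aperture, so a dma-direct mapping below iommu_bus_base fell through and was torn down as if it were a GART page. The fix instead skips anything outside the aperture in either direction. A standalone sketch of that ownership test, assuming a [base, base + size) aperture (gart_owns_mapping() is illustrative, not a real kernel function):

	#include <linux/types.h>

	/* A mapping belongs to the GART iff it lies inside the remapping
	 * aperture; anything else was created by dma-direct and must not
	 * touch the GART allocation bitmap. */
	static bool gart_owns_mapping(dma_addr_t dma_addr, dma_addr_t base,
				      u64 aperture_size)
	{
		return dma_addr >= base && dma_addr < base + aperture_size;
	}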
include/linux/dma-debug.h:

@@ -35,13 +35,12 @@ extern void debug_dma_map_single(struct device *dev, const void *addr,
 
 extern void debug_dma_map_page(struct device *dev, struct page *page,
 			       size_t offset, size_t size,
-			       int direction, dma_addr_t dma_addr,
-			       bool map_single);
+			       int direction, dma_addr_t dma_addr);
 
 extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-				 size_t size, int direction, bool map_single);
+				 size_t size, int direction);
 
 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 			     int nents, int mapped_ents, int direction);
@@ -95,8 +94,7 @@ static inline void debug_dma_map_single(struct device *dev, const void *addr,
 
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
-				      int direction, dma_addr_t dma_addr,
-				      bool map_single)
+				      int direction, dma_addr_t dma_addr)
 {
 }
 
@@ -106,8 +104,7 @@ static inline void debug_dma_mapping_error(struct device *dev,
 }
 
 static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-					size_t size, int direction,
-					bool map_single)
+					size_t size, int direction)
 {
 }
 
include/linux/dma-mapping.h:

@@ -194,33 +194,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
 }
 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
-#ifdef CONFIG_HAS_DMA
-#include <asm/dma-mapping.h>
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
-{
-	if (dev && dev->dma_ops)
-		return dev->dma_ops;
-	return get_arch_dma_ops(dev ? dev->bus : NULL);
-}
-
-static inline void set_dma_ops(struct device *dev,
-			       const struct dma_map_ops *dma_ops)
-{
-	dev->dma_ops = dma_ops;
-}
-#else
-/*
- * Define the dma api to allow compilation of dma dependent code.
- * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
- * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
- * where <something> guarantees the availability of the dma-mapping API.
- */
-static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
-{
-	return NULL;
-}
-#endif
-
 static inline bool dma_is_direct(const struct dma_map_ops *ops)
 {
 	return likely(!ops);
@@ -284,32 +257,41 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
 }
 #endif
 
-static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
-					      size_t size,
-					      enum dma_data_direction dir,
-					      unsigned long attrs)
+#ifdef CONFIG_HAS_DMA
+#include <asm/dma-mapping.h>
+
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
+	return get_arch_dma_ops(dev ? dev->bus : NULL);
+}
+
+static inline void set_dma_ops(struct device *dev,
+			       const struct dma_map_ops *dma_ops)
+{
+	dev->dma_ops = dma_ops;
+}
+
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+		struct page *page, size_t offset, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_map_single(dev, ptr, size);
 	if (dma_is_direct(ops))
-		addr = dma_direct_map_page(dev, virt_to_page(ptr),
-				offset_in_page(ptr), size, dir, attrs);
+		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
 	else
-		addr = ops->map_page(dev, virt_to_page(ptr),
-				offset_in_page(ptr), size, dir, attrs);
-	debug_dma_map_page(dev, virt_to_page(ptr),
-			offset_in_page(ptr), size,
-			dir, addr, true);
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	debug_dma_map_page(dev, page, offset, size, dir, addr);
 
 	return addr;
 }
 
-static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
-					  size_t size,
-					  enum dma_data_direction dir,
-					  unsigned long attrs)
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
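For reference, callers of the consolidated entry point are unchanged by this series; a hypothetical driver fragment (not part of this commit) showing the expected error handling, which must go through dma_mapping_error() rather than comparing the handle against zero:

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	/* Map one page for device-to-memory DMA and validate the handle. */
	static int example_map_rx_page(struct device *dev, struct page *page,
				       dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE,
					       DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;
		*out = addr;
		return 0;
	}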
@@ -318,13 +300,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
 	else if (ops->unmap_page)
 		ops->unmap_page(dev, addr, size, dir, attrs);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	return dma_unmap_single_attrs(dev, addr, size, dir, attrs);
+	debug_dma_unmap_page(dev, addr, size, dir);
 }
 
 /*
@@ -363,25 +339,6 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
 }
 
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
-					    struct page *page,
-					    size_t offset, size_t size,
-					    enum dma_data_direction dir,
-					    unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
-	else
-		addr = ops->map_page(dev, page, offset, size, dir, attrs);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
 static inline dma_addr_t dma_map_resource(struct device *dev,
 					  phys_addr_t phys_addr,
 					  size_t size,
@@ -431,13 +388,6 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
-}
-
 static inline void dma_sync_single_for_device(struct device *dev,
 					      dma_addr_t addr, size_t size,
 					      enum dma_data_direction dir)
@@ -452,13 +402,6 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	debug_dma_sync_single_for_device(dev, addr, size, dir);
 }
 
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	return dma_sync_single_for_device(dev, addr + offset, size, dir);
-}
-
 static inline void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		    int nelems, enum dma_data_direction dir)
@@ -488,15 +431,174 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 
 }
 
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	debug_dma_mapping_error(dev, dma_addr);
+
+	if (dma_addr == DMA_MAPPING_ERROR)
+		return -ENOMEM;
+	return 0;
+}
+
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t flag, unsigned long attrs);
+void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle, unsigned long attrs);
+void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs);
+void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle);
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir);
+int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
+int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
+int dma_supported(struct device *dev, u64 mask);
+int dma_set_mask(struct device *dev, u64 mask);
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+u64 dma_get_required_mask(struct device *dev);
+#else /* CONFIG_HAS_DMA */
+static inline dma_addr_t dma_map_page_attrs(struct device *dev,
+		struct page *page, size_t offset, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	return 0;
+}
+static inline void dma_unmap_sg_attrs(struct device *dev,
+		struct scatterlist *sg, int nents, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+}
+static inline dma_addr_t dma_map_resource(struct device *dev,
+		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return -ENOMEM;
+}
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
+{
+	return NULL;
+}
+static inline void dma_free_attrs(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+}
+static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+	return NULL;
+}
+static inline void dmam_free_coherent(struct device *dev, size_t size,
+		void *vaddr, dma_addr_t dma_handle)
+{
+}
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir)
+{
+}
+static inline int dma_get_sgtable_attrs(struct device *dev,
+		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
+		size_t size, unsigned long attrs)
+{
+	return -ENXIO;
+}
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	return -ENXIO;
+}
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+	return 0;
+}
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+	return -EIO;
+}
+static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+	return -EIO;
+}
+static inline u64 dma_get_required_mask(struct device *dev)
+{
+	return 0;
+}
+#endif /* CONFIG_HAS_DMA */
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	debug_dma_map_single(dev, ptr, size);
+	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
+			size, dir, attrs);
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t addr, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
+{
+	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t addr, unsigned long offset, size_t size,
+		enum dma_data_direction dir)
+{
+	return dma_sync_single_for_device(dev, addr + offset, size, dir);
+}
+
 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction dir);
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
 
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
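The !CONFIG_HAS_DMA branch above exists so that code guarded by "<something> || COMPILE_TEST" keeps building and linking: every stub either fails cleanly (DMA_MAPPING_ERROR, -EIO, -ENXIO, NULL) or is a no-op. A hypothetical probe fragment that now compiles on NO_DMA configurations without its own HAS_DMA dependency (illustrative only):

	#include <linux/dma-mapping.h>

	static int example_probe(struct device *dev)
	{
		/* With !HAS_DMA the dma_set_mask() stub returns -EIO, so
		 * the driver backs off at runtime instead of failing to
		 * link at build time. */
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	}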
@@ -516,25 +618,10 @@ bool dma_in_atomic_pool(void *start, size_t size);
 void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
 bool dma_free_from_pool(void *start, size_t size);
 
-int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs);
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
-
 int
 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
 		dma_addr_t dma_addr, size_t size, unsigned long attrs);
-
-int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs);
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
-
-void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t flag, unsigned long attrs);
-void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_handle, unsigned long attrs);
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp)
 {
@@ -549,18 +636,6 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
 }
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	debug_dma_mapping_error(dev, dma_addr);
-
-	if (dma_addr == DMA_MAPPING_ERROR)
-		return -ENOMEM;
-	return 0;
-}
-
-int dma_supported(struct device *dev, u64 mask);
-int dma_set_mask(struct device *dev, u64 mask);
-int dma_set_coherent_mask(struct device *dev, u64 mask);
-
 static inline u64 dma_get_mask(struct device *dev)
 {
@@ -593,8 +668,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
 	return dma_set_mask_and_coherent(dev, mask);
 }
 
-extern u64 dma_get_required_mask(struct device *dev);
-
 #ifndef arch_setup_dma_ops
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
 				      u64 size, const struct iommu_ops *iommu,
@@ -691,44 +764,13 @@ dma_mark_declared_memory_occupied(
 }
 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
 
 /*
  * Managed DMA API
  */
-#ifdef CONFIG_HAS_DMA
-extern void *dmam_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp);
-extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
-			       dma_addr_t dma_handle);
-#else /* !CONFIG_HAS_DMA */
 static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_handle, gfp_t gfp)
-{ return NULL; }
-static inline void dmam_free_coherent(struct device *dev, size_t size,
-				      void *vaddr, dma_addr_t dma_handle) { }
-#endif /* !CONFIG_HAS_DMA */
-
-extern void *dmam_alloc_attrs(struct device *dev, size_t size,
-			      dma_addr_t *dma_handle, gfp_t gfp,
-			      unsigned long attrs);
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-extern int dmam_declare_coherent_memory(struct device *dev,
-					phys_addr_t phys_addr,
-					dma_addr_t device_addr, size_t size,
-					int flags);
-extern void dmam_release_declared_memory(struct device *dev);
-#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-static inline int dmam_declare_coherent_memory(struct device *dev,
-				phys_addr_t phys_addr, dma_addr_t device_addr,
-				size_t size, gfp_t gfp)
+				dma_addr_t *dma_handle, gfp_t gfp)
 {
-	return 0;
+	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
+			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
 }
 
-static inline void dmam_release_declared_memory(struct device *dev)
-{
-}
-#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-
 static inline void *dma_alloc_wc(struct device *dev, size_t size,
 				 dma_addr_t *dma_addr, gfp_t gfp)
 {
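With dmam_alloc_coherent() now an inline wrapper around dmam_alloc_attrs(), callers keep the same devres semantics; a hypothetical managed allocation for contrast (not from this series):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static int example_alloc_ring(struct device *dev, size_t ring_bytes,
				      void **ring, dma_addr_t *ring_dma)
	{
		/* Freed automatically on driver detach; the wrapper maps
		 * __GFP_NOWARN onto DMA_ATTR_NO_WARN as shown above. */
		*ring = dmam_alloc_coherent(dev, ring_bytes, ring_dma,
					    GFP_KERNEL | __GFP_NOWARN);
		return *ring ? 0 : -ENOMEM;
	}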
kernel/dma/coherent.c:

@@ -223,7 +223,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 	 */
 	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
 void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
@@ -268,7 +267,6 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 
 	return __dma_release_from_coherent(mem, order, vaddr);
 }
-EXPORT_SYMBOL(dma_release_from_dev_coherent);
 
 int dma_release_from_global_coherent(int order, void *vaddr)
 {
kernel/dma/debug.c:

@@ -49,7 +49,6 @@
 
 enum {
 	dma_debug_single,
-	dma_debug_page,
 	dma_debug_sg,
 	dma_debug_coherent,
 	dma_debug_resource,
@@ -1300,8 +1299,7 @@ void debug_dma_map_single(struct device *dev, const void *addr,
 EXPORT_SYMBOL(debug_dma_map_single);
 
 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-			size_t size, int direction, dma_addr_t dma_addr,
-			bool map_single)
+			size_t size, int direction, dma_addr_t dma_addr)
 {
 	struct dma_debug_entry *entry;
 
@@ -1316,7 +1314,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 		return;
 
 	entry->dev = dev;
-	entry->type = dma_debug_page;
+	entry->type = dma_debug_single;
 	entry->pfn = page_to_pfn(page);
 	entry->offset = offset,
 	entry->dev_addr = dma_addr;
@@ -1324,9 +1322,6 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 	entry->direction = direction;
 	entry->map_err_type = MAP_ERR_NOT_CHECKED;
 
-	if (map_single)
-		entry->type = dma_debug_single;
-
 	check_for_stack(dev, page, offset);
 
 	if (!PageHighMem(page)) {
@@ -1378,10 +1373,10 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 EXPORT_SYMBOL(debug_dma_mapping_error);
 
 void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-			  size_t size, int direction, bool map_single)
+			  size_t size, int direction)
 {
 	struct dma_debug_entry ref = {
-		.type           = dma_debug_page,
+		.type           = dma_debug_single,
 		.dev            = dev,
 		.dev_addr       = addr,
 		.size           = size,
@@ -1390,10 +1385,6 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 
 	if (unlikely(dma_debug_disabled()))
 		return;
-
-	if (map_single)
-		ref.type = dma_debug_single;
-
 	check_unmap(&ref);
 }
 EXPORT_SYMBOL(debug_dma_unmap_page);
@@ -1521,7 +1512,6 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 
 	add_dma_entry(entry);
 }
-EXPORT_SYMBOL(debug_dma_alloc_coherent);
 
 void debug_dma_free_coherent(struct device *dev, size_t size,
 			     void *virt, dma_addr_t addr)
@@ -1549,7 +1539,6 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 
 	check_unmap(&ref);
 }
-EXPORT_SYMBOL(debug_dma_free_coherent);
 
 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
 			    int direction, dma_addr_t dma_addr)
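Since dma_map_single() is now routed through dma_map_page_attrs(), dma-debug can no longer tell the two variants apart at map time, which is why the dma_debug_page entry type is folded into dma_debug_single above. A hypothetical fragment showing the pairing the checker still verifies (address, size, and direction must match at unmap):

	#include <linux/dma-mapping.h>

	static int example_stream_buf(struct device *dev, void *buf, size_t len)
	{
		/* Under CONFIG_DMA_API_DEBUG this creates a dma_debug_single
		 * entry, exactly as dma_map_page() would after this series. */
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;
		/* ... hand addr to the hardware and wait for completion ... */
		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
		return 0;
	}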
kernel/dma/mapping.c:

@@ -45,45 +45,6 @@ static int dmam_match(struct device *dev, void *res, void *match_data)
 	return 0;
 }
 
-/**
- * dmam_alloc_coherent - Managed dma_alloc_coherent()
- * @dev: Device to allocate coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- *
- * Managed dma_alloc_coherent(). Memory allocated using this function
- * will be automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_coherent(struct device *dev, size_t size,
-			  dma_addr_t *dma_handle, gfp_t gfp)
-{
-	struct dma_devres *dr;
-	void *vaddr;
-
-	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
-	if (!dr)
-		return NULL;
-
-	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
-	if (!vaddr) {
-		devres_free(dr);
-		return NULL;
-	}
-
-	dr->vaddr = vaddr;
-	dr->dma_handle = *dma_handle;
-	dr->size = size;
-
-	devres_add(dev, dr);
-
-	return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_coherent);
-
 /**
  * dmam_free_coherent - Managed dma_free_coherent()
  * @dev: Device to free coherent memory for
@@ -144,61 +105,6 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 }
 EXPORT_SYMBOL(dmam_alloc_attrs);
 
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-
-static void dmam_coherent_decl_release(struct device *dev, void *res)
-{
-	dma_release_declared_memory(dev);
-}
-
-/**
- * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
- * @dev: Device to declare coherent memory for
- * @phys_addr: Physical address of coherent memory to be declared
- * @device_addr: Device address of coherent memory to be declared
- * @size: Size of coherent memory to be declared
- * @flags: Flags
- *
- * Managed dma_declare_coherent_memory().
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-				 dma_addr_t device_addr, size_t size, int flags)
-{
-	void *res;
-	int rc;
-
-	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
-	if (!res)
-		return -ENOMEM;
-
-	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
-					 flags);
-	if (!rc)
-		devres_add(dev, res);
-	else
-		devres_free(res);
-
-	return rc;
-}
-EXPORT_SYMBOL(dmam_declare_coherent_memory);
-
-/**
- * dmam_release_declared_memory - Managed dma_release_declared_memory().
- * @dev: Device to release declared coherent memory for
- *
- * Managed dmam_release_declared_memory().
- */
-void dmam_release_declared_memory(struct device *dev)
-{
-	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
-}
-EXPORT_SYMBOL(dmam_release_declared_memory);
-
-#endif
-
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
kernel/dma/remap.c:

@@ -204,8 +204,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		ret = dma_alloc_from_pool(size, &page, flags);
 		if (!ret)
 			return NULL;
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return ret;
+		goto done;
 	}
 
 	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
@@ -215,8 +214,10 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	/* remove any dirty cache lines on the kernel alias */
 	arch_dma_prep_coherent(page, size);
 
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
-		return page; /* opaque cookie */
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+		ret = page; /* opaque cookie */
+		goto done;
+	}
 
 	/* create a coherent mapping */
 	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
@@ -227,9 +228,9 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		return ret;
 	}
 
-	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	memset(ret, 0, size);
 
+done:
+	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
 }
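Before this fix the DMA_ATTR_NO_KERNEL_MAPPING path returned the opaque page cookie without ever writing *dma_handle; the shared "done" label now sets it on every successful exit. A hypothetical caller, to show what a correct result looks like (illustrative only):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static int example_alloc_nomap(struct device *dev, size_t size,
				       void **cookie, dma_addr_t *dma)
	{
		/* *cookie is an opaque value (not a kernel virtual address);
		 * only *dma may be programmed into the device. Previously
		 * *dma was left uninitialized on this path. */
		*cookie = dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
					  DMA_ATTR_NO_KERNEL_MAPPING);
		return *cookie ? 0 : -ENOMEM;
	}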