
dma-mapping: remove dma_virt_ops

Now that the RDMA core properly deals with devices that only do DMA
mapping in lower layers, there are no remaining users of dma_virt_ops
and it can be removed.

Link: https://lore.kernel.org/r/20201106181941.1878556-11-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Christoph Hellwig authored on 2020-11-06 19:19:41 +01:00; committed by Jason Gunthorpe
parent 73063ec58c
commit 172292be01
4 changed files with 0 additions and 69 deletions
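
For context on the "lower layers" mentioned above: earlier patches in
this series taught the RDMA core's ib_dma_* wrappers to short-circuit
for software devices (rxe, siw, rdmavt) that register without a
DMA-capable device, passing kernel virtual addresses through directly.
A minimal sketch of that pattern, modeled on the wrappers in
include/rdma/ib_verbs.h (treat the exact signatures here as
illustrative rather than authoritative):

static inline bool ib_uses_virt_dma(struct ib_device *dev)
{
	/* Software RDMA devices register with no DMA-capable device. */
	return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
}

static inline u64 ib_dma_map_single(struct ib_device *dev,
		void *cpu_addr, size_t size,
		enum dma_data_direction direction)
{
	if (ib_uses_virt_dma(dev))
		return (uintptr_t)cpu_addr;	/* identity "mapping" */
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

Moving the virtual-address special case into the one subsystem that
needs it is what makes the generic dma_virt_ops below removable.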

include/linux/dma-mapping.h

@@ -565,6 +565,4 @@ static inline int dma_mmap_wc(struct device *dev,
 int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
 		dma_addr_t dma_start, u64 size);
 
-extern const struct dma_map_ops dma_virt_ops;
-
 #endif /* _LINUX_DMA_MAPPING_H */

kernel/dma/Kconfig

@@ -75,11 +75,6 @@ config ARCH_HAS_DMA_PREP_COHERENT
 config ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	bool
 
-config DMA_VIRT_OPS
-	bool
-	depends on HAS_DMA
-	select DMA_OPS
-
 config SWIOTLB
 	bool
 	select NEED_DMA_MAP_STATE

kernel/dma/Makefile

@@ -5,7 +5,6 @@ obj-$(CONFIG_DMA_OPS)			+= ops_helpers.o
 obj-$(CONFIG_DMA_OPS)			+= dummy.o
 obj-$(CONFIG_DMA_CMA)			+= contiguous.o
 obj-$(CONFIG_DMA_DECLARE_COHERENT)	+= coherent.o
-obj-$(CONFIG_DMA_VIRT_OPS)		+= virt.o
 obj-$(CONFIG_DMA_API_DEBUG)		+= debug.o
 obj-$(CONFIG_SWIOTLB)			+= swiotlb.o
 obj-$(CONFIG_DMA_COHERENT_POOL)		+= pool.o

kernel/dma/virt.c (deleted)

@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DMA operations that map to virtual addresses without flushing memory.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-map-ops.h>
-#include <linux/scatterlist.h>
-
-static void *dma_virt_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp,
-		unsigned long attrs)
-{
-	void *ret;
-
-	ret = (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
-	if (ret)
-		*dma_handle = (uintptr_t)ret;
-	return ret;
-}
-
-static void dma_virt_free(struct device *dev, size_t size,
-		void *cpu_addr, dma_addr_t dma_addr,
-		unsigned long attrs)
-{
-	free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	return (uintptr_t)(page_address(page) + offset);
-}
-
-static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sgl, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-		sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
-		sg_dma_len(sg) = sg->length;
-	}
-
-	return nents;
-}
-
-const struct dma_map_ops dma_virt_ops = {
-	.alloc			= dma_virt_alloc,
-	.free			= dma_virt_free,
-	.map_page		= dma_virt_map_page,
-	.map_sg			= dma_virt_map_sg,
-	.alloc_pages		= dma_common_alloc_pages,
-	.free_pages		= dma_common_free_pages,
-};
-EXPORT_SYMBOL(dma_virt_ops);
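
Before this commit, a software RDMA driver opted into the identity
mapping above by installing these ops on its struct device. A hedged
sketch of the now-removed pattern; set_dma_ops() is the real helper
from linux/dma-map-ops.h, while my_register_device() is a made-up
stand-in for a driver's registration path:

#include <linux/dma-map-ops.h>	/* set_dma_ops() */

/* Illustrative only: roughly how rdmavt and friends attached the ops. */
static void my_register_device(struct device *dev)
{
	set_dma_ops(dev, &dma_virt_ops);	/* no longer possible: symbol is gone */
}

With the ops removed, such a driver simply leaves its dma_device unset
and lets the ib_dma_* wrappers sketched earlier handle the
virtual-address case.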