mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-16 07:24:39 +08:00
7641842164
The mapping function should always return DMA_ERROR_CODE when a mapping has failed as this is what the DMA API expects when a DMA error has occurred. The current function for mapping a page in Xen was returning either DMA_ERROR_CODE or 0 depending on where it failed. On x86 DMA_ERROR_CODE is 0, but on other architectures such as ARM it is ~0. We need to make sure we return the same error value if either the mapping failed or the device is not capable of accessing the mapping. If we are returning DMA_ERROR_CODE as our error value we can drop the function for checking the error code as the default is to compare the return value against DMA_ERROR_CODE if no function is defined. Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Signed-off-by: Konrad Rzeszutek Wilk <konrad@kernel.org>
59 lines
1.8 KiB
C
59 lines
1.8 KiB
C
#ifndef __LINUX_SWIOTLB_XEN_H
#define __LINUX_SWIOTLB_XEN_H

#include <linux/dma-direction.h>
#include <linux/swiotlb.h>

/*
 * Public interface of the Xen software IO-TLB (swiotlb) layer.
 *
 * These entry points parallel the generic DMA-mapping operations
 * (alloc/free coherent, map/unmap page and scatterlist, CPU/device
 * sync, mask handling) and are wired into a dma_map_ops table by the
 * Xen backend.  NOTE(review): semantics below are inferred from the
 * prototypes and the accompanying patch description; confirm against
 * drivers/xen/swiotlb-xen.c.
 */

/*
 * Initialise the Xen swiotlb bounce-buffer pool.
 * @verbose: non-zero to print setup information.
 * @early:   true when called from early boot (before the normal
 *           allocators are fully available) -- TODO confirm.
 * Returns 0 on success, negative on failure (presumed; verify caller).
 */
extern int xen_swiotlb_init(int verbose, bool early);

/*
 * Allocate @size bytes of coherent (consistent) DMA memory for @hwdev.
 * The bus address is returned through @dma_handle; @flags are GFP
 * allocation flags, @attrs are DMA_ATTR_* bits.
 * Returns the kernel virtual address, or NULL on failure (presumed).
 */
extern void
*xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flags,
			    unsigned long attrs);

/*
 * Free coherent memory previously obtained from
 * xen_swiotlb_alloc_coherent(); @vaddr/@dma_handle/@size/@attrs must
 * match the original allocation.
 */
extern void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
			  void *vaddr, dma_addr_t dma_handle,
			  unsigned long attrs);

/*
 * Map @size bytes at @offset within @page for DMA in direction @dir.
 * Per the patch this header belongs to, failure is reported by
 * returning DMA_ERROR_CODE (the DMA API's expected error value, which
 * differs between architectures: 0 on x86, ~0 on ARM), so no separate
 * mapping_error hook is needed.
 */
extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs);

/*
 * Tear down a mapping created by xen_swiotlb_map_page(); bounces data
 * back for DMA_FROM_DEVICE directions where applicable (presumed).
 */
extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);

/*
 * Map a scatter-gather list of @nelems entries for DMA.
 * Returns the number of entries successfully mapped, or 0 on failure
 * (standard map_sg convention -- TODO confirm).
 */
extern int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 unsigned long attrs);

/* Unmap a scatter-gather list mapped by xen_swiotlb_map_sg_attrs(). */
extern void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs);

/* Make a single mapping's data visible to the CPU after device DMA. */
extern void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir);

/* Scatter-gather counterpart of xen_swiotlb_sync_single_for_cpu(). */
extern void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir);

/* Make CPU-written data in a single mapping visible to the device. */
extern void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir);

/* Scatter-gather counterpart of xen_swiotlb_sync_single_for_device(). */
extern void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir);

/*
 * Report whether @hwdev can be driven with DMA addressing @mask
 * (non-zero = supported, per the dma_supported convention -- verify).
 */
extern int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);

/* Set @dev's DMA mask; returns 0 on success (presumed). */
extern int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);

#endif /* __LINUX_SWIOTLB_XEN_H */