Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-29 07:34:06 +08:00)

Commit eda670c626
Merge tag 'stable/for-linus-3.13-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull Xen updates from Konrad Rzeszutek Wilk:
 "This has tons of fixes and two major features, both concentrated around
  the Xen SWIOTLB library. The short <blurb> is that a tracing facility
  (just one function) has been added to SWIOTLB to make it easier to track
  I/O progress. Additionally, under Xen on ARM (32 & 64) the Xen-SWIOTLB
  driver "is used to translate physical to machine and machine to physical
  addresses of foreign [guest] pages for DMA operations" (Stefano) when
  booting on hardware without a proper IOMMU.

  There are also bug-fixes, cleanups, compile warning fixes, etc.

  The commit times for some of the commits are a bit fresh - that is
  because we wanted to make sure we had the Acks from the ARM folks, which
  with the string of back-to-back conferences took a bit of time. Rest
  assured, the code has been stewing in #linux-next for some time.

  Features:
   - SWIOTLB has tracing added when doing bounce buffering.
   - Xen ARM/ARM64 can use Xen-SWIOTLB. This work allows Linux to safely
     program real devices for DMA operations when running as a guest on
     Xen on ARM, without IOMMU support. [*1]
   - xen_raw_printk works with PVHVM guests if needed.

  Bug-fixes:
   - Make memory ballooning work under HVM with a large MMIO region.
   - Inform the hypervisor of MCFG regions found in the ACPI DSDT.
   - Remove deprecated IRQF_DISABLED.
   - Remove deprecated __cpuinit.

  [*1]: "On arm and arm64 all Xen guests, including dom0, run with second
  stage translation enabled. As a consequence, when dom0 programs a device
  for a DMA operation, it is going to use (pseudo) physical addresses
  instead of machine addresses. This work introduces two trees to track
  physical-to-machine and machine-to-physical mappings of foreign pages.
  Local pages are assumed mapped 1:1 (physical address == machine address).
  It enables the SWIOTLB-Xen driver on ARM and ARM64, so that Linux can
  translate physical addresses to machine addresses for DMA operations
  when necessary." (Stefano)"

* tag 'stable/for-linus-3.13-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (32 commits)
  xen/arm: pfn_to_mfn and mfn_to_pfn return the argument if nothing is in the p2m
  arm,arm64/include/asm/io.h: define struct bio_vec
  swiotlb-xen: missing include dma-direction.h
  pci-swiotlb-xen: call pci_request_acs only ifdef CONFIG_PCI
  arm: make SWIOTLB available
  xen: delete new instances of added __cpuinit
  xen/balloon: Set balloon's initial state to number of existing RAM pages
  xen/mcfg: Call PHYSDEVOP_pci_mmcfg_reserved for MCFG areas.
  xen: remove deprecated IRQF_DISABLED
  x86/xen: remove deprecated IRQF_DISABLED
  swiotlb-xen: fix error code returned by xen_swiotlb_map_sg_attrs
  swiotlb-xen: static inline xen_phys_to_bus, xen_bus_to_phys, xen_virt_to_bus and range_straddles_page_boundary
  grant-table: call set_phys_to_machine after mapping grant refs
  arm,arm64: do not always merge biovec if we are running on Xen
  swiotlb: print a warning when the swiotlb is full
  swiotlb-xen: use xen_dma_map/unmap_page, xen_dma_sync_single_for_cpu/device
  xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device
  tracing/events: Fix swiotlb tracepoint creation
  swiotlb-xen: use xen_alloc/free_coherent_pages
  xen: introduce xen_alloc/free_coherent_pages
  ...
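The footnote's 1:1 convention also explains the first commit in the list above ("pfn_to_mfn and mfn_to_pfn return the argument if nothing is in the p2m"). A minimal sketch of that lookup behaviour, with lookup_p2m_tree() standing in as a hypothetical helper for the real tree walk:

    /* Sketch only: foreign pages live in the tracking tree; any pfn
     * absent from it is a local page and is identity-mapped. */
    unsigned long pfn_to_mfn(unsigned long pfn)
    {
            unsigned long mfn = lookup_p2m_tree(pfn);   /* hypothetical */

            if (mfn == INVALID_P2M_ENTRY)
                    return pfn;     /* local: physical == machine */
            return mfn;             /* foreign: translated */
    }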
326 lines
9.6 KiB
C
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE  (~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;

static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
        if (dev && dev->archdata.dma_ops)
                return dev->archdata.dma_ops;
        return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (xen_initial_domain())
                return xen_dma_ops;
        else
                return __generic_dma_ops(dev);
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
        BUG_ON(!dev);
        dev->archdata.dma_ops = ops;
}
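
/*
 * Illustrative sketch (not part of this header): platform code can use
 * set_dma_ops() to install a device-specific dma_map_ops table before
 * drivers probe the device; "my_board_dma_ops" is hypothetical.
 */
#if 0
extern struct dma_map_ops my_board_dma_ops;     /* hypothetical ops table */

static void my_board_dma_fixup(struct device *dev)
{
        /* After this, __generic_dma_ops(dev) returns my_board_dma_ops;
         * note that get_dma_ops() still prefers xen_dma_ops in dom0. */
        set_dma_ops(dev, &my_board_dma_ops);
}
#endif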

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
        return get_dma_ops(dev)->set_dma_mask(dev, mask);
}

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif

/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
        return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        unsigned int offset = paddr & ~PAGE_MASK;
        return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
        unsigned int offset = dev_addr & ~PAGE_MASK;
        return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}
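
/*
 * Worked example for the two helpers above (assuming 4KiB pages and an
 * identity __pfn_to_bus): paddr 0x80001234 splits into pfn 0x80001 and
 * sub-page offset 0x234, so phys_to_dma() returns 0x80001000 + 0x234 =
 * 0x80001234; dma_to_phys() inverts the same split.
 */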

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
        u64 limit, mask;

        if (!dev->dma_mask)
                return 0;

        mask = *dev->dma_mask;

        limit = (mask + 1) & ~mask;
        if (limit && size > limit)
                return 0;

        if ((addr | (addr + size - 1)) & ~mask)
                return 0;

        return 1;
}
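
/*
 * Worked example for dma_capable() (illustrative numbers): with a 24-bit
 * mask, mask = 0x00ffffff and limit = (mask + 1) & ~mask = 0x01000000,
 * i.e. the 16MiB window size, so a 32MiB request fails the size test.
 * A buffer at addr 0x00ff0000 of size 0x20000 fails the second test:
 * its last byte, 0x0100ffff, has a bit set above the mask.
 */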

static inline void dma_mark_clean(void *addr, size_t size) { }

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);
        return dma_addr == DMA_ERROR_CODE;
}
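
/*
 * Illustrative usage (hypothetical driver code, not part of this header):
 * every streaming mapping should be checked with dma_mapping_error()
 * before the returned address is handed to hardware.
 */
#if 0
static int example_map_one(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))
                return -ENOMEM; /* e.g. bounce buffers exhausted */

        /* ... start the transfer ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}
#endif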

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                                          dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                                        void *cpu_addr, dma_addr_t handle)
{
}

extern int dma_supported(struct device *dev, u64 mask);

extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                           gfp_t gfp, struct dma_attrs *attrs);

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t flag,
                                    struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;
        BUG_ON(!ops);

        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}

/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                         dma_addr_t handle, struct dma_attrs *attrs);

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
                                  void *cpu_addr, dma_addr_t dma_handle,
                                  struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
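
/*
 * Illustrative usage (hypothetical driver code, not part of this header):
 * a typical coherent allocation pairs dma_alloc_coherent() with
 * dma_free_coherent(); the device sees ring_dma, the CPU uses ring.
 */
#if 0
static int example_ring_setup(struct device *dev)
{
        dma_addr_t ring_dma;
        void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);

        if (!ring)
                return -ENOMEM;

        /* ... write ring_dma into a device register, fill ring ... */

        dma_free_coherent(dev, SZ_4K, ring, ring_dma);
        return 0;
}
#endif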

/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        struct dma_attrs *attrs);

static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
                                           dma_addr_t *dma_handle, gfp_t flag)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
                                         void *cpu_addr, dma_addr_t dma_handle)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
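
/*
 * Illustrative usage (hypothetical, not part of this header): frame
 * buffers are the classic consumer of the writecombine variants, since
 * CPU writes may then be buffered and combined instead of staying
 * strictly ordered and uncached; "fb_size" is an assumed parameter.
 */
#if 0
static void *example_fb_alloc(struct device *dev, size_t fb_size,
                              dma_addr_t *fb_dma)
{
        return dma_alloc_writecombine(dev, fb_size, fb_dma, GFP_KERNEL);
}
#endif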

/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);

/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long, int (*)(struct device *, dma_addr_t, size_t));
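
/*
 * Illustrative usage (hypothetical platform code, not part of this
 * header): register a device whose inbound window covers only the low
 * 64MiB, bouncing anything beyond it; "example_needs_bounce" and the
 * pool sizes are made up for the sketch.
 */
#if 0
static int example_needs_bounce(struct device *dev, dma_addr_t addr,
                                size_t size)
{
        return (addr + size) > SZ_64M;  /* bounce if past the window */
}

static int example_platform_init(struct device *dev)
{
        /* 2KiB small-buffer pool, no large pool */
        return dmabounce_register_dev(dev, 2048, 0, example_needs_bounce);
}
#endif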

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                struct dma_attrs *attrs);
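
/*
 * Illustrative usage (hypothetical driver code, not part of this header):
 * drivers reach the arm_dma_*_sg backends through the generic
 * dma_map_sg()/dma_unmap_sg() wrappers rather than calling them directly.
 */
#if 0
static int example_map_table(struct device *dev, struct sg_table *sgt)
{
        int nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);

        if (!nents)
                return -ENOMEM;

        /* ... hand the nents mapped segments to the device ... */

        dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
        return 0;
}
#endif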

#endif /* __KERNEL__ */
#endif