swiotlb: merge swiotlb-xen initialization into swiotlb
Reuse the generic swiotlb initialization for xen-swiotlb.  For ARM/ARM64
this works trivially, while for x86 xen_swiotlb_fixup needs to be passed
as the remap argument to swiotlb_init_remap/swiotlb_init_late.

Note that the lower bound of the swiotlb size is changed to the smaller
IO_TLB_MIN_SLABS based value with this patch, but that is fine as the
2MB value used in Xen before was just an optimization and is not the
hard lower bound.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
This commit is contained in:
parent 7374153d29
commit 3f70356edf
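The key mechanism here is the remap callback: rather than xen-swiotlb
duplicating the allocate/shrink/retry loop, the generic swiotlb code
allocates the bounce buffer and invokes an architecture hook to exchange
it for DMA-reachable memory, halving the size and retrying when the hook
fails. Below is a minimal userspace sketch of that pattern, not the
kernel code: the toy allocator, sizes, and error value are invented for
illustration, and only the callback shape mirrors xen_swiotlb_fixup()
in the diff that follows.

	/*
	 * Toy model of the remap-hook pattern (userspace C, not kernel code).
	 * The callback signature mirrors xen_swiotlb_fixup() from the diff;
	 * the allocator and limits are made up for illustration.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define IO_TLB_SHIFT	11	/* 2 KB slabs, as in the kernel */

	/* remap hook: exchange the buffer for DMA-reachable pages; 0 on success */
	typedef int (*swiotlb_remap_t)(void *tlb, unsigned long nslabs);

	/* stand-in for xen_swiotlb_fixup(): pretend remap fails for big buffers */
	static int toy_xen_fixup(void *tlb, unsigned long nslabs)
	{
		return nslabs > 2048 ? -12 /* -ENOMEM */ : 0;
	}

	/* sketch of swiotlb_init_remap()'s retry loop: halve on remap failure */
	static void *toy_swiotlb_init(unsigned long *nslabs, swiotlb_remap_t remap)
	{
		while (*nslabs >= 512) {	/* IO_TLB_MIN_SLABS-style floor */
			void *tlb = malloc(*nslabs << IO_TLB_SHIFT);

			if (tlb && (!remap || remap(tlb, *nslabs) == 0))
				return tlb;
			free(tlb);
			*nslabs /= 2;		/* retry with a smaller buffer */
		}
		return NULL;
	}

	int main(void)
	{
		unsigned long nslabs = 8192;
		void *tlb = toy_swiotlb_init(&nslabs, toy_xen_fixup);

		printf("got %lu slabs (%p)\n", nslabs, tlb);
		free(tlb);
		return 0;
	}

Passing NULL as the remap argument (as the ARM hunk below does) skips
the exchange step entirely, which is why ARM/ARM64 only needs to supply
the right GFP flags.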
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -23,22 +23,20 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
-unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+static gfp_t xen_swiotlb_gfp(void)
 {
 	phys_addr_t base;
-	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
 	u64 i;
 
 	for_each_mem_range(i, &base, NULL) {
 		if (base < (phys_addr_t)0xffffffff) {
 			if (IS_ENABLED(CONFIG_ZONE_DMA32))
-				flags |= __GFP_DMA32;
-			else
-				flags |= __GFP_DMA;
-			break;
+				return __GFP_DMA32;
+			return __GFP_DMA;
 		}
 	}
-	return __get_free_pages(flags, order);
+
+	return GFP_KERNEL;
 }
 
 static bool hypercall_cflush = false;
@@ -140,10 +138,13 @@ static int __init xen_mm_init(void)
 	if (!xen_swiotlb_detect())
 		return 0;
 
-	rc = xen_swiotlb_init();
 	/* we can work with the default swiotlb */
-	if (rc < 0 && rc != -EEXIST)
-		return rc;
+	if (!io_tlb_default_mem.nslabs) {
+		rc = swiotlb_init_late(swiotlb_size_or_default(),
+				       xen_swiotlb_gfp(), NULL);
+		if (rc < 0)
+			return rc;
+	}
 
 	cflush.op = 0;
 	cflush.a.dev_bus_addr = 0;
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -357,9 +357,4 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
 	return false;
 }
 
-static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
-{
-	return __get_free_pages(__GFP_NOWARN, order);
-}
-
 #endif /* _ASM_X86_XEN_PAGE_H */
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -72,15 +72,13 @@ static inline void __init pci_swiotlb_detect(void)
 #endif /* CONFIG_SWIOTLB */
 
 #ifdef CONFIG_SWIOTLB_XEN
-static bool xen_swiotlb;
-
 static void __init pci_xen_swiotlb_init(void)
 {
 	if (!xen_initial_domain() && !x86_swiotlb_enable)
 		return;
 	x86_swiotlb_enable = true;
-	xen_swiotlb = true;
-	xen_swiotlb_init_early();
+	x86_swiotlb_flags |= SWIOTLB_ANY;
+	swiotlb_init_remap(true, x86_swiotlb_flags, xen_swiotlb_fixup);
 	dma_ops = &xen_swiotlb_dma_ops;
 	if (IS_ENABLED(CONFIG_PCI))
 		pci_request_acs();
@@ -88,14 +86,16 @@ static void __init pci_xen_swiotlb_init(void)
 
 int pci_xen_swiotlb_init_late(void)
 {
-	int rc;
-
-	if (xen_swiotlb)
+	if (dma_ops == &xen_swiotlb_dma_ops)
 		return 0;
 
-	rc = xen_swiotlb_init();
-	if (rc)
-		return rc;
+	/* we can work with the default swiotlb */
+	if (!io_tlb_default_mem.nslabs) {
+		int rc = swiotlb_init_late(swiotlb_size_or_default(),
+					   GFP_KERNEL, xen_swiotlb_fixup);
+		if (rc < 0)
+			return rc;
+	}
 
 	/* XXX: this switches the dma ops under live devices! */
 	dma_ops = &xen_swiotlb_dma_ops;
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -104,7 +104,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 	return 0;
 }
 
-static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 {
 	int rc;
 	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -130,132 +130,6 @@ static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
 	return 0;
 }
 
-enum xen_swiotlb_err {
-	XEN_SWIOTLB_UNKNOWN = 0,
-	XEN_SWIOTLB_ENOMEM,
-	XEN_SWIOTLB_EFIXUP
-};
-
-static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
-{
-	switch (err) {
-	case XEN_SWIOTLB_ENOMEM:
-		return "Cannot allocate Xen-SWIOTLB buffer\n";
-	case XEN_SWIOTLB_EFIXUP:
-		return "Failed to get contiguous memory for DMA from Xen!\n"\
-		    "You either: don't have the permissions, do not have"\
-		    " enough free memory under 4GB, or the hypervisor memory"\
-		    " is too fragmented!";
-	default:
-		break;
-	}
-	return "";
-}
-
-int xen_swiotlb_init(void)
-{
-	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
-	unsigned long bytes = swiotlb_size_or_default();
-	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
-	unsigned int order, repeat = 3;
-	int rc = -ENOMEM;
-	char *start;
-
-	if (io_tlb_default_mem.nslabs) {
-		pr_warn("swiotlb buffer already initialized\n");
-		return -EEXIST;
-	}
-
-retry:
-	m_ret = XEN_SWIOTLB_ENOMEM;
-	order = get_order(bytes);
-
-	/*
-	 * Get IO TLB memory from any location.
-	 */
-#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
-#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-		start = (void *)xen_get_swiotlb_free_pages(order);
-		if (start)
-			break;
-		order--;
-	}
-	if (!start)
-		goto exit;
-	if (order != get_order(bytes)) {
-		pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
-			(PAGE_SIZE << order) >> 20);
-		nslabs = SLABS_PER_PAGE << order;
-		bytes = nslabs << IO_TLB_SHIFT;
-	}
-
-	/*
-	 * And replace that memory with pages under 4GB.
-	 */
-	rc = xen_swiotlb_fixup(start, nslabs);
-	if (rc) {
-		free_pages((unsigned long)start, order);
-		m_ret = XEN_SWIOTLB_EFIXUP;
-		goto error;
-	}
-	rc = swiotlb_late_init_with_tbl(start, nslabs);
-	if (rc)
-		return rc;
-	return 0;
-error:
-	if (nslabs > 1024 && repeat--) {
-		/* Min is 2MB */
-		nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
-		bytes = nslabs << IO_TLB_SHIFT;
-		pr_info("Lowering to %luMB\n", bytes >> 20);
-		goto retry;
-	}
-exit:
-	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
-	return rc;
-}
-
-#ifdef CONFIG_X86
-void __init xen_swiotlb_init_early(void)
-{
-	unsigned long bytes = swiotlb_size_or_default();
-	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
-	unsigned int repeat = 3;
-	char *start;
-	int rc;
-
-retry:
-	/*
-	 * Get IO TLB memory from any location.
-	 */
-	start = memblock_alloc(PAGE_ALIGN(bytes),
-			       IO_TLB_SEGSIZE << IO_TLB_SHIFT);
-	if (!start)
-		panic("%s: Failed to allocate %lu bytes\n",
-		      __func__, PAGE_ALIGN(bytes));
-
-	/*
-	 * And replace that memory with pages under 4GB.
-	 */
-	rc = xen_swiotlb_fixup(start, nslabs);
-	if (rc) {
-		memblock_free(start, PAGE_ALIGN(bytes));
-		if (nslabs > 1024 && repeat--) {
-			/* Min is 2MB */
-			nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
-			bytes = nslabs << IO_TLB_SHIFT;
-			pr_info("Lowering to %luMB\n", bytes >> 20);
-			goto retry;
-		}
-		panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
-	}
-
-	if (swiotlb_init_with_tbl(start, nslabs, SWIOTLB_VERBOSE))
-		panic("Cannot allocate SWIOTLB buffer");
-}
-#endif /* CONFIG_X86 */
-
 static void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_addr_t *dma_handle, gfp_t flags,
--- a/include/xen/arm/page.h
+++ b/include/xen/arm/page.h
@@ -115,6 +115,5 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 bool xen_arch_need_swiotlb(struct device *dev,
 			   phys_addr_t phys,
 			   dma_addr_t dev_addr);
-unsigned long xen_get_swiotlb_free_pages(unsigned int order);
 
 #endif /* _ASM_ARM_XEN_PAGE_H */
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -10,8 +10,12 @@ void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
 void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
 			     size_t size, enum dma_data_direction dir);
 
-int xen_swiotlb_init(void);
-void __init xen_swiotlb_init_early(void);
+#ifdef CONFIG_SWIOTLB_XEN
+int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
+#else
+#define xen_swiotlb_fixup NULL
+#endif
 
 extern const struct dma_map_ops xen_swiotlb_dma_ops;
 
 #endif /* __LINUX_SWIOTLB_XEN_H */