xen/arm,arm64: fix xen_dma_ops after 815dd18 "Consolidate get_dma_ops..."
The following commit:

  commit 815dd18788
  Author: Bart Van Assche <bart.vanassche@sandisk.com>
  Date:   Fri Jan 20 13:04:04 2017 -0800

      treewide: Consolidate get_dma_ops() implementations
rearranges get_dma_ops in such a way that xen_dma_ops is no longer returned
when running on Xen; dev->dma_ops is returned instead (see
arch/arm/include/asm/dma-mapping.h:get_arch_dma_ops and
include/linux/dma-mapping.h:get_dma_ops).
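
For illustration, a minimal, self-contained userspace sketch of the
post-815dd18 lookup order (not the kernel source; the names mirror the
kernel's, but struct device and dma_map_ops are stubs). It shows why a
per-device dma_ops pointer now shadows any Xen check made in the arch
fallback:

/*
 * Hypothetical userspace model of the post-815dd18 lookup order.
 * Not kernel code: struct device and dma_map_ops are stubs.
 */
#include <stdbool.h>
#include <stdio.h>

struct dma_map_ops { const char *name; };

static const struct dma_map_ops arm_dma_ops = { "arm_dma_ops" };
static const struct dma_map_ops xen_dma_ops = { "xen_dma_ops" };
static const struct dma_map_ops iommu_ops   = { "iommu_ops" };

struct device { const struct dma_map_ops *dma_ops; };

/* Pretend we are running as the Xen initial domain (dom0). */
static bool xen_initial_domain(void) { return true; }

/* Before this fix, the Xen check lived in the arch fallback... */
static const struct dma_map_ops *get_arch_dma_ops(void)
{
        if (xen_initial_domain())
                return &xen_dma_ops;
        return &arm_dma_ops;
}

/* ...but after 815dd18 the per-device ops win, so the fallback is
 * never consulted once dev->dma_ops has been set. */
static const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (dev && dev->dma_ops)
                return dev->dma_ops;
        return get_arch_dma_ops();
}

int main(void)
{
        struct device dev = { .dma_ops = &iommu_ops };

        /* Prints "iommu_ops" rather than "xen_dma_ops": the regression. */
        printf("%s\n", get_dma_ops(&dev)->name);
        return 0;
}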
Fix the problem by storing dev->dma_ops in dev_archdata, and setting
dev->dma_ops to xen_dma_ops. This way, xen_dma_ops is returned naturally
by get_dma_ops. The Xen code can retrieve the original dev->dma_ops from
dev_archdata when needed. It also allows us to remove __generic_dma_ops
from common headers.
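
A second self-contained sketch, in the same hypothetical userspace style
(again not the kernel source), models the fix: the native ops are stashed
in archdata at setup time, xen_dma_ops is installed as dev->dma_ops so the
generic lookup returns it, and a small helper (mirroring the
__generic_dma_ops the patch adds to page-coherent.h) recovers the stashed
ops when the Xen code needs them:

/*
 * Hypothetical userspace model of the fix; stubs only, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct dma_map_ops { const char *name; };

static const struct dma_map_ops arm_dma_ops = { "arm_dma_ops" };
static const struct dma_map_ops xen_dma_ops = { "xen_dma_ops" };

struct dev_archdata { const struct dma_map_ops *dev_dma_ops; };

struct device {
        const struct dma_map_ops *dma_ops;
        struct dev_archdata archdata;
};

static bool xen_initial_domain(void) { return true; } /* pretend dom0 */

/* Generic lookup as in the previous sketch: per-device ops win. */
static const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (dev && dev->dma_ops)
                return dev->dma_ops;
        return &arm_dma_ops;
}

/* Device setup: stash the native ops, then install xen_dma_ops so the
 * generic lookup above hands them out. */
static void setup_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
        dev->dma_ops = ops;
        if (xen_initial_domain()) {
                dev->archdata.dev_dma_ops = dev->dma_ops;
                dev->dma_ops = &xen_dma_ops;
        }
}

/* Xen-side helper: return the stashed native ops when they exist. */
static const struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
        if (dev && dev->archdata.dev_dma_ops)
                return dev->archdata.dev_dma_ops;
        return &arm_dma_ops;
}

int main(void)
{
        struct device dev = { 0 };

        setup_dma_ops(&dev, &arm_dma_ops);
        printf("generic lookup: %s\n", get_dma_ops(&dev)->name);       /* xen_dma_ops */
        printf("stashed native: %s\n", __generic_dma_ops(&dev)->name); /* arm_dma_ops */
        return 0;
}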
Signed-off-by: Stefano Stabellini <sstabellini@kernel.org>
Tested-by: Julien Grall <julien.grall@arm.com>
Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: <stable@vger.kernel.org> [4.11+]
CC: linux@armlinux.org.uk
CC: catalin.marinas@arm.com
CC: will.deacon@arm.com
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
CC: Julien Grall <julien.grall@arm.com>
parent 4a80601622
commit e058632670
@@ -15,6 +15,9 @@ struct dev_archdata {
 #endif
 #ifdef CONFIG_ARM_DMA_USE_IOMMU
        struct dma_iommu_mapping *mapping;
 #endif
+#ifdef CONFIG_XEN
+       const struct dma_map_ops *dev_dma_ops;
+#endif
        bool dma_coherent;
 };
@@ -16,19 +16,9 @@
 extern const struct dma_map_ops arm_dma_ops;
 extern const struct dma_map_ops arm_coherent_dma_ops;
 
-static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
-{
-       if (dev && dev->dma_ops)
-               return dev->dma_ops;
-       return &arm_dma_ops;
-}
-
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-       if (xen_initial_domain())
-               return xen_dma_ops;
-       else
-               return __generic_dma_ops(NULL);
+       return &arm_dma_ops;
 }
 
 #define HAVE_ARCH_DMA_SUPPORTED 1
@@ -2414,6 +2414,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                dma_ops = arm_get_dma_map_ops(coherent);
 
        set_dma_ops(dev, dma_ops);
+
+#ifdef CONFIG_XEN
+       if (xen_initial_domain()) {
+               dev->archdata.dev_dma_ops = dev->dma_ops;
+               dev->dma_ops = xen_dma_ops;
+       }
+#endif
 }
 
 void arch_teardown_dma_ops(struct device *dev)
@@ -19,6 +19,9 @@
 struct dev_archdata {
 #ifdef CONFIG_IOMMU_API
        void *iommu;                    /* private IOMMU data */
 #endif
+#ifdef CONFIG_XEN
+       const struct dma_map_ops *dev_dma_ops;
+#endif
        bool dma_coherent;
 };
@@ -27,11 +27,8 @@
 #define DMA_ERROR_CODE (~(dma_addr_t)0)
 extern const struct dma_map_ops dummy_dma_ops;
 
-static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-       if (dev && dev->dma_ops)
-               return dev->dma_ops;
-
        /*
         * We expect no ISA devices, and all other DMA masters are expected to
         * have someone call arch_setup_dma_ops at device creation time.
@@ -39,14 +36,6 @@ static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
        return &dummy_dma_ops;
 }
 
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-       if (xen_initial_domain())
-               return xen_dma_ops;
-       else
-               return __generic_dma_ops(NULL);
-}
-
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent);
 #define arch_setup_dma_ops     arch_setup_dma_ops
@@ -977,4 +977,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
        dev->archdata.dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);
+
+#ifdef CONFIG_XEN
+       if (xen_initial_domain()) {
+               dev->archdata.dev_dma_ops = dev->dma_ops;
+               dev->dma_ops = xen_dma_ops;
+       }
+#endif
 }
@@ -2,8 +2,16 @@
 #define _ASM_ARM_XEN_PAGE_COHERENT_H
 
 #include <asm/page.h>
+#include <asm/dma-mapping.h>
 #include <linux/dma-mapping.h>
 
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
+{
+       if (dev && dev->archdata.dev_dma_ops)
+               return dev->archdata.dev_dma_ops;
+       return get_arch_dma_ops(NULL);
+}
+
 void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, unsigned long attrs);