
dmaengine: idma64: Use actual device for DMA transfers

Intel IOMMU, when enabled, tries to find the domain of a device during
DMA operations, such as mapping or unmapping, assuming it's a PCI one.
Since we split the actual PCI device into a couple of children via the
MFD framework (see drivers/mfd/intel-lpss.c for details), the DMA
device appears to be a platform one, and thus not the actual device
that performs DMA. In such a situation the IOMMU can't find or allocate
a proper domain for its operations. As a result, all DMA operations
fail.
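
A minimal sketch of the failing situation (hypothetical probe and buffer,
not part of the patch): the MFD child is a platform device the IOMMU knows
nothing about, while its parent is the real PCI device.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int lpss_child_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;	/* MFD platform child */
	struct device *sysdev = dev->parent;	/* the real PCI device */
	dma_addr_t addr;
	void *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * dma_map_single(dev, ...) fails here when the Intel IOMMU is
	 * enabled: no domain can be found or allocated for the platform
	 * child.  Mapping against the PCI parent works, which is what
	 * this patch arranges for the idma64 DMA engine.
	 */
	addr = dma_map_single(sysdev, buf, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(sysdev, addr)) {
		kfree(buf);
		return -ENOMEM;
	}

	dma_unmap_single(sysdev, addr, PAGE_SIZE, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}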

In order to fix this, supply the parent of the platform device
to the DMA engine framework and fix the filter functions accordingly.

We may rely on the fact that the parent is a real PCI device, because no
other configuration is present in the wild.
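
Concretely, the idma64 driver below registers its dmaengine with dev->parent,
so a client's filter function should match the channel's device directly
instead of dereferencing ->parent once more. A minimal sketch of the
resulting filter shape (the function name is illustrative; the spi-pxa2xx
and 8250_dw hunks below do exactly this):

#include <linux/dmaengine.h>

/* param is the physical (PCI) device that performs the DMA transfers. */
static bool lpss_dma_filter(struct dma_chan *chan, void *param)
{
	/* chan->device->dev is now chip->sysdev, i.e. the PCI parent. */
	return param == chan->device->dev;
}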

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Mark Brown <broonie@kernel.org>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> [for tty parts]
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Andy Shevchenko 2019-03-18 18:39:30 +03:00 committed by Vinod Koul
parent 9e98c678c2
commit 5ba846b1ee
4 changed files with 9 additions and 10 deletions

drivers/dma/idma64.c

@@ -592,7 +592,7 @@ static int idma64_probe(struct idma64_chip *chip)
 	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
-	idma64->dma.dev = chip->dev;
+	idma64->dma.dev = chip->sysdev;
 	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
@@ -632,6 +632,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
 {
 	struct idma64_chip *chip;
 	struct device *dev = &pdev->dev;
+	struct device *sysdev = dev->parent;
 	struct resource *mem;
 	int ret;
@@ -648,11 +649,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
 	if (IS_ERR(chip->regs))
 		return PTR_ERR(chip->regs);
-	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
 	if (ret)
 		return ret;
 	chip->dev = dev;
+	chip->sysdev = sysdev;
 	ret = idma64_probe(chip);
 	if (ret)

drivers/dma/idma64.h

@@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
 /**
  * struct idma64_chip - representation of iDMA 64-bit controller hardware
  * @dev: struct device of the DMA controller
+ * @sysdev: struct device of the physical device that does DMA
  * @irq: irq line
  * @regs: memory mapped I/O space
  * @idma64: struct idma64 that is filed by idma64_probe()
  */
 struct idma64_chip {
 	struct device *dev;
+	struct device *sysdev;
 	int irq;
 	void __iomem *regs;
 	struct idma64 *idma64;

drivers/spi/spi-pxa2xx.c

@@ -1487,12 +1487,7 @@ static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
 static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
 {
-	struct device *dev = param;
-	if (dev != chan->device->dev->parent)
-		return false;
-	return true;
+	return param == chan->device->dev;
 }
 #endif /* CONFIG_PCI */
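
For context, a filter like this is handed to dma_request_channel() together
with the device it should match; a minimal usage sketch (the wrapper is
illustrative, not how spi-pxa2xx actually wires it up):

#include <linux/dmaengine.h>

static struct dma_chan *lpss_request_chan(struct device *dma_dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * dma_dev must be the device the dmaengine was registered with --
	 * after this patch that is chip->sysdev (the PCI parent), so the
	 * filter's "param == chan->device->dev" comparison matches.
	 */
	return dma_request_channel(mask, pxa2xx_spi_idma_filter, dma_dev);
}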

drivers/tty/serial/8250/8250_dw.c

@@ -365,7 +365,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param)
 static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
 {
-	return param == chan->device->dev->parent;
+	return param == chan->device->dev;
 }
 /*
@@ -434,7 +434,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
 		data->uart_16550_compatible = true;
 	}
-	/* Platforms with iDMA */
+	/* Platforms with iDMA 64-bit */
 	if (platform_get_resource_byname(to_platform_device(p->dev),
 					 IORESOURCE_MEM, "lpss_priv")) {
 		data->dma.rx_param = p->dev->parent;