dmaengine: xilinx: xdma: Fix synchronization issue
commit 6a40fb8245 upstream.

The current xdma_synchronize method does not properly wait for the last
transfer to be done. Due to limitations of the XDMA engine, it is not
possible to stop a transfer in the middle of a descriptor. Said otherwise,
if a stop is requested at the end of descriptor "N" and the OS is fast
enough, the DMA controller will effectively stop immediately. However, if
the OS is slightly too slow to request the stop and the DMA engine starts
descriptor "N+1", the N+1 transfer will be performed until its end. This
means that after a terminate_all, the last descriptor must remain valid
and the synchronization must wait for this last descriptor to be
terminated.

Fixes: 855c2e1d18 ("dmaengine: xilinx: xdma: Rework xdma_terminate_all()")
Fixes: f5c392d106 ("dmaengine: xilinx: xdma: Add terminate_all/synchronize callbacks")
Cc: stable@vger.kernel.org
Suggested-by: Miquel Raynal <miquel.raynal@bootlin.com>
Signed-off-by: Louis Chauvet <louis.chauvet@bootlin.com>
Link: https://lore.kernel.org/r/20240327-digigram-xdma-fixes-v1-2-45f4a52c0283@bootlin.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent f70e6b9e4b
commit 582ce5d734
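For context, the reason the driver's device_synchronize() callback must block
until the engine is really idle is that dmaengine clients use it as the barrier
between terminating a channel and releasing its buffers. Below is a minimal
sketch of such a client teardown; the function name and buffer handling are
illustrative only and are not part of this patch:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical client teardown: stop the channel, then free its buffer. */
static void example_client_teardown(struct device *dev, struct dma_chan *chan,
                                    void *cpu_addr, dma_addr_t dma_addr, size_t len)
{
        /* Ask the driver to abort; this may return before the hardware is idle */
        dmaengine_terminate_async(chan);

        /*
         * Wait until the driver guarantees that no transfer or completion
         * callback is still running. With the bug described above, xdma could
         * return here while descriptor "N+1" was still moving data.
         */
        dmaengine_synchronize(chan);

        /* Only now is it safe to release the DMA buffer */
        dma_free_coherent(dev, len, cpu_addr, dma_addr);
}

If synchronize returns too early, the free above races with an in-flight
descriptor, which is exactly the window this patch closes by waiting for the
last interrupt.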
--- a/drivers/dma/xilinx/xdma-regs.h
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -117,6 +117,9 @@ struct xdma_hw_desc {
                         CHAN_CTRL_IE_WRITE_ERROR |                     \
                         CHAN_CTRL_IE_DESC_ERROR)
 
+/* bits of the channel status register */
+#define XDMA_CHAN_STATUS_BUSY                  BIT(0)
+
 #define XDMA_CHAN_STATUS_MASK CHAN_CTRL_START
 
 #define XDMA_CHAN_ERROR_MASK (CHAN_CTRL_IE_DESC_ALIGN_MISMATCH |      \
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -71,6 +71,8 @@ struct xdma_chan {
        enum dma_transfer_direction     dir;
        struct dma_slave_config         cfg;
        u32                             irq;
+       struct completion               last_interrupt;
+       bool                            stop_requested;
 };
 
 /**
@@ -376,6 +378,8 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
                return ret;
 
        xchan->busy = true;
+       xchan->stop_requested = false;
+       reinit_completion(&xchan->last_interrupt);
 
        return 0;
 }
@@ -387,7 +391,6 @@ static int xdma_xfer_start(struct xdma_chan *xchan)
 static int xdma_xfer_stop(struct xdma_chan *xchan)
 {
        int ret;
-       u32 val;
        struct xdma_device *xdev = xchan->xdev_hdl;
 
        /* clear run stop bit to prevent any further auto-triggering */
@@ -395,13 +398,7 @@ static int xdma_xfer_stop(struct xdma_chan *xchan)
                           CHAN_CTRL_RUN_STOP);
        if (ret)
                return ret;
-
-       /* Clear the channel status register */
-       ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &val);
-       if (ret)
-               return ret;
-
-       return 0;
+       return ret;
 }
 
 /**
@@ -474,6 +471,8 @@ static int xdma_alloc_channels(struct xdma_device *xdev,
                xchan->xdev_hdl = xdev;
                xchan->base = base + i * XDMA_CHAN_STRIDE;
                xchan->dir = dir;
+               xchan->stop_requested = false;
+               init_completion(&xchan->last_interrupt);
 
                ret = xdma_channel_init(xchan);
                if (ret)
@@ -521,6 +520,7 @@ static int xdma_terminate_all(struct dma_chan *chan)
        spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
 
        xdma_chan->busy = false;
+       xdma_chan->stop_requested = true;
        vd = vchan_next_desc(&xdma_chan->vchan);
        if (vd) {
                list_del(&vd->node);
@@ -542,6 +542,13 @@ static int xdma_terminate_all(struct dma_chan *chan)
 static void xdma_synchronize(struct dma_chan *chan)
 {
        struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+       struct xdma_device *xdev = xdma_chan->xdev_hdl;
+       int st = 0;
+
+       /* If the engine continues running, wait for the last interrupt */
+       regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
+       if (st & XDMA_CHAN_STATUS_BUSY)
+               wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));
 
        vchan_synchronize(&xdma_chan->vchan);
 }
@@ -876,6 +883,9 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
        u32 st;
        bool repeat_tx;
 
+       if (xchan->stop_requested)
+               complete(&xchan->last_interrupt);
+
        spin_lock(&xchan->vchan.lock);
 
        /* get submitted request */