
dmaengine: rcar-dmac: fixup spinlock in rcar-dmac

The rcar-dmac driver currently uses spin_lock_irq() / spin_unlock_irq()
in several functions. However, another driver may call the DMAEngine API
while it has interrupts disabled. In that case, the rcar-dmac side
spin_unlock_irq() forcibly re-enables all interrupts, so the calling
driver can receive unexpected interrupts and its exclusive access
control is broken.
This patch replaces spin_lock_irq() with spin_lock_irqsave() and
spin_unlock_irq() with spin_unlock_irqrestore().
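
For background, a minimal sketch (not part of the driver; example_lock and
both function names are made up for illustration) of why the irqsave
variants matter: spin_unlock_irq() unconditionally re-enables local
interrupts, whereas spin_unlock_irqrestore() puts them back exactly as the
caller left them, so it is safe even when the caller already runs with
interrupts disabled.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Re-enables local interrupts on unlock regardless of the caller's state:
 * a caller that entered with interrupts disabled gets them turned back on. */
static void unsafe_if_caller_has_irqs_off(void)
{
	spin_lock_irq(&example_lock);
	/* ... critical section ... */
	spin_unlock_irq(&example_lock);
}

/* Saves the caller's interrupt state on lock and restores it on unlock,
 * so the caller's context is preserved whether or not IRQs were enabled. */
static void safe_in_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}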

Reported-by: Cao Minh Hiep <cm-hiep@jinso.co.jp>
Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Tested-by: Keita Kobayashi <keita.kobayashi.ym@renesas.com>
Acked-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Author: Kuninori Morimoto, 2015-05-21 03:48:38 +00:00 (committed by Mark Brown)
Parent: b787f68c36
Commit: d23c9a0a5c


@@ -465,6 +465,7 @@ static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
 static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 {
 	struct rcar_dmac_desc_page *page;
+	unsigned long flags;
 	LIST_HEAD(list);
 	unsigned int i;
@@ -482,10 +483,10 @@ static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 		list_add_tail(&desc->node, &list);
 	}
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice_tail(&list, &chan->desc.free);
 	list_add_tail(&page->node, &chan->desc.pages);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return 0;
 }
@@ -516,6 +517,7 @@ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
 static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 {
 	struct rcar_dmac_desc *desc, *_desc;
+	unsigned long flags;
 	LIST_HEAD(list);
 
 	/*
@@ -524,9 +526,9 @@ static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 	 * list_for_each_entry_safe, isn't safe if we release the channel lock
 	 * around the rcar_dmac_desc_put() call.
 	 */
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice_init(&chan->desc.wait, &list);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	list_for_each_entry_safe(desc, _desc, &list, node) {
 		if (async_tx_test_ack(&desc->async_tx)) {
@@ -539,9 +541,9 @@ static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
 		return;
 
 	/* Put the remaining descriptors back in the wait list. */
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice(&list, &chan->desc.wait);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /*
@@ -556,12 +558,13 @@ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
 static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
 {
 	struct rcar_dmac_desc *desc;
+	unsigned long flags;
 	int ret;
 
 	/* Recycle acked descriptors before attempting allocation. */
 	rcar_dmac_desc_recycle_acked(chan);
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 
 	while (list_empty(&chan->desc.free)) {
 		/*
@@ -570,17 +573,17 @@ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
 		 * allocated descriptors. If the allocation fails return an
 		 * error.
 		 */
-		spin_unlock_irq(&chan->lock);
+		spin_unlock_irqrestore(&chan->lock, flags);
 		ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
 		if (ret < 0)
 			return NULL;
-		spin_lock_irq(&chan->lock);
+		spin_lock_irqsave(&chan->lock, flags);
 	}
 
 	desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
 	list_del(&desc->node);
 
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return desc;
 }
@@ -593,6 +596,7 @@ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
 static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 {
 	struct rcar_dmac_desc_page *page;
+	unsigned long flags;
 	LIST_HEAD(list);
 	unsigned int i;
@@ -606,10 +610,10 @@ static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
 		list_add_tail(&chunk->node, &list);
 	}
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 	list_splice_tail(&list, &chan->desc.chunks_free);
 	list_add_tail(&page->node, &chan->desc.pages);
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return 0;
 }
@@ -627,9 +631,10 @@ static struct rcar_dmac_xfer_chunk *
 rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
 {
 	struct rcar_dmac_xfer_chunk *chunk;
+	unsigned long flags;
 	int ret;
 
-	spin_lock_irq(&chan->lock);
+	spin_lock_irqsave(&chan->lock, flags);
 
 	while (list_empty(&chan->desc.chunks_free)) {
 		/*
@@ -638,18 +643,18 @@ rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
 		 * allocated descriptors. If the allocation fails return an
 		 * error.
 		 */
-		spin_unlock_irq(&chan->lock);
+		spin_unlock_irqrestore(&chan->lock, flags);
 		ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
 		if (ret < 0)
 			return NULL;
-		spin_lock_irq(&chan->lock);
+		spin_lock_irqsave(&chan->lock, flags);
 	}
 
 	chunk = list_first_entry(&chan->desc.chunks_free,
 				 struct rcar_dmac_xfer_chunk, node);
 	list_del(&chunk->node);
 
-	spin_unlock_irq(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
 
 	return chunk;
 }