
dmaengine: xilinx_dma: Drop SG support for VDMA IP

xilinx_vdma_start_transfer() is used only for the VDMA IP, yet it still
contains code that is conditional on the has_sg variable. has_sg is set
only when the HW supports SG mode, which is never true for the VDMA IP.

This patch drops the never-taken branches.

Signed-off-by: Andrea Merello <andrea.merello@gmail.com>
Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Authored by Andrea Merello on 2018-11-20 16:31:51 +01:00, committed by Vinod Koul
parent 29b9ee4a0c
commit b8349172b4
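Before the diff itself, a minimal, self-contained toy model of the invariant the patch relies on (hypothetical struct and strings, not the driver's code): has_sg is set only on SG-capable hardware, so on a VDMA channel any branch guarded by it is dead.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the channel state; hypothetical, not the driver struct. */
struct chan {
	bool has_sg;	/* set only when the HW supports SG mode */
};

static void start_transfer(struct chan *c)
{
	if (c->has_sg)	/* never true on a VDMA channel: dead branch */
		puts("SG mode: program CURDESC/TAILDESC");
	else
		puts("direct register mode: program frame-buffer registers");
}

int main(void)
{
	/* The VDMA IP never supports SG, so has_sg stays false ... */
	struct chan vdma = { .has_sg = false };

	/* ... and only the else arm can ever run; the patch simply
	 * deletes the if arm from the VDMA-only code path. */
	start_transfer(&vdma);
	return 0;
}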


@@ -1102,6 +1102,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg, j;
 	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_segment *segment, *last = NULL;
+	int i = 0;
 
 	/* This function was invoked with lock held */
 	if (chan->err)
@@ -1121,14 +1123,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_vdma_tx_segment, node);
 
-	/*
-	 * If hardware is idle, then all descriptors on the running lists are
-	 * done, start new transfers
-	 */
-	if (chan->has_sg)
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-			       desc->async_tx.phys);
-
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1145,15 +1139,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/*
-	 * With SG, start with circular mode, so that BDs can be fetched.
-	 * In direct register mode, if not parking, enable circular mode
-	 */
-	if (chan->has_sg || !config->park)
-		reg |= XILINX_DMA_DMACR_CIRC_EN;
-
+	/* If not parking, enable circular mode */
 	if (config->park)
 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+	else
+		reg |= XILINX_DMA_DMACR_CIRC_EN;
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
@@ -1175,49 +1165,39 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		return;
 
 	/* Start the transfer */
-	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-				tail_segment->phys);
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
-	} else {
-		struct xilinx_vdma_tx_segment *segment, *last = NULL;
-		int i = 0;
-
-		if (chan->desc_submitcount < chan->num_frms)
-			i = chan->desc_submitcount;
-
-		list_for_each_entry(segment, &desc->segments, node) {
-			if (chan->ext_addr)
-				vdma_desc_write_64(chan,
-					XILINX_VDMA_REG_START_ADDRESS_64(i++),
-					segment->hw.buf_addr,
-					segment->hw.buf_addr_msb);
-			else
-				vdma_desc_write(chan,
-					XILINX_VDMA_REG_START_ADDRESS(i++),
-					segment->hw.buf_addr);
-
-			last = segment;
-		}
-
-		if (!last)
-			return;
+	if (chan->desc_submitcount < chan->num_frms)
+		i = chan->desc_submitcount;
+
+	list_for_each_entry(segment, &desc->segments, node) {
+		if (chan->ext_addr)
+			vdma_desc_write_64(chan,
+				   XILINX_VDMA_REG_START_ADDRESS_64(i++),
+				   segment->hw.buf_addr,
+				   segment->hw.buf_addr_msb);
+		else
+			vdma_desc_write(chan,
+					XILINX_VDMA_REG_START_ADDRESS(i++),
+					segment->hw.buf_addr);
 
-		/* HW expects these parameters to be same for one transaction */
-		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
-		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
-				last->hw.stride);
-		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
-
-		chan->desc_submitcount++;
-		chan->desc_pendingcount--;
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
-		if (chan->desc_submitcount == chan->num_frms)
-			chan->desc_submitcount = 0;
+		last = segment;
 	}
 
+	if (!last)
+		return;
+
+	/* HW expects these parameters to be same for one transaction */
+	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+			last->hw.stride);
+	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+
+	chan->desc_submitcount++;
+	chan->desc_pendingcount--;
+	list_del(&desc->node);
+	list_add_tail(&desc->node, &chan->active_list);
+	if (chan->desc_submitcount == chan->num_frms)
+		chan->desc_submitcount = 0;
+
 	chan->idle = false;
 }
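The bookkeeping that survives the cleanup is the wrap-around of desc_submitcount over the hardware frame stores: each submitted descriptor programs start addresses beginning at that index, and the counter wraps at num_frms. A minimal, self-contained sketch of just that logic (field names mirror the driver's, but this is a toy model, not driver code):

#include <stdio.h>

/* Toy per-channel state; mirrors only the two fields of interest. */
struct chan {
	unsigned int desc_submitcount;	/* next frame store to program */
	unsigned int num_frms;		/* frame stores in the HW */
};

/* Mimics the post-patch index selection and wrap-around in
 * xilinx_vdma_start_transfer(). */
static void submit_one_desc(struct chan *c)
{
	unsigned int i = 0;

	if (c->desc_submitcount < c->num_frms)
		i = c->desc_submitcount;

	printf("descriptor -> frame store %u\n", i);

	c->desc_submitcount++;
	if (c->desc_submitcount == c->num_frms)
		c->desc_submitcount = 0;	/* wrap to frame store 0 */
}

int main(void)
{
	struct chan c = { .desc_submitcount = 0, .num_frms = 3 };

	for (int n = 0; n < 5; n++)	/* prints stores 0, 1, 2, 0, 1 */
		submit_one_desc(&c);
	return 0;
}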