Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-19 18:53:52 +08:00)
dmaengine: xilinx_dma: Drop SG support for VDMA IP
xilinx_vdma_start_transfer() is used only for the VDMA IP, yet it still contains code conditional on the has_sg variable. has_sg is set only when the hardware supports SG mode, which is never the case for the VDMA IP. This patch drops the never-taken branches.

Signed-off-by: Andrea Merello <andrea.merello@gmail.com>
Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 29b9ee4a0c
commit b8349172b4
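The key premise of the patch is that has_sg can only become true when the hardware itself reports scatter-gather support, and the VDMA IP never does, so on a VDMA channel every has_sg branch is dead code. Below is a minimal standalone sketch of that premise; the type, enum, register and bit names are illustrative assumptions modelled loosely on the driver, not its exact probe code.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative model only: names and layout are assumptions, not the driver's. */
enum xdma_ip_type { XDMA_TYPE_AXIDMA, XDMA_TYPE_CDMA, XDMA_TYPE_VDMA };

struct model_chan {
	enum xdma_ip_type dmatype;
	uint32_t dmasr;		/* mocked status-register readback */
	bool has_sg;
};

#define MODEL_DMASR_SG_MASK 0x00000008	/* assumed "SG included" capability bit */

/*
 * Probe-time detection as described by the commit message: only the
 * non-VDMA variants can ever report (and thus enable) SG support.
 */
static void model_detect_sg(struct model_chan *chan)
{
	if (chan->dmatype != XDMA_TYPE_VDMA &&
	    (chan->dmasr & MODEL_DMASR_SG_MASK))
		chan->has_sg = true;
}

int main(void)
{
	/*
	 * Even if the capability bit happens to read back as set, a VDMA
	 * channel never ends up with has_sg == true, so every
	 * "if (chan->has_sg)" test in xilinx_vdma_start_transfer() is
	 * never taken -- which is what the patch removes.
	 */
	struct model_chan vdma = {
		.dmatype = XDMA_TYPE_VDMA,
		.dmasr = MODEL_DMASR_SG_MASK,
	};

	model_detect_sg(&vdma);
	assert(!vdma.has_sg);
	return 0;
}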
@@ -1102,6 +1102,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg, j;
 	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_segment *segment, *last = NULL;
+	int i = 0;
 
 	/* This function was invoked with lock held */
 	if (chan->err)
@@ -1121,14 +1123,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_vdma_tx_segment, node);
 
-	/*
-	 * If hardware is idle, then all descriptors on the running lists are
-	 * done, start new transfers
-	 */
-	if (chan->has_sg)
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-			       desc->async_tx.phys);
-
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1145,15 +1139,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/*
-	 * With SG, start with circular mode, so that BDs can be fetched.
-	 * In direct register mode, if not parking, enable circular mode
-	 */
-	if (chan->has_sg || !config->park)
-		reg |= XILINX_DMA_DMACR_CIRC_EN;
-
 	/* If not parking, enable circular mode */
 	if (config->park)
 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+	else
+		reg |= XILINX_DMA_DMACR_CIRC_EN;
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
@@ -1175,49 +1165,39 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		return;
 
 	/* Start the transfer */
-	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-			       tail_segment->phys);
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
-	} else {
-		struct xilinx_vdma_tx_segment *segment, *last = NULL;
-		int i = 0;
-
-		if (chan->desc_submitcount < chan->num_frms)
-			i = chan->desc_submitcount;
-
-		list_for_each_entry(segment, &desc->segments, node) {
-			if (chan->ext_addr)
-				vdma_desc_write_64(chan,
-					XILINX_VDMA_REG_START_ADDRESS_64(i++),
-					segment->hw.buf_addr,
-					segment->hw.buf_addr_msb);
-			else
-				vdma_desc_write(chan,
-					XILINX_VDMA_REG_START_ADDRESS(i++),
-					segment->hw.buf_addr);
-
-			last = segment;
-		}
-
-		if (!last)
-			return;
-
-		/* HW expects these parameters to be same for one transaction */
-		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
-		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
-				last->hw.stride);
-		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
-
-		chan->desc_submitcount++;
-		chan->desc_pendingcount--;
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
-		if (chan->desc_submitcount == chan->num_frms)
-			chan->desc_submitcount = 0;
-	}
+	if (chan->desc_submitcount < chan->num_frms)
+		i = chan->desc_submitcount;
+
+	list_for_each_entry(segment, &desc->segments, node) {
+		if (chan->ext_addr)
+			vdma_desc_write_64(chan,
+					   XILINX_VDMA_REG_START_ADDRESS_64(i++),
+					   segment->hw.buf_addr,
+					   segment->hw.buf_addr_msb);
+		else
+			vdma_desc_write(chan,
+					XILINX_VDMA_REG_START_ADDRESS(i++),
+					segment->hw.buf_addr);
+
+		last = segment;
+	}
+
+	if (!last)
+		return;
+
+	/* HW expects these parameters to be same for one transaction */
+	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+			last->hw.stride);
+	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+
+	chan->desc_submitcount++;
+	chan->desc_pendingcount--;
+	list_del(&desc->node);
+	list_add_tail(&desc->node, &chan->active_list);
+	if (chan->desc_submitcount == chan->num_frms)
+		chan->desc_submitcount = 0;
 
 	chan->idle = false;
 }
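The circular-mode hunk above is slightly more than a straight branch deletion: the old "if (chan->has_sg || !config->park)" enable collapses into an else branch on the parking check. A small standalone check of that equivalence under the patch's premise (has_sg is always false for VDMA) is sketched below; the bit value is an illustrative assumption, not the driver's real DMACR layout.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MODEL_DMACR_CIRC_EN 0x00000002	/* assumed bit value, for illustration only */

/* Old logic with has_sg fixed to false, i.e. the only case VDMA can hit. */
static uint32_t circ_old(uint32_t reg, bool park)
{
	bool has_sg = false;

	if (has_sg || !park)
		reg |= MODEL_DMACR_CIRC_EN;

	/* If not parking, enable circular mode */
	if (park)
		reg &= ~MODEL_DMACR_CIRC_EN;
	return reg;
}

/* New logic from the patch: a plain if/else on the parking flag. */
static uint32_t circ_new(uint32_t reg, bool park)
{
	if (park)
		reg &= ~MODEL_DMACR_CIRC_EN;
	else
		reg |= MODEL_DMACR_CIRC_EN;
	return reg;
}

int main(void)
{
	/* Both versions agree for every combination of initial bit and park flag. */
	for (int bit = 0; bit <= 1; bit++) {
		for (int park = 0; park <= 1; park++) {
			uint32_t reg = bit ? MODEL_DMACR_CIRC_EN : 0;

			assert(circ_old(reg, park) == circ_new(reg, park));
		}
	}
	return 0;
}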