mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-02 08:34:20 +08:00)
spi: imx: drop bogus tests for rx/tx bufs in DMA transfer
The driver tries to be clever by only setting up DMA channels when the corresponding sg tables are non-NULL. The sg tables are embedded structs in struct spi_transfer, so they are guaranteed to be non-NULL, which makes the if(tx)/if(rx) tests completely bogus. The driver even sets the SPI_MASTER_MUST_RX / SPI_MASTER_MUST_TX flags, which make sure the sg tables are not only present but also non-empty. Drop the tests and make the DMA path easier to follow.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Mark Brown <broonie@kernel.org>
parent b03c3884ca
commit 6b6192c04b
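Why the removed checks could never fire: tx_sg and rx_sg are embedded struct sg_table members of struct spi_transfer, so &transfer->tx_sg and &transfer->rx_sg are always non-NULL for any valid transfer, and the SPI_MASTER_MUST_RX / SPI_MASTER_MUST_TX flags mentioned above additionally have the core populate both directions. The minimal user-space sketch below illustrates only the pointer argument; the *_stub types are illustrative stand-ins, not the real kernel definitions.

#include <stdio.h>

/* Stand-ins for the kernel types; only the embedding matters here. */
struct sg_table_stub { int nents; };

struct spi_transfer_stub {
	struct sg_table_stub tx_sg;	/* embedded member, not a pointer */
	struct sg_table_stub rx_sg;	/* embedded member, not a pointer */
};

int main(void)
{
	struct spi_transfer_stub transfer = { {0}, {0} };

	/* Mirrors the driver's: tx = &transfer->tx_sg, rx = &transfer->rx_sg */
	struct sg_table_stub *tx = &transfer.tx_sg;
	struct sg_table_stub *rx = &transfer.rx_sg;

	/*
	 * The address of an embedded member of a valid object is never NULL,
	 * so "if (tx)" and "if (rx)" always evaluate to true.
	 */
	printf("tx = %p, rx = %p\n", (void *)tx, (void *)rx);
	return 0;
}

Compiled with any C compiler, both pointers print as non-NULL addresses regardless of what the transfer contains, which is exactly why the patch drops the tests.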
@@ -974,51 +974,40 @@ static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
 static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 				struct spi_transfer *transfer)
 {
-	struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
-	int ret;
+	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
 	unsigned long transfer_timeout;
 	unsigned long timeout;
 	struct spi_master *master = spi_imx->bitbang.master;
 	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
 
-	if (tx) {
-		desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
-					tx->sgl, tx->nents, DMA_MEM_TO_DEV,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!desc_tx)
-			return -EINVAL;
-
-		desc_tx->callback = spi_imx_dma_tx_callback;
-		desc_tx->callback_param = (void *)spi_imx;
-		dmaengine_submit(desc_tx);
-	}
-
-	if (rx) {
-		desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
-					rx->sgl, rx->nents, DMA_DEV_TO_MEM,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-		if (!desc_rx) {
-			dmaengine_terminate_all(master->dma_tx);
-			return -EINVAL;
-		}
-
-		desc_rx->callback = spi_imx_dma_rx_callback;
-		desc_rx->callback_param = (void *)spi_imx;
-		dmaengine_submit(desc_rx);
-	}
-
-	reinit_completion(&spi_imx->dma_rx_completion);
-	reinit_completion(&spi_imx->dma_tx_completion);
-
 	/*
-	 * Set these order to avoid potential RX overflow. The overflow may
-	 * happen if we enable SPI HW before starting RX DMA due to rescheduling
-	 * for another task and/or interrupt.
-	 * So RX DMA enabled first to make sure data would be read out from FIFO
-	 * ASAP. TX DMA enabled next to start filling TX FIFO with new data.
-	 * And finaly SPI HW enabled to start actual data transfer.
+	 * The TX DMA setup starts the transfer, so make sure RX is configured
+	 * before TX.
 	 */
+	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc_rx)
+		return -EINVAL;
+
+	desc_rx->callback = spi_imx_dma_rx_callback;
+	desc_rx->callback_param = (void *)spi_imx;
+	dmaengine_submit(desc_rx);
+	reinit_completion(&spi_imx->dma_rx_completion);
 	dma_async_issue_pending(master->dma_rx);
+
+	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc_tx) {
+		dmaengine_terminate_all(master->dma_tx);
+		return -EINVAL;
+	}
+
+	desc_tx->callback = spi_imx_dma_tx_callback;
+	desc_tx->callback_param = (void *)spi_imx;
+	dmaengine_submit(desc_tx);
+	reinit_completion(&spi_imx->dma_tx_completion);
 	dma_async_issue_pending(master->dma_tx);
 
 	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
@@ -1030,22 +1019,19 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
 		dmaengine_terminate_all(master->dma_tx);
 		dmaengine_terminate_all(master->dma_rx);
-	} else {
-		timeout = wait_for_completion_timeout(
-				&spi_imx->dma_rx_completion, transfer_timeout);
-		if (!timeout) {
-			dev_err(spi_imx->dev, "I/O Error in DMA RX\n");
-			spi_imx->devtype_data->reset(spi_imx);
-			dmaengine_terminate_all(master->dma_rx);
-		}
+		return -ETIMEDOUT;
 	}
 
-	if (!timeout)
-		ret = -ETIMEDOUT;
-	else
-		ret = transfer->len;
+	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
					      transfer_timeout);
+	if (!timeout) {
+		dev_err(&master->dev, "I/O Error in DMA RX\n");
+		spi_imx->devtype_data->reset(spi_imx);
+		dmaengine_terminate_all(master->dma_rx);
+		return -ETIMEDOUT;
+	}
 
-	return ret;
+	return transfer->len;
 }
 
 static int spi_imx_pio_transfer(struct spi_device *spi,