spi: Rework DMA mapped flag

Merge series from Andy Shevchenko <andriy.shevchenko@linux.intel.com>:

The first part of the series (patches 1 to 7) introduces a new helper
and then converts the users over to it.

This consolidates the duplicated code and also keeps patch 8 (the last
one) localised to the SPI core.

The last patch is the main rework: it gets rid of the recently
introduced hack with a dummy SG list and moves to a transfer-based
DMA mapped flag.

That said, patches 1 to 7 may be applied right away, since no
functional change is intended, while the last one needs more testing
and review.
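In short, drivers stop looking at the controller-wide cur_msg_mapped flag and
instead ask whether this particular transfer has been DMA mapped by the core.
A condensed sketch of the resulting pattern, as it reads after the whole
series (taken from the internals.h and spi-dw hunks below, with surrounding
context trimmed):

	/* drivers/spi/internals.h: the new helper, in its final form */
	static inline bool spi_xfer_is_dma_mapped(struct spi_controller *ctlr,
						  struct spi_device *spi,
						  struct spi_transfer *xfer)
	{
		/* The per-transfer flags are set by the core in __spi_map_msg() */
		return ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer) &&
		       (xfer->tx_sg_mapped || xfer->rx_sg_mapped);
	}

	/* Driver side, e.g. dw_spi_transfer_one(), before the series ... */
	if (host->can_dma && host->can_dma(host, spi, transfer))
		dws->dma_mapped = host->cur_msg_mapped;

	/* ... and after it */
	dws->dma_mapped = spi_xfer_is_dma_mapped(host, spi, transfer);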
Mark Brown 2024-06-11 11:38:25 +01:00
commit 412a05d6a9
9 changed files with 59 additions and 69 deletions

drivers/spi/internals.h

@@ -40,4 +40,12 @@ static inline void spi_unmap_buf(struct spi_controller *ctlr,
 }
 #endif /* CONFIG_HAS_DMA */
 
+static inline bool spi_xfer_is_dma_mapped(struct spi_controller *ctlr,
+                                          struct spi_device *spi,
+                                          struct spi_transfer *xfer)
+{
+        return ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer) &&
+               (xfer->tx_sg_mapped || xfer->rx_sg_mapped);
+}
+
 #endif /* __LINUX_SPI_INTERNALS_H */

drivers/spi/spi-dw-core.c

@@ -19,6 +19,7 @@
 #include <linux/string.h>
 #include <linux/of.h>
+#include "internals.h"
 #include "spi-dw.h"
 #ifdef CONFIG_DEBUG_FS
@@ -438,8 +439,7 @@ static int dw_spi_transfer_one(struct spi_controller *host,
         transfer->effective_speed_hz = dws->current_freq;
         /* Check if current transfer is a DMA transaction */
-        if (host->can_dma && host->can_dma(host, spi, transfer))
-                dws->dma_mapped = host->cur_msg_mapped;
+        dws->dma_mapped = spi_xfer_is_dma_mapped(host, spi, transfer);
         /* For poll mode just disable all interrupts */
         dw_spi_mask_intr(dws, 0xff);

drivers/spi/spi-ingenic.c

@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/spi/spi.h>
+#include "internals.h"
 #define REG_SSIDR       0x0
 #define REG_SSICR0      0x4
@@ -242,11 +243,10 @@ static int spi_ingenic_transfer_one(struct spi_controller *ctlr,
 {
         struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
         unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word;
-        bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer);
         spi_ingenic_prepare_transfer(priv, spi, xfer);
-        if (ctlr->cur_msg_mapped && can_dma)
+        if (spi_xfer_is_dma_mapped(ctlr, spi, xfer))
                 return spi_ingenic_dma_tx(ctlr, xfer, bits);
         if (bits > 16)

drivers/spi/spi-omap2-mcspi.c

@@ -27,6 +27,8 @@
 #include <linux/spi/spi.h>
+#include "internals.h"
 #include <linux/platform_data/spi-omap2-mcspi.h>
 #define OMAP2_MCSPI_MAX_FREQ    48000000
@@ -1208,8 +1210,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
         unsigned                count;
         if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
-            ctlr->cur_msg_mapped &&
-            ctlr->can_dma(ctlr, spi, t))
+            spi_xfer_is_dma_mapped(ctlr, spi, t))
                 omap2_mcspi_set_fifo(spi, t, 1);
         omap2_mcspi_set_enable(spi, 1);
@@ -1220,8 +1221,7 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr,
                                 + OMAP2_MCSPI_TX0);
         if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
-            ctlr->cur_msg_mapped &&
-            ctlr->can_dma(ctlr, spi, t))
+            spi_xfer_is_dma_mapped(ctlr, spi, t))
                 count = omap2_mcspi_txrx_dma(spi, t);
         else
                 count = omap2_mcspi_txrx_pio(spi, t);

drivers/spi/spi-pci1xxxx.c

@@ -6,6 +6,7 @@
 #include <linux/bitfield.h>
+#include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/iopoll.h>
 #include <linux/irq.h>
@@ -15,7 +16,7 @@
 #include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/spi/spi.h>
-#include <linux/delay.h>
+#include "internals.h"
 #define DRV_NAME "spi-pci1xxxx"
@@ -567,7 +568,7 @@ error:
 static int pci1xxxx_spi_transfer_one(struct spi_controller *spi_ctlr,
                                      struct spi_device *spi, struct spi_transfer *xfer)
 {
-        if (spi_ctlr->can_dma(spi_ctlr, spi, xfer) && spi_ctlr->cur_msg_mapped)
+        if (spi_xfer_is_dma_mapped(spi_ctlr, spi, xfer))
                 return pci1xxxx_spi_transfer_with_dma(spi_ctlr, spi, xfer);
         else
                 return pci1xxxx_spi_transfer_with_io(spi_ctlr, spi, xfer);

drivers/spi/spi-pxa2xx.c

@@ -26,6 +26,7 @@
 #include <linux/spi/spi.h>
+#include "internals.h"
 #include "spi-pxa2xx.h"
 #define TIMOUT_DFLT     1000
@@ -993,11 +994,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
         }
         dma_thresh = SSCR1_RxTresh(RX_THRESH_DFLT) | SSCR1_TxTresh(TX_THRESH_DFLT);
-        dma_mapped = controller->can_dma &&
-                     controller->can_dma(controller, spi, transfer) &&
-                     controller->cur_msg_mapped;
+        dma_mapped = spi_xfer_is_dma_mapped(controller, spi, transfer);
         if (dma_mapped) {
                 /* Ensure we have the correct interrupt handler */
                 drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

drivers/spi/spi-qup.c

@@ -5,6 +5,8 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/err.h>
 #include <linux/interconnect.h>
 #include <linux/interrupt.h>
@@ -16,8 +18,7 @@
 #include <linux/pm_opp.h>
 #include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
+#include "internals.h"
 #define QUP_CONFIG      0x0000
 #define QUP_STATE       0x0004
@@ -709,9 +710,7 @@ static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer)
         if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32)))
                 controller->mode = QUP_IO_M_MODE_FIFO;
-        else if (spi->controller->can_dma &&
-                 spi->controller->can_dma(spi->controller, spi, xfer) &&
-                 spi->controller->cur_msg_mapped)
+        else if (spi_xfer_is_dma_mapped(spi->controller, spi, xfer))
                 controller->mode = QUP_IO_M_MODE_BAM;
         else
                 controller->mode = QUP_IO_M_MODE_BLOCK;

drivers/spi/spi.c

@@ -1220,11 +1220,6 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
         spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
 }
 
-/* Dummy SG for unidirect transfers */
-static struct scatterlist dummy_sg = {
-        .page_link = SG_END,
-};
-
 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
         struct device *tx_dev, *rx_dev;
@@ -1263,8 +1258,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
                                                 attrs);
                         if (ret != 0)
                                 return ret;
-                } else {
-                        xfer->tx_sg.sgl = &dummy_sg;
+
+                        xfer->tx_sg_mapped = true;
                 }
 
                 if (xfer->rx_buf != NULL) {
@@ -1278,8 +1273,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
                                 return ret;
                         }
-                } else {
-                        xfer->rx_sg.sgl = &dummy_sg;
+
+                        xfer->rx_sg_mapped = true;
                 }
         }
 
         /* No transfer has been mapped, bail out with success */
@@ -1288,7 +1283,6 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
         ctlr->cur_rx_dma_dev = rx_dev;
         ctlr->cur_tx_dma_dev = tx_dev;
-        ctlr->cur_msg_mapped = true;
 
         return 0;
 }
@@ -1299,57 +1293,46 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
         struct device *tx_dev = ctlr->cur_tx_dma_dev;
         struct spi_transfer *xfer;
 
-        if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
-                return 0;
-
         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                 /* The sync has already been done after each transfer. */
                 unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
 
-                if (!ctlr->can_dma(ctlr, msg->spi, xfer))
-                        continue;
+                if (xfer->rx_sg_mapped)
+                        spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+                                            DMA_FROM_DEVICE, attrs);
+                xfer->rx_sg_mapped = false;
 
-                spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
-                                    DMA_FROM_DEVICE, attrs);
-                spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
-                                    DMA_TO_DEVICE, attrs);
+                if (xfer->tx_sg_mapped)
+                        spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+                                            DMA_TO_DEVICE, attrs);
+                xfer->tx_sg_mapped = false;
         }
 
-        ctlr->cur_msg_mapped = false;
-
         return 0;
 }
 
-static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
                                     struct spi_transfer *xfer)
 {
         struct device *rx_dev = ctlr->cur_rx_dma_dev;
         struct device *tx_dev = ctlr->cur_tx_dma_dev;
 
-        if (!ctlr->cur_msg_mapped)
-                return;
-
-        if (!ctlr->can_dma(ctlr, msg->spi, xfer))
-                return;
-
-        dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
-        dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+        if (xfer->tx_sg_mapped)
+                dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+        if (xfer->rx_sg_mapped)
+                dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 }
 
-static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
                                  struct spi_transfer *xfer)
 {
         struct device *rx_dev = ctlr->cur_rx_dma_dev;
         struct device *tx_dev = ctlr->cur_tx_dma_dev;
 
-        if (!ctlr->cur_msg_mapped)
-                return;
-
-        if (!ctlr->can_dma(ctlr, msg->spi, xfer))
-                return;
-
-        dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-        dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+        if (xfer->rx_sg_mapped)
+                dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+        if (xfer->tx_sg_mapped)
+                dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 }
 #else /* !CONFIG_HAS_DMA */
 static inline int __spi_map_msg(struct spi_controller *ctlr,
@@ -1365,13 +1348,11 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 }
 
 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
-                                    struct spi_message *msg,
                                     struct spi_transfer *xfer)
 {
 }
 
 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
-                                 struct spi_message *msg,
                                  struct spi_transfer *xfer)
 {
 }
@@ -1643,13 +1624,13 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
                 reinit_completion(&ctlr->xfer_completion);
 
 fallback_pio:
-                spi_dma_sync_for_device(ctlr, msg, xfer);
+                spi_dma_sync_for_device(ctlr, xfer);
                 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
                 if (ret < 0) {
-                        spi_dma_sync_for_cpu(ctlr, msg, xfer);
+                        spi_dma_sync_for_cpu(ctlr, xfer);
 
-                        if (ctlr->cur_msg_mapped &&
-                            (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+                        if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
+                            (xfer->error & SPI_TRANS_FAIL_NO_START)) {
                                 __spi_unmap_msg(ctlr, msg);
                                 ctlr->fallback = true;
                                 xfer->error &= ~SPI_TRANS_FAIL_NO_START;
@@ -1671,7 +1652,7 @@ fallback_pio:
                         msg->status = ret;
                 }
 
-                spi_dma_sync_for_cpu(ctlr, msg, xfer);
+                spi_dma_sync_for_cpu(ctlr, xfer);
         } else {
                 if (xfer->len)
                         dev_err(&msg->spi->dev,

include/linux/spi/spi.h

@@ -447,7 +447,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @cur_msg_need_completion: Flag used internally to opportunistically skip
  *      the @cur_msg_completion. This flag is used to signal the context that
  *      is running spi_finalize_current_message() that it needs to complete()
- * @cur_msg_mapped: message has been mapped for DMA
  * @fallback: fallback to PIO if DMA transfer return failure with
  *      SPI_TRANS_FAIL_NO_START.
  * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
@@ -708,7 +707,6 @@ struct spi_controller {
         bool                    running;
         bool                    rt;
         bool                    auto_runtime_pm;
-        bool                    cur_msg_mapped;
         bool                    fallback;
         bool                    last_cs_mode_high;
         s8                      last_cs[SPI_CS_CNT_MAX];
@@ -981,6 +979,8 @@ struct spi_res {
  *      transfer this transfer. Set to 0 if the SPI bus driver does
  *      not support it.
  * @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg_mapped: If true, the @tx_sg is mapped for DMA
+ * @rx_sg_mapped: If true, the @rx_sg is mapped for DMA
  * @tx_sg: Scatterlist for transmit, currently not for client use
  * @rx_sg: Scatterlist for receive, currently not for client use
  * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
@@ -1077,10 +1077,13 @@ struct spi_transfer {
 #define SPI_TRANS_FAIL_IO       BIT(1)
         u16             error;
 
-        dma_addr_t      tx_dma;
-        dma_addr_t      rx_dma;
+        bool            tx_sg_mapped;
+        bool            rx_sg_mapped;
+
         struct sg_table tx_sg;
         struct sg_table rx_sg;
+        dma_addr_t      tx_dma;
+        dma_addr_t      rx_dma;
 
         unsigned        dummy_data:1;
         unsigned        cs_off:1;