mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-20 19:23:57 +08:00)
commit 390f0ffe92
RX DMA tail data handling does not work correctly in many cases with the
current implementation. This happens because the SPI core was set up to
generate both RX and RX TAIL events, and the RX TAIL event does not work
correctly. This can easily be verified by sending an SPI transaction whose
size modulo the WML (32 in our case) is not equal to 0.
Also remove the change introduced in f6ee9b582d, since that change only
fixes use cases with transfer sizes from 33 to 128 bytes and does not fix
129 bytes and bigger.
This is the output of a transaction with a length of 138 bytes in loopback mode at 10 MHz:
TX0000: a3 97 a2 55 53 be f1 fc f9 79 6b 52 14 13 e9 e2
TX0010: 2d 51 8e 1f 56 08 57 27 a7 05 d4 d0 52 82 77 75
TX0020: 1b 99 4a ed 58 3d 6a 52 36 d5 24 4a 68 8e ad 95
TX0030: 5f 3c 35 b5 c4 8c dd 6c 11 32 3d e2 b4 b4 59 cf
TX0040: ce 23 3d 27 df a7 f9 96 fc 1e e0 66 2c 0e 7b 8c
TX0050: ca 30 42 8f bc 9f 7b ce d1 b8 b1 87 ec 8a d6 bb
TX0060: 2e 15 63 0e 3c dc a4 3a 7a 06 20 a7 93 1b 34 dd
TX0070: 4c f5 ec 88 96 68 d6 68 a0 09 6f 8e 93 47 c9 41
TX0080: db ac cf 97 89 f3 51 05 79 71
RX0000: a3 97 a2 55 53 be f1 fc f9 79 6b 52 14 13 e9 e2
RX0010: 2d 51 8e 1f 56 08 57 27 a7 05 d4 d0 52 82 77 75
RX0020: 1b 99 4a ed 58 3d 6a 52 36 d5 24 4a 68 8e ad 95
RX0030: 5f 3c 35 00 00 b5 00 00 00 c4 00 00 8c 00 00 dd
RX0040: 6c 11 32 3d e2 b4 b4 59 cf ce 23 3d 27 df a7 f9
RX0050: 96 fc 1e e0 66 2c 0e 7b 8c ca 30 42 8f 1f 1f bc
RX0060: 9f 7b ce d1 b8 b1 87 ec 8a d6 bb 2e 15 63 0e ed
RX0070: ed 3c 58 58 58 dc 3d 3d a4 6a 6a 3a 52 52 7a 36
RX0080: 06 20 a7 93 1b 34 dd 4c f5 ec
The zeros at offsets 0x33 and 0x34 are caused by reading an empty RX FIFO,
which is not possible if the RX DMA read was triggered by the RX event.
This means the DMA was triggered by the RX TAIL event.
Signed-off-by: Anton Bondarenko <anton.bondarenko.sama@gmail.com>
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Mark Brown <broonie@kernel.org>
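The condition described above can be reduced to a simple watermark-alignment rule, which is what spi_imx_can_dma() in the file below enforces after this change. The following stand-alone sketch (not driver code; the helper name and the printf harness are illustrative only) mirrors that check and the 138-byte loopback example: with a WML of 32 (half of the 64-word eCSPI FIFO), 138 % 32 = 10, so such a transfer no longer goes through the broken RX TAIL path and falls back to PIO instead.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative sketch of the watermark-alignment rule applied by
 * spi_imx_can_dma(): only transfers that are a whole multiple of the
 * RX watermark level (WML) are handed to SDMA; everything else uses PIO.
 */
static bool transfer_can_use_dma(unsigned int len, unsigned int wml)
{
        return len >= wml && (len % wml) == 0;
}

int main(void)
{
        unsigned int wml = 32; /* eCSPI FIFO size 64 words / 2 */

        /* 138 % 32 == 10: the failing case from the dump above now uses PIO. */
        printf("len 138: %s\n", transfer_can_use_dma(138, wml) ? "DMA" : "PIO");
        /* 128 % 32 == 0: still eligible for DMA. */
        printf("len 128: %s\n", transfer_can_use_dma(128, wml) ? "DMA" : "PIO");
        return 0;
}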
1278 lines · 34 KiB · C
/*
 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
 * Copyright (C) 2008 Juergen Beisert
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation
 * 51 Franklin Street, Fifth Floor
 * Boston, MA  02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/dma-imx.h>
#include <linux/platform_data/spi-imx.h>

#define DRIVER_NAME "spi_imx"

#define MXC_CSPIRXDATA 0x00
#define MXC_CSPITXDATA 0x04
#define MXC_CSPICTRL 0x08
#define MXC_CSPIINT 0x0c
#define MXC_RESET 0x1c

/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */

/* The maximum bytes that a sdma BD can transfer.*/
#define MAX_SDMA_BD_BYTES (1 << 15)
#define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000))
struct spi_imx_config {
        unsigned int speed_hz;
        unsigned int bpw;
        unsigned int mode;
        u8 cs;
};

enum spi_imx_devtype {
        IMX1_CSPI,
        IMX21_CSPI,
        IMX27_CSPI,
        IMX31_CSPI,
        IMX35_CSPI, /* CSPI on all i.mx except above */
        IMX51_ECSPI, /* ECSPI on i.mx51 and later */
};

struct spi_imx_data;

struct spi_imx_devtype_data {
        void (*intctrl)(struct spi_imx_data *, int);
        int (*config)(struct spi_imx_data *, struct spi_imx_config *);
        void (*trigger)(struct spi_imx_data *);
        int (*rx_available)(struct spi_imx_data *);
        void (*reset)(struct spi_imx_data *);
        enum spi_imx_devtype devtype;
};

struct spi_imx_data {
        struct spi_bitbang bitbang;

        struct completion xfer_done;
        void __iomem *base;
        struct clk *clk_per;
        struct clk *clk_ipg;
        unsigned long spi_clk;

        unsigned int count;
        void (*tx)(struct spi_imx_data *);
        void (*rx)(struct spi_imx_data *);
        void *rx_buf;
        const void *tx_buf;
        unsigned int txfifo; /* number of words pushed in tx FIFO */

        /* DMA */
        unsigned int dma_is_inited;
        unsigned int dma_finished;
        bool usedma;
        u32 wml;
        struct completion dma_rx_completion;
        struct completion dma_tx_completion;

        const struct spi_imx_devtype_data *devtype_data;
        int chipselect[0];
};

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
        return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
        return d->devtype_data->devtype == IMX35_CSPI;
}

static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
        return d->devtype_data->devtype == IMX51_ECSPI;
}

static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
{
        return is_imx51_ecspi(d) ? 64 : 8;
}

#define MXC_SPI_BUF_RX(type)                                            \
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)        \
{                                                                       \
        unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);      \
                                                                        \
        if (spi_imx->rx_buf) {                                          \
                *(type *)spi_imx->rx_buf = val;                         \
                spi_imx->rx_buf += sizeof(type);                        \
        }                                                               \
}

#define MXC_SPI_BUF_TX(type)                                            \
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)        \
{                                                                       \
        type val = 0;                                                   \
                                                                        \
        if (spi_imx->tx_buf) {                                          \
                val = *(type *)spi_imx->tx_buf;                         \
                spi_imx->tx_buf += sizeof(type);                        \
        }                                                               \
                                                                        \
        spi_imx->count -= sizeof(type);                                 \
                                                                        \
        writel(val, spi_imx->base + MXC_CSPITXDATA);                    \
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u32)

/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
        256, 384, 512, 768, 1024};

/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
                unsigned int fspi, unsigned int max)
{
        int i;

        for (i = 2; i < max; i++)
                if (fspi * mxc_clkdivs[i] >= fin)
                        return i;

        return max;
}

/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
                unsigned int fspi)
{
        int i, div = 4;

        for (i = 0; i < 7; i++) {
                if (fspi * div >= fin)
                        return i;
                div <<= 1;
        }

        return 7;
}

static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
                            struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

        if (spi_imx->dma_is_inited && transfer->len >= spi_imx->wml &&
            (transfer->len % spi_imx->wml) == 0)
                return true;
        return false;
}

#define MX51_ECSPI_CTRL 0x08
#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
#define MX51_ECSPI_CTRL_XCH (1 << 2)
#define MX51_ECSPI_CTRL_SMC (1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET 20

#define MX51_ECSPI_CONFIG 0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20))

#define MX51_ECSPI_INT 0x10
#define MX51_ECSPI_INT_TEEN (1 << 0)
#define MX51_ECSPI_INT_RREN (1 << 3)

#define MX51_ECSPI_DMA 0x14
#define MX51_ECSPI_DMA_TX_WML_OFFSET 0
#define MX51_ECSPI_DMA_TX_WML_MASK 0x3F
#define MX51_ECSPI_DMA_RX_WML_OFFSET 16
#define MX51_ECSPI_DMA_RX_WML_MASK (0x3F << 16)
#define MX51_ECSPI_DMA_RXT_WML_OFFSET 24
#define MX51_ECSPI_DMA_RXT_WML_MASK (0x3F << 24)

#define MX51_ECSPI_DMA_TEDEN_OFFSET 7
#define MX51_ECSPI_DMA_RXDEN_OFFSET 23
#define MX51_ECSPI_DMA_RXTDEN_OFFSET 31

#define MX51_ECSPI_STAT 0x18
#define MX51_ECSPI_STAT_RR (1 << 3)

#define MX51_ECSPI_TESTREG 0x20
#define MX51_ECSPI_TESTREG_LBC BIT(31)

/* MX51 eCSPI */
static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
                                      unsigned int *fres)
{
        /*
         * there are two 4-bit dividers, the pre-divider divides by
         * $pre, the post-divider by 2^$post
         */
        unsigned int pre, post;

        if (unlikely(fspi > fin))
                return 0;

        post = fls(fin) - fls(fspi);
        if (fin > fspi << post)
                post++;

        /* now we have: (fin <= fspi << post) with post being minimal */

        post = max(4U, post) - 4;
        if (unlikely(post > 0xf)) {
                pr_err("%s: cannot set clock freq: %u (base freq: %u)\n",
                                __func__, fspi, fin);
                return 0xff;
        }

        pre = DIV_ROUND_UP(fin, fspi << post) - 1;

        pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
                        __func__, fin, fspi, post, pre);

        /* Resulting frequency for the SCLK line. */
        *fres = (fin / (pre + 1)) >> post;

        return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
                (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}

static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
        unsigned val = 0;

        if (enable & MXC_INT_TE)
                val |= MX51_ECSPI_INT_TEEN;

        if (enable & MXC_INT_RR)
                val |= MX51_ECSPI_INT_RREN;

        writel(val, spi_imx->base + MX51_ECSPI_INT);
}

static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
        u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);

        if (!spi_imx->usedma)
                reg |= MX51_ECSPI_CTRL_XCH;
        else if (!spi_imx->dma_finished)
                reg |= MX51_ECSPI_CTRL_SMC;
        else
                reg &= ~MX51_ECSPI_CTRL_SMC;
        writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}

static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
                struct spi_imx_config *config)
{
        u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
        u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
        u32 clk = config->speed_hz, delay, reg;

        /*
         * The hardware seems to have a race condition when changing modes. The
         * current assumption is that the selection of the channel arrives
         * earlier in the hardware than the mode bits when they are written at
         * the same time.
         * So set master mode for all channels as we do not support slave mode.
         */
        ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

        /* set clock speed */
        ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);

        /* set chip select to use */
        ctrl |= MX51_ECSPI_CTRL_CS(config->cs);

        ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;

        cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);

        if (config->mode & SPI_CPHA)
                cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
        else
                cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);

        if (config->mode & SPI_CPOL) {
                cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
                cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
        } else {
                cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
                cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
        }
        if (config->mode & SPI_CS_HIGH)
                cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
        else
                cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs);

        /* CTRL register always go first to bring out controller from reset */
        writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

        reg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
        if (config->mode & SPI_LOOP)
                reg |= MX51_ECSPI_TESTREG_LBC;
        else
                reg &= ~MX51_ECSPI_TESTREG_LBC;
        writel(reg, spi_imx->base + MX51_ECSPI_TESTREG);

        writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

        /*
         * Wait until the changes in the configuration register CONFIGREG
         * propagate into the hardware. It takes exactly one tick of the
         * SCLK clock, but we will wait two SCLK clock just to be sure. The
         * effect of the delay it takes for the hardware to apply changes
         * is noticable if the SCLK clock run very slow. In such a case, if
         * the polarity of SCLK should be inverted, the GPIO ChipSelect might
         * be asserted before the SCLK polarity changes, which would disrupt
         * the SPI communication as the device on the other end would consider
         * the change of SCLK polarity as a clock tick already.
         */
        delay = (2 * 1000000) / clk;
        if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
                udelay(delay);
        else /* SCLK is _very_ slow */
                usleep_range(delay, delay + 10);

        /*
         * Configure the DMA register: setup the watermark
         * and enable DMA request.
         */
        if (spi_imx->dma_is_inited) {
                dma = readl(spi_imx->base + MX51_ECSPI_DMA);

                rx_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
                tx_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
                rxt_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
                dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
                           & ~MX51_ECSPI_DMA_RX_WML_MASK
                           & ~MX51_ECSPI_DMA_RXT_WML_MASK)
                           | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
                           |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
                           |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
                           |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);

                writel(dma, spi_imx->base + MX51_ECSPI_DMA);
        }

        return 0;
}

static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
        return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
        /* drain receive buffer */
        while (mx51_ecspi_rx_available(spi_imx))
                readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX31_INTREG_TEEN (1 << 0)
#define MX31_INTREG_RREN (1 << 3)

#define MX31_CSPICTRL_ENABLE (1 << 0)
#define MX31_CSPICTRL_MASTER (1 << 1)
#define MX31_CSPICTRL_XCH (1 << 2)
#define MX31_CSPICTRL_POL (1 << 4)
#define MX31_CSPICTRL_PHA (1 << 5)
#define MX31_CSPICTRL_SSCTL (1 << 6)
#define MX31_CSPICTRL_SSPOL (1 << 7)
#define MX31_CSPICTRL_BC_SHIFT 8
#define MX35_CSPICTRL_BL_SHIFT 20
#define MX31_CSPICTRL_CS_SHIFT 24
#define MX35_CSPICTRL_CS_SHIFT 12
#define MX31_CSPICTRL_DR_SHIFT 16

#define MX31_CSPISTATUS 0x14
#define MX31_STATUS_RR (1 << 3)

/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
        unsigned int val = 0;

        if (enable & MXC_INT_TE)
                val |= MX31_INTREG_TEEN;
        if (enable & MXC_INT_RR)
                val |= MX31_INTREG_RREN;

        writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
{
        unsigned int reg;

        reg = readl(spi_imx->base + MXC_CSPICTRL);
        reg |= MX31_CSPICTRL_XCH;
        writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
                struct spi_imx_config *config)
{
        unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
        int cs = spi_imx->chipselect[config->cs];

        reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
                MX31_CSPICTRL_DR_SHIFT;

        if (is_imx35_cspi(spi_imx)) {
                reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
                reg |= MX31_CSPICTRL_SSCTL;
        } else {
                reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
        }

        if (config->mode & SPI_CPHA)
                reg |= MX31_CSPICTRL_PHA;
        if (config->mode & SPI_CPOL)
                reg |= MX31_CSPICTRL_POL;
        if (config->mode & SPI_CS_HIGH)
                reg |= MX31_CSPICTRL_SSPOL;
        if (cs < 0)
                reg |= (cs + 32) <<
                        (is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
                                                  MX31_CSPICTRL_CS_SHIFT);

        writel(reg, spi_imx->base + MXC_CSPICTRL);

        return 0;
}

static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
{
        return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
{
        /* drain receive buffer */
        while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
                readl(spi_imx->base + MXC_CSPIRXDATA);
}

#define MX21_INTREG_RR (1 << 4)
#define MX21_INTREG_TEEN (1 << 9)
#define MX21_INTREG_RREN (1 << 13)

#define MX21_CSPICTRL_POL (1 << 5)
#define MX21_CSPICTRL_PHA (1 << 6)
#define MX21_CSPICTRL_SSPOL (1 << 8)
#define MX21_CSPICTRL_XCH (1 << 9)
#define MX21_CSPICTRL_ENABLE (1 << 10)
#define MX21_CSPICTRL_MASTER (1 << 11)
#define MX21_CSPICTRL_DR_SHIFT 14
#define MX21_CSPICTRL_CS_SHIFT 19

static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
        unsigned int val = 0;

        if (enable & MXC_INT_TE)
                val |= MX21_INTREG_TEEN;
        if (enable & MXC_INT_RR)
                val |= MX21_INTREG_RREN;

        writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
{
        unsigned int reg;

        reg = readl(spi_imx->base + MXC_CSPICTRL);
        reg |= MX21_CSPICTRL_XCH;
        writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
                struct spi_imx_config *config)
{
        unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
        int cs = spi_imx->chipselect[config->cs];
        unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;

        reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
                MX21_CSPICTRL_DR_SHIFT;
        reg |= config->bpw - 1;

        if (config->mode & SPI_CPHA)
                reg |= MX21_CSPICTRL_PHA;
        if (config->mode & SPI_CPOL)
                reg |= MX21_CSPICTRL_POL;
        if (config->mode & SPI_CS_HIGH)
                reg |= MX21_CSPICTRL_SSPOL;
        if (cs < 0)
                reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;

        writel(reg, spi_imx->base + MXC_CSPICTRL);

        return 0;
}

static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
{
        return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
{
        writel(1, spi_imx->base + MXC_RESET);
}

#define MX1_INTREG_RR (1 << 3)
#define MX1_INTREG_TEEN (1 << 8)
#define MX1_INTREG_RREN (1 << 11)

#define MX1_CSPICTRL_POL (1 << 4)
#define MX1_CSPICTRL_PHA (1 << 5)
#define MX1_CSPICTRL_XCH (1 << 8)
#define MX1_CSPICTRL_ENABLE (1 << 9)
#define MX1_CSPICTRL_MASTER (1 << 10)
#define MX1_CSPICTRL_DR_SHIFT 13

static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
        unsigned int val = 0;

        if (enable & MXC_INT_TE)
                val |= MX1_INTREG_TEEN;
        if (enable & MXC_INT_RR)
                val |= MX1_INTREG_RREN;

        writel(val, spi_imx->base + MXC_CSPIINT);
}

static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
{
        unsigned int reg;

        reg = readl(spi_imx->base + MXC_CSPICTRL);
        reg |= MX1_CSPICTRL_XCH;
        writel(reg, spi_imx->base + MXC_CSPICTRL);
}

static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx,
                struct spi_imx_config *config)
{
        unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;

        reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
                MX1_CSPICTRL_DR_SHIFT;
        reg |= config->bpw - 1;

        if (config->mode & SPI_CPHA)
                reg |= MX1_CSPICTRL_PHA;
        if (config->mode & SPI_CPOL)
                reg |= MX1_CSPICTRL_POL;

        writel(reg, spi_imx->base + MXC_CSPICTRL);

        return 0;
}

static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx)
{
        return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
{
        writel(1, spi_imx->base + MXC_RESET);
}

static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
        .intctrl = mx1_intctrl,
        .config = mx1_config,
        .trigger = mx1_trigger,
        .rx_available = mx1_rx_available,
        .reset = mx1_reset,
        .devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
        .intctrl = mx21_intctrl,
        .config = mx21_config,
        .trigger = mx21_trigger,
        .rx_available = mx21_rx_available,
        .reset = mx21_reset,
        .devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
        /* i.mx27 cspi shares the functions with i.mx21 one */
        .intctrl = mx21_intctrl,
        .config = mx21_config,
        .trigger = mx21_trigger,
        .rx_available = mx21_rx_available,
        .reset = mx21_reset,
        .devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
        .intctrl = mx31_intctrl,
        .config = mx31_config,
        .trigger = mx31_trigger,
        .rx_available = mx31_rx_available,
        .reset = mx31_reset,
        .devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
        /* i.mx35 and later cspi shares the functions with i.mx31 one */
        .intctrl = mx31_intctrl,
        .config = mx31_config,
        .trigger = mx31_trigger,
        .rx_available = mx31_rx_available,
        .reset = mx31_reset,
        .devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
        .intctrl = mx51_ecspi_intctrl,
        .config = mx51_ecspi_config,
        .trigger = mx51_ecspi_trigger,
        .rx_available = mx51_ecspi_rx_available,
        .reset = mx51_ecspi_reset,
        .devtype = IMX51_ECSPI,
};

static const struct platform_device_id spi_imx_devtype[] = {
        {
                .name = "imx1-cspi",
                .driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
        }, {
                .name = "imx21-cspi",
                .driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
        }, {
                .name = "imx27-cspi",
                .driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
        }, {
                .name = "imx31-cspi",
                .driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
        }, {
                .name = "imx35-cspi",
                .driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
        }, {
                .name = "imx51-ecspi",
                .driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
        }, {
                /* sentinel */
        }
};

static const struct of_device_id spi_imx_dt_ids[] = {
        { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
        { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
        { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
        { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
        { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
        { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);

static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
        struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
        int gpio = spi_imx->chipselect[spi->chip_select];
        int active = is_active != BITBANG_CS_INACTIVE;
        int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);

        if (!gpio_is_valid(gpio))
                return;

        gpio_set_value(gpio, dev_is_lowactive ^ active);
}

static void spi_imx_push(struct spi_imx_data *spi_imx)
{
        while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
                if (!spi_imx->count)
                        break;
                spi_imx->tx(spi_imx);
                spi_imx->txfifo++;
        }

        spi_imx->devtype_data->trigger(spi_imx);
}

static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
        struct spi_imx_data *spi_imx = dev_id;

        while (spi_imx->devtype_data->rx_available(spi_imx)) {
                spi_imx->rx(spi_imx);
                spi_imx->txfifo--;
        }

        if (spi_imx->count) {
                spi_imx_push(spi_imx);
                return IRQ_HANDLED;
        }

        if (spi_imx->txfifo) {
                /* No data left to push, but still waiting for rx data,
                 * enable receive data available interrupt.
                 */
                spi_imx->devtype_data->intctrl(
                                spi_imx, MXC_INT_RR);
                return IRQ_HANDLED;
        }

        spi_imx->devtype_data->intctrl(spi_imx, 0);
        complete(&spi_imx->xfer_done);

        return IRQ_HANDLED;
}

static int spi_imx_setupxfer(struct spi_device *spi,
                                 struct spi_transfer *t)
{
        struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
        struct spi_imx_config config;

        config.bpw = t ? t->bits_per_word : spi->bits_per_word;
        config.speed_hz  = t ? t->speed_hz : spi->max_speed_hz;
        config.mode = spi->mode;
        config.cs = spi->chip_select;

        if (!config.speed_hz)
                config.speed_hz = spi->max_speed_hz;
        if (!config.bpw)
                config.bpw = spi->bits_per_word;

        /* Initialize the functions for transfer */
        if (config.bpw <= 8) {
                spi_imx->rx = spi_imx_buf_rx_u8;
                spi_imx->tx = spi_imx_buf_tx_u8;
        } else if (config.bpw <= 16) {
                spi_imx->rx = spi_imx_buf_rx_u16;
                spi_imx->tx = spi_imx_buf_tx_u16;
        } else {
                spi_imx->rx = spi_imx_buf_rx_u32;
                spi_imx->tx = spi_imx_buf_tx_u32;
        }

        spi_imx->devtype_data->config(spi_imx, &config);

        return 0;
}

static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
        struct spi_master *master = spi_imx->bitbang.master;

        if (master->dma_rx) {
                dma_release_channel(master->dma_rx);
                master->dma_rx = NULL;
        }

        if (master->dma_tx) {
                dma_release_channel(master->dma_tx);
                master->dma_tx = NULL;
        }

        spi_imx->dma_is_inited = 0;
}

static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
                             struct spi_master *master,
                             const struct resource *res)
{
        struct dma_slave_config slave_config = {};
        int ret;

        /* use pio mode for i.mx6dl chip TKT238285 */
        if (of_machine_is_compatible("fsl,imx6dl"))
                return 0;

        spi_imx->wml = spi_imx_get_fifosize(spi_imx) / 2;

        /* Prepare for TX DMA: */
        master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
        if (IS_ERR(master->dma_tx)) {
                ret = PTR_ERR(master->dma_tx);
                dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
                master->dma_tx = NULL;
                goto err;
        }

        slave_config.direction = DMA_MEM_TO_DEV;
        slave_config.dst_addr = res->start + MXC_CSPITXDATA;
        slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        slave_config.dst_maxburst = spi_imx->wml;
        ret = dmaengine_slave_config(master->dma_tx, &slave_config);
        if (ret) {
                dev_err(dev, "error in TX dma configuration.\n");
                goto err;
        }

        /* Prepare for RX : */
        master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
        if (IS_ERR(master->dma_rx)) {
                ret = PTR_ERR(master->dma_rx);
                dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
                master->dma_rx = NULL;
                goto err;
        }

        slave_config.direction = DMA_DEV_TO_MEM;
        slave_config.src_addr = res->start + MXC_CSPIRXDATA;
        slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        slave_config.src_maxburst = spi_imx->wml;
        ret = dmaengine_slave_config(master->dma_rx, &slave_config);
        if (ret) {
                dev_err(dev, "error in RX dma configuration.\n");
                goto err;
        }

        init_completion(&spi_imx->dma_rx_completion);
        init_completion(&spi_imx->dma_tx_completion);
        master->can_dma = spi_imx_can_dma;
        master->max_dma_len = MAX_SDMA_BD_BYTES;
        spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
                                         SPI_MASTER_MUST_TX;
        spi_imx->dma_is_inited = 1;

        return 0;
err:
        spi_imx_sdma_exit(spi_imx);
        return ret;
}

static void spi_imx_dma_rx_callback(void *cookie)
{
        struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

        complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
        struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

        complete(&spi_imx->dma_tx_completion);
}

static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
                                struct spi_transfer *transfer)
{
        struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
        int ret;
        unsigned long timeout;
        struct spi_master *master = spi_imx->bitbang.master;
        struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;

        if (tx) {
                desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
                                        tx->sgl, tx->nents, DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (!desc_tx)
                        goto no_dma;

                desc_tx->callback = spi_imx_dma_tx_callback;
                desc_tx->callback_param = (void *)spi_imx;
                dmaengine_submit(desc_tx);
        }

        if (rx) {
                desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
                                        rx->sgl, rx->nents, DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (!desc_rx)
                        goto no_dma;

                desc_rx->callback = spi_imx_dma_rx_callback;
                desc_rx->callback_param = (void *)spi_imx;
                dmaengine_submit(desc_rx);
        }

        reinit_completion(&spi_imx->dma_rx_completion);
        reinit_completion(&spi_imx->dma_tx_completion);

        /* Trigger the cspi module. */
        spi_imx->dma_finished = 0;

        /*
         * Set these order to avoid potential RX overflow. The overflow may
         * happen if we enable SPI HW before starting RX DMA due to rescheduling
         * for another task and/or interrupt.
         * So RX DMA enabled first to make sure data would be read out from FIFO
         * ASAP. TX DMA enabled next to start filling TX FIFO with new data.
         * And finaly SPI HW enabled to start actual data transfer.
         */
        dma_async_issue_pending(master->dma_rx);
        dma_async_issue_pending(master->dma_tx);
        spi_imx->devtype_data->trigger(spi_imx);

        /* Wait SDMA to finish the data transfer.*/
        timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
                                                IMX_DMA_TIMEOUT);
        if (!timeout) {
                pr_warn("%s %s: I/O Error in DMA TX\n",
                        dev_driver_string(&master->dev),
                        dev_name(&master->dev));
                dmaengine_terminate_all(master->dma_tx);
                dmaengine_terminate_all(master->dma_rx);
        } else {
                timeout = wait_for_completion_timeout(
                                &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
                if (!timeout) {
                        pr_warn("%s %s: I/O Error in DMA RX\n",
                                dev_driver_string(&master->dev),
                                dev_name(&master->dev));
                        spi_imx->devtype_data->reset(spi_imx);
                        dmaengine_terminate_all(master->dma_rx);
                }
        }

        spi_imx->dma_finished = 1;
        spi_imx->devtype_data->trigger(spi_imx);

        if (!timeout)
                ret = -ETIMEDOUT;
        else
                ret = transfer->len;

        return ret;

no_dma:
        pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
                     dev_driver_string(&master->dev),
                     dev_name(&master->dev));
        return -EAGAIN;
}

static int spi_imx_pio_transfer(struct spi_device *spi,
                                struct spi_transfer *transfer)
{
        struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

        spi_imx->tx_buf = transfer->tx_buf;
        spi_imx->rx_buf = transfer->rx_buf;
        spi_imx->count = transfer->len;
        spi_imx->txfifo = 0;

        reinit_completion(&spi_imx->xfer_done);

        spi_imx_push(spi_imx);

        spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

        wait_for_completion(&spi_imx->xfer_done);

        return transfer->len;
}

static int spi_imx_transfer(struct spi_device *spi,
                                struct spi_transfer *transfer)
{
        int ret;
        struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

        if (spi_imx->bitbang.master->can_dma &&
            spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
                spi_imx->usedma = true;
                ret = spi_imx_dma_transfer(spi_imx, transfer);
                if (ret != -EAGAIN)
                        return ret;
        }
        spi_imx->usedma = false;

        return spi_imx_pio_transfer(spi, transfer);
}

static int spi_imx_setup(struct spi_device *spi)
{
        struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
        int gpio = spi_imx->chipselect[spi->chip_select];

        dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
                 spi->mode, spi->bits_per_word, spi->max_speed_hz);

        if (gpio_is_valid(gpio))
                gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);

        spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);

        return 0;
}

static void spi_imx_cleanup(struct spi_device *spi)
{
}

static int
spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
{
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
        int ret;

        ret = clk_enable(spi_imx->clk_per);
        if (ret)
                return ret;

        ret = clk_enable(spi_imx->clk_ipg);
        if (ret) {
                clk_disable(spi_imx->clk_per);
                return ret;
        }

        return 0;
}

static int
spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

        clk_disable(spi_imx->clk_ipg);
        clk_disable(spi_imx->clk_per);
        return 0;
}

static int spi_imx_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *of_id =
                        of_match_device(spi_imx_dt_ids, &pdev->dev);
        struct spi_imx_master *mxc_platform_info =
                        dev_get_platdata(&pdev->dev);
        struct spi_master *master;
        struct spi_imx_data *spi_imx;
        struct resource *res;
        int i, ret, num_cs, irq;

        if (!np && !mxc_platform_info) {
                dev_err(&pdev->dev, "can't get the platform data\n");
                return -EINVAL;
        }

        ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
        if (ret < 0) {
                if (mxc_platform_info)
                        num_cs = mxc_platform_info->num_chipselect;
                else
                        return ret;
        }

        master = spi_alloc_master(&pdev->dev,
                        sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
        if (!master)
                return -ENOMEM;

        platform_set_drvdata(pdev, master);

        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
        master->bus_num = pdev->id;
        master->num_chipselect = num_cs;

        spi_imx = spi_master_get_devdata(master);
        spi_imx->bitbang.master = master;

        spi_imx->devtype_data = of_id ? of_id->data :
                (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;

        for (i = 0; i < master->num_chipselect; i++) {
                int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
                if (!gpio_is_valid(cs_gpio) && mxc_platform_info)
                        cs_gpio = mxc_platform_info->chipselect[i];

                spi_imx->chipselect[i] = cs_gpio;
                if (!gpio_is_valid(cs_gpio))
                        continue;

                ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i],
                                        DRIVER_NAME);
                if (ret) {
                        dev_err(&pdev->dev, "can't get cs gpios\n");
                        goto out_master_put;
                }
        }

        spi_imx->bitbang.chipselect = spi_imx_chipselect;
        spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
        spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
        spi_imx->bitbang.master->setup = spi_imx_setup;
        spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
        spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
        spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
        spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
        if (is_imx51_ecspi(spi_imx))
                spi_imx->bitbang.master->mode_bits |= SPI_LOOP;

        init_completion(&spi_imx->xfer_done);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(spi_imx->base)) {
                ret = PTR_ERR(spi_imx->base);
                goto out_master_put;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                goto out_master_put;
        }

        ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
                               dev_name(&pdev->dev), spi_imx);
        if (ret) {
                dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
                goto out_master_put;
        }

        spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(spi_imx->clk_ipg)) {
                ret = PTR_ERR(spi_imx->clk_ipg);
                goto out_master_put;
        }

        spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
        if (IS_ERR(spi_imx->clk_per)) {
                ret = PTR_ERR(spi_imx->clk_per);
                goto out_master_put;
        }

        ret = clk_prepare_enable(spi_imx->clk_per);
        if (ret)
                goto out_master_put;

        ret = clk_prepare_enable(spi_imx->clk_ipg);
        if (ret)
                goto out_put_per;

        spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
        /*
         * Only validated on i.mx6 now, can remove the constrain if validated on
         * other chips.
         */
        if (is_imx51_ecspi(spi_imx)) {
                ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master, res);
                if (ret == -EPROBE_DEFER)
                        goto out_clk_put;

                if (ret < 0)
                        dev_err(&pdev->dev, "dma setup error %d, use pio\n",
                                ret);
        }

        spi_imx->devtype_data->reset(spi_imx);

        spi_imx->devtype_data->intctrl(spi_imx, 0);

        master->dev.of_node = pdev->dev.of_node;
        ret = spi_bitbang_start(&spi_imx->bitbang);
        if (ret) {
                dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
                goto out_clk_put;
        }

        dev_info(&pdev->dev, "probed\n");

        clk_disable(spi_imx->clk_ipg);
        clk_disable(spi_imx->clk_per);
        return ret;

out_clk_put:
        clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
        clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
        spi_master_put(master);

        return ret;
}

static int spi_imx_remove(struct platform_device *pdev)
{
        struct spi_master *master = platform_get_drvdata(pdev);
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

        spi_bitbang_stop(&spi_imx->bitbang);

        writel(0, spi_imx->base + MXC_CSPICTRL);
        clk_unprepare(spi_imx->clk_ipg);
        clk_unprepare(spi_imx->clk_per);
        spi_imx_sdma_exit(spi_imx);
        spi_master_put(master);

        return 0;
}

static struct platform_driver spi_imx_driver = {
        .driver = {
                   .name = DRIVER_NAME,
                   .of_match_table = spi_imx_dt_ids,
                   },
        .id_table = spi_imx_devtype,
        .probe = spi_imx_probe,
        .remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);

MODULE_DESCRIPTION("SPI Master Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);