dmaengine: mpc512x: add support for peripheral transfers
Introduce support for slave s/g transfer preparation and the associated
device control callback in the MPC512x DMA controller driver. This adds
data transfers between memory and peripheral I/O to the previously
supported mem-to-mem transfers.

Signed-off-by: Alexander Popov <a13xp0p0v88@gmail.com>
[fixed subsystem name]
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent ba730340f9
commit 63da8e0d4f
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -2,6 +2,7 @@
  * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
  * Copyright (C) Semihalf 2009
  * Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
  *
  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -29,8 +30,18 @@
  */

 /*
- * This is initial version of MPC5121 DMA driver. Only memory to memory
- * transfers are supported (tested using dmatest module).
+ * MPC512x and MPC8308 DMA driver. It supports
+ * memory to memory data transfers (tested using dmatest module) and
+ * data transfers between memory and peripheral I/O memory
+ * by means of slave scatter/gather with these limitations:
+ *  - chunked transfers (described by s/g lists with more than one item)
+ *    are refused as long as proper support for scatter/gather is missing;
+ *  - transfers on MPC8308 always start from software as this SoC appears
+ *    not to have external request lines for peripheral flow control;
+ *  - only peripheral devices with 4-byte FIFO access register are supported;
+ *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
+ *    source and destination addresses must be 4-byte aligned
+ *    and transfer size must be aligned on (4 * maxburst) boundary;
  */

 #include <linux/module.h>
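To make the constraints above concrete, here is a minimal, hypothetical client-side sketch (not part of this commit) of how a peripheral driver could program one transfer through the generic dmaengine API. The channel name "rx", the fifo_paddr/buf_paddr addresses, len, and the maxburst of 16 are illustrative assumptions.

	/* Hypothetical client sketch; names and values are assumptions. */
	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_paddr,	/* 4-byte FIFO access register */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,	/* len must align on 4 * 16 bytes */
		/* this driver validates both directions, so set dst side too */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;

	if (!chan || dmaengine_slave_config(chan, &cfg))
		goto fail;
	/* One s/g item only: this driver refuses sg_len != 1 for now */
	desc = dmaengine_prep_slave_single(chan, buf_paddr, len,
					   DMA_DEV_TO_MEM, 0);
	if (!desc)
		goto fail;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);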
@@ -189,6 +200,7 @@ struct mpc_dma_desc {
 	dma_addr_t			tcd_paddr;
 	int				error;
 	struct list_head		node;
+	int				will_access_peripheral;
 };

 struct mpc_dma_chan {
@@ -201,6 +213,12 @@ struct mpc_dma_chan {
 	struct mpc_dma_tcd		*tcd;
 	dma_addr_t			tcd_paddr;

+	/* Settings for access to peripheral FIFO */
+	dma_addr_t			src_per_paddr;
+	u32				src_tcd_nunits;
+	dma_addr_t			dst_per_paddr;
+	u32				dst_tcd_nunits;
+
 	/* Lock for this structure */
 	spinlock_t			lock;
 };
@@ -251,8 +269,23 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
 	struct mpc_dma_desc *mdesc;
 	int cid = mchan->chan.chan_id;

-	/* Move all queued descriptors to active list */
-	list_splice_tail_init(&mchan->queued, &mchan->active);
+	while (!list_empty(&mchan->queued)) {
+		mdesc = list_first_entry(&mchan->queued,
+						struct mpc_dma_desc, node);
+		/*
+		 * Grab either several mem-to-mem transfer descriptors
+		 * or one peripheral transfer descriptor,
+		 * don't mix mem-to-mem and peripheral transfer descriptors
+		 * within the same 'active' list.
+		 */
+		if (mdesc->will_access_peripheral) {
+			if (list_empty(&mchan->active))
+				list_move_tail(&mdesc->node, &mchan->active);
+			break;
+		} else {
+			list_move_tail(&mdesc->node, &mchan->active);
+		}
+	}

 	/* Chain descriptors into one transaction */
 	list_for_each_entry(mdesc, &mchan->active, node) {
@@ -278,7 +311,17 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)

 	if (first != prev)
 		mdma->tcd[cid].e_sg = 1;
-	out_8(&mdma->regs->dmassrt, cid);
+
+	if (mdma->is_mpc8308) {
+		/* MPC8308, no request lines, software initiated start */
+		out_8(&mdma->regs->dmassrt, cid);
+	} else if (first->will_access_peripheral) {
+		/* Peripherals involved, start by external request signal */
+		out_8(&mdma->regs->dmaserq, cid);
+	} else {
+		/* Memory to memory transfer, software initiated start */
+		out_8(&mdma->regs->dmassrt, cid);
+	}
 }

 /* Handle interrupt on one half of DMA controller (32 channels) */
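The start-method selection above, together with the terminate path added further below, exercises three per-channel set/clear ports that take a channel id as data. A hedged summary of their roles as this driver uses them (register names come from the code; the bit semantics, following the usual Freescale eDMA convention, are an assumption):

	/* Sketch of the eDMA set/clear ports as used by this driver */
	out_8(&mdma->regs->dmassrt, cid);	/* set START: software-initiated start */
	out_8(&mdma->regs->dmaserq, cid);	/* set ERQ: enable peripheral request line */
	out_8(&mdma->regs->dmacerq, cid);	/* clear ERQ: stop servicing requests */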
@@ -596,6 +639,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	}

 	mdesc->error = 0;
+	mdesc->will_access_peripheral = 0;
 	tcd = mdesc->tcd;

 	/* Prepare Transfer Control Descriptor for this transaction */
@@ -643,6 +687,193 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	return &mdesc->desc;
 }

+static struct dma_async_tx_descriptor *
+mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+	struct mpc_dma_desc *mdesc = NULL;
+	dma_addr_t per_paddr;
+	u32 tcd_nunits;
+	struct mpc_dma_tcd *tcd;
+	unsigned long iflags;
+	struct scatterlist *sg;
+	size_t len;
+	int iter, i;
+
+	/* Currently there is no proper support for scatter/gather */
+	if (sg_len != 1)
+		return NULL;
+
+	if (!is_slave_direction(direction))
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		spin_lock_irqsave(&mchan->lock, iflags);
+
+		mdesc = list_first_entry(&mchan->free,
+						struct mpc_dma_desc, node);
+		if (!mdesc) {
+			spin_unlock_irqrestore(&mchan->lock, iflags);
+			/* Try to free completed descriptors */
+			mpc_dma_process_completed(mdma);
+			return NULL;
+		}
+
+		list_del(&mdesc->node);
+
+		if (direction == DMA_DEV_TO_MEM) {
+			per_paddr = mchan->src_per_paddr;
+			tcd_nunits = mchan->src_tcd_nunits;
+		} else {
+			per_paddr = mchan->dst_per_paddr;
+			tcd_nunits = mchan->dst_tcd_nunits;
+		}
+
+		spin_unlock_irqrestore(&mchan->lock, iflags);
+
+		if (per_paddr == 0 || tcd_nunits == 0)
+			goto err_prep;
+
+		mdesc->error = 0;
+		mdesc->will_access_peripheral = 1;
+
+		/* Prepare Transfer Control Descriptor for this transaction */
+		tcd = mdesc->tcd;
+
+		memset(tcd, 0, sizeof(struct mpc_dma_tcd));
+
+		if (!IS_ALIGNED(sg_dma_address(sg), 4))
+			goto err_prep;
+
+		if (direction == DMA_DEV_TO_MEM) {
+			tcd->saddr = per_paddr;
+			tcd->daddr = sg_dma_address(sg);
+			tcd->soff = 0;
+			tcd->doff = 4;
+		} else {
+			tcd->saddr = sg_dma_address(sg);
+			tcd->daddr = per_paddr;
+			tcd->soff = 4;
+			tcd->doff = 0;
+		}
+
+		tcd->ssize = MPC_DMA_TSIZE_4;
+		tcd->dsize = MPC_DMA_TSIZE_4;
+
+		len = sg_dma_len(sg);
+		tcd->nbytes = tcd_nunits * 4;
+		if (!IS_ALIGNED(len, tcd->nbytes))
+			goto err_prep;
+
+		iter = len / tcd->nbytes;
+		if (iter >= 1 << 15) {
+			/* len is too big */
+			goto err_prep;
+		}
+		/* citer_linkch contains the high bits of iter */
+		tcd->biter = iter & 0x1ff;
+		tcd->biter_linkch = iter >> 9;
+		tcd->citer = tcd->biter;
+		tcd->citer_linkch = tcd->biter_linkch;
+
+		tcd->e_sg = 0;
+		tcd->d_req = 1;
+
+		/* Place descriptor in prepared list */
+		spin_lock_irqsave(&mchan->lock, iflags);
+		list_add_tail(&mdesc->node, &mchan->prepared);
+		spin_unlock_irqrestore(&mchan->lock, iflags);
+	}
+
+	return &mdesc->desc;
+
+err_prep:
+	/* Put the descriptor back */
+	spin_lock_irqsave(&mchan->lock, iflags);
+	list_add_tail(&mdesc->node, &mchan->free);
+	spin_unlock_irqrestore(&mchan->lock, iflags);
+
+	return NULL;
+}
+
+static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+							unsigned long arg)
+{
+	struct mpc_dma_chan *mchan;
+	struct mpc_dma *mdma;
+	struct dma_slave_config *cfg;
+	unsigned long flags;
+
+	mchan = dma_chan_to_mpc_dma_chan(chan);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		/* Disable channel requests */
+		mdma = dma_chan_to_mpc_dma(chan);
+
+		spin_lock_irqsave(&mchan->lock, flags);
+
+		out_8(&mdma->regs->dmacerq, chan->chan_id);
+		list_splice_tail_init(&mchan->prepared, &mchan->free);
+		list_splice_tail_init(&mchan->queued, &mchan->free);
+		list_splice_tail_init(&mchan->active, &mchan->free);
+
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		return 0;
+
+	case DMA_SLAVE_CONFIG:
+		/*
+		 * Software constraints:
+		 *  - only transfers between a peripheral device and
+		 *     memory are supported;
+		 *  - only peripheral devices with 4-byte FIFO access register
+		 *     are supported;
+		 *  - minimal transfer chunk is 4 bytes and consequently
+		 *     source and destination addresses must be 4-byte aligned
+		 *     and transfer size must be aligned on (4 * maxburst)
+		 *     boundary;
+		 *  - during the transfer RAM address is being incremented by
+		 *     the size of minimal transfer chunk;
+		 *  - peripheral port's address is constant during the transfer.
+		 */
+
+		cfg = (void *)arg;
+
+		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+		    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+		    !IS_ALIGNED(cfg->src_addr, 4) ||
+		    !IS_ALIGNED(cfg->dst_addr, 4)) {
+			return -EINVAL;
+		}
+
+		spin_lock_irqsave(&mchan->lock, flags);
+
+		mchan->src_per_paddr = cfg->src_addr;
+		mchan->src_tcd_nunits = cfg->src_maxburst;
+		mchan->dst_per_paddr = cfg->dst_addr;
+		mchan->dst_tcd_nunits = cfg->dst_maxburst;
+
+		/* Apply defaults */
+		if (mchan->src_tcd_nunits == 0)
+			mchan->src_tcd_nunits = 1;
+		if (mchan->dst_tcd_nunits == 0)
+			mchan->dst_tcd_nunits = 1;
+
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		return 0;
+
+	default:
+		/* Unknown command */
+		break;
+	}
+
+	return -ENXIO;
+}
+
 static int mpc_dma_probe(struct platform_device *op)
 {
 	struct device_node *dn = op->dev.of_node;
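One detail worth a worked example: the major-loop iteration count programmed in mpc_dma_prep_slave_sg() above must fit in 15 bits and is stored split across two TCD fields, the low 9 bits in biter/citer and the high bits in the *_linkch fields. Assuming maxburst = 16 and a 64 KiB buffer (illustrative values):

	/* Illustrative arithmetic mirroring mpc_dma_prep_slave_sg() */
	size_t len = 65536;		/* sg_dma_len(sg); must be a multiple of nbytes */
	u32 nbytes = 16 * 4;		/* tcd_nunits (maxburst) * 4-byte transfer unit */
	int iter = len / nbytes;	/* 1024 major-loop iterations */
					/* iter >= 1 << 15 would be refused */
	u16 biter = iter & 0x1ff;	/* low 9 bits  -> 0 */
	u16 biter_linkch = iter >> 9;	/* high 6 bits -> 2 */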
@@ -733,9 +964,12 @@ static int mpc_dma_probe(struct platform_device *op)
 	dma->device_issue_pending = mpc_dma_issue_pending;
 	dma->device_tx_status = mpc_dma_tx_status;
 	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
+	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
+	dma->device_control = mpc_dma_device_control;

 	INIT_LIST_HEAD(&dma->channels);
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+	dma_cap_set(DMA_SLAVE, dma->cap_mask);

 	for (i = 0; i < dma->chancnt; i++) {
 		mchan = &mdma->channels[i];
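Since the probe path now advertises DMA_SLAVE and wires up device_control, a client stops a channel through the standard dmaengine wrapper, which lands in the new callback; a brief hypothetical teardown sketch (chan as in the earlier client example):

	/* dmaengine_terminate_all() reaches mpc_dma_device_control(DMA_TERMINATE_ALL) */
	dmaengine_terminate_all(chan);	/* disable requests, recycle descriptors */
	dma_release_channel(chan);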