dmaengine: Create a generic dma_slave_caps callback

dma_slave_caps is very important to the generic layers that might interact with
dmaengine, such as ASoC. Unfortunately, it has been added as yet another
dma_device callback, and most of the existing drivers haven't implemented it,
reducing its reliability.

Introduce a generic behaviour to implement this, relying both on the split of
device_control (to derive which functions are supported) and on new variables
to be set in the dma_device structure.

These variables hold what used to be the per-channel capabilities. However,
per-channel reporting proved to be a bit overkill, since every driver filling
these so far hardcoded the values, disregarding which channel was actually
given.
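
As a rough sketch (not lifted from any in-tree driver; dma_dev and the chosen
bus widths and directions are purely illustrative), a controller driver would
now advertise its slave capabilities once, at probe time, by filling the new
dma_device fields instead of providing a device_slave_caps callback:

	/* Hypothetical probe-time snippet: describe the device-wide
	 * slave capabilities through the new dma_device fields. */
	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;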

Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Authored by Maxime Ripard on 2014-11-17 14:42:04 +01:00; committed by Vinod Koul
parent 4f8ef9f414
commit cb8cea513c

@@ -594,6 +594,14 @@ struct dma_tx_state {
  * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
+ * @src_addr_widths: bit mask of src addr widths the device supports
+ * @dst_addr_widths: bit mask of dst addr widths the device supports
+ * @directions: bit mask of slave direction the device supports since
+ *	the enum dma_transfer_direction is not defined as bits for
+ *	each type of direction, the dma controller should fill (1 <<
+ *	<TYPE>) and same should be checked by controller as well
+ * @residue_granularity: granularity of the transfer residue reported
+ *	by tx_status
  * @device_alloc_chan_resources: allocate resources and return the
  *	number of allocated descriptors
  * @device_free_chan_resources: release DMA channel's resources
@@ -643,6 +651,11 @@ struct dma_device {
 	int dev_id;
 	struct device *dev;
 
+	u32 src_addr_widths;
+	u32 dst_addr_widths;
+	u32 directions;
+	enum dma_residue_granularity residue_granularity;
+
 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
 	void (*device_free_chan_resources)(struct dma_chan *chan);
@@ -784,17 +797,37 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
 static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 {
+	struct dma_device *device;
+
 	if (!chan || !caps)
 		return -EINVAL;
 
+	device = chan->device;
+
 	/* check if the channel supports slave transactions */
-	if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
+	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
 		return -ENXIO;
 
-	if (chan->device->device_slave_caps)
-		return chan->device->device_slave_caps(chan, caps);
+	if (device->device_slave_caps)
+		return device->device_slave_caps(chan, caps);
 
-	return -ENXIO;
+	/*
+	 * Check whether it reports it uses the generic slave
+	 * capabilities, if not, that means it doesn't support any
+	 * kind of slave capabilities reporting.
+	 */
+	if (!device->directions)
+		return -ENXIO;
+
+	caps->src_addr_widths = device->src_addr_widths;
+	caps->dst_addr_widths = device->dst_addr_widths;
+	caps->directions = device->directions;
+	caps->residue_granularity = device->residue_granularity;
+
+	caps->cmd_pause = !!device->device_pause;
+	caps->cmd_terminate = !!device->device_terminate_all;
+
+	return 0;
 }
 
 static inline int dmaengine_terminate_all(struct dma_chan *chan)
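
On the consumer side, a slave client such as an ASoC platform driver can then
query these capabilities through dma_get_slave_caps(). A minimal sketch,
assuming a hypothetical chan (the requested DMA channel) and dev (the client's
struct device):

	struct dma_slave_caps caps;
	int ret;

	/* Ask the dmaengine core (or the driver's own callback) for
	 * the channel's slave capabilities. */
	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	/* Make sure memory-to-device transfers are supported. */
	if (!(caps.directions & BIT(DMA_MEM_TO_DEV)))
		return -EINVAL;

	/* Pause support is optional; warn if it is missing. */
	if (!caps.cmd_pause)
		dev_warn(dev, "DMA channel cannot be paused\n");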