
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine changes from Vinod Koul:
 "This brings for slave dmaengine:

   - Change the dma notification flag to DMA_COMPLETE from DMA_SUCCESS, as
     dmaengine can only perform transfers, not verify the validity of the
     data transferred (see the status-check sketch after the quoted message)

   - Bunch of fixes across drivers:

      - cppi41 driver fixes from Daniel

      - 8 channel freescale dma engine support and updated bindings from
        Hongbo

      - mxs-dma fixes and cleanup by Markus

   - DMAengine updates from Dan:

      - Bartlomiej and Dan finalized a rework of the dma address unmap
        implementation (a usage sketch of the new unmap helpers follows the
        commit metadata below).

      - In the course of testing 1/, a collection of enhancements to
        dmatest fell out.  Notably basic performance statistics, and
        fixed / enhanced test control through new module parameters
        'run', 'wait', 'noverify', and 'verbose'.  Thanks to Andriy and
        Linus [Walleij] for their review.

      - Testing the raid related corner cases of 1/ triggered bugs in
        the recently added 16-source operation support in the ioatdma
        driver.

      - Some minor fixes / cleanups to mv_xor and ioatdma"
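Across the driver diffs below (pl08x, at_hdmac, coh901318, cppi41, jz4740 and
others) the DMA_SUCCESS status constant is replaced by DMA_COMPLETE. The
following is a minimal sketch of a tx_status callback after the rename; the
driver name and the foo_get_bytes_left() residue helper are hypothetical,
while dma_cookie_status(), dma_set_residue() and DMA_COMPLETE are the
interfaces shown in the diffs:

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* dma_cookie_status(), dma_set_residue() */

/* Sketch only: report completion through the cookie helpers and compare
 * against DMA_COMPLETE (formerly DMA_SUCCESS).  foo_get_bytes_left() stands
 * in for a driver-specific residue calculation.
 */
static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;		/* transfer already finished */

	/* still in flight: report how many bytes remain */
	dma_set_residue(txstate, foo_get_bytes_left(chan));
	return ret;
}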

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (99 commits)
  dma: mv_xor: Fix mis-usage of mmio 'base' and 'high_base' registers
  dma: mv_xor: Remove unneeded NULL address check
  ioat: fix ioat3_irq_reinit
  ioat: kill msix_single_vector support
  raid6test: add new corner case for ioatdma driver
  ioatdma: clean up sed pool kmem_cache
  ioatdma: fix selection of 16 vs 8 source path
  ioatdma: fix sed pool selection
  ioatdma: Fix bug in selftest after removal of DMA_MEMSET.
  dmatest: verbose mode
  dmatest: convert to dmaengine_unmap_data
  dmatest: add a 'wait' parameter
  dmatest: add basic performance metrics
  dmatest: add support for skipping verification and random data setup
  dmatest: use pseudo random numbers
  dmatest: support xor-only, or pq-only channels in tests
  dmatest: restore ability to start test at module load and init
  dmatest: cleanup redundant "dmatest: " prefixes
  dmatest: replace stored results mechanism, with uniform messages
  Revert "dmatest: append verify result to results"
  ...
Linus Torvalds 2013-11-20 13:20:24 -08:00
commit e6d69a60b7
71 changed files with 1994 additions and 2287 deletions
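Several of the async_tx conversions in this commit (async_memcpy, async_xor,
async_pq, async_raid6_recov) replace open-coded dma_unmap_page() calls with
the new dmaengine_unmap_data bookkeeping from the unmap rework. Below is a
minimal sketch of the pattern, distilled from the async_memcpy hunk further
down; the function name and its parameters are illustrative only, while
dmaengine_get_unmap_data(), dma_set_unmap() and dmaengine_unmap_put() are the
helpers used in the diffs:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Sketch only: issue one memcpy and let the dmaengine core unmap both pages
 * once the descriptor completes and the last reference is dropped.
 */
static struct dma_async_tx_descriptor *
sketch_dma_copy(struct dma_chan *chan, struct page *dest, unsigned int dest_off,
		struct page *src, unsigned int src_off, size_t len)
{
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *tx = NULL;
	struct dmaengine_unmap_data *unmap;

	/* reserve two unmap slots: one source, one destination */
	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
	if (!unmap)
		return NULL;	/* caller falls back to a CPU copy */

	unmap->to_cnt = 1;
	unmap->addr[0] = dma_map_page(device->dev, src, src_off, len,
				      DMA_TO_DEVICE);
	unmap->from_cnt = 1;
	unmap->addr[1] = dma_map_page(device->dev, dest, dest_off, len,
				      DMA_FROM_DEVICE);
	unmap->len = len;

	tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					    unmap->addr[0], len,
					    DMA_PREP_INTERRUPT);
	if (tx)
		dma_set_unmap(tx, unmap);	/* descriptor takes a reference */

	/* drop our reference; pages are unmapped when the last ref is put */
	dmaengine_unmap_put(unmap);
	return tx;
}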

View File

@ -28,7 +28,7 @@ The three cells in order are:
dependent:
- bit 7-0: peripheral identifier for the hardware handshaking interface. The
identifier can be different for tx and rx.
- bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP.
- bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP.
Example:

View File

@ -1,33 +1,30 @@
* Freescale 83xx DMA Controller
* Freescale DMA Controllers
Freescale PowerPC 83xx have on chip general purpose DMA controllers.
** Freescale Elo DMA Controller
This is a little-endian 4-channel DMA controller, used in Freescale mpc83xx
series chips such as mpc8315, mpc8349, mpc8379 etc.
Required properties:
- compatible : compatible list, contains 2 entries, first is
"fsl,CHIP-dma", where CHIP is the processor
(mpc8349, mpc8360, etc.) and the second is
"fsl,elo-dma"
- reg : <registers mapping for DMA general status reg>
- ranges : Should be defined as specified in 1) to describe the
DMA controller channels.
- compatible : must include "fsl,elo-dma"
- reg : DMA General Status Register, i.e. DGSR which contains
status for all the 4 DMA channels
- ranges : describes the mapping between the address space of the
DMA channels and the address space of the DMA controller
- cell-index : controller index. 0 for controller @ 0x8100
- interrupts : <interrupt mapping for DMA IRQ>
- interrupts : interrupt specifier for DMA IRQ
- interrupt-parent : optional, if needed for interrupt mapping
- DMA channel nodes:
- compatible : compatible list, contains 2 entries, first is
"fsl,CHIP-dma-channel", where CHIP is the processor
(mpc8349, mpc8350, etc.) and the second is
"fsl,elo-dma-channel". However, see note below.
- reg : <registers mapping for channel>
- cell-index : dma channel index starts at 0.
- compatible : must include "fsl,elo-dma-channel"
However, see note below.
- reg : DMA channel specific registers
- cell-index : DMA channel index starts at 0.
Optional properties:
- interrupts : <interrupt mapping for DMA channel IRQ>
(on 83xx this is expected to be identical to
the interrupts property of the parent node)
- interrupts : interrupt specifier for DMA channel IRQ
(on 83xx this is expected to be identical to
the interrupts property of the parent node)
- interrupt-parent : optional, if needed for interrupt mapping
Example:
@ -70,30 +67,27 @@ Example:
};
};
* Freescale 85xx/86xx DMA Controller
Freescale PowerPC 85xx/86xx have on chip general purpose DMA controllers.
** Freescale EloPlus DMA Controller
This is a 4-channel DMA controller with extended addresses and chaining,
mainly used in Freescale mpc85xx/86xx, Pxxx and BSC series chips, such as
mpc8540, mpc8641, p4080, bsc9131, etc.
Required properties:
- compatible : compatible list, contains 2 entries, first is
"fsl,CHIP-dma", where CHIP is the processor
(mpc8540, mpc8540, etc.) and the second is
"fsl,eloplus-dma"
- reg : <registers mapping for DMA general status reg>
- compatible : must include "fsl,eloplus-dma"
- reg : DMA General Status Register, i.e. DGSR which contains
status for all the 4 DMA channels
- cell-index : controller index. 0 for controller @ 0x21000,
1 for controller @ 0xc000
- ranges : Should be defined as specified in 1) to describe the
DMA controller channels.
- ranges : describes the mapping between the address space of the
DMA channels and the address space of the DMA controller
- DMA channel nodes:
- compatible : compatible list, contains 2 entries, first is
"fsl,CHIP-dma-channel", where CHIP is the processor
(mpc8540, mpc8560, etc.) and the second is
"fsl,eloplus-dma-channel". However, see note below.
- cell-index : dma channel index starts at 0.
- reg : <registers mapping for channel>
- interrupts : <interrupt mapping for DMA channel IRQ>
- compatible : must include "fsl,eloplus-dma-channel"
However, see note below.
- cell-index : DMA channel index starts at 0.
- reg : DMA channel specific registers
- interrupts : interrupt specifier for DMA channel IRQ
- interrupt-parent : optional, if needed for interrupt mapping
Example:
@ -134,6 +128,76 @@ Example:
};
};
** Freescale Elo3 DMA Controller
DMA controller which has the same function as EloPlus, except that Elo3 has 8
channels while EloPlus has only 4. It is used in Freescale Txxx and Bxxx
series chips, such as t1040, t4240, and b4860.
Required properties:
- compatible : must include "fsl,elo3-dma"
- reg : contains two entries for DMA General Status Registers,
i.e. DGSR0 which includes status for channel 1~4, and
DGSR1 for channel 5~8
- ranges : describes the mapping between the address space of the
DMA channels and the address space of the DMA controller
- DMA channel nodes:
- compatible : must include "fsl,eloplus-dma-channel"
- reg : DMA channel specific registers
- interrupts : interrupt specifier for DMA channel IRQ
- interrupt-parent : optional, if needed for interrupt mapping
Example:
dma@100300 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,elo3-dma";
reg = <0x100300 0x4>,
<0x100600 0x4>;
ranges = <0x0 0x100100 0x500>;
dma-channel@0 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x0 0x80>;
interrupts = <28 2 0 0>;
};
dma-channel@80 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x80 0x80>;
interrupts = <29 2 0 0>;
};
dma-channel@100 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x100 0x80>;
interrupts = <30 2 0 0>;
};
dma-channel@180 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x180 0x80>;
interrupts = <31 2 0 0>;
};
dma-channel@300 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x300 0x80>;
interrupts = <76 2 0 0>;
};
dma-channel@380 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x380 0x80>;
interrupts = <77 2 0 0>;
};
dma-channel@400 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x400 0x80>;
interrupts = <78 2 0 0>;
};
dma-channel@480 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x480 0x80>;
interrupts = <79 2 0 0>;
};
};
Note on DMA channel compatible properties: The compatible property must say
"fsl,elo-dma-channel" or "fsl,eloplus-dma-channel" to be used by the Elo DMA
driver (fsldma). Any DMA channel used by fsldma cannot be used by another

View File

@ -15,39 +15,48 @@ be built as module or inside kernel. Let's consider those cases.
Part 2 - When dmatest is built as a module...
After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest
folder with nodes will be created. There are two important files located. First
is the 'run' node that controls run and stop phases of the test, and the second
one, 'results', is used to get the test case results.
Note that in this case test will not run on load automatically.
Example of usage:
% modprobe dmatest channel=dma0chan0 timeout=2000 iterations=1 run=1
...or:
% modprobe dmatest
% echo dma0chan0 > /sys/module/dmatest/parameters/channel
% echo 2000 > /sys/module/dmatest/parameters/timeout
% echo 1 > /sys/module/dmatest/parameters/iterations
% echo 1 > /sys/kernel/debug/dmatest/run
% echo 1 > /sys/module/dmatest/parameters/run
...or on the kernel command line:
dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=1 dmatest.run=1
Hint: available channel list could be extracted by running the following
command:
% ls -1 /sys/class/dma/
After a while you will start to get messages about current status or error like
in the original code.
Once started a message like "dmatest: Started 1 threads using dma0chan0" is
emitted. After that only test failure messages are reported until the test
stops.
Note that running a new test will not stop any in progress test.
The following command should return actual state of the test.
% cat /sys/kernel/debug/dmatest/run
The following command returns the state of the test.
% cat /sys/module/dmatest/parameters/run
To wait for test done the user may perform a busy loop that checks the state.
To wait for test completion userspace can poll 'run' until it is false, or use
the wait parameter. Specifying 'wait=1' when loading the module causes module
initialization to pause until a test run has completed, while reading
/sys/module/dmatest/parameters/wait waits for any running test to complete
before returning. For example, the following scripts wait for 42 tests
to complete before exiting. Note that if 'iterations' is set to 'infinite' then
waiting is disabled.
% while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ]
> do
> echo -n "."
> sleep 1
> done
> echo
Example:
% modprobe dmatest run=1 iterations=42 wait=1
% modprobe -r dmatest
...or:
% modprobe dmatest run=1 iterations=42
% cat /sys/module/dmatest/parameters/wait
% modprobe -r dmatest
Part 3 - When built-in in the kernel...
@ -62,21 +71,22 @@ case. You always could check them at run-time by running
Part 4 - Gathering the test results
The module provides a storage for the test results in the memory. The gathered
data could be used after test is done.
Test results are printed to the kernel log buffer with the format:
The special file 'results' in the debugfs represents gathered data of the in
progress test. The messages collected are printed to the kernel log as well.
"dmatest: result <channel>: <test id>: '<error msg>' with src_off=<val> dst_off=<val> len=<val> (<err code>)"
Example of output:
% cat /sys/kernel/debug/dmatest/results
dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
% dmesg | tail -n 1
dmatest: result dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0)
The message format is unified across the different types of errors. A number in
the parens represents additional information, e.g. error code, error counter,
or status.
or status. A test thread also emits a summary line at completion listing the
number of tests executed, number that failed, and a result code.
Comparison between buffers is stored to the dedicated structure.
Example:
% dmesg | tail -n 1
dmatest: dma0chan0-copy0: summary 1 test, 0 failures 1000 iops 100000 KB/s (0)
Note that the verify result is now accessible only via file 'results' in the
debugfs.
The details of a data miscompare error are also emitted, but do not follow the
above format.

View File

@ -404,7 +404,7 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
BIT(slot));
if (edma_cc[ctlr]->intr_data[channel].callback)
edma_cc[ctlr]->intr_data[channel].callback(
channel, DMA_COMPLETE,
channel, EDMA_DMA_COMPLETE,
edma_cc[ctlr]->intr_data[channel].data);
}
} while (sh_ipr);
@ -459,7 +459,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
callback) {
edma_cc[ctlr]->intr_data[k].
callback(k,
DMA_CC_ERROR,
EDMA_DMA_CC_ERROR,
edma_cc[ctlr]->intr_data
[k].data);
}

View File

@ -393,36 +393,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
return slot_cnt;
}
static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
return 0;
}
static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
switch (chan->device->id) {
case DMA0_ID:
case DMA1_ID:
return hw_desc.dma->dest_addr;
case AAU_ID:
return hw_desc.aau->dest_addr;
default:
BUG();
}
return 0;
}
static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
BUG();
return 0;
}
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{

View File

@ -82,8 +82,6 @@ struct iop_adma_chan {
* @slot_cnt: total slots used in an transaction (group of operations)
* @slots_per_op: number of slots per operation
* @idx: pool index
* @unmap_src_cnt: number of xor sources
* @unmap_len: transaction bytecount
* @tx_list: list of descriptors that are associated with one operation
* @async_tx: support for the async_tx api
* @group_list: list of slots that make up a multi-descriptor transaction
@ -99,8 +97,6 @@ struct iop_adma_desc_slot {
u16 slot_cnt;
u16 slots_per_op;
u16 idx;
u16 unmap_src_cnt;
size_t unmap_len;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
union {

View File

@ -218,20 +218,6 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op)
#define iop_chan_pq_slot_count iop_chan_xor_slot_count
#define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count
static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
return hw_desc->dest_addr;
}
static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
return hw_desc->q_dest_addr;
}
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *chan)
{
@ -350,18 +336,6 @@ iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
hw_desc->desc_ctrl = u_desc_ctrl.value;
}
static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
union {
u32 value;
struct iop13xx_adma_desc_ctrl field;
} u_desc_ctrl;
u_desc_ctrl.value = hw_desc->desc_ctrl;
return u_desc_ctrl.field.pq_xfer_en;
}
static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
unsigned long flags)

View File

@ -223,13 +223,13 @@
reg = <0xe2000 0x1000>;
};
/include/ "qoriq-dma-0.dtsi"
/include/ "elo3-dma-0.dtsi"
dma@100300 {
fsl,iommu-parent = <&pamu0>;
fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */
};
/include/ "qoriq-dma-1.dtsi"
/include/ "elo3-dma-1.dtsi"
dma@101300 {
fsl,iommu-parent = <&pamu0>;
fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */

View File

@ -0,0 +1,82 @@
/*
* QorIQ Elo3 DMA device tree stub [ controller @ offset 0x100000 ]
*
* Copyright 2013 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
dma0: dma@100300 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,elo3-dma";
reg = <0x100300 0x4>,
<0x100600 0x4>;
ranges = <0x0 0x100100 0x500>;
dma-channel@0 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x0 0x80>;
interrupts = <28 2 0 0>;
};
dma-channel@80 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x80 0x80>;
interrupts = <29 2 0 0>;
};
dma-channel@100 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x100 0x80>;
interrupts = <30 2 0 0>;
};
dma-channel@180 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x180 0x80>;
interrupts = <31 2 0 0>;
};
dma-channel@300 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x300 0x80>;
interrupts = <76 2 0 0>;
};
dma-channel@380 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x380 0x80>;
interrupts = <77 2 0 0>;
};
dma-channel@400 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x400 0x80>;
interrupts = <78 2 0 0>;
};
dma-channel@480 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x480 0x80>;
interrupts = <79 2 0 0>;
};
};

View File

@ -0,0 +1,82 @@
/*
* QorIQ Elo3 DMA device tree stub [ controller @ offset 0x101000 ]
*
* Copyright 2013 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Freescale Semiconductor nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation, either version 2 of that License or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
dma1: dma@101300 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,elo3-dma";
reg = <0x101300 0x4>,
<0x101600 0x4>;
ranges = <0x0 0x101100 0x500>;
dma-channel@0 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x0 0x80>;
interrupts = <32 2 0 0>;
};
dma-channel@80 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x80 0x80>;
interrupts = <33 2 0 0>;
};
dma-channel@100 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x100 0x80>;
interrupts = <34 2 0 0>;
};
dma-channel@180 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x180 0x80>;
interrupts = <35 2 0 0>;
};
dma-channel@300 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x300 0x80>;
interrupts = <80 2 0 0>;
};
dma-channel@380 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x380 0x80>;
interrupts = <81 2 0 0>;
};
dma-channel@400 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x400 0x80>;
interrupts = <82 2 0 0>;
};
dma-channel@480 {
compatible = "fsl,eloplus-dma-channel";
reg = <0x480 0x80>;
interrupts = <83 2 0 0>;
};
};

View File

@ -387,8 +387,8 @@
reg = <0xea000 0x4000>;
};
/include/ "qoriq-dma-0.dtsi"
/include/ "qoriq-dma-1.dtsi"
/include/ "elo3-dma-0.dtsi"
/include/ "elo3-dma-1.dtsi"
/include/ "qoriq-espi-0.dtsi"
spi@110000 {

View File

@ -50,33 +50,36 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
&dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *unmap = NULL;
if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
dma_addr_t dma_dest, dma_src;
if (device)
unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
unsigned long dma_prep_flags = 0;
if (submit->cb_fn)
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
DMA_FROM_DEVICE);
dma_src = dma_map_page(device->dev, src, src_offset, len,
DMA_TO_DEVICE);
unmap->to_cnt = 1;
unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
DMA_TO_DEVICE);
unmap->from_cnt = 1;
unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
DMA_FROM_DEVICE);
unmap->len = len;
tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
len, dma_prep_flags);
if (!tx) {
dma_unmap_page(device->dev, dma_dest, len,
DMA_FROM_DEVICE);
dma_unmap_page(device->dev, dma_src, len,
DMA_TO_DEVICE);
}
tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
unmap->addr[0], len,
dma_prep_flags);
}
if (tx) {
pr_debug("%s: (async) len: %zu\n", __func__, len);
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
} else {
void *dest_buf, *src_buf;
@ -96,6 +99,8 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
async_tx_sync_epilog(submit);
}
dmaengine_unmap_put(unmap);
return tx;
}
EXPORT_SYMBOL_GPL(async_memcpy);

View File

@ -46,49 +46,24 @@ static struct page *pq_scribble_page;
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
const unsigned char *scfs, unsigned int offset, int disks,
size_t len, dma_addr_t *dma_src,
do_async_gen_syndrome(struct dma_chan *chan,
const unsigned char *scfs, int disks,
struct dmaengine_unmap_data *unmap,
enum dma_ctrl_flags dma_flags,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct dma_device *dma = chan->device;
enum dma_ctrl_flags dma_flags = 0;
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
dma_async_tx_callback cb_param_orig = submit->cb_param;
int src_cnt = disks - 2;
unsigned char coefs[src_cnt];
unsigned short pq_src_cnt;
dma_addr_t dma_dest[2];
int src_off = 0;
int idx;
int i;
/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
if (P(blocks, disks))
dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
len, DMA_BIDIRECTIONAL);
else
dma_flags |= DMA_PREP_PQ_DISABLE_P;
if (Q(blocks, disks))
dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
len, DMA_BIDIRECTIONAL);
else
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
/* convert source addresses being careful to collapse 'empty'
* sources and update the coefficients accordingly
*/
for (i = 0, idx = 0; i < src_cnt; i++) {
if (blocks[i] == NULL)
continue;
dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
DMA_TO_DEVICE);
coefs[idx] = scfs[i];
idx++;
}
src_cnt = idx;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
while (src_cnt > 0) {
submit->flags = flags_orig;
@ -100,28 +75,25 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
if (src_cnt > pq_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
/* Since we have clobbered the src_list we are committed
* to doing this asynchronously. Drivers force forward
* progress in case they can not provide a descriptor
/* Drivers force forward progress in case they can not provide
* a descriptor
*/
for (;;) {
dma_dest[0] = unmap->addr[disks - 2];
dma_dest[1] = unmap->addr[disks - 1];
tx = dma->device_prep_dma_pq(chan, dma_dest,
&dma_src[src_off],
&unmap->addr[src_off],
pq_src_cnt,
&coefs[src_off], len,
&scfs[src_off], unmap->len,
dma_flags);
if (likely(tx))
break;
@ -129,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
dma_async_issue_pending(chan);
}
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
@ -188,10 +161,6 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
* set to NULL those buffers will be replaced with the raid6_zero_page
* in the synchronous path and omitted in the hardware-asynchronous
* path.
*
* 'blocks' note: if submit->scribble is NULL then the contents of
* 'blocks' may be overwritten to perform address conversions
* (dma_map_page() or page_address()).
*/
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@ -202,26 +171,69 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
&P(blocks, disks), 2,
blocks, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
dma_addr_t *dma_src = NULL;
struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
if (submit->scribble)
dma_src = submit->scribble;
else if (sizeof(dma_addr_t) <= sizeof(struct page *))
dma_src = (dma_addr_t *) blocks;
if (device)
unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
if (dma_src && device &&
if (unmap &&
(src_cnt <= dma_maxpq(device, 0) ||
dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = 0;
unsigned char coefs[src_cnt];
int i, j;
/* run the p+q asynchronously */
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
disks, len, dma_src, submit);
/* convert source addresses being careful to collapse 'empty'
* sources and update the coefficients accordingly
*/
unmap->len = len;
for (i = 0, j = 0; i < src_cnt; i++) {
if (blocks[i] == NULL)
continue;
unmap->addr[j] = dma_map_page(device->dev, blocks[i], offset,
len, DMA_TO_DEVICE);
coefs[j] = raid6_gfexp[i];
unmap->to_cnt++;
j++;
}
/*
* DMAs use destinations as sources,
* so use BIDIRECTIONAL mapping
*/
unmap->bidi_cnt++;
if (P(blocks, disks))
unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
offset, len, DMA_BIDIRECTIONAL);
else {
unmap->addr[j++] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_P;
}
unmap->bidi_cnt++;
if (Q(blocks, disks))
unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
offset, len, DMA_BIDIRECTIONAL);
else {
unmap->addr[j++] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
}
tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
dmaengine_unmap_put(unmap);
return tx;
}
dmaengine_unmap_put(unmap);
/* run the pq synchronously */
pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
@ -277,50 +289,60 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
struct dma_async_tx_descriptor *tx;
unsigned char coefs[disks-2];
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
dma_addr_t *dma_src = NULL;
int src_cnt = 0;
struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(disks < 4);
if (submit->scribble)
dma_src = submit->scribble;
else if (sizeof(dma_addr_t) <= sizeof(struct page *))
dma_src = (dma_addr_t *) blocks;
if (device)
unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
if (dma_src && device && disks <= dma_maxpq(device, 0) &&
if (unmap && disks <= dma_maxpq(device, 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
struct device *dev = device->dev;
dma_addr_t *pq = &dma_src[disks-2];
int i;
dma_addr_t pq[2];
int i, j = 0, src_cnt = 0;
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
if (!P(blocks, disks))
unmap->len = len;
for (i = 0; i < disks-2; i++)
if (likely(blocks[i])) {
unmap->addr[j] = dma_map_page(dev, blocks[i],
offset, len,
DMA_TO_DEVICE);
coefs[j] = raid6_gfexp[i];
unmap->to_cnt++;
src_cnt++;
j++;
}
if (!P(blocks, disks)) {
pq[0] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_P;
else
} else {
pq[0] = dma_map_page(dev, P(blocks, disks),
offset, len,
DMA_TO_DEVICE);
if (!Q(blocks, disks))
unmap->addr[j++] = pq[0];
unmap->to_cnt++;
}
if (!Q(blocks, disks)) {
pq[1] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
else
} else {
pq[1] = dma_map_page(dev, Q(blocks, disks),
offset, len,
DMA_TO_DEVICE);
unmap->addr[j++] = pq[1];
unmap->to_cnt++;
}
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
for (i = 0; i < disks-2; i++)
if (likely(blocks[i])) {
dma_src[src_cnt] = dma_map_page(dev, blocks[i],
offset, len,
DMA_TO_DEVICE);
coefs[src_cnt] = raid6_gfexp[i];
src_cnt++;
}
for (;;) {
tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
tx = device->device_prep_dma_pq_val(chan, pq,
unmap->addr,
src_cnt,
coefs,
len, pqres,
@ -330,6 +352,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
return tx;

View File

@ -26,6 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/dmaengine.h>
static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
@ -34,35 +35,45 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, srcs, 2, len);
struct dma_device *dma = chan ? chan->device : NULL;
struct dmaengine_unmap_data *unmap = NULL;
const u8 *amul, *bmul;
u8 ax, bx;
u8 *a, *b, *c;
if (dma) {
dma_addr_t dma_dest[2];
dma_addr_t dma_src[2];
if (dma)
unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
if (unmap) {
struct device *dev = dma->dev;
dma_addr_t pq[2];
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef,
unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
unmap->to_cnt = 2;
unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
unmap->bidi_cnt = 1;
/* engine only looks at Q, but expects it to follow P */
pq[1] = unmap->addr[2];
unmap->len = len;
tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
len, dma_flags);
if (tx) {
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
dmaengine_unmap_put(unmap);
return tx;
}
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
dma_unmap_page(dev, dma_src[1], len, DMA_TO_DEVICE);
dmaengine_unmap_put(unmap);
}
/* run the operation synchronously */
@ -89,23 +100,38 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&dest, 1, &src, 1, len);
struct dma_device *dma = chan ? chan->device : NULL;
struct dmaengine_unmap_data *unmap = NULL;
const u8 *qmul; /* Q multiplier table */
u8 *d, *s;
if (dma) {
if (dma)
unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
if (unmap) {
dma_addr_t dma_dest[2];
dma_addr_t dma_src[1];
struct device *dev = dma->dev;
struct dma_async_tx_descriptor *tx;
enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef,
len, dma_flags);
unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
unmap->to_cnt++;
unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
dma_dest[1] = unmap->addr[1];
unmap->bidi_cnt++;
unmap->len = len;
/* this looks funny, but the engine looks for Q at
* dma_dest[1] and ignores dma_dest[0] as a dest
* due to DMA_PREP_PQ_DISABLE_P
*/
tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
1, &coef, len, dma_flags);
if (tx) {
dma_set_unmap(tx, unmap);
dmaengine_unmap_put(unmap);
async_tx_submit(chan, tx, submit);
return tx;
}
@ -113,8 +139,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
/* could not get a descriptor, unmap and fall through to
* the synchronous path
*/
dma_unmap_page(dev, dma_dest[1], len, DMA_BIDIRECTIONAL);
dma_unmap_page(dev, dma_src[0], len, DMA_TO_DEVICE);
dmaengine_unmap_put(unmap);
}
/* no channel available, or failed to allocate a descriptor, so

View File

@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
}
device->device_issue_pending(chan);
} else {
if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
panic("%s: DMA error waiting for depend_tx\n",
__func__);
tx->tx_submit(tx);
@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
* we are referring to the correct operation
*/
BUG_ON(async_tx_test_ack(*tx));
if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
panic("%s: DMA error waiting for transaction\n",
__func__);
async_tx_ack(*tx);

View File

@ -33,48 +33,31 @@
/* do_async_xor - dma map the pages and perform the xor with an engine */
static __async_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
struct async_submit_ctl *submit)
{
struct dma_device *dma = chan->device;
struct dma_async_tx_descriptor *tx = NULL;
int src_off = 0;
int i;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
void *cb_param_orig = submit->cb_param;
enum async_tx_flags flags_orig = submit->flags;
enum dma_ctrl_flags dma_flags;
int xor_src_cnt = 0;
dma_addr_t dma_dest;
/* map the dest bidrectional in case it is re-used as a source */
dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
for (i = 0; i < src_cnt; i++) {
/* only map the dest once */
if (!src_list[i])
continue;
if (unlikely(src_list[i] == dest)) {
dma_src[xor_src_cnt++] = dma_dest;
continue;
}
dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
len, DMA_TO_DEVICE);
}
src_cnt = xor_src_cnt;
enum dma_ctrl_flags dma_flags = 0;
int src_cnt = unmap->to_cnt;
int xor_src_cnt;
dma_addr_t dma_dest = unmap->addr[unmap->to_cnt];
dma_addr_t *src_list = unmap->addr;
while (src_cnt) {
dma_addr_t tmp;
submit->flags = flags_orig;
dma_flags = 0;
xor_src_cnt = min(src_cnt, (int)dma->max_xor);
/* if we are submitting additional xors, leave the chain open,
* clear the callback parameters, and leave the destination
* buffer mapped
/* if we are submitting additional xors, leave the chain open
* and clear the callback parameters
*/
if (src_cnt > xor_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
@ -85,12 +68,18 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
dma_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
/* Since we have clobbered the src_list we are committed
* to doing this asynchronously. Drivers force forward progress
* in case they can not provide a descriptor
/* Drivers force forward progress in case they can not provide a
* descriptor
*/
tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
xor_src_cnt, len, dma_flags);
tmp = src_list[0];
if (src_list > unmap->addr)
src_list[0] = dma_dest;
tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
xor_src_cnt, unmap->len,
dma_flags);
src_list[0] = tmp;
if (unlikely(!tx))
async_tx_quiesce(&submit->depend_tx);
@ -99,22 +88,21 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
while (unlikely(!tx)) {
dma_async_issue_pending(chan);
tx = dma->device_prep_dma_xor(chan, dma_dest,
&dma_src[src_off],
xor_src_cnt, len,
src_list,
xor_src_cnt, unmap->len,
dma_flags);
}
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
if (src_cnt > xor_src_cnt) {
/* drop completed sources */
src_cnt -= xor_src_cnt;
src_off += xor_src_cnt;
/* use the intermediate result a source */
dma_src[--src_off] = dma_dest;
src_cnt++;
src_list += xor_src_cnt - 1;
} else
break;
}
@ -189,22 +177,40 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
&dest, 1, src_list,
src_cnt, len);
dma_addr_t *dma_src = NULL;
struct dma_device *device = chan ? chan->device : NULL;
struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(src_cnt <= 1);
if (submit->scribble)
dma_src = submit->scribble;
else if (sizeof(dma_addr_t) <= sizeof(struct page *))
dma_src = (dma_addr_t *) src_list;
if (device)
unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
struct dma_async_tx_descriptor *tx;
int i, j;
if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
/* run the xor asynchronously */
pr_debug("%s (async): len: %zu\n", __func__, len);
return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
dma_src, submit);
unmap->len = len;
for (i = 0, j = 0; i < src_cnt; i++) {
if (!src_list[i])
continue;
unmap->to_cnt++;
unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
offset, len, DMA_TO_DEVICE);
}
/* map it bidirectional as it may be re-used as a source */
unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
DMA_BIDIRECTIONAL);
unmap->bidi_cnt = 1;
tx = do_async_xor(chan, unmap, submit);
dmaengine_unmap_put(unmap);
return tx;
} else {
dmaengine_unmap_put(unmap);
/* run the xor synchronously */
pr_debug("%s (sync): len: %zu\n", __func__, len);
WARN_ONCE(chan, "%s: no space for dma address conversion\n",
@ -268,16 +274,14 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t *dma_src = NULL;
struct dmaengine_unmap_data *unmap = NULL;
BUG_ON(src_cnt <= 1);
if (submit->scribble)
dma_src = submit->scribble;
else if (sizeof(dma_addr_t) <= sizeof(struct page *))
dma_src = (dma_addr_t *) src_list;
if (device)
unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
if (dma_src && device && src_cnt <= device->max_xor &&
if (unmap && src_cnt <= device->max_xor &&
is_dma_xor_aligned(device, offset, 0, len)) {
unsigned long dma_prep_flags = 0;
int i;
@ -288,11 +292,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
dma_prep_flags |= DMA_PREP_INTERRUPT;
if (submit->flags & ASYNC_TX_FENCE)
dma_prep_flags |= DMA_PREP_FENCE;
for (i = 0; i < src_cnt; i++)
dma_src[i] = dma_map_page(device->dev, src_list[i],
offset, len, DMA_TO_DEVICE);
tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
for (i = 0; i < src_cnt; i++) {
unmap->addr[i] = dma_map_page(device->dev, src_list[i],
offset, len, DMA_TO_DEVICE);
unmap->to_cnt++;
}
unmap->len = len;
tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt,
len, result,
dma_prep_flags);
if (unlikely(!tx)) {
@ -301,11 +309,11 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
while (!tx) {
dma_async_issue_pending(chan);
tx = device->device_prep_dma_xor_val(chan,
dma_src, src_cnt, len, result,
unmap->addr, src_cnt, len, result,
dma_prep_flags);
}
}
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
} else {
enum async_tx_flags flags_orig = submit->flags;
@ -327,6 +335,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
async_tx_sync_epilog(submit);
submit->flags = flags_orig;
}
dmaengine_unmap_put(unmap);
return tx;
}

View File

@ -28,7 +28,7 @@
#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
#define NDISKS 16 /* Including P and Q */
#define NDISKS 64 /* Including P and Q */
static struct page *dataptrs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
@ -219,6 +219,14 @@ static int raid6_test(void)
err += test(11, &tests);
err += test(12, &tests);
}
/* the 24 disk case is special for ioatdma as it is the boundary point
* at which it needs to switch from 8-source ops to 16-source
* ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
*/
if (NDISKS > 24)
err += test(24, &tests);
err += test(NDISKS, &tests);
pr("\n");

View File

@ -396,8 +396,7 @@ dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
struct dma_async_tx_descriptor *tx;
struct dma_chan *chan = acdev->dma_chan;
dma_cookie_t cookie;
unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
DMA_COMPL_SKIP_DEST_UNMAP;
unsigned long flags = DMA_PREP_INTERRUPT;
int ret = 0;
tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);

View File

@ -89,14 +89,15 @@ config AT_HDMAC
Support the Atmel AHB DMA controller.
config FSL_DMA
tristate "Freescale Elo and Elo Plus DMA support"
tristate "Freescale Elo series DMA support"
depends on FSL_SOC
select DMA_ENGINE
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
Enable support for the Freescale Elo and Elo Plus DMA controllers.
The Elo is the DMA controller on some 82xx and 83xx parts, and the
Elo Plus is the DMA controller on 85xx and 86xx parts.
Enable support for the Freescale Elo series DMA controllers.
The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
some Txxx and Bxxx parts.
config MPC512X_DMA
tristate "Freescale MPC512x built-in DMA engine support"

View File

@ -1164,42 +1164,12 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
kfree(txd);
}
static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
struct device *dev = txd->vd.tx.chan->device->dev;
struct pl08x_sg *dsg;
if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_single(dev, dsg->src_addr, dsg->len,
DMA_TO_DEVICE);
else {
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_page(dev, dsg->src_addr, dsg->len,
DMA_TO_DEVICE);
}
}
if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_single(dev, dsg->dst_addr, dsg->len,
DMA_FROM_DEVICE);
else
list_for_each_entry(dsg, &txd->dsg_list, node)
dma_unmap_page(dev, dsg->dst_addr, dsg->len,
DMA_FROM_DEVICE);
}
}
static void pl08x_desc_free(struct virt_dma_desc *vd)
{
struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
if (!plchan->slave)
pl08x_unmap_buffers(txd);
dma_descriptor_unmap(txd);
if (!txd->done)
pl08x_release_mux(plchan);
@ -1252,7 +1222,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
size_t bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
/*
@ -1267,7 +1237,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&plchan->vc.lock, flags);
ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS) {
if (ret != DMA_COMPLETE) {
vd = vchan_find_desc(&plchan->vc, cookie);
if (vd) {
/* On the issued list, so hasn't been processed yet */
@ -2138,8 +2108,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
DRIVER_NAME, pl08x);
ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
if (ret) {
dev_err(&adev->dev, "%s failed to request interrupt %d\n",
__func__, adev->irq[0]);

View File

@ -344,31 +344,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* move myself to free_list */
list_move(&desc->desc_node, &atchan->free_list);
/* unmap dma addresses (not on slave channels) */
if (!atchan->chan_common.private) {
struct device *parent = chan2parent(&atchan->chan_common);
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(parent,
desc->lli.daddr,
desc->len, DMA_FROM_DEVICE);
else
dma_unmap_page(parent,
desc->lli.daddr,
desc->len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(parent,
desc->lli.saddr,
desc->len, DMA_TO_DEVICE);
else
dma_unmap_page(parent,
desc->lli.saddr,
desc->len, DMA_TO_DEVICE);
}
}
dma_descriptor_unmap(txd);
/* for cyclic transfers,
* no need to replay callback function while stopping */
if (!atc_chan_is_cyclic(atchan)) {
@ -1102,7 +1078,7 @@ atc_tx_status(struct dma_chan *chan,
int bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
/*
* There's no point calculating the residue if there's

View File

@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
dma_set_residue(txstate, coh901318_get_bytes_left(chan));
@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
"coh901318", base);
if (err)
return err;

View File

@ -141,6 +141,9 @@ struct cppi41_dd {
const struct chan_queues *queues_rx;
const struct chan_queues *queues_tx;
struct chan_queues td_queue;
/* context for suspend/resume */
unsigned int dma_tdfdq;
};
#define FIST_COMPLETION_QUEUE 93
@ -263,6 +266,15 @@ static u32 pd_trans_len(u32 val)
return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
}
static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
{
u32 desc;
desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
desc &= ~0x1f;
return desc;
}
static irqreturn_t cppi41_irq(int irq, void *data)
{
struct cppi41_dd *cdd = data;
@ -300,8 +312,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
q_num = __fls(val);
val &= ~(1 << q_num);
q_num += 32 * i;
desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num));
desc &= ~0x1f;
desc = cppi41_pop_desc(cdd, q_num);
c = desc_to_chan(cdd, desc);
if (WARN_ON(!c)) {
pr_err("%s() q %d desc %08x\n", __func__,
@ -353,7 +364,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
/* lock */
ret = dma_cookie_status(chan, cookie, txstate);
if (txstate && ret == DMA_SUCCESS)
if (txstate && ret == DMA_COMPLETE)
txstate->residue = c->residue;
/* unlock */
@ -517,15 +528,6 @@ static void cppi41_compute_td_desc(struct cppi41_desc *d)
d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
}
static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
{
u32 desc;
desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
desc &= ~0x1f;
return desc;
}
static int cppi41_tear_down_chan(struct cppi41_channel *c)
{
struct cppi41_dd *cdd = c->cdd;
@ -561,36 +563,26 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
c->td_retry = 100;
}
if (!c->td_seen) {
unsigned td_comp_queue;
if (!c->td_seen || !c->td_desc_seen) {
if (c->is_tx)
td_comp_queue = cdd->td_queue.complete;
else
td_comp_queue = c->q_comp_num;
desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
if (!desc_phys)
desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
desc_phys = cppi41_pop_desc(cdd, td_comp_queue);
if (desc_phys) {
__iormb();
if (desc_phys == td_desc_phys) {
u32 pd0;
pd0 = td->pd0;
WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
WARN_ON((pd0 & 0x1f) != c->port_num);
} else {
WARN_ON_ONCE(1);
}
c->td_seen = 1;
}
}
if (!c->td_desc_seen) {
desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
if (desc_phys) {
__iormb();
WARN_ON(c->desc_phys != desc_phys);
if (desc_phys == c->desc_phys) {
c->td_desc_seen = 1;
} else if (desc_phys == td_desc_phys) {
u32 pd0;
__iormb();
pd0 = td->pd0;
WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
WARN_ON((pd0 & 0x1f) != c->port_num);
c->td_seen = 1;
} else if (desc_phys) {
WARN_ON_ONCE(1);
}
}
c->td_retry--;
@ -609,7 +601,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
WARN_ON(!c->td_retry);
if (!c->td_desc_seen) {
desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
desc_phys = cppi41_pop_desc(cdd, c->q_num);
WARN_ON(!desc_phys);
}
@ -674,14 +666,14 @@ static void cleanup_chans(struct cppi41_dd *cdd)
}
}
static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
struct cppi41_channel *cchan;
int i;
int ret;
u32 n_chans;
ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
ret = of_property_read_u32(dev->of_node, "#dma-channels",
&n_chans);
if (ret)
return ret;
@ -719,7 +711,7 @@ err:
return -ENOMEM;
}
static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
unsigned int mem_decs;
int i;
@ -731,7 +723,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
dma_free_coherent(dev, mem_decs, cdd->cd,
cdd->descs_phys);
}
}
@ -741,19 +733,19 @@ static void disable_sched(struct cppi41_dd *cdd)
cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}
static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
disable_sched(cdd);
purge_descs(pdev, cdd);
purge_descs(dev, cdd);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
cdd->scratch_phys);
}
static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
unsigned int desc_size;
unsigned int mem_decs;
@ -777,7 +769,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
reg |= ilog2(ALLOC_DECS_NUM) - 5;
BUILD_BUG_ON(DESCS_AREAS != 1);
cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
cdd->cd = dma_alloc_coherent(dev, mem_decs,
&cdd->descs_phys, GFP_KERNEL);
if (!cdd->cd)
return -ENOMEM;
@ -813,12 +805,12 @@ static void init_sched(struct cppi41_dd *cdd)
cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}
static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
int ret;
BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
&cdd->scratch_phys, GFP_KERNEL);
if (!cdd->qmgr_scratch)
return -ENOMEM;
@ -827,7 +819,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
ret = init_descs(pdev, cdd);
ret = init_descs(dev, cdd);
if (ret)
goto err_td;
@ -835,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
init_sched(cdd);
return 0;
err_td:
deinit_cpii41(pdev, cdd);
deinit_cppi41(dev, cdd);
return ret;
}
@ -914,11 +906,11 @@ static const struct of_device_id cppi41_dma_ids[] = {
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
const struct of_device_id *of_id;
of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
of_id = of_match_node(cppi41_dma_ids, dev->of_node);
if (!of_id)
return NULL;
return of_id->data;
@ -927,11 +919,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
static int cppi41_dma_probe(struct platform_device *pdev)
{
struct cppi41_dd *cdd;
struct device *dev = &pdev->dev;
const struct cppi_glue_infos *glue_info;
int irq;
int ret;
glue_info = get_glue_info(pdev);
glue_info = get_glue_info(dev);
if (!glue_info)
return -EINVAL;
@ -946,14 +939,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
cdd->ddev.device_control = cppi41_dma_control;
cdd->ddev.dev = &pdev->dev;
cdd->ddev.dev = dev;
INIT_LIST_HEAD(&cdd->ddev.channels);
cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
cdd->usbss_mem = of_iomap(dev->of_node, 0);
cdd->ctrl_mem = of_iomap(dev->of_node, 1);
cdd->sched_mem = of_iomap(dev->of_node, 2);
cdd->qmgr_mem = of_iomap(dev->of_node, 3);
if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
!cdd->qmgr_mem) {
@ -961,31 +954,31 @@ static int cppi41_dma_probe(struct platform_device *pdev)
goto err_remap;
}
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_get_sync;
cdd->queues_rx = glue_info->queues_rx;
cdd->queues_tx = glue_info->queues_tx;
cdd->td_queue = glue_info->td_queue;
ret = init_cppi41(pdev, cdd);
ret = init_cppi41(dev, cdd);
if (ret)
goto err_init_cppi;
ret = cppi41_add_chans(pdev, cdd);
ret = cppi41_add_chans(dev, cdd);
if (ret)
goto err_chans;
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq)
goto err_irq;
cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
dev_name(&pdev->dev), cdd);
dev_name(dev), cdd);
if (ret)
goto err_irq;
cdd->irq = irq;
@ -994,7 +987,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (ret)
goto err_dma_reg;
ret = of_dma_controller_register(pdev->dev.of_node,
ret = of_dma_controller_register(dev->of_node,
cppi41_dma_xlate, &cpp41_dma_info);
if (ret)
goto err_of;
@ -1009,11 +1002,11 @@ err_irq:
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
cleanup_chans(cdd);
err_chans:
deinit_cpii41(pdev, cdd);
deinit_cppi41(dev, cdd);
err_init_cppi:
pm_runtime_put(&pdev->dev);
pm_runtime_put(dev);
err_get_sync:
pm_runtime_disable(&pdev->dev);
pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
@ -1033,7 +1026,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
free_irq(cdd->irq, cdd);
cleanup_chans(cdd);
deinit_cpii41(pdev, cdd);
deinit_cppi41(&pdev->dev, cdd);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
@ -1044,12 +1037,53 @@ static int cppi41_dma_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int cppi41_suspend(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
disable_sched(cdd);
return 0;
}
static int cppi41_resume(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
struct cppi41_channel *c;
int i;
for (i = 0; i < DESCS_AREAS; i++)
cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
if (!c->is_tx)
cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
init_sched(cdd);
cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
static struct platform_driver cpp41_dma_driver = {
.probe = cppi41_dma_probe,
.remove = cppi41_dma_remove,
.driver = {
.name = "cppi41-dma-engine",
.owner = THIS_MODULE,
.pm = &cppi41_pm_ops,
.of_match_table = of_match_ptr(cppi41_dma_ids),
},
};


@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
unsigned long flags;
status = dma_cookie_status(c, cookie, state);
if (status == DMA_SUCCESS || !state)
if (status == DMA_COMPLETE || !state)
return status;
spin_lock_irqsave(&chan->vchan.lock, flags);


@ -65,6 +65,7 @@
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
@ -901,98 +902,132 @@ void dma_async_device_unregister(struct dma_device *device)
}
EXPORT_SYMBOL(dma_async_device_unregister);
/**
* dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
* @chan: DMA channel to offload copy to
* @dest: destination address (virtual)
* @src: source address (virtual)
* @len: length
*
* Both @dest and @src must be mappable to a bus address according to the
* DMA mapping API rules for streaming mappings.
* Both @dest and @src must stay memory resident (kernel memory or locked
* user space pages).
*/
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
void *src, size_t len)
struct dmaengine_unmap_pool {
struct kmem_cache *cache;
const char *name;
mempool_t *pool;
size_t size;
};
#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
__UNMAP_POOL(16),
__UNMAP_POOL(128),
__UNMAP_POOL(256),
#endif
};
static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
unsigned long flags;
int order = get_count_order(nr);
dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
flags = DMA_CTRL_ACK |
DMA_COMPL_SRC_UNMAP_SINGLE |
DMA_COMPL_DEST_UNMAP_SINGLE;
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
switch (order) {
case 0 ... 1:
return &unmap_pool[0];
case 2 ... 4:
return &unmap_pool[1];
case 5 ... 7:
return &unmap_pool[2];
case 8:
return &unmap_pool[3];
default:
BUG();
return NULL;
}
}
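As a rough illustration of the pool selection above (not part of the patch; the helper name below is made up), a request for nr unmap entries is rounded up to a power of two and steered to the smallest pool that can hold it, assuming the four-pool layout built with CONFIG_ASYNC_TX_DMA:
/* Sketch only: mirrors __get_unmap_pool()'s order-based lookup. */
static int unmap_pool_index(int nr)
{
	int order = get_count_order(nr);	/* e.g. nr = 3 -> order = 2 */

	if (order <= 1)
		return 0;	/* "dmaengine-unmap-2"   */
	if (order <= 4)
		return 1;	/* "dmaengine-unmap-16"  */
	if (order <= 7)
		return 2;	/* "dmaengine-unmap-128" */
	return 3;		/* "dmaengine-unmap-256"; larger requests BUG() in the real code */
}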
if (!tx) {
dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
static void dmaengine_unmap(struct kref *kref)
{
struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
struct device *dev = unmap->dev;
int cnt, i;
cnt = unmap->to_cnt;
for (i = 0; i < cnt; i++)
dma_unmap_page(dev, unmap->addr[i], unmap->len,
DMA_TO_DEVICE);
cnt += unmap->from_cnt;
for (; i < cnt; i++)
dma_unmap_page(dev, unmap->addr[i], unmap->len,
DMA_FROM_DEVICE);
cnt += unmap->bidi_cnt;
for (; i < cnt; i++) {
if (unmap->addr[i] == 0)
continue;
dma_unmap_page(dev, unmap->addr[i], unmap->len,
DMA_BIDIRECTIONAL);
}
mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
if (unmap)
kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
static void dmaengine_destroy_unmap_pool(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
struct dmaengine_unmap_pool *p = &unmap_pool[i];
if (p->pool)
mempool_destroy(p->pool);
p->pool = NULL;
if (p->cache)
kmem_cache_destroy(p->cache);
p->cache = NULL;
}
}
static int __init dmaengine_init_unmap_pool(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
struct dmaengine_unmap_pool *p = &unmap_pool[i];
size_t size;
size = sizeof(struct dmaengine_unmap_data) +
sizeof(dma_addr_t) * p->size;
p->cache = kmem_cache_create(p->name, size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!p->cache)
break;
p->pool = mempool_create_slab_pool(1, p->cache);
if (!p->pool)
break;
}
tx->callback = NULL;
cookie = tx->tx_submit(tx);
if (i == ARRAY_SIZE(unmap_pool))
return 0;
preempt_disable();
__this_cpu_add(chan->local->bytes_transferred, len);
__this_cpu_inc(chan->local->memcpy_count);
preempt_enable();
return cookie;
dmaengine_destroy_unmap_pool();
return -ENOMEM;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
/**
* dma_async_memcpy_buf_to_pg - offloaded copy from address to page
* @chan: DMA channel to offload copy to
* @page: destination page
* @offset: offset in page to copy to
* @kdata: source address (virtual)
* @len: length
*
* Both @page/@offset and @kdata must be mappable to a bus address according
* to the DMA mapping API rules for streaming mappings.
* Both @page/@offset and @kdata must stay memory resident (kernel memory or
* locked user space pages)
*/
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
unsigned int offset, void *kdata, size_t len)
struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
unsigned long flags;
struct dmaengine_unmap_data *unmap;
dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
if (!unmap)
return NULL;
if (!tx) {
dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
}
memset(unmap, 0, sizeof(*unmap));
kref_init(&unmap->kref);
unmap->dev = dev;
tx->callback = NULL;
cookie = tx->tx_submit(tx);
preempt_disable();
__this_cpu_add(chan->local->bytes_transferred, len);
__this_cpu_inc(chan->local->memcpy_count);
preempt_enable();
return cookie;
return unmap;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
EXPORT_SYMBOL(dmaengine_get_unmap_data);
/**
* dma_async_memcpy_pg_to_pg - offloaded copy from page to page
@ -1015,24 +1050,33 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
dma_addr_t dma_dest, dma_src;
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
unsigned long flags;
dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
DMA_FROM_DEVICE);
unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
if (!unmap)
return -ENOMEM;
unmap->to_cnt = 1;
unmap->from_cnt = 1;
unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
DMA_TO_DEVICE);
unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
DMA_FROM_DEVICE);
unmap->len = len;
flags = DMA_CTRL_ACK;
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
len, flags);
if (!tx) {
dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
dmaengine_unmap_put(unmap);
return -ENOMEM;
}
tx->callback = NULL;
dma_set_unmap(tx, unmap);
cookie = tx->tx_submit(tx);
dmaengine_unmap_put(unmap);
preempt_disable();
__this_cpu_add(chan->local->bytes_transferred, len);
@ -1043,6 +1087,52 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
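For orientation, the rework above replaces the per-driver unmap flags with a reference-counted dmaengine_unmap_data attached to the descriptor; the pattern used by dma_async_memcpy_pg_to_pg() generalizes to any client. A minimal sketch for one page-to-page copy, assuming a suitable channel is already in hand (the helper name is illustrative only):
/* Sketch only: allocate unmap data, record mappings, attach, submit. */
static dma_cookie_t copy_one_page(struct dma_chan *chan,
				  struct page *dst, struct page *src)
{
	struct dma_device *dev = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
	if (!unmap)
		return -ENOMEM;

	unmap->to_cnt = 1;	/* addr[0] is unmapped DMA_TO_DEVICE   */
	unmap->from_cnt = 1;	/* addr[1] is unmapped DMA_FROM_DEVICE */
	unmap->len = PAGE_SIZE;
	unmap->addr[0] = dma_map_page(dev->dev, src, 0, PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dev->dev, dst, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 PAGE_SIZE, DMA_CTRL_ACK);
	if (!tx) {
		dmaengine_unmap_put(unmap);
		return -ENOMEM;
	}

	dma_set_unmap(tx, unmap);	/* descriptor takes its own reference */
	cookie = tx->tx_submit(tx);
	dmaengine_unmap_put(unmap);	/* drop ours; pages are unmapped on completion */

	return cookie;
}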
/**
* dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
* @chan: DMA channel to offload copy to
* @dest: destination address (virtual)
* @src: source address (virtual)
* @len: length
*
* Both @dest and @src must be mappable to a bus address according to the
* DMA mapping API rules for streaming mappings.
* Both @dest and @src must stay memory resident (kernel memory or locked
* user space pages).
*/
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
void *src, size_t len)
{
return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
(unsigned long) dest & ~PAGE_MASK,
virt_to_page(src),
(unsigned long) src & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
/**
* dma_async_memcpy_buf_to_pg - offloaded copy from address to page
* @chan: DMA channel to offload copy to
* @page: destination page
* @offset: offset in page to copy to
* @kdata: source address (virtual)
* @len: length
*
* Both @page/@offset and @kdata must be mappable to a bus address according
* to the DMA mapping API rules for streaming mappings.
* Both @page/@offset and @kdata must stay memory resident (kernel memory or
* locked user space pages)
*/
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
unsigned int offset, void *kdata, size_t len)
{
return dma_async_memcpy_pg_to_pg(chan, page, offset,
virt_to_page(kdata),
(unsigned long) kdata & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan)
{
@ -1062,7 +1152,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
if (!tx)
return DMA_SUCCESS;
return DMA_COMPLETE;
while (tx->cookie == -EBUSY) {
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
@ -1116,6 +1206,10 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
int err = dmaengine_init_unmap_pool();
if (err)
return err;
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);

File diff suppressed because it is too large

@ -85,10 +85,6 @@ static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
return chan->dev->device.parent;
}
static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
@ -311,26 +307,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
list_splice_init(&desc->tx_list, &dwc->free_list);
list_move(&desc->desc_node, &dwc->free_list);
if (!is_slave_direction(dwc->direction)) {
struct device *parent = chan2parent(&dwc->chan);
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.dar,
desc->total_len, DMA_FROM_DEVICE);
else
dma_unmap_page(parent, desc->lli.dar,
desc->total_len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(parent, desc->lli.sar,
desc->total_len, DMA_TO_DEVICE);
else
dma_unmap_page(parent, desc->lli.sar,
desc->total_len, DMA_TO_DEVICE);
}
}
dma_descriptor_unmap(txd);
spin_unlock_irqrestore(&dwc->lock, flags);
if (callback)
@ -1098,13 +1075,13 @@ dwc_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS)
if (ret != DMA_COMPLETE)
dma_set_residue(txstate, dwc_get_residue(dwc));
if (dwc->paused && ret == DMA_IN_PROGRESS)


@ -46,14 +46,21 @@
#define EDMA_CHANS 64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */
/* Max of 16 segments per channel to conserve PaRAM slots */
#define MAX_NR_SG 16
/*
* Max of 20 segments per channel to conserve PaRAM slots
* Also note that MAX_NR_SG should be at least the number of periods
* that are required for ASoC, otherwise DMA prep calls will
* fail. Today davinci-pcm is the only user of this driver and
* requires at least 17 slots, so we set the default to 20.
*/
#define MAX_NR_SG 20
#define EDMA_MAX_SLOTS MAX_NR_SG
#define EDMA_DESCRIPTORS 16
struct edma_desc {
struct virt_dma_desc vdesc;
struct list_head node;
int cyclic;
int absync;
int pset_nr;
int processed;
@ -167,8 +174,13 @@ static void edma_execute(struct edma_chan *echan)
* then setup a link to the dummy slot, this results in all future
* events being absorbed and that's OK because we're done
*/
if (edesc->processed == edesc->pset_nr)
edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);
if (edesc->processed == edesc->pset_nr) {
if (edesc->cyclic)
edma_link(echan->slot[nslots-1], echan->slot[1]);
else
edma_link(echan->slot[nslots-1],
echan->ecc->dummy_slot);
}
edma_resume(echan->ch_num);
@ -250,6 +262,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return ret;
}
/*
* A PaRAM set configuration abstraction used by other modes
* @chan: Channel whose PaRAM set we're configuring
* @pset: PaRAM set to initialize and setup.
* @src_addr: Source address of the DMA
* @dst_addr: Destination address of the DMA
* @burst: Burst size, in units of dev_width
* @dev_width: Width of the peripheral's data register
* @dma_length: Total length of the DMA transfer
* @direction: Direction of the transfer
*/
static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
enum dma_slave_buswidth dev_width, unsigned int dma_length,
enum dma_transfer_direction direction)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
int acnt, bcnt, ccnt, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
int absync;
acnt = dev_width;
/*
* If the maxburst is equal to the fifo width, use
* A-synced transfers. This allows for large contiguous
* buffer transfers using only one PaRAM set.
*/
if (burst == 1) {
/*
* For the A-sync case, bcnt and ccnt are the remainder
* and quotient respectively of the division of:
* (dma_length / acnt) by (SZ_64K - 1). This is so
* that in case bcnt overflows, we have ccnt to use.
* Note: In A-sync transfer only, bcntrld is used, but it
* only applies for sg_dma_len(sg) >= SZ_64K.
* In this case, the approach adopted is: bcnt for the
* first frame will be the remainder below. Then for
* every successive frame, bcnt will be SZ_64K-1. This
* is ensured as bcntrld is set to 0xffff at the end of this function.
*/
absync = false;
ccnt = dma_length / acnt / (SZ_64K - 1);
bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
/*
* If bcnt is non-zero, we have a remainder and hence an
* extra frame to transfer, so increment ccnt.
*/
if (bcnt)
ccnt++;
else
bcnt = SZ_64K - 1;
cidx = acnt;
} else {
/*
* If maxburst is greater than the fifo address_width,
* use AB-synced transfers where A count is the fifo
* address_width and B count is the maxburst. In this
* case, we are limited to transfers of C count frames
* of (address_width * maxburst) where C count is limited
* to SZ_64K-1. This places an upper bound on the length
* of an SG segment that can be handled.
*/
absync = true;
bcnt = burst;
ccnt = dma_length / (acnt * bcnt);
if (ccnt > (SZ_64K - 1)) {
dev_err(dev, "Exceeded max SG segment size\n");
return -EINVAL;
}
cidx = acnt * bcnt;
}
if (direction == DMA_MEM_TO_DEV) {
src_bidx = acnt;
src_cidx = cidx;
dst_bidx = 0;
dst_cidx = 0;
} else if (direction == DMA_DEV_TO_MEM) {
src_bidx = 0;
src_cidx = 0;
dst_bidx = acnt;
dst_cidx = cidx;
} else {
dev_err(dev, "%s: direction not implemented yet\n", __func__);
return -EINVAL;
}
pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
/* Configure A or AB synchronized transfers */
if (absync)
pset->opt |= SYNCDIM;
pset->src = src_addr;
pset->dst = dst_addr;
pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
pset->a_b_cnt = bcnt << 16 | acnt;
pset->ccnt = ccnt;
/*
* The bcnt auto reload (bcntrld) is only needed in the A-sync case,
* and there the required reload value is always SZ_64K-1. 'link' is
* initially set to the null link and will be populated later by
* edma_execute.
*/
pset->link_bcntrld = 0xffffffff;
return absync;
}
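To make the A-sync split above concrete, a worked example with assumed numbers (a 1 MiB DMA_DEV_TO_MEM transfer, a 4-byte peripheral register, maxburst of 1):
/*
 * Illustration only:
 *   acnt = 4, dma_length / acnt = 262144 elements
 *   ccnt = 262144 / 65535 = 4          (full SZ_64K-1 frames)
 *   bcnt = 262144 - 4 * 65535 = 4      (remainder goes in the first frame)
 *   bcnt != 0, so ccnt becomes 5; bcntrld = 0xffff reloads SZ_64K-1
 *   for every frame after the first, and cidx = acnt = 4.
 */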
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@ -258,23 +381,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
struct edma_desc *edesc;
dma_addr_t dev_addr;
dma_addr_t src_addr = 0, dst_addr = 0;
enum dma_slave_buswidth dev_width;
u32 burst;
struct scatterlist *sg;
int acnt, bcnt, ccnt, src, dst, cidx;
int src_bidx, dst_bidx, src_cidx, dst_cidx;
int i, nslots;
int i, nslots, ret;
if (unlikely(!echan || !sgl || !sg_len))
return NULL;
if (direction == DMA_DEV_TO_MEM) {
dev_addr = echan->cfg.src_addr;
src_addr = echan->cfg.src_addr;
dev_width = echan->cfg.src_addr_width;
burst = echan->cfg.src_maxburst;
} else if (direction == DMA_MEM_TO_DEV) {
dev_addr = echan->cfg.dst_addr;
dst_addr = echan->cfg.dst_addr;
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
@ -307,7 +428,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
if (echan->slot[i] < 0) {
kfree(edesc);
dev_err(dev, "Failed to allocate slot\n");
kfree(edesc);
return NULL;
}
}
@ -315,64 +435,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* Configure PaRAM sets for each SG */
for_each_sg(sgl, sg, sg_len, i) {
/* Get address for each SG */
if (direction == DMA_DEV_TO_MEM)
dst_addr = sg_dma_address(sg);
else
src_addr = sg_dma_address(sg);
acnt = dev_width;
/*
* If the maxburst is equal to the fifo width, use
* A-synced transfers. This allows for large contiguous
* buffer transfers using only one PaRAM set.
*/
if (burst == 1) {
edesc->absync = false;
ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
if (bcnt)
ccnt++;
else
bcnt = SZ_64K - 1;
cidx = acnt;
/*
* If maxburst is greater than the fifo address_width,
* use AB-synced transfers where A count is the fifo
* address_width and B count is the maxburst. In this
* case, we are limited to transfers of C count frames
* of (address_width * maxburst) where C count is limited
* to SZ_64K-1. This places an upper bound on the length
* of an SG segment that can be handled.
*/
} else {
edesc->absync = true;
bcnt = burst;
ccnt = sg_dma_len(sg) / (acnt * bcnt);
if (ccnt > (SZ_64K - 1)) {
dev_err(dev, "Exceeded max SG segment size\n");
kfree(edesc);
return NULL;
}
cidx = acnt * bcnt;
ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
dst_addr, burst, dev_width,
sg_dma_len(sg), direction);
if (ret < 0) {
kfree(edesc);
return NULL;
}
if (direction == DMA_MEM_TO_DEV) {
src = sg_dma_address(sg);
dst = dev_addr;
src_bidx = acnt;
src_cidx = cidx;
dst_bidx = 0;
dst_cidx = 0;
} else {
src = dev_addr;
dst = sg_dma_address(sg);
src_bidx = 0;
src_cidx = 0;
dst_bidx = acnt;
dst_cidx = cidx;
}
edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
/* Configure A or AB synchronized transfers */
if (edesc->absync)
edesc->pset[i].opt |= SYNCDIM;
edesc->absync = ret;
/* If this is the last in a current SG set of transactions,
enable interrupts so that next set is processed */
@ -382,17 +459,138 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
/* If this is the last set, enable completion interrupt flag */
if (i == sg_len - 1)
edesc->pset[i].opt |= TCINTEN;
}
edesc->pset[i].src = src;
edesc->pset[i].dst = dst;
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long tx_flags, void *context)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
struct edma_desc *edesc;
dma_addr_t src_addr, dst_addr;
enum dma_slave_buswidth dev_width;
u32 burst;
int i, ret, nslots;
edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
edesc->pset[i].ccnt = ccnt;
edesc->pset[i].link_bcntrld = 0xffffffff;
if (unlikely(!echan || !buf_len || !period_len))
return NULL;
if (direction == DMA_DEV_TO_MEM) {
src_addr = echan->cfg.src_addr;
dst_addr = buf_addr;
dev_width = echan->cfg.src_addr_width;
burst = echan->cfg.src_maxburst;
} else if (direction == DMA_MEM_TO_DEV) {
src_addr = buf_addr;
dst_addr = echan->cfg.dst_addr;
dev_width = echan->cfg.dst_addr_width;
burst = echan->cfg.dst_maxburst;
} else {
dev_err(dev, "%s: bad direction?\n", __func__);
return NULL;
}
if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
dev_err(dev, "Undefined slave buswidth\n");
return NULL;
}
if (unlikely(buf_len % period_len)) {
dev_err(dev, "Period should be multiple of Buffer length\n");
return NULL;
}
nslots = (buf_len / period_len) + 1;
/*
* Cyclic DMA users such as audio cannot tolerate delays introduced
* by cases where the number of periods is more than the maximum
* number of SGs the EDMA driver can handle at a time. For DMA types
* such as Slave SGs, such delays are tolerable and synchronized,
* but the synchronization is difficult to achieve with Cyclic and
* cannot be guaranteed, so we error out early.
*/
if (nslots > MAX_NR_SG)
return NULL;
edesc = kzalloc(sizeof(*edesc) + nslots *
sizeof(edesc->pset[0]), GFP_ATOMIC);
if (!edesc) {
dev_dbg(dev, "Failed to allocate a descriptor\n");
return NULL;
}
edesc->cyclic = 1;
edesc->pset_nr = nslots;
dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots);
dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len);
dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len);
for (i = 0; i < nslots; i++) {
/* Allocate a PaRAM slot, if needed */
if (echan->slot[i] < 0) {
echan->slot[i] =
edma_alloc_slot(EDMA_CTLR(echan->ch_num),
EDMA_SLOT_ANY);
if (echan->slot[i] < 0) {
dev_err(dev, "Failed to allocate slot\n");
return NULL;
}
}
if (i == nslots - 1) {
memcpy(&edesc->pset[i], &edesc->pset[0],
sizeof(edesc->pset[0]));
break;
}
ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
dst_addr, burst, dev_width, period_len,
direction);
if (ret < 0)
return NULL;
if (direction == DMA_DEV_TO_MEM)
dst_addr += period_len;
else
src_addr += period_len;
dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
dev_dbg(dev,
"\n pset[%d]:\n"
" chnum\t%d\n"
" slot\t%d\n"
" opt\t%08x\n"
" src\t%08x\n"
" dst\t%08x\n"
" abcnt\t%08x\n"
" ccnt\t%08x\n"
" bidx\t%08x\n"
" cidx\t%08x\n"
" lkrld\t%08x\n",
i, echan->ch_num, echan->slot[i],
edesc->pset[i].opt,
edesc->pset[i].src,
edesc->pset[i].dst,
edesc->pset[i].a_b_cnt,
edesc->pset[i].ccnt,
edesc->pset[i].src_dst_bidx,
edesc->pset[i].src_dst_cidx,
edesc->pset[i].link_bcntrld);
edesc->absync = ret;
/*
* Enable interrupts for every period because callback
* has to be called for every period.
*/
edesc->pset[i].opt |= TCINTEN;
}
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
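A quick sanity check of the slot accounting above, with assumed numbers:
/*
 * Illustration only: an audio buffer split into 8 periods needs
 * nslots = 8 + 1 = 9 PaRAM sets; the extra set duplicates pset[0] and,
 * in edma_execute(), the last slot is linked back to echan->slot[1] so
 * the ring restarts without CPU intervention.  With MAX_NR_SG = 20,
 * anything above 19 periods is rejected before any allocation is done.
 */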
@ -406,30 +604,34 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
unsigned long flags;
struct edmacc_param p;
/* Pause the channel */
edma_pause(echan->ch_num);
edesc = echan->edesc;
/* Pause the channel for non-cyclic */
if (!edesc || (edesc && !edesc->cyclic))
edma_pause(echan->ch_num);
switch (ch_status) {
case DMA_COMPLETE:
case EDMA_DMA_COMPLETE:
spin_lock_irqsave(&echan->vchan.lock, flags);
edesc = echan->edesc;
if (edesc) {
if (edesc->processed == edesc->pset_nr) {
if (edesc->cyclic) {
vchan_cyclic_callback(&edesc->vdesc);
} else if (edesc->processed == edesc->pset_nr) {
dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
edma_stop(echan->ch_num);
vchan_cookie_complete(&edesc->vdesc);
edma_execute(echan);
} else {
dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
edma_execute(echan);
}
edma_execute(echan);
}
spin_unlock_irqrestore(&echan->vchan.lock, flags);
break;
case DMA_CC_ERROR:
case EDMA_DMA_CC_ERROR:
spin_lock_irqsave(&echan->vchan.lock, flags);
edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);
@ -579,7 +781,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS || !txstate)
if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&echan->vchan.lock, flags);
@ -619,6 +821,7 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
struct device *dev)
{
dma->device_prep_slave_sg = edma_prep_slave_sg;
dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
dma->device_alloc_chan_resources = edma_alloc_chan_resources;
dma->device_free_chan_resources = edma_free_chan_resources;
dma->device_issue_pending = edma_issue_pending;


@ -733,28 +733,6 @@ static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
spin_unlock_irqrestore(&edmac->lock, flags);
}
static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
struct device *dev = desc->txd.chan->device->dev;
if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(dev, desc->src_addr, desc->size,
DMA_TO_DEVICE);
else
dma_unmap_page(dev, desc->src_addr, desc->size,
DMA_TO_DEVICE);
}
if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(dev, desc->dst_addr, desc->size,
DMA_FROM_DEVICE);
else
dma_unmap_page(dev, desc->dst_addr, desc->size,
DMA_FROM_DEVICE);
}
}
static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
@ -787,13 +765,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
/* Now we can release all the chained descriptors */
list_for_each_entry_safe(desc, d, &list, node) {
/*
* For the memcpy channels the API requires us to unmap the
* buffers unless requested otherwise.
*/
if (!edmac->chan.private)
ep93xx_dma_unmap_buffers(desc);
dma_descriptor_unmap(&desc->txd);
ep93xx_dma_desc_put(edmac, desc);
}


@ -870,22 +870,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
/* Run any dependencies */
dma_run_dependencies(txd);
/* Unmap the dst buffer, if requested */
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
else
dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
}
/* Unmap the src buffer, if requested */
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
else
dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
}
dma_descriptor_unmap(txd);
#ifdef FSL_DMA_LD_DEBUG
chan_dbg(chan, "LD %p free\n", desc);
#endif
@ -1255,7 +1240,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
WARN_ON(fdev->feature != chan->feature);
chan->dev = fdev->dev;
chan->id = ((res.start - 0x100) & 0xfff) >> 7;
chan->id = (res.start & 0xfff) < 0x300 ?
((res.start - 0x100) & 0xfff) >> 7 :
((res.start - 0x200) & 0xfff) >> 7;
if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
dev_err(fdev->dev, "too many channels for device\n");
err = -EINVAL;
@ -1428,6 +1415,7 @@ static int fsldma_of_remove(struct platform_device *op)
}
static const struct of_device_id fsldma_of_ids[] = {
{ .compatible = "fsl,elo3-dma", },
{ .compatible = "fsl,eloplus-dma", },
{ .compatible = "fsl,elo-dma", },
{}
@ -1449,7 +1437,7 @@ static struct platform_driver fsldma_of_driver = {
static __init int fsldma_init(void)
{
pr_info("Freescale Elo / Elo Plus DMA driver\n");
pr_info("Freescale Elo series DMA driver\n");
return platform_driver_register(&fsldma_of_driver);
}
@ -1461,5 +1449,5 @@ static void __exit fsldma_exit(void)
subsys_initcall(fsldma_init);
module_exit(fsldma_exit);
MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");


@ -112,7 +112,7 @@ struct fsldma_chan_regs {
};
struct fsldma_chan;
#define FSL_DMA_MAX_CHANS_PER_DEVICE 4
#define FSL_DMA_MAX_CHANS_PER_DEVICE 8
struct fsldma_device {
void __iomem *regs; /* DGSR register base */


@ -572,9 +572,11 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));
dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
"dma_length=%d\n", __func__, imxdmac->channel,
d->dest, d->src, d->len);
dev_dbg(imxdma->dev,
"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
__func__, imxdmac->channel,
(unsigned long long)d->dest,
(unsigned long long)d->src, d->len);
break;
/* Cyclic transfer is the same as slave_sg with special sg configuration. */
@ -586,20 +588,22 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
DMA_CCR(imxdmac->channel));
dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
"total length=%d dev_addr=0x%08x (dev2mem)\n",
__func__, imxdmac->channel, d->sg, d->sgcount,
d->len, imxdmac->per_address);
dev_dbg(imxdma->dev,
"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
__func__, imxdmac->channel,
d->sg, d->sgcount, d->len,
(unsigned long long)imxdmac->per_address);
} else if (d->direction == DMA_MEM_TO_DEV) {
imx_dmav1_writel(imxdma, imxdmac->per_address,
DMA_DAR(imxdmac->channel));
imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
DMA_CCR(imxdmac->channel));
dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
"total length=%d dev_addr=0x%08x (mem2dev)\n",
__func__, imxdmac->channel, d->sg, d->sgcount,
d->len, imxdmac->per_address);
dev_dbg(imxdma->dev,
"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
__func__, imxdmac->channel,
d->sg, d->sgcount, d->len,
(unsigned long long)imxdmac->per_address);
} else {
dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
__func__, imxdmac->channel);
@ -771,7 +775,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
desc->desc.tx_submit = imxdma_tx_submit;
/* txd.flags will be overwritten in prep funcs */
desc->desc.flags = DMA_CTRL_ACK;
desc->status = DMA_SUCCESS;
desc->status = DMA_COMPLETE;
list_add_tail(&desc->node, &imxdmac->ld_free);
imxdmac->descs_allocated++;
@ -870,7 +874,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
int i;
unsigned int periods = buf_len / period_len;
dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
__func__, imxdmac->channel, buf_len, period_len);
if (list_empty(&imxdmac->ld_free) ||
@ -926,8 +930,9 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc;
dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
__func__, imxdmac->channel, src, dest, len);
dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
__func__, imxdmac->channel, (unsigned long long)src,
(unsigned long long)dest, len);
if (list_empty(&imxdmac->ld_free) ||
imxdma_chan_is_doing_cyclic(imxdmac))
@ -956,9 +961,10 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
struct imxdma_engine *imxdma = imxdmac->imxdma;
struct imxdma_desc *desc;
dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
" src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
imxdmac->channel, xt->src_start, xt->dst_start,
dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
" src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
imxdmac->channel, (unsigned long long)xt->src_start,
(unsigned long long) xt->dst_start,
xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
xt->numf, xt->frame_size);


@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
if (error)
sdmac->status = DMA_ERROR;
else
sdmac->status = DMA_SUCCESS;
sdmac->status = DMA_COMPLETE;
dma_cookie_complete(&sdmac->desc);
if (sdmac->desc.callback)
@ -1089,8 +1089,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
param &= ~BD_CONT;
}
dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
i, count, sg->dma_address,
dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
i, count, (u64)sg->dma_address,
param & BD_WRAP ? "wrap" : "",
param & BD_INTR ? " intr" : "");
@ -1163,8 +1163,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
if (i + 1 == num_periods)
param |= BD_WRAP;
dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
i, period_len, dma_addr,
dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
i, period_len, (u64)dma_addr,
param & BD_WRAP ? "wrap" : "",
param & BD_INTR ? " intr" : "");


@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
callback_txd(param_txd);
}
if (midc->raw_tfr) {
desc->status = DMA_SUCCESS;
desc->status = DMA_COMPLETE;
if (desc->lli != NULL) {
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS) {
if (ret != DMA_COMPLETE) {
spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
spin_unlock_bh(&midc->lock);


@ -531,21 +531,6 @@ static void ioat1_cleanup_event(unsigned long data)
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw)
{
struct pci_dev *pdev = chan->device->pdev;
size_t offset = len - hw->size;
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
ioat_unmap(pdev, hw->dst_addr - offset, len,
PCI_DMA_FROMDEVICE, flags, 1);
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
ioat_unmap(pdev, hw->src_addr - offset, len,
PCI_DMA_TODEVICE, flags, 0);
}
dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
dma_addr_t phys_complete;
@ -602,7 +587,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
dma_cookie_complete(tx);
ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
dma_descriptor_unmap(tx);
ioat->active -= desc->hw->tx_cnt;
if (tx->callback) {
tx->callback(tx->callback_param);
@ -733,7 +718,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(c, cookie, txstate);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
device->cleanup_fn((unsigned long) c);
@ -833,8 +818,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
DMA_PREP_INTERRUPT;
flags = DMA_PREP_INTERRUPT;
tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
IOAT_TEST_SIZE, flags);
if (!tx) {
@ -859,7 +843,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
if (tmo == 0 ||
dma->device_tx_status(dma_chan, cookie, NULL)
!= DMA_SUCCESS) {
!= DMA_COMPLETE) {
dev_err(dev, "Self-test copy timed out, disabling\n");
err = -ENODEV;
goto unmap_dma;
@ -885,8 +869,7 @@ static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
"set ioat interrupt style: msix (default), "
"msix-single-vector, msi, intx)");
"set ioat interrupt style: msix (default), msi, intx");
/**
* ioat_dma_setup_interrupts - setup interrupt handler
@ -904,8 +887,6 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *device)
if (!strcmp(ioat_interrupt_style, "msix"))
goto msix;
if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
goto msix_single_vector;
if (!strcmp(ioat_interrupt_style, "msi"))
goto msi;
if (!strcmp(ioat_interrupt_style, "intx"))
@ -920,10 +901,8 @@ msix:
device->msix_entries[i].entry = i;
err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
if (err < 0)
if (err)
goto msi;
if (err > 0)
goto msix_single_vector;
for (i = 0; i < msixcnt; i++) {
msix = &device->msix_entries[i];
@ -937,29 +916,13 @@ msix:
chan = ioat_chan_by_index(device, j);
devm_free_irq(dev, msix->vector, chan);
}
goto msix_single_vector;
goto msi;
}
}
intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
device->irq_mode = IOAT_MSIX;
goto done;
msix_single_vector:
msix = &device->msix_entries[0];
msix->entry = 0;
err = pci_enable_msix(pdev, device->msix_entries, 1);
if (err)
goto msi;
err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
"ioat-msix", device);
if (err) {
pci_disable_msix(pdev);
goto msi;
}
device->irq_mode = IOAT_MSIX_SINGLE;
goto done;
msi:
err = pci_enable_msi(pdev);
if (err)
@ -971,7 +934,7 @@ msi:
pci_disable_msi(pdev);
goto intx;
}
device->irq_mode = IOAT_MSIX;
device->irq_mode = IOAT_MSI;
goto done;
intx:


@ -52,7 +52,6 @@
enum ioat_irq_mode {
IOAT_NOIRQ = 0,
IOAT_MSIX,
IOAT_MSIX_SINGLE,
IOAT_MSI,
IOAT_INTX
};
@ -83,7 +82,6 @@ struct ioatdma_device {
struct pci_pool *completion_pool;
#define MAX_SED_POOLS 5
struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
struct kmem_cache *sed_pool;
struct dma_device common;
u8 version;
struct msix_entry msix_entries[4];
@ -342,16 +340,6 @@ static inline bool is_ioat_bug(unsigned long err)
return !!err;
}
static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
int direction, enum dma_ctrl_flags flags, bool dst)
{
if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) ||
(!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE)))
pci_unmap_single(pdev, addr, len, direction);
else
pci_unmap_page(pdev, addr, len, direction);
}
int ioat_probe(struct ioatdma_device *device);
int ioat_register(struct ioatdma_device *device);
int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
@ -363,8 +351,6 @@ void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);


@ -148,7 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
tx = &desc->txd;
dump_desc_dbg(ioat, desc);
if (tx->cookie) {
ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
dma_descriptor_unmap(tx);
dma_cookie_complete(tx);
if (tx->callback) {
tx->callback(tx->callback_param);


@ -157,7 +157,6 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
void ioat3_dma_remove(struct ioatdma_device *dev);
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);


@ -67,6 +67,8 @@
#include "dma.h"
#include "dma_v2.h"
extern struct kmem_cache *ioat3_sed_cache;
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
@ -87,22 +89,8 @@ static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
0, 1, 2, 3, 4, 5, 6 };
/*
* technically sources 1 and 2 do not require SED, but the op will have
* at least 9 descriptors so that's irrelevant.
*/
static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1 };
static void ioat3_eh(struct ioat2_dma_chan *ioat);
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
return raw->field[xor_idx_to_field[idx]];
}
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
dma_addr_t addr, u32 offset, int idx)
{
@ -135,12 +123,6 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2],
pq->coef[idx] = coef;
}
static int sed_get_pq16_pool_idx(int src_cnt)
{
return pq16_idx_to_sed[src_cnt];
}
static bool is_jf_ioat(struct pci_dev *pdev)
{
switch (pdev->device) {
@ -272,7 +254,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
struct ioat_sed_ent *sed;
gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
sed = kmem_cache_alloc(device->sed_pool, flags);
sed = kmem_cache_alloc(ioat3_sed_cache, flags);
if (!sed)
return NULL;
@ -280,7 +262,7 @@ ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
flags, &sed->dma);
if (!sed->hw) {
kmem_cache_free(device->sed_pool, sed);
kmem_cache_free(ioat3_sed_cache, sed);
return NULL;
}
@ -293,165 +275,7 @@ static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *s
return;
dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
kmem_cache_free(device->sed_pool, sed);
}
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
struct ioat_ring_ent *desc, int idx)
{
struct ioat_chan_common *chan = &ioat->base;
struct pci_dev *pdev = chan->device->pdev;
size_t len = desc->len;
size_t offset = len - desc->hw->size;
struct dma_async_tx_descriptor *tx = &desc->txd;
enum dma_ctrl_flags flags = tx->flags;
switch (desc->hw->ctl_f.op) {
case IOAT_OP_COPY:
if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
ioat_dma_unmap(chan, flags, len, desc->hw);
break;
case IOAT_OP_XOR_VAL:
case IOAT_OP_XOR: {
struct ioat_xor_descriptor *xor = desc->xor;
struct ioat_ring_ent *ext;
struct ioat_xor_ext_descriptor *xor_ex = NULL;
int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
struct ioat_raw_descriptor *descs[2];
int i;
if (src_cnt > 5) {
ext = ioat2_get_ring_ent(ioat, idx + 1);
xor_ex = ext->xor_ex;
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
descs[0] = (struct ioat_raw_descriptor *) xor;
descs[1] = (struct ioat_raw_descriptor *) xor_ex;
for (i = 0; i < src_cnt; i++) {
dma_addr_t src = xor_get_src(descs, i);
ioat_unmap(pdev, src - offset, len,
PCI_DMA_TODEVICE, flags, 0);
}
/* dest is a source in xor validate operations */
if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
ioat_unmap(pdev, xor->dst_addr - offset, len,
PCI_DMA_TODEVICE, flags, 1);
break;
}
}
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
ioat_unmap(pdev, xor->dst_addr - offset, len,
PCI_DMA_FROMDEVICE, flags, 1);
break;
}
case IOAT_OP_PQ_VAL:
case IOAT_OP_PQ: {
struct ioat_pq_descriptor *pq = desc->pq;
struct ioat_ring_ent *ext;
struct ioat_pq_ext_descriptor *pq_ex = NULL;
int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
struct ioat_raw_descriptor *descs[2];
int i;
if (src_cnt > 3) {
ext = ioat2_get_ring_ent(ioat, idx + 1);
pq_ex = ext->pq_ex;
}
/* in the 'continue' case don't unmap the dests as sources */
if (dmaf_p_disabled_continue(flags))
src_cnt--;
else if (dmaf_continue(flags))
src_cnt -= 3;
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
descs[0] = (struct ioat_raw_descriptor *) pq;
descs[1] = (struct ioat_raw_descriptor *) pq_ex;
for (i = 0; i < src_cnt; i++) {
dma_addr_t src = pq_get_src(descs, i);
ioat_unmap(pdev, src - offset, len,
PCI_DMA_TODEVICE, flags, 0);
}
/* the dests are sources in pq validate operations */
if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
if (!(flags & DMA_PREP_PQ_DISABLE_P))
ioat_unmap(pdev, pq->p_addr - offset,
len, PCI_DMA_TODEVICE, flags, 0);
if (!(flags & DMA_PREP_PQ_DISABLE_Q))
ioat_unmap(pdev, pq->q_addr - offset,
len, PCI_DMA_TODEVICE, flags, 0);
break;
}
}
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (!(flags & DMA_PREP_PQ_DISABLE_P))
ioat_unmap(pdev, pq->p_addr - offset, len,
PCI_DMA_BIDIRECTIONAL, flags, 1);
if (!(flags & DMA_PREP_PQ_DISABLE_Q))
ioat_unmap(pdev, pq->q_addr - offset, len,
PCI_DMA_BIDIRECTIONAL, flags, 1);
}
break;
}
case IOAT_OP_PQ_16S:
case IOAT_OP_PQ_VAL_16S: {
struct ioat_pq_descriptor *pq = desc->pq;
int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
struct ioat_raw_descriptor *descs[4];
int i;
/* in the 'continue' case don't unmap the dests as sources */
if (dmaf_p_disabled_continue(flags))
src_cnt--;
else if (dmaf_continue(flags))
src_cnt -= 3;
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
descs[0] = (struct ioat_raw_descriptor *)pq;
descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
for (i = 0; i < src_cnt; i++) {
dma_addr_t src = pq16_get_src(descs, i);
ioat_unmap(pdev, src - offset, len,
PCI_DMA_TODEVICE, flags, 0);
}
/* the dests are sources in pq validate operations */
if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
if (!(flags & DMA_PREP_PQ_DISABLE_P))
ioat_unmap(pdev, pq->p_addr - offset,
len, PCI_DMA_TODEVICE,
flags, 0);
if (!(flags & DMA_PREP_PQ_DISABLE_Q))
ioat_unmap(pdev, pq->q_addr - offset,
len, PCI_DMA_TODEVICE,
flags, 0);
break;
}
}
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (!(flags & DMA_PREP_PQ_DISABLE_P))
ioat_unmap(pdev, pq->p_addr - offset, len,
PCI_DMA_BIDIRECTIONAL, flags, 1);
if (!(flags & DMA_PREP_PQ_DISABLE_Q))
ioat_unmap(pdev, pq->q_addr - offset, len,
PCI_DMA_BIDIRECTIONAL, flags, 1);
}
break;
}
default:
dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
__func__, desc->hw->ctl_f.op);
}
kmem_cache_free(ioat3_sed_cache, sed);
}
static bool desc_has_ext(struct ioat_ring_ent *desc)
@ -577,7 +401,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
tx = &desc->txd;
if (tx->cookie) {
dma_cookie_complete(tx);
ioat3_dma_unmap(ioat, desc, idx + i);
dma_descriptor_unmap(tx);
if (tx->callback) {
tx->callback(tx->callback_param);
tx->callback = NULL;
@ -807,7 +631,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(c, cookie, txstate);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
ioat3_cleanup(ioat);
@ -1129,9 +953,6 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
u8 op;
int i, s, idx, num_descs;
/* this function only handles src_cnt 9 - 16 */
BUG_ON(src_cnt < 9);
/* this function is only called with 9-16 sources */
op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
@ -1159,8 +980,7 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
descs[0] = (struct ioat_raw_descriptor *) pq;
desc->sed = ioat3_alloc_sed(device,
sed_get_pq16_pool_idx(src_cnt));
desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
if (!desc->sed) {
dev_err(to_dev(chan),
"%s: no free sed entries\n", __func__);
@ -1218,13 +1038,21 @@ __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
return &desc->txd;
}
static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
{
if (dmaf_p_disabled_continue(flags))
return src_cnt + 1;
else if (dmaf_continue(flags))
return src_cnt + 3;
else
return src_cnt;
}
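For context, a worked example of why the flag adjustment above matters for path selection (numbers assumed):
/*
 * Illustration only: a P+Q continuation submitted with src_cnt = 8 and
 * DMA_PREP_CONTINUE really consumes 8 + 3 = 11 hardware sources, so
 * src_cnt_flags(src_cnt, flags) > 8 steers it to __ioat3_prep_pq16_lock()
 * even though the caller's nominal source count fits the 8-source path.
 */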
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
unsigned long flags)
{
struct dma_device *dma = chan->device;
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
dst[0] = dst[1];
@ -1244,7 +1072,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
single_source_coef[0] = scf[0];
single_source_coef[1] = 0;
return (src_cnt > 8) && (dma->max_pq > 8) ?
return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
2, single_source_coef, len,
flags) :
@ -1252,7 +1080,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
single_source_coef, len, flags);
} else {
return (src_cnt > 8) && (dma->max_pq > 8) ?
return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
scf, len, flags) :
__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
@ -1265,8 +1093,6 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags)
{
struct dma_device *dma = chan->device;
/* specify valid address for disabled result */
if (flags & DMA_PREP_PQ_DISABLE_P)
pq[0] = pq[1];
@ -1278,7 +1104,7 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
*/
*pqres = 0;
return (src_cnt > 8) && (dma->max_pq > 8) ?
return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
flags) :
__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
@ -1289,7 +1115,6 @@ static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
@ -1298,7 +1123,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = dst; /* specify valid address for disabled result */
return (src_cnt > 8) && (dma->max_pq > 8) ?
return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
flags) :
__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
@ -1310,7 +1135,6 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
unsigned int src_cnt, size_t len,
enum sum_check_flags *result, unsigned long flags)
{
struct dma_device *dma = chan->device;
unsigned char scf[src_cnt];
dma_addr_t pq[2];
@ -1324,8 +1148,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
flags |= DMA_PREP_PQ_DISABLE_Q;
pq[1] = pq[0]; /* specify valid address for disabled result */
return (src_cnt > 8) && (dma->max_pq > 8) ?
return src_cnt_flags(src_cnt, flags) > 8 ?
__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
scf, len, flags) :
__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
@ -1444,9 +1267,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
IOAT_NUM_SRC_TEST, PAGE_SIZE,
DMA_PREP_INTERRUPT |
DMA_COMPL_SKIP_SRC_UNMAP |
DMA_COMPL_SKIP_DEST_UNMAP);
DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test xor prep failed\n");
@ -1468,7 +1289,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test xor timed out\n");
err = -ENODEV;
goto dma_unmap;
@ -1507,9 +1328,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
&xor_val_result, DMA_PREP_INTERRUPT |
DMA_COMPL_SKIP_SRC_UNMAP |
DMA_COMPL_SKIP_DEST_UNMAP);
&xor_val_result, DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test zero prep failed\n");
err = -ENODEV;
@ -1530,7 +1349,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test validate timed out\n");
err = -ENODEV;
goto dma_unmap;
@ -1545,6 +1364,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
goto free_resources;
}
memset(page_address(dest), 0, PAGE_SIZE);
/* test for non-zero parity sum */
op = IOAT_OP_XOR_VAL;
@ -1554,9 +1375,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
DMA_TO_DEVICE);
tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
&xor_val_result, DMA_PREP_INTERRUPT |
DMA_COMPL_SKIP_SRC_UNMAP |
DMA_COMPL_SKIP_DEST_UNMAP);
&xor_val_result, DMA_PREP_INTERRUPT);
if (!tx) {
dev_err(dev, "Self-test 2nd zero prep failed\n");
err = -ENODEV;
@ -1577,7 +1396,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dev, "Self-test 2nd validate timed out\n");
err = -ENODEV;
goto dma_unmap;
@ -1630,52 +1449,36 @@ static int ioat3_dma_self_test(struct ioatdma_device *device)
static int ioat3_irq_reinit(struct ioatdma_device *device)
{
int msixcnt = device->common.chancnt;
struct pci_dev *pdev = device->pdev;
int i;
struct msix_entry *msix;
struct ioat_chan_common *chan;
int err = 0;
int irq = pdev->irq, i;
if (!is_bwd_ioat(pdev))
return 0;
switch (device->irq_mode) {
case IOAT_MSIX:
for (i = 0; i < device->common.chancnt; i++) {
struct msix_entry *msix = &device->msix_entries[i];
struct ioat_chan_common *chan;
for (i = 0; i < msixcnt; i++) {
msix = &device->msix_entries[i];
chan = ioat_chan_by_index(device, i);
devm_free_irq(&pdev->dev, msix->vector, chan);
}
pci_disable_msix(pdev);
break;
case IOAT_MSIX_SINGLE:
msix = &device->msix_entries[0];
chan = ioat_chan_by_index(device, 0);
devm_free_irq(&pdev->dev, msix->vector, chan);
pci_disable_msix(pdev);
break;
case IOAT_MSI:
chan = ioat_chan_by_index(device, 0);
devm_free_irq(&pdev->dev, pdev->irq, chan);
pci_disable_msi(pdev);
break;
/* fall through */
case IOAT_INTX:
chan = ioat_chan_by_index(device, 0);
devm_free_irq(&pdev->dev, pdev->irq, chan);
devm_free_irq(&pdev->dev, irq, device);
break;
default:
return 0;
}
device->irq_mode = IOAT_NOIRQ;
err = ioat_dma_setup_interrupts(device);
return err;
return ioat_dma_setup_interrupts(device);
}
static int ioat3_reset_hw(struct ioat_chan_common *chan)
@ -1718,14 +1521,12 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
}
err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
if (err) {
dev_err(&pdev->dev, "Failed to reset!\n");
return err;
}
if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
if (!err)
err = ioat3_irq_reinit(device);
if (err)
dev_err(&pdev->dev, "Failed to reset: %d\n", err);
return err;
}
@ -1835,21 +1636,15 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
char pool_name[14];
int i;
/* allocate sw descriptor pool for SED */
device->sed_pool = kmem_cache_create("ioat_sed",
sizeof(struct ioat_sed_ent), 0, 0, NULL);
if (!device->sed_pool)
return -ENOMEM;
for (i = 0; i < MAX_SED_POOLS; i++) {
snprintf(pool_name, 14, "ioat_hw%d_sed", i);
/* allocate SED DMA pool */
device->sed_hw_pool[i] = dma_pool_create(pool_name,
device->sed_hw_pool[i] = dmam_pool_create(pool_name,
&pdev->dev,
SED_SIZE * (i + 1), 64, 0);
if (!device->sed_hw_pool[i])
goto sed_pool_cleanup;
return -ENOMEM;
}
}
@ -1875,28 +1670,4 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
device->dca = ioat3_dca_init(pdev, device->reg_base);
return 0;
sed_pool_cleanup:
if (device->sed_pool) {
int i;
kmem_cache_destroy(device->sed_pool);
for (i = 0; i < MAX_SED_POOLS; i++)
if (device->sed_hw_pool[i])
dma_pool_destroy(device->sed_hw_pool[i]);
}
return -ENOMEM;
}
void ioat3_dma_remove(struct ioatdma_device *device)
{
if (device->sed_pool) {
int i;
kmem_cache_destroy(device->sed_pool);
for (i = 0; i < MAX_SED_POOLS; i++)
if (device->sed_hw_pool[i])
dma_pool_destroy(device->sed_hw_pool[i]);
}
}

View File

@ -123,6 +123,7 @@ module_param(ioat_dca_enabled, int, 0644);
MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
struct kmem_cache *ioat2_cache;
struct kmem_cache *ioat3_sed_cache;
#define DRV_NAME "ioatdma"
@ -207,9 +208,6 @@ static void ioat_remove(struct pci_dev *pdev)
if (!device)
return;
if (device->version >= IOAT_VER_3_0)
ioat3_dma_remove(device);
dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev);
@ -221,7 +219,7 @@ static void ioat_remove(struct pci_dev *pdev)
static int __init ioat_init_module(void)
{
int err;
int err = -ENOMEM;
pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
DRV_NAME, IOAT_DMA_VERSION);
@ -231,9 +229,21 @@ static int __init ioat_init_module(void)
if (!ioat2_cache)
return -ENOMEM;
ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
if (!ioat3_sed_cache)
goto err_ioat2_cache;
err = pci_register_driver(&ioat_pci_driver);
if (err)
kmem_cache_destroy(ioat2_cache);
goto err_ioat3_cache;
return 0;
err_ioat3_cache:
kmem_cache_destroy(ioat3_sed_cache);
err_ioat2_cache:
kmem_cache_destroy(ioat2_cache);
return err;
}

View File

@ -61,80 +61,6 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
}
}
static void
iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
{
struct dma_async_tx_descriptor *tx = &desc->async_tx;
struct iop_adma_desc_slot *unmap = desc->group_head;
struct device *dev = &iop_chan->device->pdev->dev;
u32 len = unmap->unmap_len;
enum dma_ctrl_flags flags = tx->flags;
u32 src_cnt;
dma_addr_t addr;
dma_addr_t dest;
src_cnt = unmap->unmap_src_cnt;
dest = iop_desc_get_dest_addr(unmap, iop_chan);
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
enum dma_data_direction dir;
if (src_cnt > 1) /* is xor? */
dir = DMA_BIDIRECTIONAL;
else
dir = DMA_FROM_DEVICE;
dma_unmap_page(dev, dest, len, dir);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
while (src_cnt--) {
addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
if (addr == dest)
continue;
dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
}
}
desc->group_head = NULL;
}
static void
iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
{
struct dma_async_tx_descriptor *tx = &desc->async_tx;
struct iop_adma_desc_slot *unmap = desc->group_head;
struct device *dev = &iop_chan->device->pdev->dev;
u32 len = unmap->unmap_len;
enum dma_ctrl_flags flags = tx->flags;
u32 src_cnt = unmap->unmap_src_cnt;
dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
int i;
if (tx->flags & DMA_PREP_CONTINUE)
src_cnt -= 3;
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
dma_addr_t addr;
for (i = 0; i < src_cnt; i++) {
addr = iop_desc_get_src_addr(unmap, iop_chan, i);
dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
}
if (desc->pq_check_result) {
dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
}
}
desc->group_head = NULL;
}
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
@ -152,15 +78,9 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
if (tx->callback)
tx->callback(tx->callback_param);
/* unmap dma addresses
* (unmap_single vs unmap_page?)
*/
if (desc->group_head && desc->unmap_len) {
if (iop_desc_is_pq(desc))
iop_desc_unmap_pq(iop_chan, desc);
else
iop_desc_unmap(iop_chan, desc);
}
dma_descriptor_unmap(tx);
if (desc->group_head)
desc->group_head = NULL;
}
/* run dependent operations */
@ -591,7 +511,6 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
if (sw_desc) {
grp_start = sw_desc->group_head;
iop_desc_init_interrupt(grp_start, iop_chan);
grp_start->unmap_len = 0;
sw_desc->async_tx.flags = flags;
}
spin_unlock_bh(&iop_chan->lock);
@ -623,8 +542,6 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
iop_desc_set_memcpy_src_addr(grp_start, dma_src);
sw_desc->unmap_src_cnt = 1;
sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
}
spin_unlock_bh(&iop_chan->lock);
@ -657,8 +574,6 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
iop_desc_init_xor(grp_start, src_cnt, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_xor_src_addr(grp_start, src_cnt,
@ -694,8 +609,6 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
grp_start->xor_check_result = result;
pr_debug("\t%s: grp_start->xor_check_result: %p\n",
__func__, grp_start->xor_check_result);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
@ -748,8 +661,6 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
dst[0] = dst[1] & 0x7;
iop_desc_set_pq_addr(g, dst);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
for (i = 0; i < src_cnt; i++)
iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
@ -804,8 +715,6 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
g->pq_check_result = pqres;
pr_debug("\t%s: g->pq_check_result: %p\n",
__func__, g->pq_check_result);
sw_desc->unmap_src_cnt = src_cnt+2;
sw_desc->unmap_len = len;
sw_desc->async_tx.flags = flags;
while (src_cnt--)
iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
@ -864,7 +773,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
int ret;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
iop_adma_slot_cleanup(iop_chan);
@ -983,7 +892,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
msleep(1);
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
@ -1083,7 +992,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
@ -1129,7 +1038,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test zero sum timed out, disabling\n");
err = -ENODEV;
@ -1158,7 +1067,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
iop_adma_issue_pending(dma_chan);
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test non-zero sum timed out, disabling\n");
err = -ENODEV;
@ -1254,7 +1163,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
DMA_COMPLETE) {
dev_err(dev, "Self-test pq timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@ -1291,7 +1200,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
DMA_COMPLETE) {
dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
@ -1323,7 +1232,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
msleep(8);
if (iop_adma_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
DMA_COMPLETE) {
dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
err = -ENODEV;
goto free_resources;
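For orientation, the per-driver unmap helpers removed above (iop_desc_unmap, iop_desc_unmap_pq and their counterparts in the other drivers of this series) collapse into one core call in the completion path. A minimal sketch of that shared pattern, not lifted from any single driver (the function name here is hypothetical):

static void example_run_complete_actions(struct dma_async_tx_descriptor *tx)
{
	/* notify the client first, as the drivers above do */
	if (tx->callback)
		tx->callback(tx->callback_param);

	/* core helper releases the attached dmaengine_unmap_data, if any */
	dma_descriptor_unmap(tx);
}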

View File

@ -1232,8 +1232,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
descnew = desc;
dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n",
irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf);
dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n",
irq, (u64)sg_dma_address(*sg),
sgnext ? (u64)sg_dma_address(sgnext) : 0,
ichan->active_buffer, curbuf);
/* Find the descriptor of sgnext */
sgnew = idmac_sg_next(ichan, &descnew, *sg);

View File

@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
size_t bytes = 0;
ret = dma_cookie_status(&c->vc.chan, cookie, state);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);
@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op)
irq = platform_get_irq(op, 0);
ret = devm_request_irq(&op->dev, irq,
k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
k3_dma_int_handler, 0, DRIVER_NAME, d);
if (ret)
return ret;

View File

@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data)
* move the descriptors to a temporary list so we can drop
* the lock during the entire cleanup operation
*/
list_del(&desc->node);
list_add(&desc->node, &chain_cleanup);
list_move(&desc->node, &chain_cleanup);
/*
* Look for the first list entry which has the ENDIRQEN flag
@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
if (irq) {
ret = devm_request_irq(pdev->dev, irq,
mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
mmp_pdma_chan_handler, 0, "pdma", phy);
if (ret) {
dev_err(pdev->dev, "channel request irq fail!\n");
return ret;
@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op)
/* all chan share one irq, demux inside */
irq = platform_get_irq(op, 0);
ret = devm_request_irq(pdev->dev, irq,
mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
mmp_pdma_int_handler, 0, "pdma", pdev);
if (ret)
return ret;
}

View File

@ -62,6 +62,11 @@
#define TDCR_BURSTSZ_16B (0x3 << 6)
#define TDCR_BURSTSZ_32B (0x6 << 6)
#define TDCR_BURSTSZ_64B (0x7 << 6)
#define TDCR_BURSTSZ_SQU_1B (0x5 << 6)
#define TDCR_BURSTSZ_SQU_2B (0x6 << 6)
#define TDCR_BURSTSZ_SQU_4B (0x0 << 6)
#define TDCR_BURSTSZ_SQU_8B (0x1 << 6)
#define TDCR_BURSTSZ_SQU_16B (0x3 << 6)
#define TDCR_BURSTSZ_SQU_32B (0x7 << 6)
#define TDCR_BURSTSZ_128B (0x5 << 6)
#define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */
@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
/* disable irq */
writel(0, tdmac->reg_base + TDIMR);
tdmac->status = DMA_SUCCESS;
tdmac->status = DMA_COMPLETE;
}
static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
return -EINVAL;
}
} else if (tdmac->type == PXA910_SQU) {
tdcr |= TDCR_BURSTSZ_SQU_32B;
tdcr |= TDCR_SSPMOD;
switch (tdmac->burst_sz) {
case 1:
tdcr |= TDCR_BURSTSZ_SQU_1B;
break;
case 2:
tdcr |= TDCR_BURSTSZ_SQU_2B;
break;
case 4:
tdcr |= TDCR_BURSTSZ_SQU_4B;
break;
case 8:
tdcr |= TDCR_BURSTSZ_SQU_8B;
break;
case 16:
tdcr |= TDCR_BURSTSZ_SQU_16B;
break;
case 32:
tdcr |= TDCR_BURSTSZ_SQU_32B;
break;
default:
dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
return -EINVAL;
}
}
writel(tdcr, tdmac->reg_base + TDCR);
@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
if (tdmac->irq) {
ret = devm_request_irq(tdmac->dev, tdmac->irq,
mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
mmp_tdma_chan_handler, 0, "tdma", tdmac);
if (ret)
return ret;
}
@ -365,7 +393,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
int num_periods = buf_len / period_len;
int i = 0, buf = 0;
if (tdmac->status != DMA_SUCCESS)
if (tdmac->status != DMA_COMPLETE)
return NULL;
if (period_len > TDMA_MAX_XFER_BYTES) {
@ -499,7 +527,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
tdmac->idx = idx;
tdmac->type = type;
tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
tdmac->status = DMA_SUCCESS;
tdmac->status = DMA_COMPLETE;
tdev->tdmac[tdmac->idx] = tdmac;
tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
@ -554,7 +582,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (irq_num != chan_num) {
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq,
mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
mmp_tdma_int_handler, 0, "tdma", tdev);
if (ret)
return ret;
}

View File

@ -60,14 +60,6 @@ static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
return hw_desc->phy_dest_addr;
}
static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
int src_idx)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}
static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
u32 byte_count)
{
@ -278,42 +270,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
desc->async_tx.callback(
desc->async_tx.callback_param);
/* unmap dma addresses
* (unmap_single vs unmap_page?)
*/
if (desc->group_head && desc->unmap_len) {
struct mv_xor_desc_slot *unmap = desc->group_head;
struct device *dev = mv_chan_to_devp(mv_chan);
u32 len = unmap->unmap_len;
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
dma_addr_t addr;
dma_addr_t dest;
src_cnt = unmap->unmap_src_cnt;
dest = mv_desc_get_dest_addr(unmap);
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
enum dma_data_direction dir;
if (src_cnt > 1) /* is xor ? */
dir = DMA_BIDIRECTIONAL;
else
dir = DMA_FROM_DEVICE;
dma_unmap_page(dev, dest, len, dir);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
while (src_cnt--) {
addr = mv_desc_get_src_addr(unmap,
src_cnt);
if (addr == dest)
continue;
dma_unmap_page(dev, addr, len,
DMA_TO_DEVICE);
}
}
dma_descriptor_unmap(&desc->async_tx);
if (desc->group_head)
desc->group_head = NULL;
}
}
/* run dependent operations */
@ -749,7 +708,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS) {
if (ret == DMA_COMPLETE) {
mv_xor_clean_completed_slots(mv_chan);
return ret;
}
@ -874,7 +833,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
msleep(1);
if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
@ -968,7 +927,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
msleep(8);
if (mv_xor_status(dma_chan, cookie, NULL) !=
DMA_SUCCESS) {
DMA_COMPLETE) {
dev_err(dma_chan->device->dev,
"Self-test xor timed out, disabling\n");
err = -ENODEV;
@ -1076,10 +1035,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
}
mv_chan->mmr_base = xordev->xor_base;
if (!mv_chan->mmr_base) {
ret = -ENOMEM;
goto err_free_dma;
}
mv_chan->mmr_high_base = xordev->xor_high_base;
tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
mv_chan);
@ -1138,7 +1094,7 @@ static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
const struct mbus_dram_target_info *dram)
{
void __iomem *base = xordev->xor_base;
void __iomem *base = xordev->xor_high_base;
u32 win_enable = 0;
int i;

View File

@ -34,13 +34,13 @@
#define XOR_OPERATION_MODE_MEMCPY 2
#define XOR_DESCRIPTOR_SWAP BIT(14)
#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0)
#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4)
#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0)
#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4)
#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
@ -50,11 +50,11 @@
#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
#define XOR_INTR_MASK_VALUE 0x3F5
#define WINDOW_BASE(w) (0x250 + ((w) << 2))
#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2))
#define WINDOW_BASE(w) (0x50 + ((w) << 2))
#define WINDOW_SIZE(w) (0x70 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2))
struct mv_xor_device {
void __iomem *xor_base;
@ -82,6 +82,7 @@ struct mv_xor_chan {
int pending;
spinlock_t lock; /* protects the descriptor slot pool */
void __iomem *mmr_base;
void __iomem *mmr_high_base;
unsigned int idx;
int irq;
enum dma_transaction_type current_type;

View File

@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>
#include <asm/irq.h>
@ -57,6 +58,9 @@
(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
#define HW_APBHX_CHn_BAR(d, n) \
(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70)
#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70)
/*
* ccw bits definitions
@ -115,7 +119,9 @@ struct mxs_dma_chan {
int desc_count;
enum dma_status status;
unsigned int flags;
bool reset;
#define MXS_DMA_SG_LOOP (1 << 0)
#define MXS_DMA_USE_SEMAPHORE (1 << 1)
};
#define MXS_DMA_CHANNELS 16
@ -201,12 +207,47 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
/*
* mxs dma channel resets can cause a channel stall. To recover from a
* channel stall, we have to reset the whole DMA engine. To avoid this,
* we use cyclic DMA with semaphores, that are enhanced in
* mxs_dma_int_handler. To reset the channel, we can simply stop writing
* into the semaphore counter.
*/
if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
mxs_chan->flags & MXS_DMA_SG_LOOP) {
mxs_chan->reset = true;
} else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) {
writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
else
} else {
unsigned long elapsed = 0;
const unsigned long max_wait = 50000; /* 50ms */
void __iomem *reg_dbg1 = mxs_dma->base +
HW_APBX_CHn_DEBUG1(mxs_dma, chan_id);
/*
* On i.MX28 APBX, the DMA channel can stop working if we reset
* the channel while it is in READ_FLUSH (0x08) state.
* We wait here until we leave the state. Then we trigger the
* reset. Waiting a maximum of 50ms, the kernel shouldn't crash
* because of this.
*/
while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) {
udelay(100);
elapsed += 100;
}
if (elapsed >= max_wait)
dev_err(&mxs_chan->mxs_dma->pdev->dev,
"Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now\n",
chan_id);
writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
}
mxs_chan->status = DMA_COMPLETE;
}
static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
@ -219,12 +260,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
/* write 1 to SEMA to kick off the channel */
writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
mxs_chan->flags & MXS_DMA_SG_LOOP) {
/* A cyclic DMA consists of at least 2 segments, so initialize
* the semaphore with 2 so we have enough time to add 1 to the
* semaphore if we need to */
writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
} else {
writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
}
mxs_chan->reset = false;
}
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
mxs_chan->status = DMA_SUCCESS;
mxs_chan->status = DMA_COMPLETE;
}
static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
@ -272,58 +322,88 @@ static void mxs_dma_tasklet(unsigned long data)
mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}
static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
{
int i;
for (i = 0; i != mxs_dma->nr_channels; ++i)
if (mxs_dma->mxs_chans[i].chan_irq == irq)
return i;
return -EINVAL;
}
static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
struct mxs_dma_engine *mxs_dma = dev_id;
u32 stat1, stat2;
struct mxs_dma_chan *mxs_chan;
u32 completed;
u32 err;
int chan = mxs_dma_irq_to_chan(mxs_dma, irq);
if (chan < 0)
return IRQ_NONE;
/* completion status */
stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
stat1 &= MXS_DMA_CHANNELS_MASK;
writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
completed = readl(mxs_dma->base + HW_APBHX_CTRL1);
completed = (completed >> chan) & 0x1;
/* Clear interrupt */
writel((1 << chan),
mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
/* error status */
stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
err = readl(mxs_dma->base + HW_APBHX_CTRL2);
err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan);
/*
* error status bit is in the upper 16 bits, error irq bit in the lower
* 16 bits. We transform it into a simpler error code:
* err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR
*/
err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan);
/* Clear error irq */
writel((1 << chan),
mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
/*
* When both completion and error of termination bits set at the
* same time, we do not take it as an error. IOW, it only becomes
* an error we need to handle here in case of either it's (1) a bus
* error or (2) a termination error with no completion.
* an error we need to handle here in case of either it's a bus
* error or a termination error with no completion. 0x01 is termination
* error, so we can subtract err & completed to get the real error case.
*/
stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
(~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */
err -= err & completed;
/* combine error and completion status for checking */
stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
while (stat1) {
int channel = fls(stat1) - 1;
struct mxs_dma_chan *mxs_chan =
&mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];
mxs_chan = &mxs_dma->mxs_chans[chan];
if (channel >= MXS_DMA_CHANNELS) {
dev_dbg(mxs_dma->dma_device.dev,
"%s: error in channel %d\n", __func__,
channel - MXS_DMA_CHANNELS);
mxs_chan->status = DMA_ERROR;
mxs_dma_reset_chan(mxs_chan);
if (err) {
dev_dbg(mxs_dma->dma_device.dev,
"%s: error in channel %d\n", __func__,
chan);
mxs_chan->status = DMA_ERROR;
mxs_dma_reset_chan(mxs_chan);
} else if (mxs_chan->status != DMA_COMPLETE) {
if (mxs_chan->flags & MXS_DMA_SG_LOOP) {
mxs_chan->status = DMA_IN_PROGRESS;
if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE)
writel(1, mxs_dma->base +
HW_APBHX_CHn_SEMA(mxs_dma, chan));
} else {
if (mxs_chan->flags & MXS_DMA_SG_LOOP)
mxs_chan->status = DMA_IN_PROGRESS;
else
mxs_chan->status = DMA_SUCCESS;
mxs_chan->status = DMA_COMPLETE;
}
stat1 &= ~(1 << channel);
if (mxs_chan->status == DMA_SUCCESS)
dma_cookie_complete(&mxs_chan->desc);
/* schedule tasklet on this channel */
tasklet_schedule(&mxs_chan->tasklet);
}
if (mxs_chan->status == DMA_COMPLETE) {
if (mxs_chan->reset)
return IRQ_HANDLED;
dma_cookie_complete(&mxs_chan->desc);
}
/* schedule tasklet on this channel */
tasklet_schedule(&mxs_chan->tasklet);
return IRQ_HANDLED;
}
@ -523,6 +603,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
mxs_chan->status = DMA_IN_PROGRESS;
mxs_chan->flags |= MXS_DMA_SG_LOOP;
mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE;
if (num_periods > NUM_CCW) {
dev_err(mxs_dma->dma_device.dev,
@ -554,6 +635,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
ccw->bits |= CCW_DEC_SEM;
ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
@ -599,8 +681,24 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
u32 residue = 0;
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 0);
if (mxs_chan->status == DMA_IN_PROGRESS &&
mxs_chan->flags & MXS_DMA_SG_LOOP) {
struct mxs_dma_ccw *last_ccw;
u32 bar;
last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
residue = last_ccw->xfer_bytes + last_ccw->bufaddr;
bar = readl(mxs_dma->base +
HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
residue -= bar;
}
dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
residue);
return mxs_chan->status;
}
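Put another way, the cyclic-DMA residue added above is the distance from the channel's current bus address (the BAR register) to the end of the ring buffer: residue = (last_ccw->bufaddr + last_ccw->xfer_bytes) - BAR. With hypothetical numbers, a ring ending at 0x1000 + 0x800 = 0x1800 and a BAR reading of 0x1500 would report a residue of 0x300 bytes to the client.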

View File

@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
unsigned long flags;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS || !txstate)
if (ret == DMA_COMPLETE || !txstate)
return ret;
spin_lock_irqsave(&c->vc.lock, flags);

View File

@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data)
list_move_tail(&desc->node, &pch->dmac->desc_pool);
}
dma_descriptor_unmap(&desc->txd);
if (callback) {
spin_unlock_irqrestore(&pch->lock, flags);
callback(callback_param);
@ -2314,7 +2316,7 @@ bool pl330_filter(struct dma_chan *chan, void *param)
return false;
peri_id = chan->private;
return *peri_id == (unsigned)param;
return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);
@ -2926,16 +2928,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pdmac);
irq = adev->irq[0];
ret = request_irq(irq, pl330_irq_handler, 0,
dev_name(&adev->dev), pi);
if (ret)
return ret;
for (i = 0; i < AMBA_NR_IRQS; i++) {
irq = adev->irq[i];
if (irq) {
ret = devm_request_irq(&adev->dev, irq,
pl330_irq_handler, 0,
dev_name(&adev->dev), pi);
if (ret)
return ret;
} else {
break;
}
}
pi->pcfg.periph_id = adev->periphid;
ret = pl330_add(pi);
if (ret)
goto probe_err1;
return ret;
INIT_LIST_HEAD(&pdmac->desc_pool);
spin_lock_init(&pdmac->pool_lock);
@ -3033,8 +3042,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
probe_err3:
amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
chan.device_node) {
@ -3048,8 +3055,6 @@ probe_err3:
}
probe_err2:
pl330_del(pi);
probe_err1:
free_irq(irq, pi);
return ret;
}
@ -3059,7 +3064,6 @@ static int pl330_remove(struct amba_device *adev)
struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
struct pl330_info *pi;
int irq;
if (!pdmac)
return 0;
@ -3068,7 +3072,6 @@ static int pl330_remove(struct amba_device *adev)
of_dma_controller_free(adev->dev.of_node);
dma_async_device_unregister(&pdmac->ddma);
amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
@ -3086,9 +3089,6 @@ static int pl330_remove(struct amba_device *adev)
pl330_del(pi);
irq = adev->irq[0];
free_irq(irq, pi);
return 0;
}

View File

@ -803,218 +803,6 @@ static void ppc440spe_desc_set_link(struct ppc440spe_adma_chan *chan,
local_irq_restore(flags);
}
/**
* ppc440spe_desc_get_src_addr - extract the source address from the descriptor
*/
static u32 ppc440spe_desc_get_src_addr(struct ppc440spe_adma_desc_slot *desc,
struct ppc440spe_adma_chan *chan, int src_idx)
{
struct dma_cdb *dma_hw_desc;
struct xor_cb *xor_hw_desc;
switch (chan->device->id) {
case PPC440SPE_DMA0_ID:
case PPC440SPE_DMA1_ID:
dma_hw_desc = desc->hw_desc;
/* May have 0, 1, 2, or 3 sources */
switch (dma_hw_desc->opc) {
case DMA_CDB_OPC_NO_OP:
case DMA_CDB_OPC_DFILL128:
return 0;
case DMA_CDB_OPC_DCHECK128:
if (unlikely(src_idx)) {
printk(KERN_ERR "%s: try to get %d source for"
" DCHECK128\n", __func__, src_idx);
BUG();
}
return le32_to_cpu(dma_hw_desc->sg1l);
case DMA_CDB_OPC_MULTICAST:
case DMA_CDB_OPC_MV_SG1_SG2:
if (unlikely(src_idx > 2)) {
printk(KERN_ERR "%s: try to get %d source from"
" DMA descr\n", __func__, src_idx);
BUG();
}
if (src_idx) {
if (le32_to_cpu(dma_hw_desc->sg1u) &
DMA_CUED_XOR_WIN_MSK) {
u8 region;
if (src_idx == 1)
return le32_to_cpu(
dma_hw_desc->sg1l) +
desc->unmap_len;
region = (le32_to_cpu(
dma_hw_desc->sg1u)) >>
DMA_CUED_REGION_OFF;
region &= DMA_CUED_REGION_MSK;
switch (region) {
case DMA_RXOR123:
return le32_to_cpu(
dma_hw_desc->sg1l) +
(desc->unmap_len << 1);
case DMA_RXOR124:
return le32_to_cpu(
dma_hw_desc->sg1l) +
(desc->unmap_len * 3);
case DMA_RXOR125:
return le32_to_cpu(
dma_hw_desc->sg1l) +
(desc->unmap_len << 2);
default:
printk(KERN_ERR
"%s: try to"
" get src3 for region %02x"
"PPC440SPE_DESC_RXOR12?\n",
__func__, region);
BUG();
}
} else {
printk(KERN_ERR
"%s: try to get %d"
" source for non-cued descr\n",
__func__, src_idx);
BUG();
}
}
return le32_to_cpu(dma_hw_desc->sg1l);
default:
printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
__func__, dma_hw_desc->opc);
BUG();
}
return le32_to_cpu(dma_hw_desc->sg1l);
case PPC440SPE_XOR_ID:
/* May have up to 16 sources */
xor_hw_desc = desc->hw_desc;
return xor_hw_desc->ops[src_idx].l;
}
return 0;
}
/**
* ppc440spe_desc_get_dest_addr - extract the destination address from the
* descriptor
*/
static u32 ppc440spe_desc_get_dest_addr(struct ppc440spe_adma_desc_slot *desc,
struct ppc440spe_adma_chan *chan, int idx)
{
struct dma_cdb *dma_hw_desc;
struct xor_cb *xor_hw_desc;
switch (chan->device->id) {
case PPC440SPE_DMA0_ID:
case PPC440SPE_DMA1_ID:
dma_hw_desc = desc->hw_desc;
if (likely(!idx))
return le32_to_cpu(dma_hw_desc->sg2l);
return le32_to_cpu(dma_hw_desc->sg3l);
case PPC440SPE_XOR_ID:
xor_hw_desc = desc->hw_desc;
return xor_hw_desc->cbtal;
}
return 0;
}
/**
* ppc440spe_desc_get_src_num - extract the number of source addresses from
* the descriptor
*/
static u32 ppc440spe_desc_get_src_num(struct ppc440spe_adma_desc_slot *desc,
struct ppc440spe_adma_chan *chan)
{
struct dma_cdb *dma_hw_desc;
struct xor_cb *xor_hw_desc;
switch (chan->device->id) {
case PPC440SPE_DMA0_ID:
case PPC440SPE_DMA1_ID:
dma_hw_desc = desc->hw_desc;
switch (dma_hw_desc->opc) {
case DMA_CDB_OPC_NO_OP:
case DMA_CDB_OPC_DFILL128:
return 0;
case DMA_CDB_OPC_DCHECK128:
return 1;
case DMA_CDB_OPC_MV_SG1_SG2:
case DMA_CDB_OPC_MULTICAST:
/*
* Only for RXOR operations we have more than
* one source
*/
if (le32_to_cpu(dma_hw_desc->sg1u) &
DMA_CUED_XOR_WIN_MSK) {
/* RXOR op, there are 2 or 3 sources */
if (((le32_to_cpu(dma_hw_desc->sg1u) >>
DMA_CUED_REGION_OFF) &
DMA_CUED_REGION_MSK) == DMA_RXOR12) {
/* RXOR 1-2 */
return 2;
} else {
/* RXOR 1-2-3/1-2-4/1-2-5 */
return 3;
}
}
return 1;
default:
printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
__func__, dma_hw_desc->opc);
BUG();
}
case PPC440SPE_XOR_ID:
/* up to 16 sources */
xor_hw_desc = desc->hw_desc;
return xor_hw_desc->cbc & XOR_CDCR_OAC_MSK;
default:
BUG();
}
return 0;
}
/**
* ppc440spe_desc_get_dst_num - get the number of destination addresses in
* this descriptor
*/
static u32 ppc440spe_desc_get_dst_num(struct ppc440spe_adma_desc_slot *desc,
struct ppc440spe_adma_chan *chan)
{
struct dma_cdb *dma_hw_desc;
switch (chan->device->id) {
case PPC440SPE_DMA0_ID:
case PPC440SPE_DMA1_ID:
/* May be 1 or 2 destinations */
dma_hw_desc = desc->hw_desc;
switch (dma_hw_desc->opc) {
case DMA_CDB_OPC_NO_OP:
case DMA_CDB_OPC_DCHECK128:
return 0;
case DMA_CDB_OPC_MV_SG1_SG2:
case DMA_CDB_OPC_DFILL128:
return 1;
case DMA_CDB_OPC_MULTICAST:
if (desc->dst_cnt == 2)
return 2;
else
return 1;
default:
printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
__func__, dma_hw_desc->opc);
BUG();
}
case PPC440SPE_XOR_ID:
/* Always only 1 destination */
return 1;
default:
BUG();
}
return 0;
}
/**
* ppc440spe_desc_get_link - get the address of the descriptor that
* follows this one
@ -1707,43 +1495,6 @@ static void ppc440spe_adma_free_slots(struct ppc440spe_adma_desc_slot *slot,
}
}
static void ppc440spe_adma_unmap(struct ppc440spe_adma_chan *chan,
struct ppc440spe_adma_desc_slot *desc)
{
u32 src_cnt, dst_cnt;
dma_addr_t addr;
/*
* get the number of sources & destination
* included in this descriptor and unmap
* them all
*/
src_cnt = ppc440spe_desc_get_src_num(desc, chan);
dst_cnt = ppc440spe_desc_get_dst_num(desc, chan);
/* unmap destinations */
if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
while (dst_cnt--) {
addr = ppc440spe_desc_get_dest_addr(
desc, chan, dst_cnt);
dma_unmap_page(chan->device->dev,
addr, desc->unmap_len,
DMA_FROM_DEVICE);
}
}
/* unmap sources */
if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
while (src_cnt--) {
addr = ppc440spe_desc_get_src_addr(
desc, chan, src_cnt);
dma_unmap_page(chan->device->dev,
addr, desc->unmap_len,
DMA_TO_DEVICE);
}
}
}
/**
* ppc440spe_adma_run_tx_complete_actions - call functions to be called
* upon completion
@ -1767,26 +1518,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
desc->async_tx.callback(
desc->async_tx.callback_param);
/* unmap dma addresses
* (unmap_single vs unmap_page?)
*
* actually, ppc's dma_unmap_page() functions are empty, so
* the following code is just for the sake of completeness
*/
if (chan && chan->needs_unmap && desc->group_head &&
desc->unmap_len) {
struct ppc440spe_adma_desc_slot *unmap =
desc->group_head;
/* assume 1 slot per op always */
u32 slot_count = unmap->slot_cnt;
/* Run through the group list and unmap addresses */
for (i = 0; i < slot_count; i++) {
BUG_ON(!unmap);
ppc440spe_adma_unmap(chan, unmap);
unmap = unmap->hw_next;
}
}
dma_descriptor_unmap(&desc->async_tx);
}
/* run dependent operations */
@ -3893,7 +3625,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
ppc440spe_chan = to_ppc440spe_adma_chan(chan);
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
ppc440spe_adma_slot_cleanup(ppc440spe_chan);

View File

@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(&c->vc.chan, cookie, state);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
if (!state)

View File

@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
* If we don't find cookie on the queue, it has been aborted and we have
* to report error
*/
if (status != DMA_SUCCESS) {
if (status != DMA_COMPLETE) {
struct shdma_desc *sdesc;
status = DMA_ERROR;
list_for_each_entry(sdesc, &schan->ld_queue, node)

View File

@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
static int sh_dmae_probe(struct platform_device *pdev)
{
const struct sh_dmae_pdata *pdata;
unsigned long irqflags = IRQF_DISABLED,
unsigned long irqflags = 0,
chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
IORESOURCE_IRQ_SHAREABLE)
chan_flag[irq_cnt] = IRQF_SHARED;
else
chan_flag[irq_cnt] = IRQF_DISABLED;
chan_flag[irq_cnt] = 0;
dev_dbg(&pdev->dev,
"Found IRQ %d for channel %d\n",
i, irq_cnt);

View File

@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
}
ret = dma_cookie_status(chan, cookie, txstate);
if (ret != DMA_SUCCESS)
if (ret != DMA_COMPLETE)
dma_set_residue(txstate, stedma40_residue(chan));
if (d40_is_paused(d40c))
@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
((src_addr_width > 1) && (src_addr_width & 1)) ||
((dst_addr_width > 1) && (dst_addr_width & 1)))
!is_power_of_2(src_addr_width) ||
!is_power_of_2(dst_addr_width))
return -EINVAL;
cfg->src_info.data_width = src_addr_width;

View File

@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
list_del(&sgreq->node);
if (sgreq->last_sg) {
dma_desc->dma_status = DMA_SUCCESS;
dma_desc->dma_status = DMA_COMPLETE;
dma_cookie_complete(&dma_desc->txd);
if (!dma_desc->cb_count)
list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
unsigned int residual;
ret = dma_cookie_status(dc, cookie, txstate);
if (ret == DMA_SUCCESS)
if (ret == DMA_COMPLETE)
return ret;
spin_lock_irqsave(&tdc->lock, flags);
@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
return &dma_desc->txd;
}
struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)

View File

@ -154,38 +154,6 @@ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
return done;
}
static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
bool single)
{
dma_addr_t addr;
int len;
addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
dma_desc[4];
len = (dma_desc[3] << 8) | dma_desc[2];
if (single)
dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
DMA_TO_DEVICE);
else
dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
DMA_TO_DEVICE);
}
static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
{
struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
struct timb_dma_chan, chan);
u8 *descs;
for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
__td_unmap_desc(td_chan, descs, single);
if (descs[0] & 0x02)
break;
}
}
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
struct scatterlist *sg, bool last)
{
@ -293,10 +261,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
list_move(&td_desc->desc_node, &td_chan->free_list);
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
__td_unmap_descs(td_desc,
txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
dma_descriptor_unmap(txd);
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here

View File

@ -419,30 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
list_splice_init(&desc->tx_list, &dc->free_list);
list_move(&desc->desc_node, &dc->free_list);
if (!ds) {
dma_addr_t dmaaddr;
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
dmaaddr = is_dmac64(dc) ?
desc->hwdesc.DAR : desc->hwdesc32.DAR;
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
dma_unmap_single(chan2parent(&dc->chan),
dmaaddr, desc->len, DMA_FROM_DEVICE);
else
dma_unmap_page(chan2parent(&dc->chan),
dmaaddr, desc->len, DMA_FROM_DEVICE);
}
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
dmaaddr = is_dmac64(dc) ?
desc->hwdesc.SAR : desc->hwdesc32.SAR;
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
dma_unmap_single(chan2parent(&dc->chan),
dmaaddr, desc->len, DMA_TO_DEVICE);
else
dma_unmap_page(chan2parent(&dc->chan),
dmaaddr, desc->len, DMA_TO_DEVICE);
}
}
dma_descriptor_unmap(txd);
/*
* The API requires that no submissions are done from a
* callback, so we don't need to drop the lock here
@ -962,8 +939,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
return DMA_SUCCESS;
if (ret == DMA_COMPLETE)
return DMA_COMPLETE;
spin_lock_bh(&dc->lock);
txx9dmac_scan_descriptors(dc);

View File

@ -341,8 +341,7 @@ static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op,
ctx->xt->dir = DMA_MEM_TO_MEM;
ctx->xt->src_sgl = false;
ctx->xt->dst_sgl = true;
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT |
DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SKIP_SRC_UNMAP;
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags);
if (tx == NULL) {

View File

@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
desc = dmaengine_prep_slave_sg(fh->chan,
buf->sg, sg_elems, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
DMA_PREP_INTERRUPT);
if (!desc) {
spin_lock_irq(&fh->queue_lock);
list_del_init(&vb->queue);

View File

@ -633,8 +633,7 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
dma_addr_t dst, src;
unsigned long dma_flags = DMA_COMPL_SKIP_DEST_UNMAP |
DMA_COMPL_SKIP_SRC_UNMAP;
unsigned long dma_flags = 0;
dst_sg = buf->vb.sglist;
dst_nents = buf->vb.sglen;

View File

@ -375,8 +375,7 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
dma_dev = host->dma_chan->device;
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
DMA_COMPL_SKIP_DEST_UNMAP;
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
if (dma_mapping_error(dma_dev->dev, phys_addr)) {

View File

@ -573,8 +573,6 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
dma_dev = chan->device;
dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
if (direction == DMA_TO_DEVICE) {
dma_src = dma_addr;
dma_dst = host->data_pa;

View File

@ -459,8 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
&ctl->sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
&ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!ctl->adesc)
return NETDEV_TX_BUSY;
@ -571,8 +570,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
sg_dma_len(sg) = DMA_BUFFER_SIZE;
ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (!ctl->adesc)
goto out;

View File

@ -1034,10 +1034,9 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
struct dma_chan *chan = qp->dma_chan;
struct dma_device *device;
size_t pay_off, buff_off;
dma_addr_t src, dest;
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
void *buf = entry->buf;
unsigned long flags;
entry->len = len;
@ -1045,35 +1044,49 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
goto err;
if (len < copy_bytes)
goto err1;
goto err_wait;
device = chan->device;
pay_off = (size_t) offset & ~PAGE_MASK;
buff_off = (size_t) buf & ~PAGE_MASK;
if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
goto err1;
goto err_wait;
dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
if (dma_mapping_error(device->dev, dest))
goto err1;
unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
if (!unmap)
goto err_wait;
src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
if (dma_mapping_error(device->dev, src))
goto err2;
unmap->len = len;
unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
pay_off, len, DMA_TO_DEVICE);
if (dma_mapping_error(device->dev, unmap->addr[0]))
goto err_get_unmap;
flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
DMA_PREP_INTERRUPT;
txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
unmap->to_cnt = 1;
unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
buff_off, len, DMA_FROM_DEVICE);
if (dma_mapping_error(device->dev, unmap->addr[1]))
goto err_get_unmap;
unmap->from_cnt = 1;
txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
unmap->addr[0], len,
DMA_PREP_INTERRUPT);
if (!txd)
goto err3;
goto err_get_unmap;
txd->callback = ntb_rx_copy_callback;
txd->callback_param = entry;
dma_set_unmap(txd, unmap);
cookie = dmaengine_submit(txd);
if (dma_submit_error(cookie))
goto err3;
goto err_set_unmap;
dmaengine_unmap_put(unmap);
qp->last_cookie = cookie;
@ -1081,11 +1094,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
return;
err3:
dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
err2:
dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
err1:
err_set_unmap:
dmaengine_unmap_put(unmap);
err_get_unmap:
dmaengine_unmap_put(unmap);
err_wait:
/* If the callbacks come out of order, the writing of the index to the
* last completed will be out of order. This may result in the
* receive stalling forever.
@ -1245,12 +1258,12 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
struct dma_chan *chan = qp->dma_chan;
struct dma_device *device;
size_t dest_off, buff_off;
dma_addr_t src, dest;
struct dmaengine_unmap_data *unmap;
dma_addr_t dest;
dma_cookie_t cookie;
void __iomem *offset;
size_t len = entry->len;
void *buf = entry->buf;
unsigned long flags;
offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
@ -1273,28 +1286,41 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
goto err;
src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(device->dev, src))
unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
if (!unmap)
goto err;
flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
unmap->len = len;
unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
buff_off, len, DMA_TO_DEVICE);
if (dma_mapping_error(device->dev, unmap->addr[0]))
goto err_get_unmap;
unmap->to_cnt = 1;
txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
DMA_PREP_INTERRUPT);
if (!txd)
goto err1;
goto err_get_unmap;
txd->callback = ntb_tx_copy_callback;
txd->callback_param = entry;
dma_set_unmap(txd, unmap);
cookie = dmaengine_submit(txd);
if (dma_submit_error(cookie))
goto err1;
goto err_set_unmap;
dmaengine_unmap_put(unmap);
dma_async_issue_pending(chan);
qp->tx_async++;
return;
err1:
dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
err_set_unmap:
dmaengine_unmap_put(unmap);
err_get_unmap:
dmaengine_unmap_put(unmap);
err:
ntb_memcpy_tx(entry, offset);
qp->tx_memcpy++;

View File

@ -150,7 +150,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
&dws->tx_sgl,
1,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
DMA_PREP_INTERRUPT);
txdesc->callback = dw_spi_dma_done;
txdesc->callback_param = dws;
@ -173,7 +173,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
&dws->rx_sgl,
1,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
DMA_PREP_INTERRUPT);
rxdesc->callback = dw_spi_dma_done;
rxdesc->callback_param = dws;

View File

@ -1433,7 +1433,7 @@ static void work_fn_rx(struct work_struct *work)
desc = s->desc_rx[new];
if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
DMA_SUCCESS) {
DMA_COMPLETE) {
/* Handle incomplete DMA receive */
struct dma_chan *chan = s->chan_rx;
struct shdma_desc *sh_desc = container_of(desc,

View File

@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
/**
* enum dma_status - DMA transaction status
* @DMA_SUCCESS: transaction completed successfully
* @DMA_COMPLETE: transaction completed
* @DMA_IN_PROGRESS: transaction not yet processed
* @DMA_PAUSED: transaction is paused
* @DMA_ERROR: transaction failed
*/
enum dma_status {
DMA_SUCCESS,
DMA_COMPLETE,
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
@ -171,12 +171,6 @@ struct dma_interleaved_template {
* @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
acknowledges receipt, i.e. has had a chance to establish any dependency
* chains
* @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
* @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s)
* @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single
* (if not set, do the source dma-unmapping as page)
* @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single
* (if not set, do the destination dma-unmapping as page)
* @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
* @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
* @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
@ -188,14 +182,10 @@ struct dma_interleaved_template {
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
DMA_CTRL_ACK = (1 << 1),
DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
DMA_PREP_PQ_DISABLE_P = (1 << 6),
DMA_PREP_PQ_DISABLE_Q = (1 << 7),
DMA_PREP_CONTINUE = (1 << 8),
DMA_PREP_FENCE = (1 << 9),
DMA_PREP_PQ_DISABLE_P = (1 << 2),
DMA_PREP_PQ_DISABLE_Q = (1 << 3),
DMA_PREP_CONTINUE = (1 << 4),
DMA_PREP_FENCE = (1 << 5),
};
/**
@ -413,6 +403,17 @@ void dma_chan_cleanup(struct kref *kref);
typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
typedef void (*dma_async_tx_callback)(void *dma_async_param);
struct dmaengine_unmap_data {
u8 to_cnt;
u8 from_cnt;
u8 bidi_cnt;
struct device *dev;
struct kref kref;
size_t len;
dma_addr_t addr[0];
};
/**
* struct dma_async_tx_descriptor - async transaction descriptor
* ---dma generic offload fields---
@ -438,6 +439,7 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
@ -445,6 +447,40 @@ struct dma_async_tx_descriptor {
#endif
};
#ifdef CONFIG_DMA_ENGINE
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
struct dmaengine_unmap_data *unmap)
{
kref_get(&unmap->kref);
tx->unmap = unmap;
}
struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
#else
static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
struct dmaengine_unmap_data *unmap)
{
}
static inline struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
return NULL;
}
static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
}
#endif
static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
if (tx->unmap) {
dmaengine_unmap_put(tx->unmap);
tx->unmap = NULL;
}
}
#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
static inline void txd_lock(struct dma_async_tx_descriptor *txd)
{
@ -979,10 +1015,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
{
if (last_complete <= last_used) {
if ((cookie <= last_complete) || (cookie > last_used))
return DMA_SUCCESS;
return DMA_COMPLETE;
} else {
if ((cookie <= last_complete) && (cookie > last_used))
return DMA_SUCCESS;
return DMA_COMPLETE;
}
return DMA_IN_PROGRESS;
}
@ -1013,11 +1049,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
}
static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
return DMA_SUCCESS;
return DMA_COMPLETE;
}
static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
return DMA_SUCCESS;
return DMA_COMPLETE;
}
static inline void dma_issue_pending_all(void)
{
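To make the unmap interface added to this header concrete, here is a minimal client-side sketch (assuming a memcpy-capable channel and single-page, page-aligned buffers; example_async_copy is hypothetical and dma_mapping_error checks are omitted for brevity):

static dma_cookie_t example_async_copy(struct dma_chan *chan,
				       struct page *dst, struct page *src,
				       size_t len)
{
	struct dma_device *dev = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie = -ENOMEM;

	/* track one source and one destination mapping */
	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return cookie;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev->dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev->dev, dst, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, DMA_PREP_INTERRUPT);
	if (tx) {
		dma_set_unmap(tx, unmap);	/* descriptor takes its own reference */
		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);
	}

	/* drop the submitter's reference; the last put performs the unmapping */
	dmaengine_unmap_put(unmap);
	return cookie;
}

Completion is then polled through the driver's tx_status hook and compared against the renamed DMA_COMPLETE value, as the driver hunks earlier in this series show; the descriptor's own reference, taken by dma_set_unmap(), keeps the mappings alive until the transfer's cleanup path calls dma_descriptor_unmap().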

View File

@ -67,10 +67,10 @@ struct edmacc_param {
#define ITCCHEN BIT(23)
/*ch_status paramater of callback function possible values*/
#define DMA_COMPLETE 1
#define DMA_CC_ERROR 2
#define DMA_TC1_ERROR 3
#define DMA_TC2_ERROR 4
#define EDMA_DMA_COMPLETE 1
#define EDMA_DMA_CC_ERROR 2
#define EDMA_DMA_TC1_ERROR 3
#define EDMA_DMA_TC2_ERROR 4
enum address_mode {
INCR = 0,

View File

@ -1425,7 +1425,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
do {
if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
last_issued, &done,
&used) == DMA_SUCCESS) {
&used) == DMA_COMPLETE) {
/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
break;
@ -1433,7 +1433,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
struct sk_buff *skb;
while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
(dma_async_is_complete(skb->dma_cookie, done,
used) == DMA_SUCCESS)) {
used) == DMA_COMPLETE)) {
__skb_dequeue(&sk->sk_async_wait_queue);
kfree_skb(skb);
}

View File

@ -238,7 +238,7 @@ static void davinci_pcm_dma_irq(unsigned link, u16 ch_status, void *data)
print_buf_info(prtd->ram_channel, "i ram_channel");
pr_debug("davinci_pcm: link=%d, status=0x%x\n", link, ch_status);
if (unlikely(ch_status != DMA_COMPLETE))
if (unlikely(ch_status != EDMA_DMA_COMPLETE))
return;
if (snd_pcm_running(substream)) {