
NTB: convert to dmaengine_unmap_data

Use the generic unmap object to unmap dma buffers.

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Jon Mason <jon.mason@intel.com>
[djbw: fix up unmap len, and GFP flags]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Author:    Bartlomiej Zolnierkiewicz, 2013-10-18 19:35:31 +02:00
Committer: Dan Williams
parent 1786b943da
commit 6f57fd0578
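
For context on the API being adopted: instead of each client doing its own
dma_map_single()/dma_unmap_single() bookkeeping (or relying on the
DMA_COMPL_*_UNMAP_SINGLE completion flags), a refcounted
struct dmaengine_unmap_data carries all of a descriptor's mappings, and the
dmaengine core tears them down when the last reference is dropped. Below is a
condensed sketch of the pattern, using only calls visible in the diff; the
function name and return codes are illustrative, and the transitional
DMA_COMPL_SKIP_* flags the patch sets are omitted:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>		/* virt_to_page(), offset_in_page() */

/* Illustrative only: one refcounted unmap object carries both mappings,
 * and the dmaengine core unmaps them when the descriptor completes. */
static int unmap_data_copy_sketch(struct dma_chan *chan, void *dst,
				  void *src, size_t len)
{
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *txd;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;

	/* Room for two addresses; GFP_NOWAIT because callers may be atomic. */
	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
				      offset_in_page(src), len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;
	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(dst),
				      offset_in_page(dst), len,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;
	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	dma_set_unmap(txd, unmap);	/* descriptor takes its own reference */

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);	/* drop the caller's reference */
	dma_async_issue_pending(chan);
	return 0;

err_set_unmap:
	dmaengine_unmap_put(unmap);	/* reference taken by dma_set_unmap() */
err_get_unmap:
	dmaengine_unmap_put(unmap);	/* reference from ..._get_unmap_data() */
	return -ENXIO;
}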

drivers/ntb/ntb_transport.c

@@ -1034,7 +1034,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
 	size_t pay_off, buff_off;
-	dma_addr_t src, dest;
+	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
 	unsigned long flags;
@@ -1045,35 +1045,50 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 		goto err;
 
 	if (len < copy_bytes)
-		goto err1;
+		goto err_wait;
 
 	device = chan->device;
 	pay_off = (size_t) offset & ~PAGE_MASK;
 	buff_off = (size_t) buf & ~PAGE_MASK;
 
 	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
-		goto err1;
-
-	dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
-	if (dma_mapping_error(device->dev, dest))
-		goto err1;
+		goto err_wait;
 
-	src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(device->dev, src))
-		goto err2;
+	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+	if (!unmap)
+		goto err_wait;
 
-	flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
+				      pay_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[1]))
+		goto err_get_unmap;
+
+	unmap->from_cnt = 1;
+
+	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
 		DMA_PREP_INTERRUPT;
-	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+					     unmap->addr[0], len, flags);
 	if (!txd)
-		goto err3;
+		goto err_get_unmap;
 
 	txd->callback = ntb_rx_copy_callback;
 	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
 
 	cookie = dmaengine_submit(txd);
 	if (dma_submit_error(cookie))
-		goto err3;
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
 
 	qp->last_cookie = cookie;
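
Both fix-ups credited in the changelog ("[djbw: fix up unmap len, and GFP
flags]") show up in this hunk: the unmap object is allocated with GFP_NOWAIT,
since this path can run in atomic (tasklet) context where sleeping is not
allowed, and the single shared unmap->len field is set once to cover both
mappings.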
@@ -1081,11 +1096,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 	return;
 
-err3:
-	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
-err2:
-	dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
-err1:
+err_set_unmap:
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
+err_wait:
 	/* If the callbacks come out of order, the writing of the index to the
 	 * last completed will be out of order. This may result in the
 	 * receive stalling forever.
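
The doubled dmaengine_unmap_put() in this error ladder is deliberate:
dma_set_unmap() takes its own reference on the unmap object, so a submit
failure falls through err_set_unmap into err_get_unmap to drop both that
reference and the one returned by dmaengine_get_unmap_data(). Once the last
reference is gone, the core's release callback (private to dmaengine.c)
unmaps everything the object tracks. Roughly, assuming the kref-based
implementation (the field names are real, the function below is an
illustrative sketch):

static void unmap_release_sketch(struct dmaengine_unmap_data *unmap)
{
	unsigned int i, cnt = 0;

	for (i = 0; i < unmap->to_cnt; i++, cnt++)	/* sources */
		dma_unmap_page(unmap->dev, unmap->addr[cnt], unmap->len,
			       DMA_TO_DEVICE);
	for (i = 0; i < unmap->from_cnt; i++, cnt++)	/* destinations */
		dma_unmap_page(unmap->dev, unmap->addr[cnt], unmap->len,
			       DMA_FROM_DEVICE);
	/* (bidirectional entries, unmap->bidi_cnt, elided) */
}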
@@ -1245,7 +1260,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
 	size_t dest_off, buff_off;
-	dma_addr_t src, dest;
+	struct dmaengine_unmap_data *unmap;
+	dma_addr_t dest;
 	dma_cookie_t cookie;
 	void __iomem *offset;
 	size_t len = entry->len;
@@ -1273,28 +1289,43 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
 		goto err;
 
-	src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(device->dev, src))
+	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+	if (!unmap)
 		goto err;
 
-	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
-	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
+		DMA_PREP_INTERRUPT;
+	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+					     flags);
 	if (!txd)
-		goto err1;
+		goto err_get_unmap;
 
 	txd->callback = ntb_tx_copy_callback;
 	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
 
 	cookie = dmaengine_submit(txd);
 	if (dma_submit_error(cookie))
-		goto err1;
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
 
 	dma_async_issue_pending(chan);
 	qp->tx_async++;
 
 	return;
-err1:
-	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
+err_set_unmap:
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
 err:
 	ntb_memcpy_tx(entry, offset);
 	qp->tx_memcpy++;
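
Note the asymmetry with the receive path: the TX side allocates the unmap
object for a single address, and only the source buffer is tracked
(to_cnt = 1). The destination is the peer's memory window, an MMIO region
reached through a __iomem pointer, so there is no kernel page to map and
dest stays a bare dma_addr_t outside the unmap bookkeeping.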