/*
 * DMA helper functions
 *
 * Copyright (c) 2009,2020 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "trace/trace-root.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "sysemu/cpu-timers.h"
#include "qemu/range.h"

/* #define DEBUG_IOMMU */
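
/*
 * Fill @len bytes of guest memory at @addr in address space @as with the
 * constant byte @c, using memory transaction attributes @attrs.
 */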
MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
                           uint8_t c, dma_addr_t len, MemTxAttrs attrs)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

    return address_space_set(as, addr, c, len, attrs);
}
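
/*
 * Initialize @qsg with room for @alloc_hint scatter/gather entries that
 * will be accessed through address space @as.  A reference to @dev is
 * taken so the device cannot go away while the list is in use.
 */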
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_new(ScatterGatherEntry, alloc_hint);
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}
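
/* Append the region [@base, @base + @len) to @qsg, growing it as needed. */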
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_renew(ScatterGatherEntry, qsg->sg, qsg->nalloc);
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}
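
/* Release the entry array and the device reference taken at init time. */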
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}
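
/*
 * Illustrative sketch of the intended call pattern (hypothetical device
 * code, not part of this file): a device builds a QEMUSGList from
 * guest-programmed descriptors, hands it to dma_blk_read()/dma_blk_write(),
 * and tears it down once the completion callback has fired.
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, DEVICE(dev), 2, pci_get_address_space(dev));
 *     qemu_sglist_add(&qsg, desc0_addr, desc0_len);
 *     qemu_sglist_add(&qsg, desc1_addr, desc1_len);
 *     dma_blk_read(blk, &qsg, sector * 512, 512, my_completion_cb, dev);
 *     ...
 *     qemu_sglist_destroy(&qsg);   // after my_completion_cb has run
 */

/* State of an in-flight scatter/gather block I/O operation. */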
typedef struct {
    BlockAIOCB common;
    AioContext *ctx;
    BlockAIOCB *acb;
    QEMUSGList *sg;
    uint32_t align;
    uint64_t offset;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
    void *io_func_opaque;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);
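
/*
 * Bottom half scheduled by cpu_register_map_client() once mapping space
 * is available again; it restarts the transfer state machine.
 */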
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    assert(!dbs->acb && dbs->bh);
    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}
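
/* Unmap every buffer mapped for the previous chunk and reset the iovec. */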
static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}
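
/* Finish the whole transfer: unmap, invoke the caller's callback, free. */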
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    assert(!dbs->acb && !dbs->bh);
    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    qemu_aio_unref(dbs);
}
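
/*
 * Core state machine, called once per completed chunk (and once to start):
 * unmap the chunk that just finished, map as many further scatter/gather
 * entries as possible into dbs->iov, then either submit the next chunk via
 * dbs->io_func, finish the request, or wait for mapping space to free up.
 */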
static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->offset += dbs->iov.size;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir,
                             MEMTXATTRS_UNSPECIFIED);
        /*
         * Make reads deterministic in icount mode. Windows sometimes issues
         * disk read requests with overlapping SGs. It leads
         * to non-determinism, because resulting buffer contents may be mixed
         * from several sectors. This code splits all SGs into several
         * groups. SGs in every group do not overlap.
         */
        if (mem && icount_enabled() && dbs->dir == DMA_DIRECTION_FROM_DEVICE) {
            int i;
            for (i = 0 ; i < dbs->iov.niov ; ++i) {
                if (ranges_overlap((intptr_t)dbs->iov.iov[i].iov_base,
                                   dbs->iov.iov[i].iov_len, (intptr_t)mem,
                                   cur_len)) {
                    dma_memory_unmap(dbs->sg->as, mem, cur_len,
                                     dbs->dir, cur_len);
                    mem = NULL;
                    break;
                }
            }
        }
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
        cpu_register_map_client(dbs->bh);
        return;
    }

    if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
        qemu_iovec_discard_back(&dbs->iov,
                                QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
    }

    aio_context_acquire(dbs->ctx);
    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                            dma_blk_cb, dbs, dbs->io_func_opaque);
    aio_context_release(dbs->ctx);
    assert(dbs->acb);
}
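
/*
 * Cancel a request: cancel any pending block layer AIO (which will invoke
 * dma_blk_cb and complete the request), or drop a pending map-client
 * bottom half and report -ECANCELED directly.
 */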
static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    assert(!(dbs->acb && dbs->bh));
    if (dbs->acb) {
        /* This will invoke dma_blk_cb.  */
        blk_aio_cancel_async(dbs->acb);
        return;
    }

    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, -ECANCELED);
    }
}

static AioContext *dma_get_aio_context(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    return dbs->ctx;
}
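
/* AIOCB operations for scatter/gather DMA requests. */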
static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size     = sizeof(DMAAIOCB),
    .cancel_async   = dma_aio_cancel,
    .get_aio_context = dma_get_aio_context,
};
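
/*
 * Start a scatter/gather transfer: map the first chunk of @sg and submit
 * it through @io_func at byte @offset, truncating each submission to a
 * multiple of @align.
 */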
BlockAIOCB *dma_blk_io(AioContext *ctx,
    QEMUSGList *sg, uint64_t offset, uint32_t align,
    DMAIOFunc *io_func, void *io_func_opaque,
    BlockCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, NULL, cb, opaque);

    trace_dma_blk_io(dbs, io_func_opaque, offset, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->sg = sg;
    dbs->ctx = ctx;
    dbs->offset = offset;
    dbs->align = align;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->io_func_opaque = io_func_opaque;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}
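
/*
 * Trampolines adapting blk_aio_preadv()/blk_aio_pwritev() to the generic
 * DMAIOFunc signature, with the BlockBackend passed as the opaque pointer.
 */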
static
BlockAIOCB *dma_blk_read_io_func(int64_t offset, QEMUIOVector *iov,
                                 BlockCompletionFunc *cb, void *cb_opaque,
                                 void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_preadv(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_read_io_func, blk, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

static
BlockAIOCB *dma_blk_write_io_func(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_pwritev(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_write_io_func, blk, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}
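
/*
 * Copy up to @len bytes between the linear buffer @buf and the guest
 * regions described by @sg, one entry at a time.  On return, *@residual
 * (if non-NULL) holds the number of bytes of @sg left untransferred.
 */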
static MemTxResult dma_buf_rw(void *buf, dma_addr_t len, dma_addr_t *residual,
                              QEMUSGList *sg, DMADirection dir,
                              MemTxAttrs attrs)
{
    uint8_t *ptr = buf;
    dma_addr_t xresidual;
    int sg_cur_index;
    MemTxResult res = MEMTX_OK;

    xresidual = sg->size;
    sg_cur_index = 0;
    len = MIN(len, xresidual);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        dma_addr_t xfer = MIN(len, entry.len);
        res |= dma_memory_rw(sg->as, entry.base, ptr, xfer, dir, attrs);
        ptr += xfer;
        len -= xfer;
        xresidual -= xfer;
    }

    if (residual) {
        *residual = xresidual;
    }
    return res;
}

MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual,
                         QEMUSGList *sg, MemTxAttrs attrs)
{
    return dma_buf_rw(ptr, len, residual, sg, DMA_DIRECTION_FROM_DEVICE, attrs);
}

MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual,
                          QEMUSGList *sg, MemTxAttrs attrs)
{
    return dma_buf_rw(ptr, len, residual, sg, DMA_DIRECTION_TO_DEVICE, attrs);
}
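
/* Account a scatter/gather transfer of sg->size bytes against @blk's stats. */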
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}
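
/*
 * Return the address bit mask of the largest power-of-2 region that is
 * no larger than @end - @start + 1 bytes, aligned with @start, and no
 * wider than @max_addr_bits bits.
 */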
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end, int max_addr_bits)
{
    uint64_t max_mask = UINT64_MAX, addr_mask = end - start;
    uint64_t alignment_mask, size_mask;

    if (max_addr_bits != 64) {
        max_mask = (1ULL << max_addr_bits) - 1;
    }

    alignment_mask = start ? (start & -start) - 1 : max_mask;
    alignment_mask = MIN(alignment_mask, max_mask);
    size_mask = MIN(addr_mask, max_mask);

    if (alignment_mask <= size_mask) {
        /* Increase the alignment of start */
        return alignment_mask;
    } else {
        /* Find the largest page mask from size */
        if (addr_mask == UINT64_MAX) {
            return UINT64_MAX;
        }
        return (1ULL << (63 - clz64(addr_mask + 1))) - 1;
    }
}