mirror of https://github.com/qemu/qemu.git
xen_disk: remove dead code
xen_disk.c has support for using synchronous I/O instead of asynchronous I/O, but it is compiled out by default. Remove it.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
commit 4e5b184d63
parent 23e9a39e7d
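For context, the synchronous path removed here was dead because it hung off a module-level flag that defaulted to AIO and was never changed at runtime. Below is a minimal sketch of the dispatch pattern the patch collapses; use_aio, ioreq_runio_qemu_aio, and ioreq_runio_qemu_sync are the names from the diff that follows, while dispatch_ioreq is a hypothetical wrapper for illustration only:

    /* Sketch only: use_aio defaults to 1 and nothing ever clears it,
     * so the else branch -- the synchronous path -- can never run. */
    static int use_aio = 1;

    static void dispatch_ioreq(struct ioreq *ioreq)  /* hypothetical name */
    {
        if (use_aio) {
            ioreq_runio_qemu_aio(ioreq);   /* asynchronous path, kept */
        } else {
            ioreq_runio_qemu_sync(ioreq);  /* synchronous path, removed */
        }
    }

After the patch, blk_handle_requests() calls ioreq_runio_qemu_aio() unconditionally, so the flag, the sync helper, and the branch all disappear.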
@@ -49,7 +49,6 @@ static int syncwrite = 0;
 static int batch_maps = 0;
 
 static int max_requests = 32;
-static int use_aio = 1;
 
 /* ------------------------------------------------------------- */
 
@@ -314,76 +313,6 @@ static int ioreq_map(struct ioreq *ioreq)
     return 0;
 }
 
-static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
-{
-    struct XenBlkDev *blkdev = ioreq->blkdev;
-    int i, rc;
-    off_t pos;
-
-    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
-        goto err_no_map;
-    }
-    if (ioreq->presync) {
-        bdrv_flush(blkdev->bs);
-    }
-
-    switch (ioreq->req.operation) {
-    case BLKIF_OP_READ:
-        pos = ioreq->start;
-        for (i = 0; i < ioreq->v.niov; i++) {
-            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
-                           ioreq->v.iov[i].iov_base,
-                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
-            if (rc != 0) {
-                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
-                              ioreq->v.iov[i].iov_base,
-                              ioreq->v.iov[i].iov_len);
-                goto err;
-            }
-            pos += ioreq->v.iov[i].iov_len;
-        }
-        break;
-    case BLKIF_OP_WRITE:
-    case BLKIF_OP_WRITE_BARRIER:
-        if (!ioreq->req.nr_segments) {
-            break;
-        }
-        pos = ioreq->start;
-        for (i = 0; i < ioreq->v.niov; i++) {
-            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
-                            ioreq->v.iov[i].iov_base,
-                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
-            if (rc != 0) {
-                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
-                              ioreq->v.iov[i].iov_base,
-                              ioreq->v.iov[i].iov_len);
-                goto err;
-            }
-            pos += ioreq->v.iov[i].iov_len;
-        }
-        break;
-    default:
-        /* unknown operation (shouldn't happen -- parse catches this) */
-        goto err;
-    }
-
-    if (ioreq->postsync) {
-        bdrv_flush(blkdev->bs);
-    }
-    ioreq->status = BLKIF_RSP_OKAY;
-
-    ioreq_unmap(ioreq);
-    ioreq_finish(ioreq);
-    return 0;
-
-err:
-    ioreq_unmap(ioreq);
-err_no_map:
-    ioreq_finish(ioreq);
-    ioreq->status = BLKIF_RSP_ERROR;
-    return -1;
-}
-
 static void qemu_aio_complete(void *opaque, int ret)
 {
     struct ioreq *ioreq = opaque;
@@ -554,9 +483,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
     rp = blkdev->rings.common.sring->req_prod;
     xen_rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-    if (use_aio) {
-        blk_send_response_all(blkdev);
-    }
+    blk_send_response_all(blkdev);
     while (rc != rp) {
         /* pull request from ring */
         if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
@@ -579,16 +506,7 @@ static void blk_handle_requests(struct XenBlkDev *blkdev)
             continue;
         }
 
-        if (use_aio) {
-            /* run i/o in aio mode */
-            ioreq_runio_qemu_aio(ioreq);
-        } else {
-            /* run i/o in sync mode */
-            ioreq_runio_qemu_sync(ioreq);
-        }
-    }
-    if (!use_aio) {
-        blk_send_response_all(blkdev);
+        ioreq_runio_qemu_aio(ioreq);
     }
 
     if (blkdev->more_work && blkdev->requests_inflight < max_requests) {