block: Remove bdrv_aio_multiwrite()

Since virtio-blk implements request merging itself these days, the only
remaining users are test cases for the function. That doesn't make the
function exactly useful any more.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>

parent 66a0fae438
commit 91c6e4b7bb
@@ -1067,20 +1067,6 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
     bdrv_aio_cancel_async(acb);
 }
 
-int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
-{
-    int i, ret;
-
-    for (i = 0; i < num_reqs; i++) {
-        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
-        if (ret < 0) {
-            return ret;
-        }
-    }
-
-    return bdrv_aio_multiwrite(blk_bs(blk), reqs, num_reqs);
-}
-
 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
 {
     if (!blk_is_available(blk)) {
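
The removed wrapper checks every request with blk_check_request() before handing the whole batch to bdrv_aio_multiwrite(), so one out-of-range request rejects the batch before anything is submitted. A minimal standalone sketch of that validate-first pattern follows; check_request(), DEV_SECTORS and the submit loop are illustrative stand-ins, not QEMU API:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for QEMU's BlockRequest and blk_check_request();
 * names and limits here are illustrative only. */
struct request {
    int64_t sector;
    int nb_sectors;
};

#define DEV_SECTORS 262144   /* hypothetical 128 MiB device, 512-byte sectors */

static int check_request(const struct request *req)
{
    if (req->sector < 0 || req->nb_sectors <= 0 ||
        req->sector + req->nb_sectors > DEV_SECTORS) {
        return -1;           /* out of bounds */
    }
    return 0;
}

/* Validate the whole batch before submitting anything, as the removed
 * blk_aio_multiwrite() wrapper did. */
static int submit_batch(const struct request *reqs, int num_reqs)
{
    for (int i = 0; i < num_reqs; i++) {
        if (check_request(&reqs[i]) < 0) {
            return -1;       /* nothing has been submitted yet */
        }
    }
    for (int i = 0; i < num_reqs; i++) {
        printf("submit: sector %lld, %d sectors\n",
               (long long)reqs[i].sector, reqs[i].nb_sectors);
    }
    return 0;
}

int main(void)
{
    struct request reqs[] = { {0, 8}, {8, 8}, {300000, 8} };
    return submit_batch(reqs, 3) == 0 ? 0 : 1;
}

Here the third request is out of range, so the sketch fails the batch without printing any submissions.
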
block/io.c (194 lines changed)
@@ -1878,200 +1878,6 @@ BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
                                  cb, opaque, true);
 }
 
-
-typedef struct MultiwriteCB {
-    int error;
-    int num_requests;
-    int num_callbacks;
-    struct {
-        BlockCompletionFunc *cb;
-        void *opaque;
-        QEMUIOVector *free_qiov;
-    } callbacks[];
-} MultiwriteCB;
-
-static void multiwrite_user_cb(MultiwriteCB *mcb)
-{
-    int i;
-
-    for (i = 0; i < mcb->num_callbacks; i++) {
-        mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
-        if (mcb->callbacks[i].free_qiov) {
-            qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
-        }
-        g_free(mcb->callbacks[i].free_qiov);
-    }
-}
-
-static void multiwrite_cb(void *opaque, int ret)
-{
-    MultiwriteCB *mcb = opaque;
-
-    trace_multiwrite_cb(mcb, ret);
-
-    if (ret < 0 && !mcb->error) {
-        mcb->error = ret;
-    }
-
-    mcb->num_requests--;
-    if (mcb->num_requests == 0) {
-        multiwrite_user_cb(mcb);
-        g_free(mcb);
-    }
-}
-
-static int multiwrite_req_compare(const void *a, const void *b)
-{
-    const BlockRequest *req1 = a, *req2 = b;
-
-    /*
-     * Note that we can't simply subtract req2->sector from req1->sector
-     * here as that could overflow the return value.
-     */
-    if (req1->sector > req2->sector) {
-        return 1;
-    } else if (req1->sector < req2->sector) {
-        return -1;
-    } else {
-        return 0;
-    }
-}
-
-/*
- * Takes a bunch of requests and tries to merge them. Returns the number of
- * requests that remain after merging.
- */
-static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
-                            int num_reqs, MultiwriteCB *mcb)
-{
-    int i, outidx;
-
-    // Sort requests by start sector
-    qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
-
-    // Check if adjacent requests touch the same clusters. If so, combine them,
-    // filling up gaps with zero sectors.
-    outidx = 0;
-    for (i = 1; i < num_reqs; i++) {
-        int merge = 0;
-        int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
-
-        // Handle exactly sequential writes and overlapping writes.
-        if (reqs[i].sector <= oldreq_last) {
-            merge = 1;
-        }
-
-        if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 >
-            bs->bl.max_iov) {
-            merge = 0;
-        }
-
-        if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
-            reqs[i].nb_sectors > bs->bl.max_transfer_length) {
-            merge = 0;
-        }
-
-        if (merge) {
-            size_t size;
-            QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
-            qemu_iovec_init(qiov,
-                            reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
-
-            // Add the first request to the merged one. If the requests are
-            // overlapping, drop the last sectors of the first request.
-            size = (reqs[i].sector - reqs[outidx].sector) << 9;
-            qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
-
-            // We should need to add any zeros between the two requests
-            assert (reqs[i].sector <= oldreq_last);
-
-            // Add the second request
-            qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
-
-            // Add tail of first request, if necessary
-            if (qiov->size < reqs[outidx].qiov->size) {
-                qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
-                                  reqs[outidx].qiov->size - qiov->size);
-            }
-
-            reqs[outidx].nb_sectors = qiov->size >> 9;
-            reqs[outidx].qiov = qiov;
-
-            mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
-        } else {
-            outidx++;
-            reqs[outidx].sector = reqs[i].sector;
-            reqs[outidx].nb_sectors = reqs[i].nb_sectors;
-            reqs[outidx].qiov = reqs[i].qiov;
-        }
-    }
-
-    if (bs->blk) {
-        block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
-                              num_reqs - outidx - 1);
-    }
-
-    return outidx + 1;
-}
-
-/*
- * Submit multiple AIO write requests at once.
- *
- * On success, the function returns 0 and all requests in the reqs array have
- * been submitted. In error case this function returns -1, and any of the
- * requests may or may not be submitted yet. In particular, this means that the
- * callback will be called for some of the requests, for others it won't. The
- * caller must check the error field of the BlockRequest to wait for the right
- * callbacks (if error != 0, no callback will be called).
- *
- * The implementation may modify the contents of the reqs array, e.g. to merge
- * requests. However, the fields opaque and error are left unmodified as they
- * are used to signal failure for a single request to the caller.
- */
-int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
-{
-    MultiwriteCB *mcb;
-    int i;
-
-    /* don't submit writes if we don't have a medium */
-    if (bs->drv == NULL) {
-        for (i = 0; i < num_reqs; i++) {
-            reqs[i].error = -ENOMEDIUM;
-        }
-        return -1;
-    }
-
-    if (num_reqs == 0) {
-        return 0;
-    }
-
-    // Create MultiwriteCB structure
-    mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
-    mcb->num_requests = 0;
-    mcb->num_callbacks = num_reqs;
-
-    for (i = 0; i < num_reqs; i++) {
-        mcb->callbacks[i].cb = reqs[i].cb;
-        mcb->callbacks[i].opaque = reqs[i].opaque;
-    }
-
-    // Check for mergable requests
-    num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
-
-    trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
-
-    /* Run the aio requests. */
-    mcb->num_requests = num_reqs;
-    for (i = 0; i < num_reqs; i++) {
-        bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
-                              reqs[i].nb_sectors, reqs[i].flags,
-                              multiwrite_cb, mcb,
-                              true);
-    }
-
-    return 0;
-}
-
 void bdrv_aio_cancel(BlockAIOCB *acb)
 {
     qemu_aio_ref(acb);
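
The removed multiwrite_merge() sorts the batch by start sector and folds a request into its predecessor when it begins at or before the predecessor's end, giving up when the combined iovec count would exceed bs->bl.max_iov or the transfer length limit; the comparator deliberately avoids returning req1->sector - req2->sector because the 64-bit difference can overflow the comparator's int return value. A self-contained sketch of the same merge rule on plain structs follows; struct req, MAX_IOV and the niov bookkeeping are illustrative stand-ins, with no iovec or zero-padding handling:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
    int64_t sector;
    int nb_sectors;
    int niov;            /* how many iovec elements back this request */
};

#define MAX_IOV 1024     /* stand-in for bs->bl.max_iov */

/* Three-way compare instead of subtraction: an int64_t difference can
 * overflow the int return value. */
static int req_compare(const void *a, const void *b)
{
    const struct req *r1 = a, *r2 = b;
    if (r1->sector > r2->sector) {
        return 1;
    } else if (r1->sector < r2->sector) {
        return -1;
    }
    return 0;
}

/* Merge sorted requests that are sequential or overlapping, the same
 * criterion multiwrite_merge() used; returns the new request count. */
static int merge(struct req *reqs, int num_reqs)
{
    int outidx = 0;

    qsort(reqs, num_reqs, sizeof(*reqs), req_compare);

    for (int i = 1; i < num_reqs; i++) {
        int64_t prev_end = reqs[outidx].sector + reqs[outidx].nb_sectors;
        int64_t this_end = reqs[i].sector + reqs[i].nb_sectors;

        if (reqs[i].sector <= prev_end &&
            reqs[outidx].niov + reqs[i].niov + 1 <= MAX_IOV) {
            /* Extend the previous request to cover this one. */
            if (this_end > prev_end) {
                reqs[outidx].nb_sectors = this_end - reqs[outidx].sector;
            }
            reqs[outidx].niov += reqs[i].niov + 1;
        } else {
            reqs[++outidx] = reqs[i];
        }
    }
    return outidx + 1;
}

int main(void)
{
    struct req reqs[] = { {8, 8, 1}, {0, 8, 1}, {128, 8, 1} };
    int n = merge(reqs, 3);

    for (int i = 0; i < n; i++) {
        printf("req %d: sector %lld, %d sectors\n",
               i, (long long)reqs[i].sector, reqs[i].nb_sectors);
    }
    return 0;
}

With the sample input, the two adjacent 4 KiB writes at sectors 0 and 8 collapse into one 8 KiB request while the write at sector 128 stays separate, mirroring the "Sequential requests" and "Disjoint requests" cases in the removed iotest below.
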
@@ -329,7 +329,7 @@ void bdrv_aio_cancel(BlockAIOCB *acb);
 void bdrv_aio_cancel_async(BlockAIOCB *acb);
 
 typedef struct BlockRequest {
-    /* Fields to be filled by multiwrite caller */
+    /* Fields to be filled by caller */
     union {
         struct {
             int64_t sector;
@@ -345,13 +345,10 @@ typedef struct BlockRequest {
     BlockCompletionFunc *cb;
     void *opaque;
 
-    /* Filled by multiwrite implementation */
+    /* Filled by block layer */
     int error;
 } BlockRequest;
 
-int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs,
-                        int num_reqs);
-
 /* sg packet commands */
 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf);
 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
@@ -138,7 +138,6 @@ BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                             BlockCompletionFunc *cb, void *opaque);
 void blk_aio_cancel(BlockAIOCB *acb);
 void blk_aio_cancel_async(BlockAIOCB *acb);
-int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs);
 int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf);
 BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                           BlockCompletionFunc *cb, void *opaque);
qemu-io-cmds.c (203 lines changed)
@@ -574,49 +574,6 @@ static int do_aio_writev(BlockBackend *blk, QEMUIOVector *qiov,
     return async_ret < 0 ? async_ret : 1;
 }
 
-struct multiwrite_async_ret {
-    int num_done;
-    int error;
-};
-
-static void multiwrite_cb(void *opaque, int ret)
-{
-    struct multiwrite_async_ret *async_ret = opaque;
-
-    async_ret->num_done++;
-    if (ret < 0) {
-        async_ret->error = ret;
-    }
-}
-
-static int do_aio_multiwrite(BlockBackend *blk, BlockRequest* reqs,
-                             int num_reqs, int *total)
-{
-    int i, ret;
-    struct multiwrite_async_ret async_ret = {
-        .num_done = 0,
-        .error = 0,
-    };
-
-    *total = 0;
-    for (i = 0; i < num_reqs; i++) {
-        reqs[i].cb = multiwrite_cb;
-        reqs[i].opaque = &async_ret;
-        *total += reqs[i].qiov->size;
-    }
-
-    ret = blk_aio_multiwrite(blk, reqs, num_reqs);
-    if (ret < 0) {
-        return ret;
-    }
-
-    while (async_ret.num_done < num_reqs) {
-        main_loop_wait(false);
-    }
-
-    return async_ret.error < 0 ? async_ret.error : 1;
-}
-
 static void read_help(void)
 {
     printf(
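
The removed do_aio_multiwrite() waits for the batch by counting completions: every request shares one callback that bumps num_done and records a failure, and the caller spins main_loop_wait() until num_done reaches num_reqs. A self-contained sketch of that completion-counting pattern follows; the tiny queue and the fake_submit()/fake_poll() helpers stand in for QEMU's AIO machinery and main loop and are not real API:

#include <stdio.h>

/* Completion-counting state, mirroring struct multiwrite_async_ret. */
struct batch_status {
    int num_done;
    int error;
};

static void request_done(void *opaque, int ret)
{
    struct batch_status *st = opaque;

    st->num_done++;
    if (ret < 0) {
        st->error = ret;     /* remember a failure */
    }
}

/* Toy "event loop": completions queued here are delivered one per poll,
 * standing in for main_loop_wait(false). */
#define MAX_PENDING 16
static int pending_ret[MAX_PENDING];
static int pending_head, pending_tail;

static void fake_submit(int ret) { pending_ret[pending_tail++] = ret; }

static void fake_poll(struct batch_status *st)
{
    if (pending_head < pending_tail) {
        request_done(st, pending_ret[pending_head++]);
    }
}

int main(void)
{
    struct batch_status st = { .num_done = 0, .error = 0 };
    int num_reqs = 3;

    /* "Submit" three asynchronous requests, the middle one failing. */
    fake_submit(0);
    fake_submit(-5);
    fake_submit(0);

    /* Wait for all of them, as do_aio_multiwrite() did. */
    while (st.num_done < num_reqs) {
        fake_poll(&st);
    }
    printf("done=%d error=%d\n", st.num_done, st.error);
    return st.error < 0 ? 1 : 0;
}
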
@@ -1211,165 +1168,6 @@ out:
     return 0;
 }
 
-static void multiwrite_help(void)
-{
-    printf(
-"\n"
-" writes a range of bytes from the given offset source from multiple buffers,\n"
-" in a batch of requests that may be merged by qemu\n"
-"\n"
-" Example:\n"
-" 'multiwrite 512 1k 1k ; 4k 1k'\n"
-" writes 2 kB at 512 bytes and 1 kB at 4 kB into the open file\n"
-"\n"
-" Writes into a segment of the currently open file, using a buffer\n"
-" filled with a set pattern (0xcdcdcdcd). The pattern byte is increased\n"
-" by one for each request contained in the multiwrite command.\n"
-" -P, -- use different pattern to fill file\n"
-" -C, -- report statistics in a machine parsable format\n"
-" -q, -- quiet mode, do not show I/O statistics\n"
-"\n");
-}
-
-static int multiwrite_f(BlockBackend *blk, int argc, char **argv);
-
-static const cmdinfo_t multiwrite_cmd = {
-    .name = "multiwrite",
-    .cfunc = multiwrite_f,
-    .argmin = 2,
-    .argmax = -1,
-    .args = "[-Cq] [-P pattern ] off len [len..] [; off len [len..]..]",
-    .oneline = "issues multiple write requests at once",
-    .help = multiwrite_help,
-};
-
-static int multiwrite_f(BlockBackend *blk, int argc, char **argv)
-{
-    struct timeval t1, t2;
-    bool Cflag = false, qflag = false;
-    int c, cnt;
-    char **buf;
-    int64_t offset, first_offset = 0;
-    /* Some compilers get confused and warn if this is not initialized. */
-    int total = 0;
-    int nr_iov;
-    int nr_reqs;
-    int pattern = 0xcd;
-    QEMUIOVector *qiovs;
-    int i;
-    BlockRequest *reqs;
-
-    while ((c = getopt(argc, argv, "CqP:")) != -1) {
-        switch (c) {
-        case 'C':
-            Cflag = true;
-            break;
-        case 'q':
-            qflag = true;
-            break;
-        case 'P':
-            pattern = parse_pattern(optarg);
-            if (pattern < 0) {
-                return 0;
-            }
-            break;
-        default:
-            return qemuio_command_usage(&writev_cmd);
-        }
-    }
-
-    if (optind > argc - 2) {
-        return qemuio_command_usage(&writev_cmd);
-    }
-
-    nr_reqs = 1;
-    for (i = optind; i < argc; i++) {
-        if (!strcmp(argv[i], ";")) {
-            nr_reqs++;
-        }
-    }
-
-    reqs = g_new0(BlockRequest, nr_reqs);
-    buf = g_new0(char *, nr_reqs);
-    qiovs = g_new(QEMUIOVector, nr_reqs);
-
-    for (i = 0; i < nr_reqs && optind < argc; i++) {
-        int j;
-
-        /* Read the offset of the request */
-        offset = cvtnum(argv[optind]);
-        if (offset < 0) {
-            print_cvtnum_err(offset, argv[optind]);
-            goto out;
-        }
-        optind++;
-
-        if (offset & 0x1ff) {
-            printf("offset %lld is not sector aligned\n",
-                   (long long)offset);
-            goto out;
-        }
-
-        if (i == 0) {
-            first_offset = offset;
-        }
-
-        /* Read lengths for qiov entries */
-        for (j = optind; j < argc; j++) {
-            if (!strcmp(argv[j], ";")) {
-                break;
-            }
-        }
-
-        nr_iov = j - optind;
-
-        /* Build request */
-        buf[i] = create_iovec(blk, &qiovs[i], &argv[optind], nr_iov, pattern);
-        if (buf[i] == NULL) {
-            goto out;
-        }
-
-        reqs[i].qiov = &qiovs[i];
-        reqs[i].sector = offset >> 9;
-        reqs[i].nb_sectors = reqs[i].qiov->size >> 9;
-
-        optind = j + 1;
-
-        pattern++;
-    }
-
-    /* If there were empty requests at the end, ignore them */
-    nr_reqs = i;
-
-    gettimeofday(&t1, NULL);
-    cnt = do_aio_multiwrite(blk, reqs, nr_reqs, &total);
-    gettimeofday(&t2, NULL);
-
-    if (cnt < 0) {
-        printf("aio_multiwrite failed: %s\n", strerror(-cnt));
-        goto out;
-    }
-
-    if (qflag) {
-        goto out;
-    }
-
-    /* Finally, report back -- -C gives a parsable format */
-    t2 = tsub(t2, t1);
-    print_report("wrote", &t2, first_offset, total, total, cnt, Cflag);
-out:
-    for (i = 0; i < nr_reqs; i++) {
-        qemu_io_free(buf[i]);
-        if (reqs[i].qiov != NULL) {
-            qemu_iovec_destroy(&qiovs[i]);
-        }
-    }
-    g_free(buf);
-    g_free(reqs);
-    g_free(qiovs);
-    return 0;
-}
-
 struct aio_ctx {
     BlockBackend *blk;
     QEMUIOVector qiov;
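
The removed multiwrite_f() sizes its request array by counting ';' separators, then parses each group as one offset followed by one or more lengths, exactly as the help text's 'multiwrite 512 1k 1k ; 4k 1k' example suggests. A standalone sketch of that splitting pass over an argv-style array follows; cvtnum(), create_iovec() and pattern handling are left out, and the hard-coded argv below is purely for illustration:

#include <stdio.h>
#include <string.h>

/* Count how many requests an "off len [len..] [; off len [len..]..]"
 * argument list describes, then walk each group, as multiwrite_f() did. */
int main(void)
{
    const char *argv[] = { "512", "1k", "1k", ";", "4k", "1k" };
    int argc = 6;
    int nr_reqs = 1;

    for (int i = 0; i < argc; i++) {
        if (!strcmp(argv[i], ";")) {
            nr_reqs++;
        }
    }
    printf("%d request(s)\n", nr_reqs);

    int optind = 0;
    for (int r = 0; r < nr_reqs && optind < argc; r++) {
        const char *offset = argv[optind++];   /* first token: offset */
        int j;

        for (j = optind; j < argc; j++) {      /* rest up to ';': lengths */
            if (!strcmp(argv[j], ";")) {
                break;
            }
        }
        printf("request %d: offset %s, %d length token(s)\n",
               r, offset, j - optind);
        optind = j + 1;                        /* skip the ';' */
    }
    return 0;
}

For the sample arguments this reports two requests: one at offset 512 built from two length tokens, one at offset 4k built from one, matching the 2 kB and 1 kB writes described in the removed help text.
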
@@ -2436,7 +2234,6 @@ static void __attribute((constructor)) init_qemuio_commands(void)
     qemuio_add_command(&readv_cmd);
     qemuio_add_command(&write_cmd);
     qemuio_add_command(&writev_cmd);
-    qemuio_add_command(&multiwrite_cmd);
     qemuio_add_command(&aio_read_cmd);
     qemuio_add_command(&aio_write_cmd);
     qemuio_add_command(&aio_flush_cmd);
@@ -1,152 +0,0 @@
-#!/bin/bash
-#
-# Test simple read/write using plain bdrv_read/bdrv_write
-#
-# Copyright (C) 2014 Red Hat, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-
-# creator
-owner=stefanha@redhat.com
-
-seq=`basename $0`
-echo "QA output created by $seq"
-
-here=`pwd`
-status=1        # failure is the default!
-
-_cleanup()
-{
-        _cleanup_test_img
-}
-trap "_cleanup; exit \$status" 0 1 2 3 15
-
-# get standard environment, filters and checks
-. ./common.rc
-. ./common.filter
-
-_supported_fmt generic
-_supported_proto generic
-_supported_os Linux
-
-
-size=128M
-
-echo
-echo "== Single request =="
-_make_test_img $size
-$QEMU_IO -c "write -z 0 8k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "multiwrite 0 4k" "$TEST_IMG" | _filter_qemu_io
-
-echo
-echo "== verify pattern =="
-$QEMU_IO -c "read -P 0xcd 0 4k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io
-
-_cleanup_test_img
-
-echo
-echo "== Sequential requests =="
-_make_test_img $size
-$QEMU_IO -c "write -z 0 12k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "multiwrite 0 4k ; 4k 4k" "$TEST_IMG" | _filter_qemu_io
-
-echo
-echo "== verify pattern =="
-$QEMU_IO -c "read -P 0xcd 0 4k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0xce 4k 4k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0 8k 4k" "$TEST_IMG" | _filter_qemu_io
-
-_cleanup_test_img
-
-echo
-echo "== Superset overlapping requests =="
-_make_test_img $size
-$QEMU_IO -c "write -z 0 8k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "multiwrite 0 4k ; 1k 2k" "$TEST_IMG" | _filter_qemu_io
-
-echo
-echo "== verify pattern =="
-# Order of overlapping in-flight requests is not guaranteed so we cannot verify
-# [1k, 3k) since it could have either pattern 0xcd or 0xce.
-$QEMU_IO -c "read -P 0xcd 0 1k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0xcd 3k 1k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io
-
-_cleanup_test_img
-
-echo
-echo "== Subset overlapping requests =="
-_make_test_img $size
-$QEMU_IO -c "write -z 0 8k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "multiwrite 1k 2k ; 0k 4k" "$TEST_IMG" | _filter_qemu_io
-
-echo
-echo "== verify pattern =="
-# Order of overlapping in-flight requests is not guaranteed so we cannot verify
-# [1k, 3k) since it could have either pattern 0xcd or 0xce.
-$QEMU_IO -c "read -P 0xce 0 1k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0xce 3k 1k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io
-
-_cleanup_test_img
-
-echo
-echo "== Head overlapping requests =="
-_make_test_img $size
-$QEMU_IO -c "write -z 0 8k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "multiwrite 0k 2k ; 0k 4k" "$TEST_IMG" | _filter_qemu_io
-
-echo
-echo "== verify pattern =="
-# Order of overlapping in-flight requests is not guaranteed so we cannot verify
-# [0k, 2k) since it could have either pattern 0xcd or 0xce.
-$QEMU_IO -c "read -P 0xce 2k 2k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io
-
-_cleanup_test_img
-
-echo
-echo "== Tail overlapping requests =="
-_make_test_img $size
-$QEMU_IO -c "write -z 0 8k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "multiwrite 2k 2k ; 0k 4k" "$TEST_IMG" | _filter_qemu_io
-
-echo
-echo "== verify pattern =="
-# Order of overlapping in-flight requests is not guaranteed so we cannot verify
-# [2k, 4k) since it could have either pattern 0xcd or 0xce.
-$QEMU_IO -c "read -P 0xce 0k 2k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0 4k 4k" "$TEST_IMG" | _filter_qemu_io
-
-_cleanup_test_img
-
-echo
-echo "== Disjoint requests =="
-_make_test_img $size
-$QEMU_IO -c "write -z 0 72k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "multiwrite 0 4k ; 64k 4k" "$TEST_IMG" | _filter_qemu_io
-
-echo
-echo "== verify pattern =="
-$QEMU_IO -c "read -P 0xcd 0 4k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0 4k 60k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0xce 64k 4k" "$TEST_IMG" | _filter_qemu_io
-$QEMU_IO -c "read -P 0 68k 4k" "$TEST_IMG" | _filter_qemu_io
-
-# success, all done
-echo "*** done"
-rm -f $seq.full
-status=0
@@ -1,103 +0,0 @@
-QA output created by 100
-
-== Single request ==
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
-wrote 8192/8192 bytes at offset 0
-8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 4096/4096 bytes at offset 0
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== verify pattern ==
-read 4096/4096 bytes at offset 0
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 4096/4096 bytes at offset 4096
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== Sequential requests ==
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
-wrote 12288/12288 bytes at offset 0
-12 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 8192/8192 bytes at offset 0
-8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== verify pattern ==
-read 4096/4096 bytes at offset 0
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 4096/4096 bytes at offset 4096
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 4096/4096 bytes at offset 8192
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== Superset overlapping requests ==
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
-wrote 8192/8192 bytes at offset 0
-8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 6144/6144 bytes at offset 0
-6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== verify pattern ==
-read 1024/1024 bytes at offset 0
-1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 1024/1024 bytes at offset 3072
-1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 4096/4096 bytes at offset 4096
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== Subset overlapping requests ==
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
-wrote 8192/8192 bytes at offset 0
-8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 6144/6144 bytes at offset 1024
-6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== verify pattern ==
-read 1024/1024 bytes at offset 0
-1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 1024/1024 bytes at offset 3072
-1 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 4096/4096 bytes at offset 4096
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== Head overlapping requests ==
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
-wrote 8192/8192 bytes at offset 0
-8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 6144/6144 bytes at offset 0
-6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== verify pattern ==
-read 2048/2048 bytes at offset 2048
-2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 4096/4096 bytes at offset 4096
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== Tail overlapping requests ==
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
-wrote 8192/8192 bytes at offset 0
-8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 6144/6144 bytes at offset 2048
-6 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== verify pattern ==
-read 2048/2048 bytes at offset 0
-2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 4096/4096 bytes at offset 4096
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== Disjoint requests ==
-Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
-wrote 73728/73728 bytes at offset 0
-72 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-wrote 8192/8192 bytes at offset 0
-8 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-
-== verify pattern ==
-read 4096/4096 bytes at offset 0
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 61440/61440 bytes at offset 4096
-60 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 4096/4096 bytes at offset 65536
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-read 4096/4096 bytes at offset 69632
-4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
-*** done
@@ -248,14 +248,6 @@ sector = "%d"
         if failed_wr_ops > 0:
             highest_offset = max(highest_offset, bad_offset + 512)
 
-        for i in range(wr_merged):
-            first = i * wr_size * 2
-            second = first + wr_size
-            ops.append("multiwrite %d %d ; %d %d" %
-                       (first, wr_size, second, wr_size))
-
-        highest_offset = max(highest_offset, wr_merged * wr_size * 2)
-
         # Now perform all operations
         for op in ops:
             self.vm.hmp_qemu_io("drive0", op)
@@ -309,19 +301,15 @@ sector = "%d"
     def test_flush(self):
         self.do_test_stats(flush_ops = 8)
 
-    def test_merged(self):
-        for i in range(5):
-            self.do_test_stats(wr_merged = i * 3)
-
     def test_all(self):
         # rd_size, rd_ops, wr_size, wr_ops, flush_ops
         # invalid_rd_ops, invalid_wr_ops,
         # failed_rd_ops, failed_wr_ops
         # wr_merged
-        test_values = [[512, 1, 512, 1, 1, 4, 7, 5, 2, 1],
-                       [65536, 1, 2048, 12, 7, 7, 5, 2, 5, 5],
-                       [32768, 9, 8192, 1, 4, 3, 2, 4, 6, 4],
-                       [16384, 11, 3584, 16, 9, 8, 6, 7, 3, 4]]
+        test_values = [[512, 1, 512, 1, 1, 4, 7, 5, 2, 0],
+                       [65536, 1, 2048, 12, 7, 7, 5, 2, 5, 0],
+                       [32768, 9, 8192, 1, 4, 3, 2, 4, 6, 0],
+                       [16384, 11, 3584, 16, 9, 8, 6, 7, 3, 0]]
         for i in test_values:
            self.do_test_stats(*i)
 
@@ -1,5 +1,5 @@
-........................................
+...................................
 ----------------------------------------------------------------------
-Ran 40 tests
+Ran 35 tests
 
 OK
@@ -106,7 +106,7 @@
 097 rw auto backing
 098 rw auto backing quick
 099 rw auto quick
-100 rw auto quick
+# 100 was removed, do not reuse
 101 rw auto quick
 102 rw auto quick
 103 rw auto quick
@@ -62,8 +62,6 @@ bdrv_open_common(void *bs, const char *filename, int flags, const char *format_n
 bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d"
 
 # block/io.c
-multiwrite_cb(void *mcb, int ret) "mcb %p ret %d"
-bdrv_aio_multiwrite(void *mcb, int num_callbacks, int num_reqs) "mcb %p num_callbacks %d num_reqs %d"
 bdrv_aio_discard(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
 bdrv_aio_flush(void *bs, void *opaque) "bs %p opaque %p"
 bdrv_aio_readv(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"