mirror of
https://github.com/qemu/qemu.git
synced 2024-12-16 07:53:36 +08:00
d8fbf9aa85
blk_get_geometry() eventually calls bdrv_nb_sectors(), which is a
co_wrapper_mixed_bdrv_rdlock. This means that when it is called from
coroutine context, it already assume to have the graph locked.
However, virtio_blk_sect_range_ok() in block/export/virtio-blk-handler.c
(used by vhost-user-blk and VDUSE exports) runs in a coroutine, but
doesn't take the graph lock - blk_*() functions are generally expected
to do that internally. This causes an assertion failure when accessing
an export for the first time if it runs in an iothread.
This is an example of the crash:
$ ./storage-daemon/qemu-storage-daemon --object iothread,id=th0 --blockdev file,filename=/home/kwolf/images/hd.img,node-name=disk --export vhost-user-blk,addr.type=unix,addr.path=/tmp/vhost.sock,node-name=disk,id=exp0,iothread=th0
qemu-storage-daemon: ../block/graph-lock.c:268: void assert_bdrv_graph_readable(void): Assertion `qemu_in_main_thread() || reader_count()' failed.
(gdb) bt
#0 0x00007ffff6eafe5c in __pthread_kill_implementation () from /lib64/libc.so.6
#1 0x00007ffff6e5fa76 in raise () from /lib64/libc.so.6
#2 0x00007ffff6e497fc in abort () from /lib64/libc.so.6
#3 0x00007ffff6e4971b in __assert_fail_base.cold () from /lib64/libc.so.6
#4 0x00007ffff6e58656 in __assert_fail () from /lib64/libc.so.6
#5 0x00005555556337a3 in assert_bdrv_graph_readable () at ../block/graph-lock.c:268
#6 0x00005555555fd5a2 in bdrv_co_nb_sectors (bs=0x5555564c5ef0) at ../block.c:5847
#7 0x00005555555ee949 in bdrv_nb_sectors (bs=0x5555564c5ef0) at block/block-gen.c:256
#8 0x00005555555fd6b9 in bdrv_get_geometry (bs=0x5555564c5ef0, nb_sectors_ptr=0x7fffef7fedd0) at ../block.c:5884
#9 0x000055555562ad6d in blk_get_geometry (blk=0x5555564cb200, nb_sectors_ptr=0x7fffef7fedd0) at ../block/block-backend.c:1624
#10 0x00005555555ddb74 in virtio_blk_sect_range_ok (blk=0x5555564cb200, block_size=512, sector=0, size=512) at ../block/export/virtio-blk-handler.c:44
#11 0x00005555555dd80d in virtio_blk_process_req (handler=0x5555564cbb98, in_iov=0x7fffe8003830, out_iov=0x7fffe8003860, in_num=1, out_num=0) at ../block/export/virtio-blk-handler.c:189
#12 0x00005555555dd546 in vu_blk_virtio_process_req (opaque=0x7fffe8003800) at ../block/export/vhost-user-blk-server.c:66
#13 0x00005555557bf4a1 in coroutine_trampoline (i0=-402635264, i1=32767) at ../util/coroutine-ucontext.c:177
#14 0x00007ffff6e75c20 in ?? () from /lib64/libc.so.6
#15 0x00007fffefffa870 in ?? ()
#16 0x0000000000000000 in ?? ()
Fix this by creating a new blk_co_get_geometry() that takes the lock,
and changing blk_get_geometry() to be a co_wrapper_mixed around it.
To make the resulting code cleaner, virtio-blk-handler.c can directly
call the coroutine version now (though that wouldn't be necessary for
fixing the bug, taking the lock in blk_co_get_geometry() is what fixes
it).
Fixes: 8ab8140a04
Reported-by: Lukáš Doktor <ldoktor@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20230327113959.60071-1-kwolf@redhat.com>
Reviewed-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
242 lines
7.2 KiB
C
242 lines
7.2 KiB
C
/*
|
|
* Handler for virtio-blk I/O
|
|
*
|
|
* Copyright (c) 2020 Red Hat, Inc.
|
|
* Copyright (C) 2022 Bytedance Inc. and/or its affiliates. All rights reserved.
|
|
*
|
|
* Author:
|
|
* Coiby Xu <coiby.xu@gmail.com>
|
|
* Xie Yongji <xieyongji@bytedance.com>
|
|
*
|
|
* This work is licensed under the terms of the GNU GPL, version 2 or
|
|
* later. See the COPYING file in the top-level directory.
|
|
*/
|
|
|
|
#include "qemu/osdep.h"
|
|
#include "qemu/error-report.h"
|
|
#include "virtio-blk-handler.h"
|
|
|
|
#include "standard-headers/linux/virtio_blk.h"
|
|
|
|
/*
 * Request footer written back to the guest: holds the completion status
 * byte (VIRTIO_BLK_S_OK / _IOERR / _UNSUPP) for a processed request.
 */
struct virtio_blk_inhdr {
    unsigned char status;
};
|
|
|
|
static bool coroutine_fn
|
|
virtio_blk_sect_range_ok(BlockBackend *blk, uint32_t block_size,
|
|
uint64_t sector, size_t size)
|
|
{
|
|
uint64_t nb_sectors;
|
|
uint64_t total_sectors;
|
|
|
|
if (size % VIRTIO_BLK_SECTOR_SIZE) {
|
|
return false;
|
|
}
|
|
|
|
nb_sectors = size >> VIRTIO_BLK_SECTOR_BITS;
|
|
|
|
QEMU_BUILD_BUG_ON(BDRV_SECTOR_SIZE != VIRTIO_BLK_SECTOR_SIZE);
|
|
if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
|
|
return false;
|
|
}
|
|
if ((sector << VIRTIO_BLK_SECTOR_BITS) % block_size) {
|
|
return false;
|
|
}
|
|
blk_co_get_geometry(blk, &total_sectors);
|
|
if (sector > total_sectors || nb_sectors > total_sectors - sector) {
|
|
return false;
|
|
}
|
|
return true;
|
|
}
|
|
|
|
static int coroutine_fn
|
|
virtio_blk_discard_write_zeroes(VirtioBlkHandler *handler, struct iovec *iov,
|
|
uint32_t iovcnt, uint32_t type)
|
|
{
|
|
BlockBackend *blk = handler->blk;
|
|
struct virtio_blk_discard_write_zeroes desc;
|
|
ssize_t size;
|
|
uint64_t sector;
|
|
uint32_t num_sectors;
|
|
uint32_t max_sectors;
|
|
uint32_t flags;
|
|
int bytes;
|
|
|
|
/* Only one desc is currently supported */
|
|
if (unlikely(iov_size(iov, iovcnt) > sizeof(desc))) {
|
|
return VIRTIO_BLK_S_UNSUPP;
|
|
}
|
|
|
|
size = iov_to_buf(iov, iovcnt, 0, &desc, sizeof(desc));
|
|
if (unlikely(size != sizeof(desc))) {
|
|
error_report("Invalid size %zd, expected %zu", size, sizeof(desc));
|
|
return VIRTIO_BLK_S_IOERR;
|
|
}
|
|
|
|
sector = le64_to_cpu(desc.sector);
|
|
num_sectors = le32_to_cpu(desc.num_sectors);
|
|
flags = le32_to_cpu(desc.flags);
|
|
max_sectors = (type == VIRTIO_BLK_T_WRITE_ZEROES) ?
|
|
VIRTIO_BLK_MAX_WRITE_ZEROES_SECTORS :
|
|
VIRTIO_BLK_MAX_DISCARD_SECTORS;
|
|
|
|
/* This check ensures that 'bytes' fits in an int */
|
|
if (unlikely(num_sectors > max_sectors)) {
|
|
return VIRTIO_BLK_S_IOERR;
|
|
}
|
|
|
|
bytes = num_sectors << VIRTIO_BLK_SECTOR_BITS;
|
|
|
|
if (unlikely(!virtio_blk_sect_range_ok(blk, handler->logical_block_size,
|
|
sector, bytes))) {
|
|
return VIRTIO_BLK_S_IOERR;
|
|
}
|
|
|
|
/*
|
|
* The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for discard
|
|
* and write zeroes commands if any unknown flag is set.
|
|
*/
|
|
if (unlikely(flags & ~VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
|
|
return VIRTIO_BLK_S_UNSUPP;
|
|
}
|
|
|
|
if (type == VIRTIO_BLK_T_WRITE_ZEROES) {
|
|
int blk_flags = 0;
|
|
|
|
if (flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP) {
|
|
blk_flags |= BDRV_REQ_MAY_UNMAP;
|
|
}
|
|
|
|
if (blk_co_pwrite_zeroes(blk, sector << VIRTIO_BLK_SECTOR_BITS,
|
|
bytes, blk_flags) == 0) {
|
|
return VIRTIO_BLK_S_OK;
|
|
}
|
|
} else if (type == VIRTIO_BLK_T_DISCARD) {
|
|
/*
|
|
* The device MUST set the status byte to VIRTIO_BLK_S_UNSUPP for
|
|
* discard commands if the unmap flag is set.
|
|
*/
|
|
if (unlikely(flags & VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP)) {
|
|
return VIRTIO_BLK_S_UNSUPP;
|
|
}
|
|
|
|
if (blk_co_pdiscard(blk, sector << VIRTIO_BLK_SECTOR_BITS,
|
|
bytes) == 0) {
|
|
return VIRTIO_BLK_S_OK;
|
|
}
|
|
}
|
|
|
|
return VIRTIO_BLK_S_IOERR;
|
|
}
|
|
|
|
/*
 * Process one virtio-blk request.
 *
 * @in_iov/@in_num describe the device-writable buffers (data for reads,
 * plus the trailing status footer); @out_iov/@out_num describe the
 * driver-written buffers (the outhdr, plus data for writes).
 *
 * Returns the number of bytes of in_iov that may have been written
 * (including the status footer), or -EINVAL if the request is malformed.
 */
int coroutine_fn virtio_blk_process_req(VirtioBlkHandler *handler,
                                        struct iovec *in_iov,
                                        struct iovec *out_iov,
                                        unsigned int in_num,
                                        unsigned int out_num)
{
    BlockBackend *blk = handler->blk;
    struct virtio_blk_inhdr *in;
    struct virtio_blk_outhdr out;
    uint32_t type;
    int in_len;

    /* Every request needs at least an outhdr and a status footer */
    if (out_num < 1 || in_num < 1) {
        error_report("virtio-blk request missing headers");
        return -EINVAL;
    }

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &out,
                            sizeof(out)) != sizeof(out))) {
        error_report("virtio-blk request outhdr too short");
        return -EINVAL;
    }

    /* Strip the outhdr so out_iov covers only the write payload */
    iov_discard_front(&out_iov, &out_num, sizeof(out));

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        error_report("virtio-blk request inhdr too short");
        return -EINVAL;
    }

    /* We always touch the last byte, so just see how big in_iov is. */
    in_len = iov_size(in_iov, in_num);
    /*
     * Locate the status footer at the tail of the last in_iov segment.
     * This pointer must be taken before iov_discard_back() shrinks the
     * vector, and stays valid afterwards because the guest memory itself
     * is untouched.
     */
    in = (void *)in_iov[in_num - 1].iov_base
         + in_iov[in_num - 1].iov_len
         - sizeof(struct virtio_blk_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));

    type = le32_to_cpu(out.type);
    /* VIRTIO_BLK_T_BARRIER is legacy; ignore it when decoding the type */
    switch (type & ~VIRTIO_BLK_T_BARRIER) {
    case VIRTIO_BLK_T_IN:
    case VIRTIO_BLK_T_OUT: {
        QEMUIOVector qiov;
        int64_t offset;
        ssize_t ret = 0;
        bool is_write = type & VIRTIO_BLK_T_OUT;
        int64_t sector_num = le64_to_cpu(out.sector);

        /* Reject writes to a read-only export */
        if (is_write && !handler->writable) {
            in->status = VIRTIO_BLK_S_IOERR;
            break;
        }

        /* Writes read from out_iov; reads fill in_iov */
        if (is_write) {
            qemu_iovec_init_external(&qiov, out_iov, out_num);
        } else {
            qemu_iovec_init_external(&qiov, in_iov, in_num);
        }

        if (unlikely(!virtio_blk_sect_range_ok(blk,
                                               handler->logical_block_size,
                                               sector_num, qiov.size))) {
            in->status = VIRTIO_BLK_S_IOERR;
            break;
        }

        offset = sector_num << VIRTIO_BLK_SECTOR_BITS;

        if (is_write) {
            ret = blk_co_pwritev(blk, offset, qiov.size, &qiov, 0);
        } else {
            ret = blk_co_preadv(blk, offset, qiov.size, &qiov, 0);
        }
        if (ret >= 0) {
            in->status = VIRTIO_BLK_S_OK;
        } else {
            in->status = VIRTIO_BLK_S_IOERR;
        }
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        if (blk_co_flush(blk) == 0) {
            in->status = VIRTIO_BLK_S_OK;
        } else {
            in->status = VIRTIO_BLK_S_IOERR;
        }
        break;
    case VIRTIO_BLK_T_GET_ID: {
        /*
         * Copy the serial string (with its NUL) into the guest buffer,
         * truncated to both the buffer size and VIRTIO_BLK_ID_BYTES.
         */
        size_t size = MIN(strlen(handler->serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, handler->serial, size);
        in->status = VIRTIO_BLK_S_OK;
        break;
    }
    case VIRTIO_BLK_T_DISCARD:
    case VIRTIO_BLK_T_WRITE_ZEROES:
        if (!handler->writable) {
            in->status = VIRTIO_BLK_S_IOERR;
            break;
        }
        in->status = virtio_blk_discard_write_zeroes(handler, out_iov,
                                                     out_num, type);
        break;
    default:
        in->status = VIRTIO_BLK_S_UNSUPP;
        break;
    }

    return in_len;
}
|