block: Acquire AioContexts during bdrv_reopen_multiple()
As the BlockReopenQueue can contain nodes in multiple AioContexts, only one of which may be locked when AIO_WAIT_WHILE() can be called, we can't let the caller lock the right contexts. Instead, individually lock the AioContext of a single node when iterating the queue. Reintroduce bdrv_reopen() as a wrapper for reopening a single node that drains the node and temporarily drops the AioContext lock for bdrv_reopen_multiple(). Signed-off-by: Kevin Wolf <kwolf@redhat.com> Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Message-Id: <20210708114709.206487-4-kwolf@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent ab5b522879
commit 6cf42ca2f9

block.c: 51 lines changed
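In short, bdrv_reopen_multiple() now does its own per-node AioContext locking, so a caller only has to drain the affected nodes and must not hold any other AioContext lock. A minimal caller-side sketch, modelled on the bdrv_reopen_set_read_only() change in the diff below (the helper name make_node_read_only() is made up for illustration, and the snippet assumes the usual QEMU in-tree headers):

    #include "qemu/osdep.h"
    #include "block/block.h"
    #include "qapi/qmp/qdict.h"

    /*
     * Illustrative helper (not part of the patch): flip a node to
     * read-only via the reintroduced bdrv_reopen() wrapper.
     */
    static int make_node_read_only(BlockDriverState *bs, Error **errp)
    {
        QDict *opts = qdict_new();

        qdict_put_bool(opts, BDRV_OPT_READ_ONLY, true);

        /*
         * bdrv_reopen() must run in the main thread with no other
         * AioContext locks held; it drains bs, drops bs's AioContext
         * lock around bdrv_reopen_multiple(), and re-acquires it
         * before returning.
         */
        return bdrv_reopen(bs, opts, true, errp);
    }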
block.c
@@ -4124,19 +4124,26 @@ void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue)
  *
  * All affected nodes must be drained between bdrv_reopen_queue() and
  * bdrv_reopen_multiple().
+ *
+ * To be called from the main thread, with all other AioContexts unlocked.
  */
 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
 {
     int ret = -1;
     BlockReopenQueueEntry *bs_entry, *next;
+    AioContext *ctx;
     Transaction *tran = tran_new();
     g_autoptr(GHashTable) found = NULL;
     g_autoptr(GSList) refresh_list = NULL;
 
+    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
     assert(bs_queue != NULL);
 
     QTAILQ_FOREACH(bs_entry, bs_queue, entry) {
+        ctx = bdrv_get_aio_context(bs_entry->state.bs);
+        aio_context_acquire(ctx);
         ret = bdrv_flush(bs_entry->state.bs);
+        aio_context_release(ctx);
         if (ret < 0) {
             error_setg_errno(errp, -ret, "Error flushing drive");
             goto abort;
@@ -4145,7 +4152,10 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
 
     QTAILQ_FOREACH(bs_entry, bs_queue, entry) {
         assert(bs_entry->state.bs->quiesce_counter > 0);
+        ctx = bdrv_get_aio_context(bs_entry->state.bs);
+        aio_context_acquire(ctx);
         ret = bdrv_reopen_prepare(&bs_entry->state, bs_queue, tran, errp);
+        aio_context_release(ctx);
         if (ret < 0) {
             goto abort;
         }
@@ -4188,7 +4198,10 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
      * to first element.
      */
     QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) {
+        ctx = bdrv_get_aio_context(bs_entry->state.bs);
+        aio_context_acquire(ctx);
         bdrv_reopen_commit(&bs_entry->state);
+        aio_context_release(ctx);
     }
 
     tran_commit(tran);
@@ -4197,7 +4210,10 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
         BlockDriverState *bs = bs_entry->state.bs;
 
         if (bs->drv->bdrv_reopen_commit_post) {
+            ctx = bdrv_get_aio_context(bs);
+            aio_context_acquire(ctx);
             bs->drv->bdrv_reopen_commit_post(&bs_entry->state);
+            aio_context_release(ctx);
         }
     }
 
@@ -4208,7 +4224,10 @@ abort:
     tran_abort(tran);
     QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
         if (bs_entry->prepared) {
+            ctx = bdrv_get_aio_context(bs_entry->state.bs);
+            aio_context_acquire(ctx);
             bdrv_reopen_abort(&bs_entry->state);
+            aio_context_release(ctx);
         }
     }
 
@@ -4218,21 +4237,37 @@ cleanup:
     return ret;
 }
 
+int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts,
+                Error **errp)
+{
+    AioContext *ctx = bdrv_get_aio_context(bs);
+    BlockReopenQueue *queue;
+    int ret;
+
+    bdrv_subtree_drained_begin(bs);
+    if (ctx != qemu_get_aio_context()) {
+        aio_context_release(ctx);
+    }
+
+    queue = bdrv_reopen_queue(NULL, bs, opts, keep_old_opts);
+    ret = bdrv_reopen_multiple(queue, errp);
+
+    if (ctx != qemu_get_aio_context()) {
+        aio_context_acquire(ctx);
+    }
+    bdrv_subtree_drained_end(bs);
+
+    return ret;
+}
+
 int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
                               Error **errp)
 {
-    int ret;
-    BlockReopenQueue *queue;
     QDict *opts = qdict_new();
 
     qdict_put_bool(opts, BDRV_OPT_READ_ONLY, read_only);
 
-    bdrv_subtree_drained_begin(bs);
-    queue = bdrv_reopen_queue(NULL, bs, opts, true);
-    ret = bdrv_reopen_multiple(queue, errp);
-    bdrv_subtree_drained_end(bs);
-
-    return ret;
+    return bdrv_reopen(bs, opts, true, errp);
 }
 
 /*
block/replication.c
@@ -390,7 +390,14 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable,
     }
 
     if (reopen_queue) {
+        AioContext *ctx = bdrv_get_aio_context(bs);
+        if (ctx != qemu_get_aio_context()) {
+            aio_context_release(ctx);
+        }
         bdrv_reopen_multiple(reopen_queue, errp);
+        if (ctx != qemu_get_aio_context()) {
+            aio_context_acquire(ctx);
+        }
     }
 
     bdrv_subtree_drained_end(s->hidden_disk->bs);
blockdev.c
@@ -3592,8 +3592,13 @@ void qmp_x_blockdev_reopen(BlockdevOptions *options, Error **errp)
     ctx = bdrv_get_aio_context(bs);
     aio_context_acquire(ctx);
     bdrv_subtree_drained_begin(bs);
+    aio_context_release(ctx);
+
     queue = bdrv_reopen_queue(NULL, bs, qdict, false);
     bdrv_reopen_multiple(queue, errp);
+
+    ctx = bdrv_get_aio_context(bs);
+    aio_context_acquire(ctx);
     bdrv_subtree_drained_end(bs);
     aio_context_release(ctx);
 
include/block/block.h
@@ -388,6 +388,8 @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                     bool keep_old_opts);
 void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue);
 int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
+int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts,
+                Error **errp);
 int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
                               Error **errp);
 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
qemu-io-cmds.c
@@ -2116,8 +2116,6 @@ static int reopen_f(BlockBackend *blk, int argc, char **argv)
     bool writethrough = !blk_enable_write_cache(blk);
     bool has_rw_option = false;
     bool has_cache_option = false;
-
-    BlockReopenQueue *brq;
     Error *local_err = NULL;
 
     while ((c = getopt(argc, argv, "c:o:rw")) != -1) {
@@ -2210,10 +2208,7 @@ static int reopen_f(BlockBackend *blk, int argc, char **argv)
         qdict_put_bool(opts, BDRV_OPT_CACHE_NO_FLUSH, flags & BDRV_O_NO_FLUSH);
     }
 
-    bdrv_subtree_drained_begin(bs);
-    brq = bdrv_reopen_queue(NULL, bs, opts, true);
-    bdrv_reopen_multiple(brq, &local_err);
-    bdrv_subtree_drained_end(bs);
+    bdrv_reopen(bs, opts, true, &local_err);
 
     if (local_err) {
         error_report_err(local_err);