block: Mark bdrv_skip_filters() and callers GRAPH_RDLOCK
This adds GRAPH_RDLOCK annotations to declare that callers of
bdrv_skip_filters() need to hold a reader lock for the graph because it
calls bdrv_filter_child(), which accesses bs->file/backing.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20231027155333.420094-9-kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
commit ad74751fc0
parent 430da832af
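For orientation, the pattern applied throughout the patch looks roughly like the sketch below. The example_* function names are hypothetical and not part of the patch; GRAPH_RDLOCK, GRAPH_RDLOCK_GUARD_MAINLOOP(), bdrv_graph_rdlock_main_loop() and bdrv_graph_rdunlock_main_loop() are the annotation and lock helpers that appear in the hunks that follow. A GRAPH_RDLOCK callee may only run with the graph reader lock held; main-loop callers satisfy that either with the scoped guard or with an explicit lock/unlock pair.

/* Minimal sketch, not from the patch: the example_* names are hypothetical. */

#include "qemu/osdep.h"
#include "block/block_int.h"

/* Callee: may only be called with the graph reader lock held. */
static BlockDriverState * GRAPH_RDLOCK
example_next_data_bs(BlockDriverState *bs)
{
    /* bdrv_skip_filters() is GRAPH_RDLOCK after this patch */
    return bdrv_skip_filters(bs);
}

/* Main-loop caller, variant 1: scoped guard for the whole function. */
static void example_query(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();  /* released again when the scope ends */

    example_next_data_bs(bs);
}

/* Main-loop caller, variant 2: explicit lock/unlock around the calls. */
static void example_query_explicit(BlockDriverState *bs)
{
    bdrv_graph_rdlock_main_loop();
    example_next_data_bs(bs);
    bdrv_graph_rdunlock_main_loop();
}

Coroutine callers use WITH_GRAPH_RDLOCK_GUARD() instead; a sketch of that variant follows the block/stream.c hunks below.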
block/block-backend.c
@@ -2730,6 +2730,7 @@ int blk_commit_all(void)
 {
     BlockBackend *blk = NULL;
     GLOBAL_STATE_CODE();
+    GRAPH_RDLOCK_GUARD_MAINLOOP();
 
     while ((blk = blk_all_next(blk)) != NULL) {
         AioContext *aio_context = blk_get_aio_context(blk);
block/block-copy.c
@@ -313,7 +313,12 @@ static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
 {
     int ret;
     BlockDriverInfo bdi;
-    bool target_does_cow = bdrv_backing_chain_next(target);
+    bool target_does_cow;
+
+    GLOBAL_STATE_CODE();
+    GRAPH_RDLOCK_GUARD_MAINLOOP();
+
+    target_does_cow = bdrv_backing_chain_next(target);
 
     /*
      * If there is no backing file on the target, we cannot rely on COW if our
@@ -355,6 +360,8 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
     BdrvDirtyBitmap *copy_bitmap;
     bool is_fleecing;
 
+    GLOBAL_STATE_CODE();
+
     cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
     if (cluster_size < 0) {
         return NULL;
block/commit.c
@@ -255,10 +255,13 @@ void commit_start(const char *job_id, BlockDriverState *bs,
     GLOBAL_STATE_CODE();
 
     assert(top != bs);
+    bdrv_graph_rdlock_main_loop();
     if (bdrv_skip_filters(top) == bdrv_skip_filters(base)) {
         error_setg(errp, "Invalid files for merge: top and base are the same");
+        bdrv_graph_rdunlock_main_loop();
         return;
     }
+    bdrv_graph_rdunlock_main_loop();
 
     base_size = bdrv_getlength(base);
     if (base_size < 0) {
@@ -324,6 +327,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
      * this is the responsibility of the interface (i.e. whoever calls
      * commit_start()).
      */
+    bdrv_graph_wrlock(top);
     s->base_overlay = bdrv_find_overlay(top, base);
     assert(s->base_overlay);
 
@@ -342,7 +346,6 @@ void commit_start(const char *job_id, BlockDriverState *bs,
      */
     iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;
 
-    bdrv_graph_wrlock(top);
     for (iter = top; iter != base; iter = bdrv_filter_or_cow_bs(iter)) {
         if (iter == filtered_base) {
             /*
block/mirror.c
@@ -714,7 +714,6 @@ static int mirror_exit_common(Job *job)
     bdrv_graph_rdlock_main_loop();
     bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                              &error_abort);
-    bdrv_graph_rdunlock_main_loop();
 
     if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
         BlockDriverState *backing = s->is_none_mode ? src : s->base;
@@ -737,6 +736,7 @@ static int mirror_exit_common(Job *job)
             local_err = NULL;
         }
     }
+    bdrv_graph_rdunlock_main_loop();
 
     if (s->to_replace) {
         replace_aio_context = bdrv_get_aio_context(s->to_replace);
@@ -992,13 +992,13 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
     } else {
         s->target_cluster_size = BDRV_SECTOR_SIZE;
     }
-    bdrv_graph_co_rdunlock();
     if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
         s->granularity < s->target_cluster_size) {
         s->buf_size = MAX(s->buf_size, s->target_cluster_size);
         s->cow_bitmap = bitmap_new(length);
     }
     s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);
+    bdrv_graph_co_rdunlock();
 
     s->buf = qemu_try_blockalign(bs, s->buf_size);
     if (s->buf == NULL) {
@@ -1744,12 +1744,15 @@ static BlockJob *mirror_start_job(
         buf_size = DEFAULT_MIRROR_BUF_SIZE;
     }
 
+    bdrv_graph_rdlock_main_loop();
     if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
         error_setg(errp, "Can't mirror node into itself");
+        bdrv_graph_rdunlock_main_loop();
         return NULL;
     }
 
     target_is_backing = bdrv_chain_contains(bs, target);
+    bdrv_graph_rdunlock_main_loop();
 
     /* In the case of active commit, add dummy driver to provide consistent
      * reads on the top, while disabling it in the intermediate nodes, and make
@@ -1832,14 +1835,19 @@ static BlockJob *mirror_start_job(
         }
 
         target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
-    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
-        /*
-         * We may want to allow this in the future, but it would
-         * require taking some extra care.
-         */
-        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
-                   "source's backing chain");
-        goto fail;
+    } else {
+        bdrv_graph_rdlock_main_loop();
+        if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
+            /*
+             * We may want to allow this in the future, but it would
+             * require taking some extra care.
+             */
+            error_setg(errp, "Cannot mirror to a filter on top of a node in "
+                       "the source's backing chain");
+            bdrv_graph_rdunlock_main_loop();
+            goto fail;
+        }
+        bdrv_graph_rdunlock_main_loop();
     }
 
     s->target = blk_new(s->common.job.aio_context,
@@ -1860,6 +1868,7 @@ static BlockJob *mirror_start_job(
     blk_set_allow_aio_context_change(s->target, true);
     blk_set_disable_request_queuing(s->target, true);
 
+    bdrv_graph_rdlock_main_loop();
     s->replaces = g_strdup(replaces);
     s->on_source_error = on_source_error;
     s->on_target_error = on_target_error;
@@ -1875,6 +1884,7 @@ static BlockJob *mirror_start_job(
     if (auto_complete) {
         s->should_complete = true;
     }
+    bdrv_graph_rdunlock_main_loop();
 
     s->dirty_bitmap = bdrv_create_dirty_bitmap(s->mirror_top_bs, granularity,
                                                NULL, errp);
@@ -2007,8 +2017,12 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
                    MirrorSyncMode_str(mode));
         return;
     }
+
+    bdrv_graph_rdlock_main_loop();
     is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
     base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
+    bdrv_graph_rdunlock_main_loop();
+
     mirror_start_job(job_id, bs, creation_flags, target, replaces,
                      speed, granularity, buf_size, backing_mode, zero_target,
                      on_source_error, on_target_error, unmap, NULL, NULL,
block/stream.c
@@ -53,8 +53,8 @@ static int coroutine_fn stream_populate(BlockBackend *blk,
 static int stream_prepare(Job *job)
 {
     StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
-    BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs);
-    BlockDriverState *unfiltered_bs_cow = bdrv_cow_bs(unfiltered_bs);
+    BlockDriverState *unfiltered_bs;
+    BlockDriverState *unfiltered_bs_cow;
     BlockDriverState *base;
     BlockDriverState *unfiltered_base;
     Error *local_err = NULL;
@@ -62,6 +62,11 @@ static int stream_prepare(Job *job)
 
     GLOBAL_STATE_CODE();
 
+    bdrv_graph_rdlock_main_loop();
+    unfiltered_bs = bdrv_skip_filters(s->target_bs);
+    unfiltered_bs_cow = bdrv_cow_bs(unfiltered_bs);
+    bdrv_graph_rdunlock_main_loop();
+
     /* We should drop filter at this point, as filter hold the backing chain */
     bdrv_cor_filter_drop(s->cor_filter_bs);
     s->cor_filter_bs = NULL;
@@ -142,18 +147,19 @@ static void stream_clean(Job *job)
 static int coroutine_fn stream_run(Job *job, Error **errp)
 {
     StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
-    BlockDriverState *unfiltered_bs = bdrv_skip_filters(s->target_bs);
+    BlockDriverState *unfiltered_bs;
     int64_t len;
     int64_t offset = 0;
     int error = 0;
     int64_t n = 0; /* bytes */
 
-    if (unfiltered_bs == s->base_overlay) {
-        /* Nothing to stream */
-        return 0;
-    }
-
     WITH_GRAPH_RDLOCK_GUARD() {
+        unfiltered_bs = bdrv_skip_filters(s->target_bs);
+        if (unfiltered_bs == s->base_overlay) {
+            /* Nothing to stream */
+            return 0;
+        }
+
         len = bdrv_co_getlength(s->target_bs);
         if (len < 0) {
             return len;
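In coroutine context, as in the stream_run() hunk above, the requirement is met with a scoped WITH_GRAPH_RDLOCK_GUARD() block rather than the main-loop helpers. A minimal sketch, with a hypothetical example_stream_step() standing in for the job code:

/* Sketch only: example_stream_step() is hypothetical. */
static int64_t coroutine_fn example_stream_step(StreamBlockJob *s)
{
    BlockDriverState *unfiltered_bs;
    int64_t len = 0;

    WITH_GRAPH_RDLOCK_GUARD() {
        /* Graph accesses happen while this block holds the reader lock. */
        unfiltered_bs = bdrv_skip_filters(s->target_bs);
        if (unfiltered_bs == s->base_overlay) {
            return 0;   /* nothing to stream; returning releases the lock */
        }
        len = bdrv_co_getlength(s->target_bs);
    }

    return len;
}

Returning from inside the guarded block releases the lock again, which is what allows the "Nothing to stream" early return to stay inside the guard in the actual hunk.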
blockdev.c
@@ -1710,7 +1710,6 @@ static void drive_backup_action(DriveBackup *backup,
         bdrv_graph_rdunlock_main_loop();
         goto out;
     }
-    bdrv_graph_rdunlock_main_loop();
 
     flags = bs->open_flags | BDRV_O_RDWR;
 
@@ -1735,6 +1734,7 @@ static void drive_backup_action(DriveBackup *backup,
         flags |= BDRV_O_NO_BACKING;
         set_backing_hd = true;
     }
+    bdrv_graph_rdunlock_main_loop();
 
     size = bdrv_getlength(bs);
     if (size < 0) {
@@ -3054,7 +3054,6 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
         bdrv_graph_rdunlock_main_loop();
         return;
     }
-    bdrv_graph_rdunlock_main_loop();
 
     aio_context = bdrv_get_aio_context(bs);
     aio_context_acquire(aio_context);
@@ -3076,6 +3075,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
     if (arg->sync == MIRROR_SYNC_MODE_NONE) {
         target_backing_bs = bs;
     }
+    bdrv_graph_rdunlock_main_loop();
 
     size = bdrv_getlength(bs);
     if (size < 0) {
@@ -3450,15 +3450,16 @@ void qmp_change_backing_file(const char *device,
         goto out;
     }
 
+    bdrv_graph_rdlock_main_loop();
     if (bdrv_find_base(image_bs) == image_bs) {
         error_setg(errp, "not allowing backing file change on an image "
                    "without a backing file");
+        bdrv_graph_rdunlock_main_loop();
         goto out;
     }
 
     /* even though we are not necessarily operating on bs, we need it to
      * determine if block ops are currently prohibited on the chain */
-    bdrv_graph_rdlock_main_loop();
     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) {
         bdrv_graph_rdunlock_main_loop();
         goto out;
include/block/block_int-global-state.h
@@ -144,9 +144,11 @@ int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file,
 void bdrv_register(BlockDriver *bdrv);
 int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
                            const char *backing_file_str);
-BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
-                                    BlockDriverState *bs);
-BlockDriverState *bdrv_find_base(BlockDriverState *bs);
+
+BlockDriverState * GRAPH_RDLOCK
+bdrv_find_overlay(BlockDriverState *active, BlockDriverState *bs);
+
+BlockDriverState * GRAPH_RDLOCK bdrv_find_base(BlockDriverState *bs);
 bool bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base,
                                   Error **errp);
 int bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base,
include/block/block_int-io.h
@@ -134,8 +134,8 @@ BdrvChild *bdrv_cow_child(BlockDriverState *bs);
 BdrvChild *bdrv_filter_child(BlockDriverState *bs);
 BdrvChild *bdrv_filter_or_cow_child(BlockDriverState *bs);
 BdrvChild * GRAPH_RDLOCK bdrv_primary_child(BlockDriverState *bs);
-BlockDriverState *bdrv_skip_filters(BlockDriverState *bs);
-BlockDriverState *bdrv_backing_chain_next(BlockDriverState *bs);
+BlockDriverState * GRAPH_RDLOCK bdrv_skip_filters(BlockDriverState *bs);
+BlockDriverState * GRAPH_RDLOCK bdrv_backing_chain_next(BlockDriverState *bs);
 
 static inline BlockDriverState *bdrv_cow_bs(BlockDriverState *bs)
 {
qemu-img.c
@@ -1050,12 +1050,14 @@ static int img_commit(int argc, char **argv)
     qemu_progress_init(progress, 1.f);
     qemu_progress_print(0.f, 100);
 
+    bdrv_graph_rdlock_main_loop();
     if (base) {
         base_bs = bdrv_find_backing_image(bs, base);
         if (!base_bs) {
             error_setg(&local_err,
                        "Did not find '%s' in the backing chain of '%s'",
                        base, filename);
+            bdrv_graph_rdunlock_main_loop();
             goto done;
         }
     } else {
@@ -1065,9 +1067,11 @@ static int img_commit(int argc, char **argv)
         base_bs = bdrv_backing_chain_next(bs);
         if (!base_bs) {
             error_setg(&local_err, "Image does not have a backing file");
+            bdrv_graph_rdunlock_main_loop();
             goto done;
         }
     }
+    bdrv_graph_rdunlock_main_loop();
 
     cbi = (CommonBlockJobCBInfo){
         .errp = &local_err,
@@ -1713,7 +1717,8 @@ static void convert_select_part(ImgConvertState *s, int64_t sector_num,
     }
 }
 
-static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
+static int coroutine_mixed_fn GRAPH_RDLOCK
+convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
 {
     int64_t src_cur_offset;
     int ret, n, src_cur;
@@ -2115,7 +2120,9 @@ static int convert_do_copy(ImgConvertState *s)
     }
 
     while (sector_num < s->total_sectors) {
+        bdrv_graph_rdlock_main_loop();
         n = convert_iteration_sectors(s, sector_num);
+        bdrv_graph_rdunlock_main_loop();
         if (n < 0) {
             return n;
         }
@@ -2757,8 +2764,10 @@ static int img_convert(int argc, char **argv)
              * s.target_backing_sectors has to be negative, which it will
              * be automatically). The backing file length is used only
              * for optimizations, so such a case is not fatal. */
+            bdrv_graph_rdlock_main_loop();
             s.target_backing_sectors =
                 bdrv_nb_sectors(bdrv_backing_chain_next(out_bs));
+            bdrv_graph_rdunlock_main_loop();
         } else {
             s.target_backing_sectors = -1;
         }
@@ -3145,6 +3154,9 @@ static int get_block_status(BlockDriverState *bs, int64_t offset,
     int64_t map;
     char *filename = NULL;
 
+    GLOBAL_STATE_CODE();
+    GRAPH_RDLOCK_GUARD_MAINLOOP();
+
     /* As an optimization, we could cache the current range of unallocated
      * clusters in each file of the chain, and avoid querying the same
      * range repeatedly.
@@ -3173,9 +3185,7 @@ static int get_block_status(BlockDriverState *bs, int64_t offset,
     has_offset = !!(ret & BDRV_BLOCK_OFFSET_VALID);
 
     if (file && has_offset) {
-        bdrv_graph_rdlock_main_loop();
         bdrv_refresh_filename(file);
-        bdrv_graph_rdunlock_main_loop();
         filename = file->filename;
     }
 
@@ -3663,7 +3673,9 @@ static int img_rebase(int argc, char **argv)
     }
     bs = blk_bs(blk);
 
+    bdrv_graph_rdlock_main_loop();
     unfiltered_bs = bdrv_skip_filters(bs);
+    bdrv_graph_rdunlock_main_loop();
 
     if (compress && !block_driver_can_compress(unfiltered_bs->drv)) {
         error_report("Compression not supported for this file format");