block-copy: move fleecing-scheme detection to block-copy

We want to simplify the initialization interface of the copy-before-write
filter, as we are going to make it public. So, let's detect the fleecing
scheme directly in the block-copy code instead of passing this information
through extra levels.

Why not just set BDRV_REQ_SERIALISING unconditionally? Because we are going
to implement a new, more efficient fleecing scheme that will not rely on the
backing feature.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Hanna Reitz <hreitz@redhat.com>
Message-Id: <20210824083856.17408-7-vsementsov@virtuozzo.com>
Signed-off-by: Hanna Reitz <hreitz@redhat.com>
Author:    Vladimir Sementsov-Ogievskiy, 2021-08-24 11:38:28 +03:00
Committer: Hanna Reitz
Commit:    49577723d4 (parent: d003e0aece)
5 changed files with 26 additions and 28 deletions
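
To make the change concrete, here is a minimal sketch of the flag derivation that now lives inside block-copy. The helper name block_copy_write_flags() is hypothetical and introduced only for illustration; in the patch the logic is inlined in block_copy_state_new(), as the block/block-copy.c hunks below show.

    /* Illustrative only: how block-copy now derives its write flags. */
    static BdrvRequestFlags block_copy_write_flags(BlockDriverState *source,
                                                   BlockDriverState *target,
                                                   bool compress)
    {
        /* Fleecing: the source sits in the backing chain of the target. */
        bool is_fleecing = bdrv_chain_contains(target, source);

        return (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
               (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
    }

Callers such as backup_job_create() consequently stop computing BdrvRequestFlags themselves and only pass the compress boolean down through bdrv_cbw_append().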

block/backup.c

@@ -407,7 +407,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     int64_t len, target_len;
     BackupBlockJob *job = NULL;
     int64_t cluster_size;
-    BdrvRequestFlags write_flags;
     BlockDriverState *cbw = NULL;
     BlockCopyState *bcs = NULL;
@@ -504,26 +503,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
         goto error;
     }
 
-    /*
-     * If source is in backing chain of target assume that target is going to be
-     * used for "image fleecing", i.e. it should represent a kind of snapshot of
-     * source at backup-start point in time. And target is going to be read by
-     * somebody (for example, used as NBD export) during backup job.
-     *
-     * In this case, we need to add BDRV_REQ_SERIALISING write flag to avoid
-     * intersection of backup writes and third party reads from target,
-     * otherwise reading from target we may occasionally read already updated by
-     * guest data.
-     *
-     * For more information see commit f8d59dfb40bb and test
-     * tests/qemu-iotests/222
-     */
-    write_flags = (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
-                  (compress ? BDRV_REQ_WRITE_COMPRESSED : 0),
-
     cbw = bdrv_cbw_append(bs, target, filter_node_name,
-                          cluster_size, perf,
-                          write_flags, &bcs, errp);
+                          cluster_size, perf, compress, &bcs, errp);
     if (!cbw) {
         goto error;
     }

block/block-copy.c

@@ -317,10 +317,11 @@ static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
 
 BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                      int64_t cluster_size, bool use_copy_range,
-                                     BdrvRequestFlags write_flags, Error **errp)
+                                     bool compress, Error **errp)
 {
     BlockCopyState *s;
     BdrvDirtyBitmap *copy_bitmap;
+    bool is_fleecing;
 
     copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                            errp);
@@ -329,6 +330,22 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
     }
     bdrv_disable_dirty_bitmap(copy_bitmap);
 
+    /*
+     * If source is in backing chain of target assume that target is going to be
+     * used for "image fleecing", i.e. it should represent a kind of snapshot of
+     * source at backup-start point in time. And target is going to be read by
+     * somebody (for example, used as NBD export) during backup job.
+     *
+     * In this case, we need to add BDRV_REQ_SERIALISING write flag to avoid
+     * intersection of backup writes and third party reads from target,
+     * otherwise reading from target we may occasionally read already updated by
+     * guest data.
+     *
+     * For more information see commit f8d59dfb40bb and test
+     * tests/qemu-iotests/222
+     */
+    is_fleecing = bdrv_chain_contains(target->bs, source->bs);
+
     s = g_new(BlockCopyState, 1);
     *s = (BlockCopyState) {
         .source = source,
@@ -336,7 +353,8 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
         .copy_bitmap = copy_bitmap,
         .cluster_size = cluster_size,
         .len = bdrv_dirty_bitmap_size(copy_bitmap),
-        .write_flags = write_flags,
+        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0) |
+                       (compress ? BDRV_REQ_WRITE_COMPRESSED : 0),
         .mem = shres_create(BLOCK_COPY_MAX_MEM),
         .max_transfer = QEMU_ALIGN_DOWN(
                 block_copy_max_transfer(source, target),
@@ -351,7 +369,7 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
          * behalf).
          */
         s->method = COPY_READ_WRITE_CLUSTER;
-    } else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) {
+    } else if (compress) {
         /* Compression supports only cluster-size writes and no copy-range. */
         s->method = COPY_READ_WRITE_CLUSTER;
     } else {
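
For context on the fleecing comment and check above: in the classic fleecing scheme the temporary target image is opened with the active disk as its backing file and exported (for example over NBD) while the backup runs, so the source node is reachable through the target's backing chain. The snippet below is illustrative only (not part of the patch) and assumes such a graph:

    /*
     * Hypothetical fleecing graph:
     *
     *     guest writes -> source (active disk)
     *                        ^
     *                        | backing
     *                     target (temporary image, read by NBD clients)
     */
    assert(bdrv_chain_contains(target, source));   /* is_fleecing == true */
    assert(!bdrv_chain_contains(source, target));  /* the reverse does not hold */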

block/copy-before-write.c

@@ -171,7 +171,7 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
                                   const char *filter_node_name,
                                   uint64_t cluster_size,
                                   BackupPerf *perf,
-                                  BdrvRequestFlags write_flags,
+                                  bool compress,
                                   BlockCopyState **bcs,
                                   Error **errp)
 {
@@ -218,7 +218,7 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
     state->cluster_size = cluster_size;
     state->bcs = block_copy_state_new(top->backing, state->target,
                                       cluster_size, perf->use_copy_range,
-                                      write_flags, errp);
+                                      compress, errp);
     if (!state->bcs) {
         error_prepend(errp, "Cannot create block-copy-state: ");
         goto fail;

block/copy-before-write.h

@@ -34,7 +34,7 @@ BlockDriverState *bdrv_cbw_append(BlockDriverState *source,
                                   const char *filter_node_name,
                                   uint64_t cluster_size,
                                   BackupPerf *perf,
-                                  BdrvRequestFlags write_flags,
+                                  bool compress,
                                   BlockCopyState **bcs,
                                   Error **errp);
 void bdrv_cbw_drop(BlockDriverState *bs);

include/block/block-copy.h

@@ -26,8 +26,7 @@ typedef struct BlockCopyCallState BlockCopyCallState;
 
 BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                      int64_t cluster_size, bool use_copy_range,
-                                     BdrvRequestFlags write_flags,
-                                     Error **errp);
+                                     bool compress, Error **errp);
 
 void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm);
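
For callers of this updated prototype the conversion is mechanical; the before/after below mirrors the block/copy-before-write.c hunk above (variable names as in that hunk, shown here only for illustration):

    /* Before: the caller computed and passed BdrvRequestFlags itself. */
    state->bcs = block_copy_state_new(top->backing, state->target,
                                      cluster_size, perf->use_copy_range,
                                      write_flags, errp);

    /* After: the caller only states whether writes should be compressed;
     * block-copy derives BDRV_REQ_SERIALISING / BDRV_REQ_WRITE_COMPRESSED
     * internally. */
    state->bcs = block_copy_state_new(top->backing, state->target,
                                      cluster_size, perf->use_copy_range,
                                      compress, errp);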