block: make bdrv_co_do_write_zeroes stricter in producing aligned requests
Right now, bdrv_co_do_write_zeroes will only try to align the beginning of
the request.  However, it is simpler for many formats to expect the block
layer to separate both the head *and* the tail.  This makes sure that the
format's bdrv_co_write_zeroes function will be called with aligned
sector_num and nb_sectors for the bulk of the request.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Peter Lieven <pl@kamp.de>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
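As an illustration of the splitting behaviour described above, here is a
minimal, self-contained sketch.  It is not QEMU code: the helper name
next_zero_chunk and its plain int64_t arguments are made up for this
example, and the max_write_zeroes clamp that the real function also applies
is omitted.  It clamps each iteration so that an unaligned head and an
unaligned tail are carved off as small requests, leaving the bulk aligned:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the per-iteration clamping the patch adds:
 * given the current offset and remaining length (in sectors) plus the
 * driver's write_zeroes alignment, return how many sectors the next
 * request should cover so that the "bulk" request stays aligned. */
static int64_t next_zero_chunk(int64_t sector_num, int64_t nb_sectors,
                               int64_t alignment)
{
    int64_t num = nb_sectors;

    if (alignment && num > alignment) {
        if (sector_num % alignment != 0) {
            /* Head: a small request up to the first aligned sector. */
            num = alignment - sector_num % alignment;
        } else if ((sector_num + num) % alignment != 0) {
            /* Tail: shorten the request to end on an aligned sector. */
            num -= (sector_num + num) % alignment;
        }
    }
    return num;
}

int main(void)
{
    /* Example: zero 70 sectors starting at sector 5 with an alignment of 8. */
    int64_t sector_num = 5, nb_sectors = 70, alignment = 8;

    while (nb_sectors > 0) {
        int64_t num = next_zero_chunk(sector_num, nb_sectors, alignment);
        printf("request: sector %lld, %lld sectors\n",
               (long long)sector_num, (long long)num);
        sector_num += num;
        nb_sectors -= num;
    }
    return 0;
}

Running this for the example request prints three pieces of 3, 64 and 3
sectors: only the head and tail are unaligned, and the 64-sector bulk is
passed to the driver with aligned sector_num and nb_sectors.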
This commit is contained in:
parent 7ce21016b6
commit b8d71c09f3

block.c (35 lines changed)
@@ -2771,14 +2771,21 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
     while (nb_sectors > 0 && !ret) {
         int num = nb_sectors;
 
-        /* align request */
-        if (bs->bl.write_zeroes_alignment &&
-            num >= bs->bl.write_zeroes_alignment &&
-            sector_num % bs->bl.write_zeroes_alignment) {
-            if (num > bs->bl.write_zeroes_alignment) {
+        /* Align request.  Block drivers can expect the "bulk" of the request
+         * to be aligned.
+         */
+        if (bs->bl.write_zeroes_alignment
+            && num > bs->bl.write_zeroes_alignment) {
+            if (sector_num % bs->bl.write_zeroes_alignment != 0) {
+                /* Make a small request up to the first aligned sector.  */
                 num = bs->bl.write_zeroes_alignment;
+                num -= sector_num % bs->bl.write_zeroes_alignment;
+            } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
+                /* Shorten the request to the last aligned sector.  num cannot
+                 * underflow because num > bs->bl.write_zeroes_alignment.
+                 */
+                num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
             }
-            num -= sector_num % bs->bl.write_zeroes_alignment;
         }
 
         /* limit request size */
@@ -2796,16 +2803,20 @@ static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
             /* Fall back to bounce buffer if write zeroes is unsupported */
             iov.iov_len = num * BDRV_SECTOR_SIZE;
             if (iov.iov_base == NULL) {
-                /* allocate bounce buffer only once and ensure that it
-                 * is big enough for this and all future requests.
-                 */
-                size_t bufsize = num <= nb_sectors ? num : max_write_zeroes;
-                iov.iov_base = qemu_blockalign(bs, bufsize * BDRV_SECTOR_SIZE);
-                memset(iov.iov_base, 0, bufsize * BDRV_SECTOR_SIZE);
+                iov.iov_base = qemu_blockalign(bs, num * BDRV_SECTOR_SIZE);
+                memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
             }
             qemu_iovec_init_external(&qiov, &iov, 1);
 
             ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
+
+            /* Keep bounce buffer around if it is big enough for all
+             * future requests.
+             */
+            if (num < max_write_zeroes) {
+                qemu_vfree(iov.iov_base);
+                iov.iov_base = NULL;
+            }
         }
 
         sector_num += num;
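The second hunk also changes how the bounce buffer used for the -ENOTSUP
fallback is managed: instead of sizing one buffer up front for all future
requests, the new code sizes it for the current chunk and discards it
unless it already has the maximum chunk size.  The standalone sketch below
shows that lifecycle for a head/bulk/bulk/tail sequence; the names are
hypothetical, 512-byte sectors are assumed, and plain calloc/free stand in
for qemu_blockalign/qemu_vfree:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const int64_t max_chunk = 64;       /* stand-in for max_write_zeroes */
    int64_t chunks[] = {3, 64, 64, 5};  /* head, bulk, bulk, tail */
    void *bounce = NULL;

    for (size_t i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
        int64_t num = chunks[i];

        if (bounce == NULL) {
            /* Allocate a zeroed buffer just big enough for this chunk. */
            bounce = calloc(num, 512);
            printf("chunk %zu: allocated %lld-sector buffer\n",
                   i, (long long)num);
        } else {
            printf("chunk %zu: reused buffer\n", i);
        }

        /* ... the write of 'num' zeroed sectors would happen here ... */

        /* Keep the buffer only if it is big enough for any later chunk. */
        if (num < max_chunk) {
            free(bounce);
            bounce = NULL;
        }
    }
    free(bounce);
    return 0;
}

In this sequence the unaligned head gets its own short-lived buffer, the
first maximum-sized bulk chunk allocates a buffer that is kept, and every
later chunk, including the short tail, reuses it.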