vmdk: Fix next_cluster_sector for compressed write
This fixes the bug introduced by commit c6ac36e (vmdk: Optimize cluster allocation).
Sometimes write_len can be larger than the cluster size, because it contains both the data and the grain marker. We must advance next_cluster_sector in this case; otherwise the image gets corrupted.
Cc: qemu-stable@nongnu.org
Reported-by: Antoni Villalonga <qemu-list@friki.cat>
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
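For context, here is a minimal, self-contained C sketch of the bookkeeping this patch adds. It is not the actual QEMU code: struct extent and advance_next_cluster_sector are simplified, hypothetical stand-ins for VmdkExtent and the logic added to vmdk_write_extent(). The point it illustrates is that a compressed grain is stored as a VmdkGrainMarker header followed by the compressed payload, so the number of bytes written can exceed one cluster, and the next free cluster sector must be rounded up past the end of what was actually written.

/*
 * Sketch only (simplified, not the actual QEMU code): advance the next
 * free-cluster pointer by the full amount written.  write_len includes
 * the grain marker, so it can be larger than the cluster size.
 */
#include <stdint.h>

#define SECTOR_SIZE        512
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX(a, b)          ((a) > (b) ? (a) : (b))

/* Hypothetical stand-in for QEMU's VmdkExtent. */
struct extent {
    int64_t next_cluster_sector;    /* first sector free for allocation */
};

/*
 * After writing write_len bytes at byte offset write_offset, move
 * next_cluster_sector past the end of the write, rounded up to the next
 * sector boundary.
 */
static void advance_next_cluster_sector(struct extent *e,
                                        int64_t write_offset,
                                        int64_t write_len)
{
    int64_t write_end_sector =
        DIV_ROUND_UP(write_offset + write_len, SECTOR_SIZE);

    e->next_cluster_sector = MAX(e->next_cluster_sector, write_end_sector);
}

Using MAX() keeps next_cluster_sector monotonic: if an earlier allocation already advanced it further, a shorter write does not move it backwards.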
parent aacd5650c6
commit 5e82a31eb9

 block/vmdk.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -1302,6 +1302,8 @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
     uLongf buf_len;
     const uint8_t *write_buf = buf;
     int write_len = nb_sectors * 512;
+    int64_t write_offset;
+    int64_t write_end_sector;
 
     if (extent->compressed) {
         if (!extent->has_marker) {
@@ -1320,10 +1322,14 @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
         write_buf = (uint8_t *)data;
         write_len = buf_len + sizeof(VmdkGrainMarker);
     }
-    ret = bdrv_pwrite(extent->file,
-                        cluster_offset + offset_in_cluster,
-                        write_buf,
-                        write_len);
+    write_offset = cluster_offset + offset_in_cluster,
+    ret = bdrv_pwrite(extent->file, write_offset, write_buf, write_len);
+
+    write_end_sector = DIV_ROUND_UP(write_offset + write_len, BDRV_SECTOR_SIZE);
+
+    extent->next_cluster_sector = MAX(extent->next_cluster_sector,
+                                      write_end_sector);
+
     if (ret != write_len) {
         ret = ret < 0 ? ret : -EIO;
         goto out;