This helps avoid unneeded writes and discards.

Signed-off-by: Maxim Levitsky <mlevi...@redhat.com>
---
 qemu-img.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/qemu-img.c b/qemu-img.c
index c2c56fc797..7e9b0f659f 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -1722,7 +1722,7 @@ static void convert_select_part(ImgConvertState *s, int64_t sector_num,
 static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
 {
     int64_t src_cur_offset;
-    int ret, n, src_cur;
+    int ret, n, src_cur, alignment;
     bool post_backing_zero = false;
 
     convert_select_part(s, sector_num, &src_cur, &src_cur_offset);
@@ -1785,11 +1785,14 @@ static int convert_iteration_sectors(ImgConvertState *s, int64_t sector_num)
     n = DIV_ROUND_UP(count, BDRV_SECTOR_SIZE);
 
     /*
-     * Avoid that s->sector_next_status becomes unaligned to the source
-     * request alignment and/or cluster size to avoid unnecessary read
-     * cycles.
+     * Avoid that s->sector_next_status becomes unaligned to the
+     * source/destination request alignment and/or cluster size to avoid
+     * unnecessary read/write cycles.
      */
-    tail = (sector_num - src_cur_offset + n) % s->src_alignment[src_cur];
+    alignment = MAX(s->src_alignment[src_cur], s->alignment);
+    assert(is_power_of_2(alignment));
+
+    tail = (sector_num - src_cur_offset + n) % alignment;
     if (n > tail) {
         n -= tail;
     }
-- 
2.26.2
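
For reference, the trimming logic the second hunk introduces can be read in
isolation. Below is a minimal standalone sketch (not qemu code: the helper
names, the base/sector_num parameters, and the main() driver are invented
for illustration) showing how trimming the tail keeps the next chunk's
starting sector aligned for both source and destination:

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

static int is_power_of_2(int64_t v)
{
    return v > 0 && (v & (v - 1)) == 0;
}

/*
 * Trim @n (length of the current chunk, in sectors) so that the chunk
 * ends on a multiple of @alignment relative to @base.  The strict
 * "n > tail" check leaves a chunk shorter than one alignment unit
 * untouched, so the copy loop always makes progress.
 */
static int64_t trim_to_alignment(int64_t base, int64_t sector_num,
                                 int64_t n, int64_t alignment)
{
    int64_t tail;

    assert(is_power_of_2(alignment));
    tail = (sector_num - base + n) % alignment;
    if (n > tail) {
        n -= tail;
    }
    return n;
}

int main(void)
{
    /* Source needs 8-sector alignment, destination 16: take the max. */
    int64_t alignment = MAX(8, 16);

    /*
     * A 30-sector chunk starting at sector 16 is trimmed to 16 sectors,
     * so the next chunk starts at sector 32, aligned for both sides.
     * Prints "16".
     */
    printf("%" PRId64 "\n", trim_to_alignment(0, 16, 30, alignment));
    return 0;
}

The modulo could equally be written as a mask (tail = ... & (alignment - 1))
given the power-of-two assertion; the sketch keeps % to match the patch.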