1. There is a bug: after detecting an allocated area of skip_bytes length,
we ignore the skip_bytes variable and may end up copying more than
skip_bytes.
2. If the requested area is allocated, we call block_status for each
cluster on each loop iteration, even though after the first call we
already know that the whole requested area is allocated.

Solve both issues by resetting all unallocated bytes in the requested
area before the copying loop.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsement...@virtuozzo.com>
---
 block/backup.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index 9bddea1b59..d0815b21c8 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -257,7 +257,6 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     int ret = 0;
     int64_t start, end; /* bytes */
     void *bounce_buffer = NULL;
-    int64_t skip_bytes;
 
     qemu_co_rwlock_rdlock(&job->flush_rwlock);
 
@@ -269,6 +268,22 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     wait_for_overlapping_requests(job, start, end);
     cow_request_begin(&cow_request, job, start, end);
 
+    if (job->initializing_bitmap) {
+        int64_t off = start;
+        int64_t count;
+
+        while (off < end) {
+            off = bdrv_dirty_bitmap_next_dirty(job->copy_bitmap,
+                                               off, end - off);
+            if (off < 0) {
+                break;
+            }
+
+            backup_bitmap_reset_unallocated(job, off, &count);
+            off += count;
+        }
+    }
+
     while (start < end) {
         int64_t dirty_end;
 
@@ -277,15 +292,6 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
             break;
         }
 
-        if (job->initializing_bitmap) {
-            ret = backup_bitmap_reset_unallocated(job, start, &skip_bytes);
-            if (ret == 0) {
-                trace_backup_do_cow_skip_range(job, start, skip_bytes);
-                start += skip_bytes;
-                continue;
-            }
-        }
-
         dirty_end = bdrv_dirty_bitmap_next_zero(job->copy_bitmap, start,
                                                 (end - start));
         if (dirty_end < 0) {
--
2.18.0
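
For reference, below is a minimal standalone model of the restructuring,
not the actual backup_do_cow() code: the names next_dirty() and
reset_unallocated() are hypothetical, and plain bool arrays stand in for
QEMU's dirty-bitmap and block-status APIs. It shows the key move of the
patch: clear "dirty" bits for unallocated clusters in one pass over the
requested range before the copy loop, so the loop itself never consults
allocation status.

#include <stdbool.h>
#include <stdio.h>

#define CLUSTERS 16

static bool dirty[CLUSTERS];     /* models job->copy_bitmap     */
static bool allocated[CLUSTERS]; /* models block-status results */

/* Next dirty cluster at or after off, or -1 if none
 * (the role bdrv_dirty_bitmap_next_dirty() plays above). */
static int next_dirty(int off, int end)
{
    for (; off < end; off++) {
        if (dirty[off]) {
            return off;
        }
    }
    return -1;
}

/* Walk one allocation run starting at off, clearing dirty bits if the
 * run is unallocated, and report the run length via *count (the role
 * backup_bitmap_reset_unallocated() plays above). */
static void reset_unallocated(int off, int end, int *count)
{
    bool alloc = allocated[off];
    int i = off;

    while (i < end && allocated[i] == alloc) {
        if (!alloc) {
            dirty[i] = false; /* nothing to copy from holes */
        }
        i++;
    }
    *count = i - off;
}

int main(void)
{
    int start = 0, end = CLUSTERS;

    /* Example layout: everything dirty, clusters 4..7 unallocated. */
    for (int i = 0; i < CLUSTERS; i++) {
        dirty[i] = true;
        allocated[i] = !(i >= 4 && i < 8);
    }

    /* Pre-loop pass, mirroring the new job->initializing_bitmap block:
     * each iteration jumps to the next dirty cluster and skips a whole
     * allocation run, instead of re-querying every cluster. */
    for (int off = start; off < end; ) {
        int count;

        off = next_dirty(off, end);
        if (off < 0) {
            break;
        }
        reset_unallocated(off, end, &count);
        off += count;
    }

    /* Copy loop: now driven by the bitmap alone. */
    for (int i = start; i < end; i++) {
        if (dirty[i]) {
            printf("copy cluster %d\n", i);
        }
    }
    return 0;
}

With the example layout this prints "copy cluster N" for clusters 0..3
and 8..15 only, and allocation status was queried once per run rather
than once per loop iteration.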