Use the more effective bdrv_dirty_bitmap_next_dirty_area() interface instead
of looping through the bitmap cluster by cluster with bdrv_dirty_bitmap_get()
and bdrv_dirty_bitmap_next_zero().

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsement...@virtuozzo.com>
---
 block/backup.c | 56 ++++++++++++++++++++++----------------------------
 1 file changed, 24 insertions(+), 32 deletions(-)
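Note for reviewers: the in/out contract the new loop relies on is easy to
misread, so here is a minimal stand-alone sketch of it.
bdrv_dirty_bitmap_next_dirty_area() takes the search window as
*offset/*bytes and, when it returns true, narrows both in place to the first
dirty area found. Everything below is a hypothetical toy model (a plain
bool-per-cluster array standing in for the real HBitmap-backed bitmap in
block/dirty-bitmap.c); only the iteration pattern is the point:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLUSTER_SIZE 4096
#define CLUSTERS     16

static bool dirty[CLUSTERS];   /* toy stand-in for job->copy_bitmap */

/* Toy model of bdrv_dirty_bitmap_next_dirty_area(): search the window
 * [*offset, *offset + *bytes) and, on success, narrow *offset/*bytes to
 * the first contiguous dirty area.  Assumes cluster-aligned input, like
 * the aligned_offset/aligned_bytes the patch feeds in. */
static bool next_dirty_area(uint64_t *offset, uint64_t *bytes)
{
    uint64_t end = *offset + *bytes;
    uint64_t start = *offset;

    while (start < end && !dirty[start / CLUSTER_SIZE]) {
        start += CLUSTER_SIZE;        /* skip clean clusters */
    }
    if (start >= end) {
        return false;                 /* nothing dirty left in the window */
    }

    uint64_t area_end = start;
    while (area_end < end && dirty[area_end / CLUSTER_SIZE]) {
        area_end += CLUSTER_SIZE;     /* grow over the dirty run */
    }

    *offset = start;
    *bytes = area_end - start;
    return true;
}

int main(void)
{
    uint64_t off = 0;
    uint64_t cur_bytes = CLUSTERS * CLUSTER_SIZE;
    uint64_t end = off + cur_bytes;

    dirty[1] = dirty[2] = dirty[7] = true;

    /* Same loop shape as the new backup_do_cow(): each found area is
     * "copied", then the window is re-widened to the remaining tail. */
    while (next_dirty_area(&off, &cur_bytes)) {
        printf("copy [%" PRIu64 ", +%" PRIu64 ")\n", off, cur_bytes);
        off += cur_bytes;
        cur_bytes = end - off;
    }
    return 0;
}

Compiled and run, this prints "copy [4096, +8192)" and "copy [28672, +4096)":
two contiguous dirty areas handled in two iterations, instead of one
iteration per cluster as in the old loop.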
diff --git a/block/backup.c b/block/backup.c
index f19c9195fe..5ede0c8290 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -235,25 +235,28 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
 {
     CowRequest cow_request;
     int ret = 0;
-    int64_t start, end; /* bytes */
+    uint64_t off, cur_bytes;
+    int64_t aligned_offset, aligned_bytes, aligned_end;
     BdrvRequestFlags read_flags =
             is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
 
     qemu_co_rwlock_rdlock(&job->flush_rwlock);
 
-    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
-    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);
+    aligned_offset = QEMU_ALIGN_DOWN(offset, job->cluster_size);
+    aligned_end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);
+    aligned_bytes = aligned_end - aligned_offset;
 
-    trace_backup_do_cow_enter(job, start, offset, bytes);
+    trace_backup_do_cow_enter(job, aligned_offset, offset, bytes);
 
-    wait_for_overlapping_requests(job, start, end);
-    cow_request_begin(&cow_request, job, start, end);
+    wait_for_overlapping_requests(job, aligned_offset, aligned_end);
+    cow_request_begin(&cow_request, job, aligned_offset, aligned_end);
 
     if (job->initializing_bitmap) {
-        int64_t off, chunk;
+        int64_t chunk;
 
-        for (off = offset; offset < end; offset += chunk) {
-            ret = backup_bitmap_reset_unallocated(job, off, end - off, &chunk);
+        for (off = aligned_offset; off < aligned_end; off += chunk) {
+            ret = backup_bitmap_reset_unallocated(job, off, aligned_end - off,
+                                                  &chunk);
             if (ret < 0) {
                 chunk = job->cluster_size;
             }
@@ -261,47 +264,36 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     }
     ret = 0;
 
-    while (start < end) {
-        int64_t dirty_end;
-        int64_t cur_bytes;
-
-        if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) {
-            trace_backup_do_cow_skip(job, start);
-            start += job->cluster_size;
-            continue; /* already copied */
-        }
-
-        dirty_end = bdrv_dirty_bitmap_next_zero(job->copy_bitmap, start,
-                                                end - start);
-        if (dirty_end < 0) {
-            dirty_end = end;
-        }
-
-        trace_backup_do_cow_process(job, start);
-        cur_bytes = MIN(dirty_end - start, job->len - start);
-        bdrv_reset_dirty_bitmap(job->copy_bitmap, start, dirty_end - start);
+    off = aligned_offset;
+    cur_bytes = aligned_bytes;
+    while (bdrv_dirty_bitmap_next_dirty_area(job->copy_bitmap,
+                                             &off, &cur_bytes))
+    {
+        trace_backup_do_cow_process(job, off);
+        bdrv_reset_dirty_bitmap(job->copy_bitmap, off, cur_bytes);
 
         if (job->use_copy_range) {
-            ret = backup_cow_with_offload(job, start, cur_bytes, read_flags);
+            ret = backup_cow_with_offload(job, off, cur_bytes, read_flags);
            if (ret < 0) {
                 job->use_copy_range = false;
             }
         }
         if (!job->use_copy_range) {
-            ret = backup_cow_with_bounce_buffer(job, start, cur_bytes,
+            ret = backup_cow_with_bounce_buffer(job, off, cur_bytes,
                                                 read_flags, error_is_read);
         }
         if (ret < 0) {
-            bdrv_set_dirty_bitmap(job->copy_bitmap, start, dirty_end - start);
+            bdrv_set_dirty_bitmap(job->copy_bitmap, off, cur_bytes);
             break;
         }
 
        /* Publish progress, guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
-        start += cur_bytes;
+        off += cur_bytes;
         job->bytes_read += cur_bytes;
         job_progress_update(&job->common.job, cur_bytes);
+        cur_bytes = offset + bytes - off;
     }
 
     cow_request_end(&cow_request);
-- 
2.18.0