Use a more efficient search for the next dirty byte. The trace point is dropped so that we do not have to introduce an additional variable and extra logic solely for the sake of the trace point.
Signed-off-by: Vladimir Sementsov-Ogievskiy <[email protected]> --- block/backup.c | 7 +++---- block/trace-events | 1 - 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/block/backup.c b/block/backup.c index 07d751aea4..9bddea1b59 100644 --- a/block/backup.c +++ b/block/backup.c @@ -272,10 +272,9 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job, while (start < end) { int64_t dirty_end; - if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) { - trace_backup_do_cow_skip(job, start); - start += job->cluster_size; - continue; /* already copied */ + start = bdrv_dirty_bitmap_next_dirty(job->copy_bitmap, start, end); + if (start < 0) { + break; } if (job->initializing_bitmap) { diff --git a/block/trace-events b/block/trace-events index 04209f058d..7e46f7e036 100644 --- a/block/trace-events +++ b/block/trace-events @@ -40,7 +40,6 @@ mirror_yield_in_flight(void *s, int64_t offset, int in_flight) "s %p offset %" P # backup.c backup_do_cow_enter(void *job, int64_t start, int64_t offset, uint64_t bytes) "job %p start %" PRId64 " offset %" PRId64 " bytes %" PRIu64 backup_do_cow_return(void *job, int64_t offset, uint64_t bytes, int ret) "job %p offset %" PRId64 " bytes %" PRIu64 " ret %d" -backup_do_cow_skip(void *job, int64_t start) "job %p start %"PRId64 backup_do_cow_skip_range(void *job, int64_t start, uint64_t bytes) "job %p start %"PRId64" bytes %"PRId64 backup_do_cow_process(void *job, int64_t start) "job %p start %"PRId64 backup_do_cow_read_fail(void *job, int64_t start, int ret) "job %p start %"PRId64" ret %d" -- 2.18.0
