The RAMBlock dirty sync in the first iteration can be skipped, because QEMU always initializes each RAMBlock's bmap to all 1s by default.
Signed-off-by: Hyman Huang <[email protected]>
---
 migration/cpu-throttle.c |  2 +-
 migration/ram.c          | 11 ++++++++---
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
index 5179019e33..674dc2004e 100644
--- a/migration/cpu-throttle.c
+++ b/migration/cpu-throttle.c
@@ -141,7 +141,7 @@ void cpu_throttle_dirty_sync_timer_tick(void *opaque)
      * effect on guest performance, therefore omit it to avoid
      * paying extra for the sync penalty.
      */
-    if (sync_cnt <= 1) {
+    if (!sync_cnt) {
         goto end;
     }

diff --git a/migration/ram.c b/migration/ram.c
index 05ff9eb328..571dba10b7 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2718,7 +2718,7 @@ static void ram_list_init_bitmaps(void)
 {
     MigrationState *ms = migrate_get_current();
     RAMBlock *block;
-    unsigned long pages;
+    unsigned long pages, clear_bmap_pages;
     uint8_t shift;

     /* Skip setting bitmap if there is no RAM */
@@ -2736,6 +2736,7 @@ static void ram_list_init_bitmaps(void)

         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
             pages = block->max_length >> TARGET_PAGE_BITS;
+            clear_bmap_pages = clear_bmap_size(pages, shift);
             /*
              * The initial dirty bitmap for migration must be set with all
              * ones to make sure we'll migrate every guest RAM page to
@@ -2751,7 +2752,12 @@ static void ram_list_init_bitmaps(void)
                 block->file_bmap = bitmap_new(pages);
             }
             block->clear_bmap_shift = shift;
-            block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
+            block->clear_bmap = bitmap_new(clear_bmap_pages);
+            /*
+             * Set clear_bmap to all 1s unconditionally, as we always set
+             * bmap to all 1s by default.
+             */
+            bitmap_set(block->clear_bmap, 0, clear_bmap_pages);
         }
     }
 }
@@ -2783,7 +2789,6 @@ static bool ram_init_bitmaps(RAMState *rs, Error **errp)
         if (!ret) {
             goto out_unlock;
         }
-        migration_bitmap_sync_precopy(false);
     }
 out_unlock:
--
2.39.1
