During migration, we sync the dirty bitmap from ram_list.dirty_memory to RAMBlock.bmap in cpu_physical_memory_sync_dirty_bitmap().
Since both RAMBlock.bmap and ram_list.dirty_memory are set to all 1s, the first-round sync is meaningless, duplicated work.

Leaving RAMBlock->bmap blank on allocation has a side effect on migration_dirty_pages, since that value is calculated from the result of cpu_physical_memory_sync_dirty_bitmap(). To keep it correct, set migration_dirty_pages to 0 in ram_state_init().

Signed-off-by: Wei Yang <richardw.y...@linux.intel.com>
---
 migration/ram.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 95c51109d2..417874707d 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3151,12 +3151,7 @@ static int ram_state_init(RAMState **rsp)
     qemu_mutex_init(&(*rsp)->src_page_req_mutex);
     QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
 
-    /*
-     * Count the total number of pages used by ram blocks not including any
-     * gaps due to alignment or unplugs.
-     */
-    (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
-
+    (*rsp)->migration_dirty_pages = 0;
     ram_state_reset(*rsp);
 
     return 0;
@@ -3172,7 +3167,6 @@ static void ram_list_init_bitmaps(void)
         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
             pages = block->max_length >> TARGET_PAGE_BITS;
             block->bmap = bitmap_new(pages);
-            bitmap_set(block->bmap, 0, pages);
             if (migrate_postcopy_ram()) {
                 block->unsentmap = bitmap_new(pages);
                 bitmap_set(block->unsentmap, 0, pages);
-- 
2.19.1
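
For readers less familiar with the dirty-bitmap bookkeeping, below is a minimal, self-contained sketch (not QEMU code) of why starting both the per-block bitmap and migration_dirty_pages at zero ends in the same state as the removed pre-initialisation: the first sync from the all-ones global bitmap accounts for every page. The helper sync_dirty_bitmap(), the dirty_memory[]/bmap[] arrays and the fixed page count are illustrative assumptions; only the names RAMBlock.bmap, ram_list.dirty_memory and migration_dirty_pages correspond to identifiers in the patch.

/*
 * Hedged sketch, not QEMU code: a simplified model of the sync step
 * that moves dirty bits from a global bitmap into a per-block bitmap
 * and counts newly dirtied pages.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGES 64                        /* illustrative block size */

static uint8_t dirty_memory[PAGES];     /* stands in for ram_list.dirty_memory */
static uint8_t bmap[PAGES];             /* stands in for RAMBlock.bmap, left blank */
static uint64_t migration_dirty_pages;  /* starts at 0, as in ram_state_init() */

/* Model of cpu_physical_memory_sync_dirty_bitmap()'s counting behaviour. */
static uint64_t sync_dirty_bitmap(void)
{
    uint64_t num_dirty = 0;

    for (int i = 0; i < PAGES; i++) {
        if (dirty_memory[i] && !bmap[i]) {
            bmap[i] = 1;        /* page becomes dirty for migration */
            num_dirty++;
        }
        dirty_memory[i] = 0;    /* global bit consumed by the sync */
    }
    return num_dirty;
}

int main(void)
{
    /* At migration start every page is marked dirty in the global bitmap. */
    memset(dirty_memory, 1, sizeof(dirty_memory));

    /*
     * With bmap blank on allocation, the first sync is the one that
     * counts all pages, so migration_dirty_pages reaches PAGES, the
     * same value the removed pre-initialisation assigned up front.
     */
    migration_dirty_pages += sync_dirty_bitmap();
    printf("after first sync: %" PRIu64 " dirty pages\n",
           migration_dirty_pages);
    return 0;
}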