* Stefan Hajnoczi (stefa...@redhat.com) wrote:
> The dirty memory bitmap is managed by ram_addr.h and copied to
> migration_bitmap[] periodically during live migration.
>
> Move the code to sync the bitmap to ram_addr.h where related code lives.
Is this sync code going to need to gain a barrier (although I'm not quite
sure which) to ensure it's picked up all changes?
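For concreteness, the ordering I'm wondering about is the usual
publish/consume pairing: the vCPU side updates the page contents and then
sets the dirty bit, so the sync side has to observe the bit before it
re-reads the page. A made-up sketch (the names are invented, and the C11
fences stand in for smp_wmb()/smp_rmb() purely so it compiles on its own;
this is an illustration, not a patch suggestion):

#include <stdatomic.h>
#include <stdbool.h>
#include <string.h>

static char page_data[4096];    /* stand-in for one guest page       */
static atomic_bool dirty_bit;   /* stand-in for one dirty bitmap bit */

/* vCPU side: write the page, then publish the dirty bit. */
static void vcpu_dirty_page(const char *buf)
{
    memcpy(page_data, buf, sizeof(page_data));
    atomic_thread_fence(memory_order_release);       /* ~smp_wmb() */
    atomic_store_explicit(&dirty_bit, true, memory_order_relaxed);
}

/* Sync side: consume the dirty bit, then read the page. */
static bool sync_one_page(char *out)
{
    if (!atomic_exchange_explicit(&dirty_bit, false,
                                  memory_order_relaxed)) {
        return false;                                /* page is clean */
    }
    atomic_thread_fence(memory_order_acquire);       /* ~smp_rmb() */
    memcpy(out, page_data, sizeof(page_data));       /* sees new data */
    return true;
}

If the bit-setting paths already imply the needed ordering
(test_and_set_bit() and friends are full barriers in Linux, but I haven't
checked what QEMU's versions guarantee), then perhaps nothing extra is
needed here; I just couldn't convince myself either way.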
Dave

> Signed-off-by: Stefan Hajnoczi <stefa...@redhat.com>
> ---
>  arch_init.c             | 46 ++--------------------------------------------
>  include/exec/ram_addr.h | 44 ++++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 46 insertions(+), 44 deletions(-)
>
> diff --git a/arch_init.c b/arch_init.c
> index 7680d28..79c7784 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -436,52 +436,10 @@ ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
>      return (next - base) << TARGET_PAGE_BITS;
>  }
>
> -static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
> -{
> -    bool ret;
> -    int nr = addr >> TARGET_PAGE_BITS;
> -
> -    ret = test_and_set_bit(nr, migration_bitmap);
> -
> -    if (!ret) {
> -        migration_dirty_pages++;
> -    }
> -    return ret;
> -}
> -
>  static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
>  {
> -    ram_addr_t addr;
> -    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
> -
> -    /* start address is aligned at the start of a word? */
> -    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
> -        int k;
> -        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
> -        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
> -
> -        for (k = page; k < page + nr; k++) {
> -            if (src[k]) {
> -                unsigned long new_dirty;
> -                new_dirty = ~migration_bitmap[k];
> -                migration_bitmap[k] |= src[k];
> -                new_dirty &= src[k];
> -                migration_dirty_pages += ctpopl(new_dirty);
> -                src[k] = 0;
> -            }
> -        }
> -    } else {
> -        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
> -            if (cpu_physical_memory_get_dirty(start + addr,
> -                                              TARGET_PAGE_SIZE,
> -                                              DIRTY_MEMORY_MIGRATION)) {
> -                cpu_physical_memory_reset_dirty(start + addr,
> -                                                TARGET_PAGE_SIZE,
> -                                                DIRTY_MEMORY_MIGRATION);
> -                migration_bitmap_set_dirty(start + addr);
> -            }
> -        }
> -    }
> +    migration_dirty_pages +=
> +        cpu_physical_memory_sync_dirty_bitmap(migration_bitmap, start, length);
>  }
>
>
> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
> index ba90daa..87a8b28 100644
> --- a/include/exec/ram_addr.h
> +++ b/include/exec/ram_addr.h
> @@ -190,5 +190,49 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
>  void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
>                                       unsigned client);
>
> +static inline
> +uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
> +                                               ram_addr_t start,
> +                                               ram_addr_t length)
> +{
> +    ram_addr_t addr;
> +    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
> +    uint64_t num_dirty = 0;
> +
> +    /* start address is aligned at the start of a word? */
> +    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
> +        int k;
> +        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
> +        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
> +
> +        for (k = page; k < page + nr; k++) {
> +            if (src[k]) {
> +                unsigned long new_dirty;
> +                new_dirty = ~dest[k];
> +                dest[k] |= src[k];
> +                new_dirty &= src[k];
> +                num_dirty += ctpopl(new_dirty);
> +                src[k] = 0;
> +            }
> +        }
> +    } else {
> +        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
> +            if (cpu_physical_memory_get_dirty(start + addr,
> +                                              TARGET_PAGE_SIZE,
> +                                              DIRTY_MEMORY_MIGRATION)) {
> +                cpu_physical_memory_reset_dirty(start + addr,
> +                                                TARGET_PAGE_SIZE,
> +                                                DIRTY_MEMORY_MIGRATION);
> +                long k = (start + addr) >> TARGET_PAGE_BITS;
> +                if (!test_and_set_bit(k, dest)) {
> +                    num_dirty++;
> +                }
> +            }
> +        }
> +    }
> +
> +    return num_dirty;
> +}
> +
>  #endif
>  #endif
> --
> 2.1.0
>

--
Dr. David Alan Gilbert / dgilb...@redhat.com / Manchester, UK