* zhanghailiang (zhang.zhanghaili...@huawei.com) wrote:
> We need to record the address of the dirty pages that received from PVM,
> It will help flushing pages that cached into SVM.
> 
> Signed-off-by: zhanghailiang <zhang.zhanghaili...@huawei.com>
> ---
> v10:
>  - New patch split from v9's patch 13
>  - Rebase to master to use 'migration_bitmap_rcu'
> ---
>  migration/ram.c | 35 +++++++++++++++++++++++++++++++++++
>  1 file changed, 35 insertions(+)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index b094dc3..70879bd 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -1448,6 +1448,18 @@ static inline void *host_from_stream_offset(QEMUFile *f,
>          }
>  
>          if (ram_cache_enable) {
> +            unsigned long *bitmap;
> +            long k = (block->mr->ram_addr + offset) >> TARGET_PAGE_BITS;
> +
> +            bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
> +            /*
> +             * During colo checkpoint, we need bitmap of these migrated pages.
> +             * It help us to decide which pages in ram cache should be flushed
> +             * into VM's RAM later.
> +             */
> +            if (!test_and_set_bit(k, bitmap)) {
> +                migration_dirty_pages++;
> +            }

I don't like having this in host_from_stream_offset; if you look at the
current ram_load there is only a single call to host_from_stream_offset,
so it's now much easier for you to move it into a separate function.
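
For illustration only (the helper name and its exact call site below are
hypothetical, not part of the posted patch), the duplicated bitmap update
could be pulled into one small helper that ram_load calls once it has
resolved the host address:

    /* Hypothetical helper: mark a page received from the PVM in the
     * dirty bitmap so it is later flushed from the ram cache into the
     * SVM's RAM at the next checkpoint. */
    static void colo_record_cached_page(RAMBlock *block, ram_addr_t offset)
    {
        unsigned long *bitmap;
        long k = (block->mr->ram_addr + offset) >> TARGET_PAGE_BITS;

        bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
        if (!test_and_set_bit(k, bitmap)) {
            migration_dirty_pages++;
        }
    }

ram_load would then call it in one place, only when ram_cache_enable is
set, and host_from_stream_offset would go back to just returning the
host (or host_cache) pointer in both branches.
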
>              return block->host_cache + offset;
>          } else {
>              return block->host + offset;
> @@ -1462,6 +1474,13 @@ static inline void *host_from_stream_offset(QEMUFile *f,
>          if (!strncmp(id, block->idstr, sizeof(id)) &&
>              block->max_length > offset) {
>              if (ram_cache_enable) {
> +                unsigned long *bitmap;
> +                long k = (block->mr->ram_addr + offset) >> TARGET_PAGE_BITS;
> +
> +                bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
> +                if (!test_and_set_bit(k, bitmap)) {
> +                    migration_dirty_pages++;
> +                }
>                  return block->host_cache + offset;
>              } else {
>                  return block->host + offset;
> @@ -1723,6 +1742,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
>  int colo_init_ram_cache(void)
>  {
>      RAMBlock *block;
> +    int64_t ram_cache_pages = last_ram_offset() >> TARGET_PAGE_BITS;
>  
>      rcu_read_lock();
>      QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
> @@ -1734,6 +1754,15 @@ int colo_init_ram_cache(void)
>      }
>      rcu_read_unlock();
>      ram_cache_enable = true;
> +    /*
> +     * Record the dirty pages that sent by PVM, we use this dirty bitmap together
> +     * with to decide which page in cache should be flushed into SVM's RAM. Here
> +     * we use the same name 'migration_bitmap_rcu' as for migration.
> +     */
> +    migration_bitmap_rcu = g_new(struct BitmapRcu, 1);

Please update that to g_new0 (I changed the other use when I added postcopy).

Dave

> +    migration_bitmap_rcu->bmap = bitmap_new(ram_cache_pages);
> +    migration_dirty_pages = 0;
> +
>      return 0;
> 
> out_locked:
> @@ -1751,9 +1780,15 @@ out_locked:
>  void colo_release_ram_cache(void)
>  {
>      RAMBlock *block;
> +    struct BitmapRcu *bitmap = migration_bitmap_rcu;
>  
>      ram_cache_enable = false;
>  
> +    atomic_rcu_set(&migration_bitmap_rcu, NULL);
> +    if (bitmap) {
> +        call_rcu(bitmap, migration_bitmap_free, rcu);
> +    }
> +
>      rcu_read_lock();
>      QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
>          if (block->host_cache) {
> --
> 1.8.3.1
> 
> 

--
Dr. David Alan Gilbert / dgilb...@redhat.com / Manchester, UK
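
Spelling out the g_new0 request with an illustrative snippet (not part of
the posted patch): g_new0 zero-initialises the whole BitmapRcu allocation,
so any member COLO never sets up, such as the postcopy-only unsent map,
starts out NULL instead of whatever g_new happens to leave there:

    /* colo_init_ram_cache(): zero the whole struct so members COLO does
     * not fill in are NULL rather than uninitialised heap contents. */
    migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
    migration_bitmap_rcu->bmap = bitmap_new(ram_cache_pages);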