From: "Michael R. Hines" <mrhi...@us.ibm.com> This gives RDMA shared access to madvise() on the destination side when an entire chunk is found to be zero.
Reviewed-by: Paolo Bonzini <pbonz...@redhat.com>
Signed-off-by: Michael R. Hines <mrhi...@us.ibm.com>
---
 arch_init.c                   | 24 ++++++++++++++++--------
 include/migration/migration.h |  2 ++
 2 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index 819ca5a..2f1fdd3 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -782,6 +782,21 @@ static inline void *host_from_stream_offset(QEMUFile *f,
     return NULL;
 }
 
+/*
+ * If a page (or a whole RDMA chunk) has been
+ * determined to be zero, then zap it.
+ */
+void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
+{
+    memset(host, ch, size);
+#ifndef _WIN32
+    if (ch == 0 && (!kvm_enabled() || kvm_has_sync_mmu()) &&
+        getpagesize() <= TARGET_PAGE_SIZE) {
+        qemu_madvise(host, size, QEMU_MADV_DONTNEED);
+    }
+#endif
+}
+
 static int ram_load(QEMUFile *f, void *opaque, int version_id)
 {
     ram_addr_t addr;
@@ -849,14 +864,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             }
 
             ch = qemu_get_byte(f);
-            memset(host, ch, TARGET_PAGE_SIZE);
-#ifndef _WIN32
-            if (ch == 0 &&
-                (!kvm_enabled() || kvm_has_sync_mmu()) &&
-                getpagesize() <= TARGET_PAGE_SIZE) {
-                qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
-            }
-#endif
+            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
         } else if (flags & RAM_SAVE_FLAG_PAGE) {
             void *host;
diff --git a/include/migration/migration.h b/include/migration/migration.h
index 1eeaa40..110f4ad 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -108,6 +108,8 @@ uint64_t xbzrle_mig_pages_transferred(void);
 uint64_t xbzrle_mig_pages_overflow(void);
 uint64_t xbzrle_mig_pages_cache_miss(void);
 
+void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
+
 /**
  * @migrate_add_blocker - prevent migration from proceeding
  *
-- 
1.7.10.4