From: "Michael R. Hines" <mrhi...@us.ibm.com>

This gives RDMA shared access to madvise() on the destination side
when an entire chunk is found to be zero.
Reviewed-by: Juan Quintela <quint...@redhat.com>
Reviewed-by: Paolo Bonzini <pbonz...@redhat.com>
Reviewed-by: Chegu Vinod <chegu_vi...@hp.com>
Tested-by: Chegu Vinod <chegu_vi...@hp.com>
Tested-by: Michael R. Hines <mrhi...@us.ibm.com>
Signed-off-by: Michael R. Hines <mrhi...@us.ibm.com>
---
 arch_init.c                   | 29 +++++++++++++++++++----------
 include/migration/migration.h |  2 ++
 2 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index ea9ddad..82657e4 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -777,6 +777,24 @@ static inline void *host_from_stream_offset(QEMUFile *f,
     return NULL;
 }
 
+/*
+ * If a page (or a whole RDMA chunk) has been
+ * determined to be zero, then zap it.
+ */
+void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
+{
+    if (ch != 0 || !is_zero_page(host)) {
+        memset(host, ch, size);
+#ifndef _WIN32
+        if (ch == 0 &&
+            (!kvm_enabled() || kvm_has_sync_mmu()) &&
+            getpagesize() <= TARGET_PAGE_SIZE) {
+            qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
+        }
+#endif
+    }
+}
+
 static int ram_load(QEMUFile *f, void *opaque, int version_id)
 {
     ram_addr_t addr;
@@ -847,16 +865,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             }
 
             ch = qemu_get_byte(f);
-            if (ch != 0 || !is_zero_page(host)) {
-                memset(host, ch, TARGET_PAGE_SIZE);
-#ifndef _WIN32
-                if (ch == 0 &&
-                    (!kvm_enabled() || kvm_has_sync_mmu()) &&
-                    getpagesize() <= TARGET_PAGE_SIZE) {
-                    qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
-                }
-#endif
-            }
+            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
         } else if (flags & RAM_SAVE_FLAG_PAGE) {
             void *host;
 
diff --git a/include/migration/migration.h b/include/migration/migration.h
index f0640e0..9d3cc85 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -109,6 +109,8 @@ uint64_t xbzrle_mig_pages_transferred(void);
 uint64_t xbzrle_mig_pages_overflow(void);
 uint64_t xbzrle_mig_pages_cache_miss(void);
 
+void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
+
 /**
  * @migrate_add_blocker - prevent migration from proceeding
  *
-- 
1.7.10.4