From: Peter Xu <pet...@redhat.com> The RAM_SAVE_FLAG_MULTIFD_FLUSH message should always be correlated to a sync request on the source side. Unify the emission of this message into one place, and conditionally send the message only when necessary.
Reviewed-by: Fabiano Rosas <faro...@suse.de> Signed-off-by: Peter Xu <pet...@redhat.com> Message-Id: <20241206224755.1108686-5-pet...@redhat.com> Signed-off-by: Fabiano Rosas <faro...@suse.de> --- migration/multifd-nocomp.c | 27 +++++++++++++++++++++++++-- migration/multifd.h | 2 +- migration/ram.c | 18 ++++-------------- 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/migration/multifd-nocomp.c b/migration/multifd-nocomp.c index 219f9e58ef..58372db0f4 100644 --- a/migration/multifd-nocomp.c +++ b/migration/multifd-nocomp.c @@ -20,6 +20,7 @@ #include "qemu/cutils.h" #include "qemu/error-report.h" #include "trace.h" +#include "qemu-file.h" static MultiFDSendData *multifd_ram_send; @@ -343,9 +344,10 @@ retry: return true; } -int multifd_ram_flush_and_sync(void) +int multifd_ram_flush_and_sync(QEMUFile *f) { MultiFDSyncReq req; + int ret; if (!migrate_multifd()) { return 0; @@ -361,7 +363,28 @@ int multifd_ram_flush_and_sync(void) /* File migrations only need to sync with threads */ req = migrate_mapped_ram() ? MULTIFD_SYNC_LOCAL : MULTIFD_SYNC_ALL; - return multifd_send_sync_main(req); + ret = multifd_send_sync_main(req); + if (ret) { + return ret; + } + + /* If we don't need to sync with remote at all, nothing else to do */ + if (req == MULTIFD_SYNC_LOCAL) { + return 0; + } + + /* + * Old QEMUs don't understand RAM_SAVE_FLAG_MULTIFD_FLUSH, it relies + * on RAM_SAVE_FLAG_EOS instead. 
+ */ + if (migrate_multifd_flush_after_each_section()) { + return 0; + } + + qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); + qemu_fflush(f); + + return 0; } bool multifd_send_prepare_common(MultiFDSendParams *p) diff --git a/migration/multifd.h b/migration/multifd.h index 6493512305..0fef431f6b 100644 --- a/migration/multifd.h +++ b/migration/multifd.h @@ -354,7 +354,7 @@ static inline uint32_t multifd_ram_page_count(void) void multifd_ram_save_setup(void); void multifd_ram_save_cleanup(void); -int multifd_ram_flush_and_sync(void); +int multifd_ram_flush_and_sync(QEMUFile *f); size_t multifd_ram_payload_size(void); void multifd_ram_fill_packet(MultiFDSendParams *p); int multifd_ram_unfill_packet(MultiFDRecvParams *p, Error **errp); diff --git a/migration/ram.c b/migration/ram.c index 01521de71f..ef683d11f0 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -1306,15 +1306,10 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss) (!migrate_multifd_flush_after_each_section() || migrate_mapped_ram())) { QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel; - int ret = multifd_ram_flush_and_sync(); + int ret = multifd_ram_flush_and_sync(f); if (ret < 0) { return ret; } - - if (!migrate_mapped_ram()) { - qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); - qemu_fflush(f); - } } /* Hit the end of the list */ @@ -3044,18 +3039,13 @@ static int ram_save_setup(QEMUFile *f, void *opaque, Error **errp) } bql_unlock(); - ret = multifd_ram_flush_and_sync(); + ret = multifd_ram_flush_and_sync(f); bql_lock(); if (ret < 0) { error_setg(errp, "%s: multifd synchronization failed", __func__); return ret; } - if (migrate_multifd() && !migrate_multifd_flush_after_each_section() - && !migrate_mapped_ram()) { - qemu_put_be64(f, RAM_SAVE_FLAG_MULTIFD_FLUSH); - } - qemu_put_be64(f, RAM_SAVE_FLAG_EOS); ret = qemu_fflush(f); if (ret < 0) { @@ -3190,7 +3180,7 @@ out: if (ret >= 0 && migration_is_running()) { if (migrate_multifd() && migrate_multifd_flush_after_each_section() && 
!migrate_mapped_ram()) { - ret = multifd_ram_flush_and_sync(); + ret = multifd_ram_flush_and_sync(f); if (ret < 0) { return ret; } @@ -3268,7 +3258,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) * Only the old dest QEMU will need this sync, because each EOS * will require one SYNC message on each channel. */ - ret = multifd_ram_flush_and_sync(); + ret = multifd_ram_flush_and_sync(f); if (ret < 0) { return ret; } -- 2.35.3