Starting from pss->page, ram_save_host_page() checks every page and
sends the dirty ones up to the end of the current host page or to the
used_length boundary of the block. If the host page is a huge page,
this per-page check takes a lot of time.

Use migration_bitmap_find_dirty() to skip over the clean pages instead,
which improves performance.
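
As an illustration, here is a minimal standalone sketch (not QEMU code;
find_next_dirty() below is only a hypothetical stand-in for
migration_bitmap_find_dirty()) of why a find-next-dirty scan beats
testing every page: with 512 target pages per 2M host page and only a
few dirty bits set, the word-at-a-time search visits far fewer bits
than a per-page loop.

    #include <stdio.h>
    #include <limits.h>

    #define NPAGES 512UL            /* 4K target pages per 2M host page */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define NWORDS ((NPAGES + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* Hypothetical stand-in for migration_bitmap_find_dirty(): return
     * the index of the next set bit at or after 'start', or NPAGES. */
    static unsigned long find_next_dirty(const unsigned long *bmap,
                                         unsigned long start)
    {
        unsigned long i = start;

        while (i < NPAGES) {
            unsigned long word = bmap[i / BITS_PER_LONG] >> (i % BITS_PER_LONG);

            if (word) {
                return i + __builtin_ctzl(word); /* skip clean bits at once */
            }
            i = (i / BITS_PER_LONG + 1) * BITS_PER_LONG; /* jump to next word */
        }
        return NPAGES;
    }

    int main(void)
    {
        unsigned long bmap[NWORDS] = { 0 };
        unsigned long p;

        bmap[3] |= 1UL << 17;   /* a single dirty page inside the host page */

        for (p = find_next_dirty(bmap, 0); p < NPAGES;
             p = find_next_dirty(bmap, p + 1)) {
            printf("would send page %lu\n", p); /* only dirty pages visited */
        }
        return 0;
    }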

Signed-off-by: Keqian Zhu <zhukeqi...@huawei.com>
Signed-off-by: Kunkun Jiang <jiangkun...@huawei.com>
---
 migration/ram.c | 39 +++++++++++++++++++--------------------
 1 file changed, 19 insertions(+), 20 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 9fc5b2997c..28215aefe4 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1991,6 +1991,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
     int pages = 0;
     size_t pagesize_bits =
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
+    unsigned long hostpage_boundary =
+        QEMU_ALIGN_UP(pss->page + 1, pagesize_bits);
     unsigned long start_page = pss->page;
     int res;
 
@@ -2003,30 +2005,27 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
         int pages_this_iteration = 0;
 
         /* Check if the page is dirty and send it if it is */
-        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
-            pss->page++;
-            continue;
-        }
-
-        pages_this_iteration = ram_save_target_page(rs, pss, last_stage);
-        if (pages_this_iteration < 0) {
-            return pages_this_iteration;
-        }
+        if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
+            pages_this_iteration = ram_save_target_page(rs, pss, last_stage);
+            if (pages_this_iteration < 0) {
+                return pages_this_iteration;
+            }
 
-        pages += pages_this_iteration;
-        pss->page++;
-        /*
-         * Allow rate limiting to happen in the middle of huge pages if
-         * something is sent in the current iteration.
-         */
-        if (pagesize_bits > 1 && pages_this_iteration > 0) {
-            migration_rate_limit();
+            pages += pages_this_iteration;
+            /*
+             * Allow rate limiting to happen in the middle of huge pages if
+             * something is sent in the current iteration.
+             */
+            if (pagesize_bits > 1 && pages_this_iteration > 0) {
+                migration_rate_limit();
+            }
         }
-    } while ((pss->page & (pagesize_bits - 1)) &&
+        pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
+    } while ((pss->page < hostpage_boundary) &&
              offset_in_ramblock(pss->block,
                                 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS));
-    /* The offset we leave with is the last one we looked at */
-    pss->page--;
+    /* The offset we leave with is the min boundary of host page and block */
+    pss->page = MIN(pss->page, hostpage_boundary) - 1;
 
     res = ram_save_release_protection(rs, pss, start_page);
     return (res < 0 ? res : pages);
-- 
2.23.0

