From: "Rao, Lei" <lei....@intel.com> When we use continuous dirty memory copy for flushing ram cache on secondary VM, we can also clean up the bitmap of contiguous dirty page memory. This also can reduce the VM stop time during checkpoint.
Signed-off-by: Lei Rao <lei....@intel.com>
---
 migration/ram.c | 29 +++++++++++++++++++++++++----
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index 2a8ee96..9b23df6 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -856,6 +856,30 @@ unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
     return first;
 }
 
+/**
+ * colo_bitmap_clear_dirty: when we flush the RAM cache to RAM, we copy
+ * contiguous dirty pages in one go, so we can also clear the dirty
+ * bitmap for the whole contiguous range.
+ */
+static inline bool colo_bitmap_clear_dirty(RAMState *rs,
+                                           RAMBlock *rb,
+                                           unsigned long start,
+                                           unsigned long num)
+{
+    bool ret = false;
+    unsigned long i = 0;
+
+    qemu_mutex_lock(&rs->bitmap_mutex);
+    for (i = 0; i < num; i++) {
+        ret = test_and_clear_bit(start + i, rb->bmap);
+        if (ret) {
+            rs->migration_dirty_pages--;
+        }
+    }
+    qemu_mutex_unlock(&rs->bitmap_mutex);
+    return ret;
+}
+
 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                 RAMBlock *rb,
                                                 unsigned long page)
@@ -3703,7 +3727,6 @@ void colo_flush_ram_cache(void)
     void *src_host;
     unsigned long offset = 0;
     unsigned long num = 0;
-    unsigned long i = 0;
 
     memory_global_dirty_log_sync();
     WITH_RCU_READ_LOCK_GUARD() {
@@ -3725,9 +3748,7 @@ void colo_flush_ram_cache(void)
                 num = 0;
                 block = QLIST_NEXT_RCU(block, next);
             } else {
-                for (i = 0; i < num; i++) {
-                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
-                }
+                colo_bitmap_clear_dirty(ram_state, block, offset, num);
                 dst_host = block->host
                          + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                 src_host = block->colo_cache
-- 
1.8.3.1
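
[Editor's note: for readers who want to try the batching idea outside of
QEMU, here is a minimal standalone sketch of the bitmap walk that
colo_bitmap_clear_dirty() performs. The test_and_clear_bit() helper below
is a simplified, non-thread-safe stand-in for QEMU's version in
include/qemu/bitops.h, and the bitmap size and page numbers are made up
for illustration; this is not QEMU code.]

/*
 * Standalone sketch of the batched dirty-bitmap clear. Builds with
 * e.g. "gcc -o bitmap_sketch bitmap_sketch.c".
 */
#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Simplified stand-in for QEMU's test_and_clear_bit(): clear bit nr
 * in bmap and report whether it was set. */
static bool test_and_clear_bit(unsigned long nr, unsigned long *bmap)
{
    unsigned long mask = 1UL << (nr % BITS_PER_LONG);
    unsigned long *word = &bmap[nr / BITS_PER_LONG];
    bool was_set = (*word & mask) != 0;

    *word &= ~mask;
    return was_set;
}

int main(void)
{
    unsigned long bmap[2] = { 0 };   /* 128-page toy dirty bitmap */
    unsigned long dirty_pages = 0;
    unsigned long start = 3, num = 5, i;

    /* Mark pages 3..7 dirty, as colo_bitmap_find_dirty() would report
     * a contiguous run of dirty pages. */
    for (i = start; i < start + num; i++) {
        bmap[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
        dirty_pages++;
    }

    /* Batched clear over the whole contiguous run; in the patch this
     * loop sits inside a single bitmap_mutex critical section instead
     * of taking the lock once per page. */
    for (i = 0; i < num; i++) {
        if (test_and_clear_bit(start + i, bmap)) {
            dirty_pages--;
        }
    }

    printf("remaining dirty pages: %lu\n", dirty_pages); /* prints 0 */
    return 0;
}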