On 3/12/21 1:03 PM, leirao wrote:
> From: "Rao, Lei" <lei....@intel.com>
>
> When flushing memory from ram cache to ram during every checkpoint
> on the secondary VM, we can copy continuous chunks of memory instead of
> 4096 bytes at a time to reduce the VM stop time during checkpoint.
>
> Signed-off-by: Lei Rao <lei....@intel.com>
> ---
>  migration/ram.c | 44 +++++++++++++++++++++++++++++++++++++++++---
>  1 file changed, 41 insertions(+), 3 deletions(-)
>
> diff --git a/migration/ram.c b/migration/ram.c
> index e795a8d..b269637 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -823,6 +823,39 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
>      return next;
>  }
>
> +/*
> + * colo_bitmap_find_dirty: find contiguous dirty pages from start
> + *
> + * Returns the page offset within memory region of the start of the
> + * contiguous dirty pages
> + *
> + * @rs: current RAM state
> + * @rb: RAMBlock where to search for dirty pages
> + * @start: page where we start the search
> + * @num: the number of contiguous dirty pages
> + */
> +static inline
> +unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
> +                                     unsigned long start, unsigned long *num)
> +{
> +    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
> +    unsigned long *bitmap = rb->bmap;
> +    unsigned long first, next;
> +
> +    if (ramblock_is_ignored(rb)) {
> +        return size;
> +    }
> +
> +    first = find_next_bit(bitmap, size, start);
> +    if (first >= size) {
> +        return first;
> +    }
> +    next = find_next_zero_bit(bitmap, size, first + 1);
> +    assert(next >= first);
> +    *num = next - first;
> +    return first;
The idea is outstanding. I wonder, should it return (next - 1)?

Thanks,
Zhijian

> +}
> +
>  static inline bool migration_bitmap_clear_dirty(RAMState *rs,
>                                                  RAMBlock *rb,
>                                                  unsigned long page)
> @@ -3669,6 +3702,8 @@ void colo_flush_ram_cache(void)
>      void *dst_host;
>      void *src_host;
>      unsigned long offset = 0;
> +    unsigned long num = 0;
> +    unsigned long i = 0;
>
>      memory_global_dirty_log_sync();
>      WITH_RCU_READ_LOCK_GUARD() {
> @@ -3682,19 +3717,22 @@ void colo_flush_ram_cache(void)
>          block = QLIST_FIRST_RCU(&ram_list.blocks);
>
>          while (block) {
> -            offset = migration_bitmap_find_dirty(ram_state, block, offset);
> +            offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
>
>              if (((ram_addr_t)offset) << TARGET_PAGE_BITS
>                  >= block->used_length) {
>                  offset = 0;
> +                num = 0;
>                  block = QLIST_NEXT_RCU(block, next);
>              } else {
> -                migration_bitmap_clear_dirty(ram_state, block, offset);
> +                for (i = 0; i < num; i++) {
> +                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
> +                }
>                  dst_host = block->host
>                           + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
>                  src_host = block->colo_cache
>                           + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
> -                memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
> +                memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
>              }
>          }
>      }
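For anyone following along, below is a minimal user-space sketch (my own, not QEMU code) of the contract the question touches on: the helper is expected to return the index of the first page of the dirty run and report the run length through *num, so the caller can clear and copy pages [first, first + num). find_next_bit()/find_next_zero_bit() here are simplified stand-ins for the bitmap helpers QEMU uses, and find_dirty_run() mirrors colo_bitmap_find_dirty() in spirit only.

/*
 * Illustrative sketch only: the bit-scan helpers below are naive
 * reimplementations over a bool array, not the real kernel-style
 * bitmap routines.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 16

static unsigned long find_next_bit(const bool *bitmap, unsigned long size,
                                   unsigned long start)
{
    for (unsigned long i = start; i < size; i++) {
        if (bitmap[i]) {
            return i;
        }
    }
    return size;
}

static unsigned long find_next_zero_bit(const bool *bitmap, unsigned long size,
                                        unsigned long start)
{
    for (unsigned long i = start; i < size; i++) {
        if (!bitmap[i]) {
            return i;
        }
    }
    return size;
}

/* Hypothetical helper modelled on colo_bitmap_find_dirty() */
static unsigned long find_dirty_run(const bool *bitmap, unsigned long size,
                                    unsigned long start, unsigned long *num)
{
    unsigned long first = find_next_bit(bitmap, size, start);
    unsigned long next;

    if (first >= size) {
        return first;               /* no dirty page left */
    }
    next = find_next_zero_bit(bitmap, size, first + 1);
    assert(next >= first);
    *num = next - first;            /* run covers [first, next) */
    return first;                   /* first page of the run, not next - 1 */
}

int main(void)
{
    /* pages 3..6 and 10 are dirty */
    bool bitmap[NPAGES] = { [3] = true, [4] = true, [5] = true,
                            [6] = true, [10] = true };
    unsigned long offset = 0, num = 0;

    while (offset < NPAGES) {
        offset = find_dirty_run(bitmap, NPAGES, offset, &num);
        if (offset >= NPAGES) {
            break;
        }
        /* a real caller would memcpy() num pages starting at offset here */
        printf("dirty run: offset=%lu num=%lu\n", offset, num);
        offset += num;
    }
    return 0;
}

With the sample bitmap this prints one run of 4 pages starting at page 3 and one run of 1 page at page 10, which is the batching the patch relies on for the single memcpy().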