* Peter Xu ([email protected]) wrote:
> Migration may not always want to work on memory in chunks of the host page
> size; sometimes we want to handle the memory in smaller chunks, e.g. when
> it is doubly mapped as both huge and small pages.
>
> In those cases we prefer to assume the memory is always mapped in small
> pages (qemu_real_host_page_size) and handle it just as if the pages were
> only mapped small.
>
> Let's do this to prepare for postcopy double-mapping of hugetlbfs.
>
> Signed-off-by: Peter Xu <[email protected]>
> ---
> migration/migration.c | 6 ++++--
> migration/postcopy-ram.c | 16 +++++++++-------
> migration/ram.c | 29 ++++++++++++++++++++++-------
> migration/ram.h | 1 +
> 4 files changed, 36 insertions(+), 16 deletions(-)
>
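(Not part of the patch; a standalone sketch to make the idea above concrete.
It assumes a 2M hugetlb-backed block and a 4K real host page size, and shows
the alignment the postcopy request/placement paths end up working with in
each mode.)

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Simplified stand-in for QEMU's ROUND_DOWN(); power-of-two sizes only. */
    #define ROUND_DOWN(n, d) ((n) & ~((uint64_t)(d) - 1))

    int main(void)
    {
        uint64_t haddr = 0x7f0040212345ULL;   /* hypothetical faulting address  */
        uint64_t huge  = 2 * 1024 * 1024;     /* qemu_ram_pagesize() of hugetlb */
        uint64_t small = 4096;                /* qemu_real_host_page_size()     */

        /* Without doublemap: request and place one whole 2M huge page. */
        printf("huge:  addr 0x%" PRIx64 ", len %" PRIu64 "\n",
               ROUND_DOWN(haddr, huge), huge);
        /* With doublemap: request and place a single 4K small page. */
        printf("small: addr 0x%" PRIx64 ", len %" PRIu64 "\n",
               ROUND_DOWN(haddr, small), small);
        return 0;
    }
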
> diff --git a/migration/migration.c b/migration/migration.c
> index b174f2af92..f6fe474fc3 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -408,7 +408,7 @@ int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
> {
> uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
> size_t msglen = 12; /* start + len */
> - size_t len = qemu_ram_pagesize(rb);
> + size_t len = migration_ram_pagesize(rb);
> enum mig_rp_message_type msg_type;
> const char *rbname;
> int rbname_len;
> @@ -443,8 +443,10 @@ int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
> int migrate_send_rp_req_pages(MigrationIncomingState *mis,
> RAMBlock *rb, ram_addr_t start, uint64_t haddr)
> {
> - void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
> bool received = false;
> + void *aligned;
> +
> + aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, migration_ram_pagesize(rb));
>
> WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
> received = ramblock_recv_bitmap_test_byte_offset(rb, start);
> diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
> index 2c86bfc091..acae1dc6ae 100644
> --- a/migration/postcopy-ram.c
> +++ b/migration/postcopy-ram.c
> @@ -694,7 +694,7 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
> uint64_t client_addr,
> RAMBlock *rb)
> {
> - size_t pagesize = qemu_ram_pagesize(rb);
> + size_t pagesize = migration_ram_pagesize(rb);
> struct uffdio_range range;
> int ret;
> trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
> @@ -712,7 +712,9 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd,
> static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
> ram_addr_t start, uint64_t haddr)
> {
> - void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
> + void *aligned;
> +
> + aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, migration_ram_pagesize(rb));
>
> /*
> * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
> @@ -722,7 +724,7 @@ static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
> * Checking a single bit is sufficient to handle pagesize > TPS as either
> * all relevant bits are set or not.
> */
> - assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
> + assert(QEMU_IS_ALIGNED(start, migration_ram_pagesize(rb)));
> if (ramblock_page_is_discarded(rb, start)) {
> bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);
>
> @@ -740,7 +742,7 @@ static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
> int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
> uint64_t client_addr, uint64_t rb_offset)
> {
> - uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
> + uint64_t aligned_rbo = ROUND_DOWN(rb_offset, migration_ram_pagesize(rb));
> MigrationIncomingState *mis = migration_incoming_get_current();
>
> trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
> @@ -1020,7 +1022,7 @@ static void *postcopy_ram_fault_thread(void *opaque)
> break;
> }
>
> - rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
> + rb_offset = ROUND_DOWN(rb_offset, migration_ram_pagesize(rb));
>
> trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address,
> qemu_ram_get_idstr(rb),
> rb_offset,
> @@ -1281,7 +1283,7 @@ int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset)
> int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
> RAMBlock *rb)
> {
> - size_t pagesize = qemu_ram_pagesize(rb);
> + size_t pagesize = migration_ram_pagesize(rb);
>
> /* copy also acks to the kernel waking the stalled thread up
> * TODO: We can inhibit that ack and only do it if it was requested
> @@ -1308,7 +1310,7 @@ int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
> int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
> RAMBlock *rb)
> {
> - size_t pagesize = qemu_ram_pagesize(rb);
> + size_t pagesize = migration_ram_pagesize(rb);
> trace_postcopy_place_page_zero(host);
>
> /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE
> diff --git a/migration/ram.c b/migration/ram.c
> index 334309f1c6..945c6477fd 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -121,6 +121,20 @@ static struct {
> uint8_t *decoded_buf;
> } XBZRLE;
>
> +/* Get the page size we should use for migration purpose. */
> +size_t migration_ram_pagesize(RAMBlock *block)
> +{
> + /*
> + * When hugetlb doublemap is enabled, we should always use the smallest
> + * page for migration.
> + */
> + if (migrate_hugetlb_doublemap()) {
> + return qemu_real_host_page_size();
> + }
> +
> + return qemu_ram_pagesize(block);
> +}
> +
> static void XBZRLE_cache_lock(void)
> {
> if (migrate_use_xbzrle()) {
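
Spelling out the new helper's contract with a toy mock (illustration only:
the 2M/4K values are assumptions, and migrate_hugetlb_doublemap() is the
capability query introduced earlier in the series):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    static bool doublemap;                                /* migrate_hugetlb_doublemap() */
    static const size_t backing_psize = 2 * 1024 * 1024;  /* qemu_ram_pagesize(block)    */
    static const size_t host_psize = 4096;                /* qemu_real_host_page_size()  */

    /* Mirrors migration_ram_pagesize() from the hunk above. */
    static size_t mig_psize(void)
    {
        return doublemap ? host_psize : backing_psize;
    }

    int main(void)
    {
        doublemap = false;
        assert(mig_psize() == backing_psize);   /* migrate at hugetlb granularity */
        doublemap = true;
        assert(mig_psize() == host_psize);      /* migrate at 4K granularity      */
        return 0;
    }
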
> @@ -1049,7 +1063,7 @@ bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
> MemoryRegionSection section = {
> .mr = rb->mr,
> .offset_within_region = start,
> - .size = int128_make64(qemu_ram_pagesize(rb)),
> + .size = int128_make64(migration_ram_pagesize(rb)),
> };
>
> return !ram_discard_manager_is_populated(rdm, &section);
> @@ -2152,7 +2166,7 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
> */
> if (postcopy_preempt_active()) {
> ram_addr_t page_start = start >> TARGET_PAGE_BITS;
> - size_t page_size = qemu_ram_pagesize(ramblock);
> + size_t page_size = migration_ram_pagesize(ramblock);
> PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY];
> int ret = 0;
>
> @@ -2316,7 +2330,7 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
> static void pss_host_page_prepare(PageSearchStatus *pss)
> {
> /* How many guest pages are there in one host page? */
> - size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
> + size_t guest_pfns = migration_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
>
> pss->host_page_sending = true;
> pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
> @@ -2425,7 +2439,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
> bool page_dirty, preempt_active = postcopy_preempt_active();
> int tmppages, pages = 0;
> size_t pagesize_bits =
> - qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
> + migration_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
> unsigned long start_page = pss->page;
> int res;
>
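On the send side, the only effect of the two hunks above is how many target
pages get treated as one "host page" unit.  A sketch of the window that
pss_host_page_prepare() ends up covering, assuming 4K target pages and a 2M
hugetlb block (the numbers are illustrative, not from the patch):

    #include <stdio.h>

    #define TARGET_PAGE_BITS 12   /* 4K target pages (assumption) */

    int main(void)
    {
        unsigned long page = 1000;                 /* arbitrary target-page index */
        unsigned long huge_pfns  = (2UL * 1024 * 1024) >> TARGET_PAGE_BITS; /* 512 */
        unsigned long small_pfns = 4096UL >> TARGET_PAGE_BITS;              /* 1   */

        /* The window of target pages covered by one host-page unit,
         * starting at host_page_start = ROUND_DOWN(page, guest_pfns): */
        printf("no doublemap: pages [%lu, %lu)\n",
               page / huge_pfns * huge_pfns,
               page / huge_pfns * huge_pfns + huge_pfns);     /* [512, 1024)  */
        printf("doublemap:    pages [%lu, %lu)\n",
               page / small_pfns * small_pfns,
               page / small_pfns * small_pfns + small_pfns);  /* [1000, 1001) */
        return 0;
    }
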
> @@ -3518,7 +3532,7 @@ static void *host_page_from_ram_block_offset(RAMBlock *block,
> {
> /* Note: Explicitly no check against offset_in_ramblock(). */
> return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
> - block->page_size);
> + migration_ram_pagesize(block));
> }
>
> static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
> @@ -3970,7 +3984,8 @@ int ram_load_postcopy(QEMUFile *f, int channel)
> break;
> }
> tmp_page->target_pages++;
> - matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
> + matches_target_page_size =
> + migration_ram_pagesize(block) == TARGET_PAGE_SIZE;
> /*
> * Postcopy requires that we place whole host pages atomically;
> * these may be huge pages for RAMBlocks that are backed by
Hmm, do you really want this change?
Dave
> @@ -4005,7 +4020,7 @@ int ram_load_postcopy(QEMUFile *f, int channel)
> * page
> */
> if (tmp_page->target_pages ==
> - (block->page_size / TARGET_PAGE_SIZE)) {
> + (migration_ram_pagesize(block) / TARGET_PAGE_SIZE)) {
> place_needed = true;
> }
> place_source = tmp_page->tmp_huge_page;
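
On the receive side, the same arithmetic decides when enough target pages
have arrived for the host page to be placed.  With the assumed sizes
(TARGET_PAGE_SIZE 4K, real host page 4K, 2M hugetlb backing; the 2M figure
is only illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define TARGET_PAGE_SIZE 4096   /* assumption, e.g. x86 */

    int main(void)
    {
        unsigned long psize[2] = { 2 * 1024 * 1024,   /* migration_ram_pagesize(), no doublemap */
                                   4096 };            /* with doublemap */

        for (int i = 0; i < 2; i++) {
            bool matches = (psize[i] == TARGET_PAGE_SIZE);
            unsigned long threshold = psize[i] / TARGET_PAGE_SIZE;

            printf("%s: matches_target_page_size=%d, place after %lu target page(s)\n",
                   i ? "doublemap   " : "no doublemap", matches, threshold);
        }
        return 0;
    }
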
> diff --git a/migration/ram.h b/migration/ram.h
> index 81cbb0947c..162b3e7cb8 100644
> --- a/migration/ram.h
> +++ b/migration/ram.h
> @@ -68,6 +68,7 @@ bool ramblock_is_ignored(RAMBlock *block);
> if (!qemu_ram_is_migratable(block)) {} else
>
> int xbzrle_cache_resize(uint64_t new_size, Error **errp);
> +size_t migration_ram_pagesize(RAMBlock *block);
> uint64_t ram_bytes_remaining(void);
> uint64_t ram_bytes_total(void);
> void mig_throttle_counter_reset(void);
> --
> 2.37.3
>
--
Dr. David Alan Gilbert / [email protected] / Manchester, UK