From: Stanislav Kinsburskii <[email protected]> Sent: Monday, November 17, 2025 8:53 AM
>
> Reduce overhead when unmapping large memory regions by batching GPA unmap
> operations in 2MB-aligned chunks.
>
> Use a dedicated constant for batch size to improve code clarity and
> maintainability.
>
> Signed-off-by: Stanislav Kinsburskii <[email protected]>
> Reviewed-by: Michael Kelley <[email protected]>
> Reviewed-by: Nuno Das Neves <[email protected]>
> ---
>  drivers/hv/mshv_root.h         |  2 ++
>  drivers/hv/mshv_root_hv_call.c |  2 +-
>  drivers/hv/mshv_root_main.c    | 36 ++++++++++++++++++++++++++++++------
>  3 files changed, 33 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/hv/mshv_root.h b/drivers/hv/mshv_root.h
> index 3eb815011b46..5eece7077f8b 100644
> --- a/drivers/hv/mshv_root.h
> +++ b/drivers/hv/mshv_root.h
> @@ -32,6 +32,8 @@ static_assert(HV_HYP_PAGE_SIZE == MSHV_HV_PAGE_SIZE);
>
>  #define MSHV_PIN_PAGES_BATCH_SIZE (0x10000000ULL / HV_HYP_PAGE_SIZE)
>
> +#define MSHV_MAX_UNMAP_GPA_PAGES 512
> +
>  struct mshv_vp {
>  	u32 vp_index;
>  	struct mshv_partition *vp_partition;
> diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
> index caf02cfa49c9..8fa983f1109b 100644
> --- a/drivers/hv/mshv_root_hv_call.c
> +++ b/drivers/hv/mshv_root_hv_call.c
> @@ -17,7 +17,7 @@
>  /* Determined empirically */
>  #define HV_INIT_PARTITION_DEPOSIT_PAGES 208
>  #define HV_MAP_GPA_DEPOSIT_PAGES 256
> -#define HV_UMAP_GPA_PAGES 512
> +#define HV_UMAP_GPA_PAGES MSHV_MAX_UNMAP_GPA_PAGES
>
>  #define HV_PAGE_COUNT_2M_ALIGNED(pg_count) (!((pg_count) & (0x200 - 1)))
>
> diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
> index a85872b72b1a..ef36d8115d8a 100644
> --- a/drivers/hv/mshv_root_main.c
> +++ b/drivers/hv/mshv_root_main.c
> @@ -1365,7 +1365,7 @@ mshv_map_user_memory(struct mshv_partition *partition,
>  static void mshv_partition_destroy_region(struct mshv_mem_region *region)
>  {
>  	struct mshv_partition *partition = region->partition;
> -	u32 unmap_flags = 0;
> +	u64 gfn, gfn_count, start_gfn, end_gfn;
>  	int ret;
>
>  	hlist_del(&region->hnode);
> @@ -1380,12 +1380,36 @@ static void mshv_partition_destroy_region(struct mshv_mem_region *region)
>  		}
>  	}
>
> -	if (region->flags.large_pages)
> -		unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
> +	start_gfn = region->start_gfn;
> +	end_gfn = region->start_gfn + region->nr_pages;
> +
> +	for (gfn = start_gfn; gfn < end_gfn; gfn += gfn_count) {
> +		u32 unmap_flags = 0;
> +
> +		if (gfn % MSHV_MAX_UNMAP_GPA_PAGES)
> +			gfn_count = ALIGN(gfn, MSHV_MAX_UNMAP_GPA_PAGES) - gfn;
> +		else
> +			gfn_count = MSHV_MAX_UNMAP_GPA_PAGES;
> +
> +		if (gfn + gfn_count > end_gfn)
> +			gfn_count = end_gfn - gfn;
>
> -	/* ignore unmap failures and continue as process may be exiting */
> -	hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn,
> -				region->nr_pages, unmap_flags);
> +		/* Skip all pages in this range if none are mapped */
> +		if (!memchr_inv(region->pages + (gfn - start_gfn), 0,
> +				gfn_count * sizeof(struct page *)))
> +			continue;
> +
> +		if (region->flags.large_pages &&
> +		    VALUE_PMD_ALIGNED(gfn) && VALUE_PMD_ALIGNED(gfn_count))
VALUE_PMD_ALIGNED isn't defined until Patch 4 of this series. The idea of a
page count being PMD-aligned occurs in a few other places in Linux kernel
code, and it is usually written as IS_ALIGNED(count, PTRS_PER_PMD), though
there's one occurrence of !(count % PTRS_PER_PMD). Also, mshv_root_hv_call.c
has HV_PAGE_COUNT_2M_ALIGNED(), which does the same thing. Some macro
consolidation is appropriate, or just open code as
IS_ALIGNED(<cnt>, PTRS_PER_PMD) and eliminate the macros. (A rough sketch of
what I mean follows the quoted patch below.)

> +			unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
> +
> +		ret = hv_call_unmap_gpa_pages(partition->pt_id, gfn,
> +					      gfn_count, unmap_flags);
> +		if (ret)
> +			pt_err(partition,
> +			       "Failed to unmap GPA pages %#llx-%#llx: %d\n",
> +			       gfn, gfn + gfn_count - 1, ret);
> +	}
>
>  	mshv_region_invalidate(region);
>
>
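To be concrete, the check above could be open coded along these lines. This
is an untested sketch, and it assumes PTRS_PER_PMD works out to 512 (i.e.,
one 2MB large page of 4K pages) on the configurations this driver builds for:

		/* Large-page unmap only when both the start GFN and the
		 * count are 2MB (PMD) aligned; no driver-local macro needed.
		 */
		if (region->flags.large_pages &&
		    IS_ALIGNED(gfn, PTRS_PER_PMD) &&
		    IS_ALIGNED(gfn_count, PTRS_PER_PMD))
			unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;

HV_PAGE_COUNT_2M_ALIGNED(pg_count) in mshv_root_hv_call.c could then become
IS_ALIGNED(pg_count, PTRS_PER_PMD) the same way, and both macros go away.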

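Separately, to confirm my reading of the batching arithmetic in the quoted
loop: each hypercall covers at most MSHV_MAX_UNMAP_GPA_PAGES pages and never
crosses a 512-page boundary, with a possibly short head and tail chunk. Here
is a small standalone userspace sketch of that behavior (illustrative only;
ALIGN_UP re-derives the kernel's ALIGN(), and the values in main() are made
up):

/* Illustrative userspace sketch of the chunking done in the quoted hunk. */
#include <stdio.h>
#include <stdint.h>

#define MSHV_MAX_UNMAP_GPA_PAGES 512ULL
#define ALIGN_UP(x, a)           (((x) + (a) - 1) & ~((a) - 1))

static void show_batches(uint64_t start_gfn, uint64_t nr_pages)
{
	uint64_t end_gfn = start_gfn + nr_pages;
	uint64_t gfn, gfn_count;

	for (gfn = start_gfn; gfn < end_gfn; gfn += gfn_count) {
		/* First chunk may be short so the cursor lands on a boundary */
		if (gfn % MSHV_MAX_UNMAP_GPA_PAGES)
			gfn_count = ALIGN_UP(gfn, MSHV_MAX_UNMAP_GPA_PAGES) - gfn;
		else
			gfn_count = MSHV_MAX_UNMAP_GPA_PAGES;

		/* Last chunk may be short as well */
		if (gfn + gfn_count > end_gfn)
			gfn_count = end_gfn - gfn;

		printf("unmap gfn %#llx..%#llx (%llu pages)\n",
		       (unsigned long long)gfn,
		       (unsigned long long)(gfn + gfn_count - 1),
		       (unsigned long long)gfn_count);
	}
}

int main(void)
{
	/* Unaligned start and length: expect a short head chunk, full
	 * 512-page chunks in the middle, and a short tail chunk. */
	show_batches(0x100030, 0x7a0);
	return 0;
}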