Reduce hypercall overhead when unmapping large memory regions by batching
GPA unmap operations into 2MB-aligned chunks. Chunks in which no pages are
mapped are skipped entirely, and the large-page unmap flag is applied per
chunk, only when both the chunk's start GFN and its page count are
2MB-aligned.

Use a dedicated constant, MSHV_MAX_UNMAP_GPA_PAGES, for the batch size to
improve code clarity and maintainability.
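
For illustration, here is a minimal userspace sketch of the chunking
arithmetic (not part of the patch; ALIGN_UP() stands in for the kernel's
ALIGN() macro, and the start GFN and page count are arbitrary example
values). An unaligned start GFN is first brought up to the next 512-page
(2MB) boundary, full 512-page batches follow, and the final batch is
clamped to the region end:

#include <stdio.h>
#include <stdint.h>

/* Mirrors MSHV_MAX_UNMAP_GPA_PAGES: 512 4K pages == 2MB */
#define MAX_UNMAP_GPA_PAGES	512ULL

/* Round x up to the next multiple of a (a must be a power of two) */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t start_gfn = 0x1f0, nr_pages = 0x500;
	uint64_t end_gfn = start_gfn + nr_pages;
	uint64_t gfn, gfn_count;

	for (gfn = start_gfn; gfn < end_gfn; gfn += gfn_count) {
		if (gfn % MAX_UNMAP_GPA_PAGES)
			/* First batch only: advance to the next 2MB boundary */
			gfn_count = ALIGN_UP(gfn, MAX_UNMAP_GPA_PAGES) - gfn;
		else
			gfn_count = MAX_UNMAP_GPA_PAGES;

		/* Final batch: clamp to the end of the region */
		if (gfn + gfn_count > end_gfn)
			gfn_count = end_gfn - gfn;

		printf("unmap gfn %#llx, count %#llx\n",
		       (unsigned long long)gfn,
		       (unsigned long long)gfn_count);
	}
	return 0;
}

With these example values the batches come out as 0x10, 0x200, 0x200 and
0xf0 pages, so every batch after the first starts on a 2MB boundary, which
is what lets the large-page flag be decided per batch.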

Signed-off-by: Stanislav Kinsburskii <[email protected]>
Reviewed-by: Michael Kelley <[email protected]>
Reviewed-by: Nuno Das Neves <[email protected]>
---
 drivers/hv/mshv_root.h         |    2 ++
 drivers/hv/mshv_root_hv_call.c |    2 +-
 drivers/hv/mshv_root_main.c    |   36 ++++++++++++++++++++++++++++++------
 3 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/drivers/hv/mshv_root.h b/drivers/hv/mshv_root.h
index 3eb815011b46..5eece7077f8b 100644
--- a/drivers/hv/mshv_root.h
+++ b/drivers/hv/mshv_root.h
@@ -32,6 +32,8 @@ static_assert(HV_HYP_PAGE_SIZE == MSHV_HV_PAGE_SIZE);
 
 #define MSHV_PIN_PAGES_BATCH_SIZE      (0x10000000ULL / HV_HYP_PAGE_SIZE)
 
+#define MSHV_MAX_UNMAP_GPA_PAGES       512
+
 struct mshv_vp {
        u32 vp_index;
        struct mshv_partition *vp_partition;
diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
index caf02cfa49c9..8fa983f1109b 100644
--- a/drivers/hv/mshv_root_hv_call.c
+++ b/drivers/hv/mshv_root_hv_call.c
@@ -17,7 +17,7 @@
 /* Determined empirically */
 #define HV_INIT_PARTITION_DEPOSIT_PAGES 208
 #define HV_MAP_GPA_DEPOSIT_PAGES       256
-#define HV_UMAP_GPA_PAGES              512
+#define HV_UMAP_GPA_PAGES              MSHV_MAX_UNMAP_GPA_PAGES
 
 #define HV_PAGE_COUNT_2M_ALIGNED(pg_count) (!((pg_count) & (0x200 - 1)))
 
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
index a85872b72b1a..ef36d8115d8a 100644
--- a/drivers/hv/mshv_root_main.c
+++ b/drivers/hv/mshv_root_main.c
@@ -1365,7 +1365,7 @@ mshv_map_user_memory(struct mshv_partition *partition,
 static void mshv_partition_destroy_region(struct mshv_mem_region *region)
 {
        struct mshv_partition *partition = region->partition;
-       u32 unmap_flags = 0;
+       u64 gfn, gfn_count, start_gfn, end_gfn;
        int ret;
 
        hlist_del(&region->hnode);
@@ -1380,12 +1380,36 @@ static void mshv_partition_destroy_region(struct mshv_mem_region *region)
                }
        }
 
-       if (region->flags.large_pages)
-               unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
+       start_gfn = region->start_gfn;
+       end_gfn = region->start_gfn + region->nr_pages;
+
+       for (gfn = start_gfn; gfn < end_gfn; gfn += gfn_count) {
+               u32 unmap_flags = 0;
+
+               if (gfn % MSHV_MAX_UNMAP_GPA_PAGES)
+                       gfn_count = ALIGN(gfn, MSHV_MAX_UNMAP_GPA_PAGES) - gfn;
+               else
+                       gfn_count = MSHV_MAX_UNMAP_GPA_PAGES;
+
+               if (gfn + gfn_count > end_gfn)
+                       gfn_count = end_gfn - gfn;
 
-       /* ignore unmap failures and continue as process may be exiting */
-       hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn,
-                               region->nr_pages, unmap_flags);
+               /* Skip all pages in this range if none are mapped */
+               if (!memchr_inv(region->pages + (gfn - start_gfn), 0,
+                               gfn_count * sizeof(struct page *)))
+                       continue;
+
+               if (region->flags.large_pages &&
+                   VALUE_PMD_ALIGNED(gfn) && VALUE_PMD_ALIGNED(gfn_count))
+                       unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
+
+               ret = hv_call_unmap_gpa_pages(partition->pt_id, gfn,
+                                             gfn_count, unmap_flags);
+               if (ret)
+                       pt_err(partition,
+                              "Failed to unmap GPA pages %#llx-%#llx: %d\n",
+                              gfn, gfn + gfn_count - 1, ret);
+       }
 
        mshv_region_invalidate(region);
 
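As a side note on the memchr_inv() skip above: region->pages holds a NULL
struct page pointer for every GFN in the region that was never mapped, and
memchr_inv() returns NULL only when the whole slice is zero, so an entirely
unmapped 2MB batch costs no hypercall. Below is a minimal userspace sketch
of the same check (memchr_inv_sketch() is a hypothetical stand-in for the
kernel helper, and this assumes the usual all-zero-bytes representation of
NULL pointers):

#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's memchr_inv(): returns the first byte that
 * differs from c, or NULL if all len bytes equal c. */
static const void *memchr_inv_sketch(const void *p, int c, size_t len)
{
	const unsigned char *s = p;
	size_t i;

	for (i = 0; i < len; i++)
		if (s[i] != (unsigned char)c)
			return s + i;
	return NULL;
}

int main(void)
{
	void *pages[8] = { NULL };	/* all-NULL slice: nothing mapped */

	if (!memchr_inv_sketch(pages, 0, sizeof(pages)))
		printf("batch has no mapped pages, skip the unmap call\n");

	pages[3] = (void *)0x1;		/* pretend one page is mapped */
	if (memchr_inv_sketch(pages, 0, sizeof(pages)))
		printf("batch has a mapped page, issue the unmap call\n");

	return 0;
}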