From: Zi Yan <z...@nvidia.com>

Enable set_migratetype_isolate() to check a specified sub-range for
unmovable pages during isolation. Page isolation is done at
max(MAX_ORDER_NR_PAGES, pageblock_nr_pages) granularity, but not all
pages within that granularity are intended to be isolated. For example,
alloc_contig_range(), which uses page isolation, allows ranges without
alignment. This commit makes the unmovable page check examine only the
pages of interest, so that page isolation can succeed for any set of
non-overlapping ranges.
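
To illustrate, the unmovable page scan is clamped to the intersection
of each pageblock with the range of interest. Below is a minimal
userspace sketch of that arithmetic (not kernel code; the pageblock
size and PFN values are made-up examples, and first_pfn/last_pfn mirror
the names used in the patch):

	#include <stdio.h>

	#define pageblock_nr_pages 512UL
	#define ALIGN(x, a) ((((x) + (a) - 1) / (a)) * (a))
	#define max(a, b)   ((a) > (b) ? (a) : (b))
	#define min(a, b)   ((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned long block_pfn = 512;                  /* pageblock start */
		unsigned long start_pfn = 1000, end_pfn = 1300; /* range of interest */
		unsigned long first_pfn = max(block_pfn, start_pfn);
		unsigned long last_pfn = min(ALIGN(first_pfn + 1, pageblock_nr_pages),
					     end_pfn);

		/* prints "scan [1000, 1024)": only the tail of this pageblock */
		printf("scan [%lu, %lu)\n", first_pfn, last_pfn);
		return 0;
	}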

Signed-off-by: Zi Yan <z...@nvidia.com>
---
 include/linux/page-isolation.h |  1 +
 mm/memory_hotplug.c            | 12 +++++++-
 mm/page_alloc.c                |  2 +-
 mm/page_isolation.c            | 53 +++++++++++++++++++++-------------
 4 files changed, 46 insertions(+), 22 deletions(-)

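For reference, the updated prototype (from the page-isolation.h hunk
below) is:

	int
	start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
				 unsigned long isolate_start, unsigned long isolate_end,
				 unsigned migratetype, int flags);

[start_pfn, end_pfn) is only scanned for unmovable pages, while
[isolate_start, isolate_end) is the pageblock-aligned superset whose
migratetype is actually set to MIGRATE_ISOLATE, as in the
offline_pages() and alloc_contig_range() callers below.
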
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index e14eddf6741a..a4d2687ed4e6 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -42,6 +42,7 @@ int move_freepages_block(struct zone *zone, struct page *page,
  */
 int
 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+                        unsigned long isolate_start, unsigned long isolate_end,
                         unsigned migratetype, int flags);
 
 /*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0139b77c51d5..5db84c3fa882 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1901,8 +1901,18 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
        zone_pcp_disable(zone);
        lru_cache_disable();
 
-       /* set above range as isolated */
+       /*
+        * set above range as isolated
+        *
+        * start_pfn and end_pfn can be passed as isolate_start and
+        * isolate_end directly, because they are already PAGES_PER_SECTION
+        * (>= MAX_ORDER_NR_PAGES) aligned; even when start_pfn is only
+        * pageblock_nr_pages aligned in the memmap_on_memory case, there is
+        * no need to isolate pages before start_pfn, since they are used by
+        * the memmap and thus not user visible.
+        */
        ret = start_isolate_page_range(start_pfn, end_pfn,
+                                      start_pfn, end_pfn,
                                       MIGRATE_MOVABLE,
                                       MEMORY_OFFLINE | REPORT_FAILURE);
        if (ret) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1d812268c2a9..812cf557b20f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -9016,7 +9016,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
         * put back to page allocator so that buddy can use them.
         */
 
-       ret = start_isolate_page_range(pfn_max_align_down(start),
+       ret = start_isolate_page_range(start, end, pfn_max_align_down(start),
                                       pfn_max_align_up(end), migratetype, 0);
        if (ret)
                return ret;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 6c841274bf46..d17ad9a7d4bf 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -16,7 +16,8 @@
 #include <trace/events/page_isolation.h>
 
 /*
- * This function checks whether pageblock includes unmovable pages or not.
+ * This function checks whether the part of a pageblock within
+ * [start_pfn, end_pfn) includes unmovable pages or not.
  *
  * PageLRU check without isolation or lru_lock could race so that
  * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
@@ -29,11 +30,14 @@
  *
  */
 static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
-                                int migratetype, int flags)
+                                int migratetype, int flags,
+                                unsigned long start_pfn, unsigned long end_pfn)
 {
-       unsigned long iter = 0;
-       unsigned long pfn = page_to_pfn(page);
-       unsigned long offset = pfn % pageblock_nr_pages;
+       unsigned long first_pfn = max(page_to_pfn(page), start_pfn);
+       unsigned long pfn = first_pfn;
+       unsigned long last_pfn = min(ALIGN(pfn + 1, pageblock_nr_pages), end_pfn);
+
+       page = pfn_to_page(pfn);
 
        if (is_migrate_cma_page(page)) {
                /*
@@ -47,8 +51,8 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                return page;
        }
 
-       for (; iter < pageblock_nr_pages - offset; iter++) {
-               page = pfn_to_page(pfn + iter);
+       for (pfn = first_pfn; pfn < last_pfn; pfn++) {
+               page = pfn_to_page(pfn);
 
                /*
                 * Both, bootmem allocations and memory holes are marked
@@ -85,7 +89,7 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                        }
 
                        skip_pages = compound_nr(head) - (page - head);
-                       iter += skip_pages - 1;
+                       pfn += skip_pages - 1;
                        continue;
                }
 
@@ -97,7 +101,7 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                 */
                if (!page_ref_count(page)) {
                        if (PageBuddy(page))
-                               iter += (1 << buddy_order(page)) - 1;
+                               pfn += (1 << buddy_order(page)) - 1;
                        continue;
                }
 
@@ -134,7 +138,13 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
        return NULL;
 }
 
-static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
+/*
+ * This function sets the pageblock migratetype to MIGRATE_ISOLATE if no
+ * unmovable page is present in [start_pfn, end_pfn). The pageblock must
+ * overlap with [start_pfn, end_pfn).
+ */
+static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags,
+                       unsigned long start_pfn, unsigned long end_pfn)
 {
        struct zone *zone = page_zone(page);
        struct page *unmovable;
@@ -156,7 +166,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
-       unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
+       unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags, start_pfn, end_pfn);
        if (!unmovable) {
                unsigned long nr_pages;
                int mt = get_pageblock_migratetype(page);
@@ -265,8 +275,12 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
 /**
  * start_isolate_page_range() - make page-allocation-type of range of pages to
  * be MIGRATE_ISOLATE.
- * @start_pfn:         The lower PFN of the range to be isolated.
- * @end_pfn:           The upper PFN of the range to be isolated.
+ * @start_pfn:         The lower PFN of the range to be checked for
+ *                     unmovable pages.
+ * @end_pfn:           The upper PFN of the range to be checked for
+ *                     unmovable pages.
+ * @isolate_start:     The lower PFN of the range to be isolated.
+ * @isolate_end:       The upper PFN of the range to be isolated.
  *                     start_pfn/end_pfn must be aligned to pageblock_order.
  * @migratetype:       Migrate type to set in error recovery.
  * @flags:             The following flags are allowed (they can be combined in
@@ -304,20 +318,19 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  * Return: 0 on success and -EBUSY if any part of range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+                            unsigned long isolate_start, unsigned long isolate_end,
                             unsigned migratetype, int flags)
 {
        unsigned long pfn;
        struct page *page;
 
-       BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
-       BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
-
-       for (pfn = start_pfn;
-            pfn < end_pfn;
+       for (pfn = isolate_start;
+            pfn < isolate_end;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
-               if (page && set_migratetype_isolate(page, migratetype, flags)) {
-                       undo_isolate_page_range(start_pfn, pfn, migratetype);
+               if (page && set_migratetype_isolate(page, migratetype, flags,
+                                       start_pfn, end_pfn)) {
+                       undo_isolate_page_range(isolate_start, pfn, migratetype);
                        return -EBUSY;
                }
        }
-- 
2.34.1
