Reading a sysfs memoryN/valid_zones file leads to the following
oops when the first page of a range is not backed by a struct page.
show_valid_zones() assumes that 'start_pfn' is always valid for
page_zone().

 BUG: unable to handle kernel paging request at ffffea017a000000
 IP: show_valid_zones+0x6f/0x160

Since test_pages_in_a_zone() already checks for memory holes, extend
this function to return 'valid_start' and 'valid_end' for a given
range. show_valid_zones() then proceeds with the valid range.

Signed-off-by: Toshi Kani <toshi.kani@hpe.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Zhang Zhen <zhenzhang.zhang@huawei.com>
Cc: Reza Arbab <arbab@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dan Williams <dan.j.williams@intel.com>
---
 drivers/base/memory.c          |   12 ++++++------
 include/linux/memory_hotplug.h |    3 ++-
 mm/memory_hotplug.c            |   20 +++++++++++++++-----
 3 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ab8ea1..2c9aad9 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
        struct memory_block *mem = to_memory_block(dev);
        unsigned long start_pfn, end_pfn;
+       unsigned long valid_start, valid_end, valid_pages;
        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-       struct page *first_page;
        struct zone *zone;
        int zone_shift = 0;
 
        start_pfn = section_nr_to_pfn(mem->start_section_nr);
        end_pfn = start_pfn + nr_pages;
-       first_page = pfn_to_page(start_pfn);
 
        /* The block contains more than one zone can not be offlined. */
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return sprintf(buf, "none\n");
 
-       zone = page_zone(first_page);
+       zone = page_zone(pfn_to_page(valid_start));
+       valid_pages = valid_end - valid_start;
 
        /* MMOP_ONLINE_KEEP */
        sprintf(buf, "%s", zone->name);
 
        /* MMOP_ONLINE_KERNEL */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+       zone_shift = zone_can_shift(valid_start, valid_pages, ZONE_NORMAL);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }
 
        /* MMOP_ONLINE_MOVABLE */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+       zone_shift = zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 01033fa..b6aa972 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned 
long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long, unsigned long);
+extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+       unsigned long *valid_start, unsigned long *valid_end);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
 typedef void (*online_page_callback_t)(struct page *page);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7836606..9de2f83 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1478,10 +1478,13 @@ bool is_mem_section_removable(unsigned long start_pfn, 
unsigned long nr_pages)
 
 /*
  * Confirm all pages in a range [start, end) belong to the same zone.
+ * When true, return its valid [start, end).
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+                        unsigned long *valid_start, unsigned long *valid_end)
 {
        unsigned long pfn, sec_end_pfn;
+       unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
@@ -1503,14 +1506,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, 
unsigned long end_pfn)
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
                                return 0;
+                       if (!zone)
+                               start = pfn + i;
                        zone = page_zone(page);
+                       end = pfn + MAX_ORDER_NR_PAGES;
                }
        }
 
-       if (zone)
+       if (zone) {
+               *valid_start = start;
+               *valid_end = end;
                return 1;
-       else
+       } else {
                return 0;
+       }
 }
 
 /*
@@ -1837,6 +1846,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
        long offlined_pages;
        int ret, drain, retry_max, node;
        unsigned long flags;
+       unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;
 
@@ -1847,10 +1857,10 @@ static int __ref __offline_pages(unsigned long 
start_pfn,
                return -EINVAL;
        /* This makes hotplug much easier...and readable.
           we assume this for now. .*/
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return -EINVAL;
 
-       zone = page_zone(pfn_to_page(start_pfn));
+       zone = page_zone(pfn_to_page(valid_start));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;
 

Reply via email to