Let's mark all offline pages with PG_offline, so we can identify them
reliably (PG_reserved alone is ambiguous, as it is used for a number of
other purposes). We'll continue to mark them reserved.
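
For reference, this patch only uses the PageOffline()/SetPageOffline()/
ClearPageOffline() accessors. A minimal sketch of how PG_offline could
be declared as an ordinary page flag follows; the declaration itself is
not part of this patch, and its exact placement in page-flags.h is an
assumption:

	/* include/linux/page-flags.h (sketch only, not in this patch) */
	enum pageflags {
		...
		PG_offline,	/* page is logically offline */
		...
	};

	/* expands to PageOffline(), SetPageOffline(), ClearPageOffline() */
	PAGEFLAG(Offline, offline, PF_ANY)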

Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 drivers/hv/hv_balloon.c |  2 +-
 mm/memory_hotplug.c     | 10 ++++++----
 mm/page_alloc.c         |  5 ++++-
 3 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b3e9f13f8bc3..04d98d9b6191 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -893,7 +893,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
                         * backed previously) online too.
                         */
                        if (start_pfn > has->start_pfn &&
-                           !PageReserved(pfn_to_page(start_pfn - 1)))
+                           !PageOffline(pfn_to_page(start_pfn - 1)))
                                hv_bring_pgs_online(has, start_pfn, pgs_ol);
 
                }
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index d4474781c799..3a8d56476233 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -260,8 +260,8 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
                return ret;
 
        /*
-        * Make all the pages reserved so that nobody will stumble over half
-        * initialized state.
+        * Make all the pages offline and reserved so that nobody will stumble
+        * over half initialized state.
         * FIXME: We also have to associate it with a node because page_to_nid
         * relies on having page with the proper node.
         */
@@ -274,6 +274,7 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
                page = pfn_to_page(pfn);
                set_page_node(page, nid);
                SetPageReserved(page);
+               SetPageOffline(page);
        }
 
        if (!want_memblock)
@@ -669,6 +670,7 @@ EXPORT_SYMBOL_GPL(__online_page_increment_counters);
 
 void __online_page_free(struct page *page)
 {
+       ClearPageOffline(page);
        __free_reserved_page(page);
 }
 EXPORT_SYMBOL_GPL(__online_page_free);
@@ -687,7 +689,7 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
        unsigned long onlined_pages = *(unsigned long *)arg;
        struct page *page;
 
-       if (PageReserved(pfn_to_page(start_pfn)))
+       if (PageOffline(pfn_to_page(start_pfn)))
                for (i = 0; i < nr_pages; i++) {
                        page = pfn_to_page(start_pfn + i);
                        (*online_page_callback)(page);
@@ -1437,7 +1439,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 }
 
 /*
- * remove from free_area[] and mark all as Reserved.
+ * remove from free_area[] and mark all as Reserved and Offline.
  */
 static int
 offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 647c8c6dd4d1..2e5dcfdb0908 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8030,6 +8030,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
                if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
                        pfn++;
                        SetPageReserved(page);
+                       SetPageOffline(page);
                        continue;
                }
 
@@ -8043,8 +8044,10 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
                list_del(&page->lru);
                rmv_page_order(page);
                zone->free_area[order].nr_free--;
-               for (i = 0; i < (1 << order); i++)
+               for (i = 0; i < (1 << order); i++) {
                        SetPageReserved((page+i));
+                       SetPageOffline(page + i);
+               }
                pfn += (1 << order);
        }
        spin_unlock_irqrestore(&zone->lock, flags);
-- 
2.14.3