We make sure that we cannot have any memory holes right at the beginning
of offline_pages(), and we only support onlining/offlining of full
sections. Both sections and pageblocks are a power of two in size, and
sections always span full pageblocks.

We can directly calculate the number of isolated pageblocks from nr_pages.
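
For illustration only (not part of the patch): a minimal user-space
sketch of the arithmetic above, assuming illustrative x86-64 SPARSEMEM
values of 32768 pages per section (128 MiB) and 512 pages per pageblock
(2 MiB) with 4 KiB pages. Since offline_pages() only accepts
section-aligned ranges and sections span full pageblocks, nr_pages is
always a multiple of pageblock_nr_pages and the division is exact.

#include <assert.h>
#include <stdio.h>

/* Illustrative values, not taken from kernel headers. */
#define PAGES_PER_SECTION	(1UL << 15)	/* 128 MiB sections */
#define pageblock_nr_pages	(1UL << 9)	/* 2 MiB pageblocks */

int main(void)
{
	/* offline_pages() only accepts ranges of full sections... */
	unsigned long nr_pages = 4 * PAGES_PER_SECTION;

	/* ...and sections span full pageblocks, so this always holds. */
	assert(nr_pages % pageblock_nr_pages == 0);

	/* Number of pageblocks isolated (and later un-isolated) in the range. */
	printf("%lu isolated pageblocks\n", nr_pages / pageblock_nr_pages);
	return 0;
}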

Cc: Andrew Morton <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Wei Yang <[email protected]>
Cc: Baoquan He <[email protected]>
Cc: Pankaj Gupta <[email protected]>
Cc: Oscar Salvador <[email protected]>
Signed-off-by: David Hildenbrand <[email protected]>
---
 mm/memory_hotplug.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 50aa5df696e9d..098361fcb4504 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1460,10 +1460,10 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 {
        const unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long pfn, system_ram_pages = 0;
-       int ret, node, nr_isolate_pageblock;
        unsigned long flags;
        struct zone *zone;
        struct memory_notify arg;
+       int ret, node;
        char *reason;
 
        /* We can only offline full sections (e.g., SECTION_IS_ONLINE) */
@@ -1507,7 +1507,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
                reason = "failure to isolate range";
                goto failed_removal;
        }
-       nr_isolate_pageblock = ret;
 
        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
@@ -1569,7 +1568,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
         * pageblocks zone counter here.
         */
        spin_lock_irqsave(&zone->lock, flags);
-       zone->nr_isolate_pageblock -= nr_isolate_pageblock;
+       zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
        spin_unlock_irqrestore(&zone->lock, flags);
 
        /* removal success */
-- 
2.26.2
