In particular, when checking for "all holes" we can avoid rechecking the
pieces we are removing: use the updated zone data instead (which might
already be zero).
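
For illustration, a compile-only userspace sketch of the idea follows
(struct zone_sketch, sketch_zone_end_pfn() and the valid() callback are
simplified stand-ins for struct zone, zone_end_pfn() and pfn_valid();
this is not the kernel code itself):

	struct zone_sketch {
		unsigned long zone_start_pfn;
		unsigned long spanned_pages;
	};

	/* Same definition as the kernel helper zone_end_pfn(). */
	static unsigned long sketch_zone_end_pfn(const struct zone_sketch *z)
	{
		return z->zone_start_pfn + z->spanned_pages;
	}

	/*
	 * Walking the *updated* span means a zone that was just shrunk
	 * to nothing (zone_start_pfn == 0, spanned_pages == 0) yields
	 * an empty loop: the removed range is never rechecked.
	 */
	static int sketch_zone_is_all_holes(const struct zone_sketch *z,
					    int (*valid)(unsigned long pfn))
	{
		unsigned long pfn;

		for (pfn = z->zone_start_pfn;
		     pfn < sketch_zone_end_pfn(z); pfn++)
			if (valid(pfn))
				return 0;
		return 1;
	}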

Cc: Andrew Morton <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Pavel Tatashin <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Wei Yang <[email protected]>
Signed-off-by: David Hildenbrand <[email protected]>
---
 mm/memory_hotplug.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0a21f6f99753..d3c34bbeb36d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -374,14 +374,11 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                             unsigned long end_pfn)
 {
-       unsigned long zone_start_pfn = zone->zone_start_pfn;
-       unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
-       unsigned long zone_end_pfn = z;
        unsigned long pfn;
        int nid = zone_to_nid(zone);
 
        zone_span_writelock(zone);
-       if (zone_start_pfn == start_pfn) {
+       if (zone->zone_start_pfn == start_pfn) {
                /*
                 * If the section is smallest section in the zone, it need
                 * shrink zone->zone_start_pfn and zone->zone_spanned_pages.
@@ -389,22 +386,29 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                 * for shrinking zone.
                 */
                pfn = find_smallest_section_pfn(nid, zone, end_pfn,
-                                               zone_end_pfn);
+                                               zone_end_pfn(zone));
                if (pfn) {
+                       zone->spanned_pages = zone_end_pfn(zone) - pfn;
                        zone->zone_start_pfn = pfn;
-                       zone->spanned_pages = zone_end_pfn - pfn;
+               } else {
+                       zone->zone_start_pfn = 0;
+                       zone->spanned_pages = 0;
                }
-       } else if (zone_end_pfn == end_pfn) {
+       } else if (zone_end_pfn(zone) == end_pfn) {
                /*
                 * If the section is biggest section in the zone, it need
                 * shrink zone->spanned_pages.
                 * In this case, we find second biggest valid mem_section for
                 * shrinking zone.
                 */
-               pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
+               pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
                                               start_pfn);
                if (pfn)
-                       zone->spanned_pages = pfn - zone_start_pfn + 1;
+                       zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
+               else {
+                       zone->zone_start_pfn = 0;
+                       zone->spanned_pages = 0;
+               }
        }
 
        /*
@@ -413,8 +417,8 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
         * change the zone. But perhaps, the zone has only hole data. Thus
         * it check the zone has only hole or not.
         */
-       pfn = zone_start_pfn;
-       for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
+       for (pfn = zone->zone_start_pfn;
+            pfn < zone_end_pfn(zone); pfn += PAGES_PER_SUBSECTION) {
                if (unlikely(!pfn_valid(pfn)))
                        continue;
 
-- 
2.21.0
