This helper function only factors out the code flow within each order
during fallback. There is no functional change.

Signed-off-by: ChengYi He <[email protected]>
---
 mm/page_alloc.c | 79 +++++++++++++++++++++++++++++++++------------------------
 1 file changed, 46 insertions(+), 33 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 63358d9..50c325a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1752,51 +1752,64 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
        }
 }
 
-/* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
-__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
+__rmqueue_fallback_order(struct zone *zone, unsigned int order,
+               int start_migratetype, int current_order)
 {
        struct free_area *area;
-       unsigned int current_order;
        struct page *page;
        int fallback_mt;
        bool can_steal;
 
-       /* Find the largest possible block of pages in the other list */
-       for (current_order = MAX_ORDER-1;
-                               current_order >= order && current_order <= MAX_ORDER-1;
-                               --current_order) {
-               area = &(zone->free_area[current_order]);
-               fallback_mt = find_suitable_fallback(area, current_order,
-                               start_migratetype, false, &can_steal);
-               if (fallback_mt == -1)
-                       continue;
+       area = &(zone->free_area[current_order]);
+       fallback_mt = find_suitable_fallback(area, current_order,
+                       start_migratetype, false, &can_steal);
+       if (fallback_mt == -1)
+               return NULL;
 
-               page = list_first_entry(&area->free_list[fallback_mt],
-                                               struct page, lru);
-               if (can_steal)
-                       steal_suitable_fallback(zone, page, start_migratetype);
+       page = list_first_entry(&area->free_list[fallback_mt],
+                                       struct page, lru);
+       if (can_steal)
+               steal_suitable_fallback(zone, page, start_migratetype);
 
-               /* Remove the page from the freelists */
-               area->nr_free--;
-               list_del(&page->lru);
-               rmv_page_order(page);
+       /* Remove the page from the freelists */
+       area->nr_free--;
+       list_del(&page->lru);
+       rmv_page_order(page);
 
-               expand(zone, page, order, current_order, area,
-                                       start_migratetype);
-               /*
-                * The pcppage_migratetype may differ from pageblock's
-                * migratetype depending on the decisions in
-                * find_suitable_fallback(). This is OK as long as it does not
-                * differ for MIGRATE_CMA pageblocks. Those can be used as
-                * fallback only via special __rmqueue_cma_fallback() function
-                */
-               set_pcppage_migratetype(page, start_migratetype);
+       expand(zone, page, order, current_order, area,
+                               start_migratetype);
+       /*
+        * The pcppage_migratetype may differ from pageblock's
+        * migratetype depending on the decisions in
+        * find_suitable_fallback(). This is OK as long as it does not
+        * differ for MIGRATE_CMA pageblocks. Those can be used as
+        * fallback only via special __rmqueue_cma_fallback() function
+        */
+       set_pcppage_migratetype(page, start_migratetype);
 
-               trace_mm_page_alloc_extfrag(page, order, current_order,
-                       start_migratetype, fallback_mt);
+       trace_mm_page_alloc_extfrag(page, order, current_order,
+               start_migratetype, fallback_mt);
 
-               return page;
+       return page;
+}
+
+/* Remove an element from the buddy allocator from the fallback list */
+static inline struct page *
+__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
+{
+       unsigned int current_order;
+       struct page *page;
+
+       /* Find the largest possible block of pages in the other list */
+       for (current_order = MAX_ORDER-1;
+                               current_order >= order && current_order <= MAX_ORDER-1;
+                               --current_order) {
+               page = __rmqueue_fallback_order(zone, order, start_migratetype,
+                               current_order);
+
+               if (page)
+                       return page;
        }
 
        return NULL;
-- 
1.9.1

Reply via email to