Deferred struct page initialisation is using pfn_to_page() and cond_resched()
on every PFN unnecessarily. Within a MAX_ORDER_NR_PAGES-aligned block the
struct page pointer can simply be advanced, so the lookup and the scheduler
check only need to happen when a block boundary is crossed or the cached
pointer has been invalidated. This patch minimises the number of lookups and
scheduler checks.

Signed-off-by: Mel Gorman <mgor...@suse.de>
---
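For reviewers, a minimal standalone sketch of the lookup-minimising pattern
the hunks below introduce. This is userspace illustration only, not kernel
code: the struct page layout, the MAX_ORDER_NR_PAGES value and the
pfn_to_page() stub are simplified stand-ins rather than the kernel's
definitions.

    /*
     * Sketch: pfn_to_page() is only performed when crossing a
     * MAX_ORDER_NR_PAGES boundary or after the cached pointer was
     * invalidated; inside a block the pointer is just advanced.
     * All names and values are illustrative stand-ins.
     */
    #include <stdio.h>

    #define MAX_ORDER_NR_PAGES 1024UL	/* stand-in value */

    struct page { unsigned long flags; };

    static struct page memmap[8 * MAX_ORDER_NR_PAGES];

    /* Stand-in for the real pfn_to_page(), which can be a costly lookup */
    static struct page *pfn_to_page(unsigned long pfn)
    {
    	return &memmap[pfn];
    }

    static void init_range(unsigned long pfn, unsigned long end_pfn)
    {
    	struct page *page = NULL;
    	unsigned long nr_pages = end_pfn - pfn;
    	unsigned long lookups = 0;

    	for (; pfn < end_pfn; pfn++) {
    		/* Reuse the cached pointer while inside a MAX_ORDER block */
    		if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
    			page++;
    		} else {
    			page = pfn_to_page(pfn);
    			lookups++;	/* cond_resched() would sit here */
    		}
    		page->flags = 0;
    	}
    	printf("%lu lookups for %lu pages\n", lookups, nr_pages);
    }

    int main(void)
    {
    	init_range(0, 8 * MAX_ORDER_NR_PAGES);
    	return 0;
    }

In this sketch the lookup (and scheduler check) count drops from one per PFN
to one per MAX_ORDER_NR_PAGES block.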
 mm/page_alloc.c | 29 ++++++++++++++++++++++++-----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dfe63a3c3816..839e4c73ce6d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1089,6 +1089,7 @@ void __defermem_init deferred_init_memmap(int nid)
 
                for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
                        unsigned long pfn, end_pfn;
+                       struct page *page = NULL;
 
                        end_pfn = min(walk_end, zone_end_pfn(zone));
                        pfn = first_init_pfn;
@@ -1098,13 +1099,32 @@ void __defermem_init deferred_init_memmap(int nid)
                                pfn = zone->zone_start_pfn;
 
                        for (; pfn < end_pfn; pfn++) {
-                               struct page *page;
-
-                               if (!pfn_valid(pfn))
+                               if (!pfn_valid_within(pfn))
                                        continue;
 
-                               if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state))
+                               /*
+                                * Ensure pfn_valid is checked every
+                                * MAX_ORDER_NR_PAGES for memory holes
+                                */
+                               if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+                                       if (!pfn_valid(pfn)) {
+                                               page = NULL;
+                                               continue;
+                                       }
+                               }
+
+                               if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
+                                       page = NULL;
                                        continue;
+                               }
+
+                               /* Minimise pfn page lookups and scheduler checks */
+                               if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
+                                       page++;
+                               } else {
+                                       page = pfn_to_page(pfn);
+                                       cond_resched();
+                               }
 
                                if (page->flags) {
                                        VM_BUG_ON(page_zone(page) != zone);
@@ -1113,7 +1133,6 @@ void __defermem_init deferred_init_memmap(int nid)
 
                                __init_single_page(page, pfn, zid, nid);
                                __free_pages_boot_core(page, pfn, 0);
-                               cond_resched();
                        }
                        first_init_pfn = max(end_pfn, first_init_pfn);
                }
-- 
2.1.2
