Introduce arch_prep_new_page(), which will be used by arm64 to reserve tag
storage for an allocated page. Reserving tag storage can fail, for example
when the tag storage page has a short-term pin on it, so allow
arch_prep_new_page(), and with it prep_new_page(), to fail.
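
For illustration, an architecture would opt in to the hook roughly as
below. This is a minimal sketch only: the actual arm64 implementation is
not part of this patch, and reserve_tag_storage() is a hypothetical helper
used here just to show the intended error flow:

  /* Sketch of an arch override, e.g. in arch/arm64/include/asm/pgtable.h */
  #define __HAVE_ARCH_PREP_NEW_PAGE
  static inline int arch_prep_new_page(struct page *page, int order, gfp_t gfp)
  {
          if (!system_supports_mte())
                  return 0;
          /*
           * Hypothetical helper: tries to reserve the tag storage backing
           * this page; returns 0 on success or a negative error, for
           * example -EAGAIN if the tag storage page is transiently pinned.
           */
          return reserve_tag_storage(page, order, gfp);
  }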

arch_alloc_page(), called from post_alloc_hook(), was considered as an
alternative to adding yet another arch hook, but post_alloc_hook() cannot be
allowed to fail, as it is also called when isolating free pages.

Signed-off-by: Alexandru Elisei <alexandru.eli...@arm.com>
---
 include/linux/pgtable.h |  7 ++++
 mm/page_alloc.c         | 75 ++++++++++++++++++++++++++++++++---------
 2 files changed, 66 insertions(+), 16 deletions(-)
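
Note on caller-visible behaviour: a failed arch_prep_new_page() is not
reported as a distinct error. The page is returned to the buddy allocator
and the allocation either falls back (next zone, another allocation path)
or ultimately fails, so callers only ever see an ordinary allocation
failure, e.g.:

  /*
   * Sketch of a caller: a failed tag storage reservation shows up only
   * as a NULL return here (or as a shorter count from the bulk API).
   */
  struct page *page = alloc_pages(GFP_KERNEL, 0);

  if (!page)
          return -ENOMEM;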

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index af7639c3b0a3..b31f53e9ab1d 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -873,6 +873,13 @@ static inline void arch_do_swap_page(struct mm_struct *mm,
 }
 #endif
 
+#ifndef __HAVE_ARCH_PREP_NEW_PAGE
+static inline int arch_prep_new_page(struct page *page, int order, gfp_t gfp)
+{
+       return 0;
+}
+#endif
+
 #ifndef __HAVE_ARCH_UNMAP_ONE
 /*
  * Some architectures support metadata associated with a page. When a
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 770e585b77c8..b2782b778e78 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1538,9 +1538,15 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
        page_table_check_alloc(page, order);
 }
 
-static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
                                                        unsigned int alloc_flags)
 {
+       int ret;
+
+       ret = arch_prep_new_page(page, order, gfp_flags);
+       if (unlikely(ret))
+               return ret;
+
        post_alloc_hook(page, order, gfp_flags);
 
        if (order && (gfp_flags & __GFP_COMP))
@@ -1556,6 +1562,8 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
                set_page_pfmemalloc(page);
        else
                clear_page_pfmemalloc(page);
+
+       return 0;
 }
 
 /*
@@ -3163,6 +3171,24 @@ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
        return alloc_flags;
 }
 
+#ifdef __HAVE_ARCH_PREP_NEW_PAGE
+static void return_page_to_buddy(struct page *page, int order)
+{
+       unsigned long pfn = page_to_pfn(page);
+       int migratetype = get_pfnblock_migratetype(page, pfn);
+       struct zone *zone = page_zone(page);
+       unsigned long flags;
+
+       spin_lock_irqsave(&zone->lock, flags);
+       __free_one_page(page, pfn, zone, order, migratetype, FPI_TO_TAIL);
+       spin_unlock_irqrestore(&zone->lock, flags);
+}
+#else
+static void return_page_to_buddy(struct page *page, int order)
+{
+}
+#endif
+
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
@@ -3309,7 +3335,10 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                page = rmqueue(ac->preferred_zoneref->zone, zone, order,
                                gfp_mask, alloc_flags, ac->migratetype);
                if (page) {
-                       prep_new_page(page, order, gfp_mask, alloc_flags);
+                       if (prep_new_page(page, order, gfp_mask, alloc_flags)) {
+                               return_page_to_buddy(page, order);
+                               goto no_page;
+                       }
 
                        /*
                         * If this is a high-order atomic allocation then check
@@ -3319,20 +3348,20 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                                reserve_highatomic_pageblock(page, zone);
 
                        return page;
-               } else {
-                       if (has_unaccepted_memory()) {
-                               if (try_to_accept_memory(zone, order))
-                                       goto try_this_zone;
-                       }
+               }
+no_page:
+               if (has_unaccepted_memory()) {
+                       if (try_to_accept_memory(zone, order))
+                               goto try_this_zone;
+               }
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-                       /* Try again if zone has deferred pages */
-                       if (deferred_pages_enabled()) {
-                               if (_deferred_grow_zone(zone, order))
-                                       goto try_this_zone;
-                       }
-#endif
+               /* Try again if zone has deferred pages */
+               if (deferred_pages_enabled()) {
+                       if (_deferred_grow_zone(zone, order))
+                               goto try_this_zone;
                }
+#endif
        }
 
        /*
@@ -3538,8 +3567,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
        count_vm_event(COMPACTSTALL);
 
        /* Prep a captured page if available */
-       if (page)
-               prep_new_page(page, order, gfp_mask, alloc_flags);
+       if (page) {
+               if (prep_new_page(page, order, gfp_mask, alloc_flags)) {
+                       return_page_to_buddy(page, order);
+                       page = NULL;
+               }
+       }
 
        /* Try get a page from the freelist if available */
        if (!page)
@@ -4490,9 +4523,18 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
                        }
                        break;
                }
+
+               if (prep_new_page(page, 0, gfp, 0)) {
+                       pcp_spin_unlock(pcp);
+                       pcp_trylock_finish(UP_flags);
+                       return_page_to_buddy(page, 0);
+                       if (!nr_account)
+                               goto failed;
+                       else
+                               goto out_statistics;
+               }
                nr_account++;
 
-               prep_new_page(page, 0, gfp, 0);
                if (page_list)
                        list_add(&page->lru, page_list);
                else
@@ -4503,6 +4545,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
        pcp_spin_unlock(pcp);
        pcp_trylock_finish(UP_flags);
 
+out_statistics:
        __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
        zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
 
-- 
2.42.1

