Use the new hugetlb-specific HP_Temporary flag to replace the
PageHugeTemporary() interfaces.

Signed-off-by: Mike Kravetz <mike.krav...@oracle.com>
---
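For reference, here is a minimal sketch of the flag helpers this patch
relies on; they are not part of this diff and are assumed to come from an
earlier patch in the series.  The storage location used below (the head
page's page->private) and the exact signatures are assumptions for
illustration only:

/*
 * Illustrative only -- not part of this patch.  Assumes the helpers
 * live in include/linux/hugetlb.h next to enum hugetlb_page_flags and
 * that hugetlb-specific flags are kept as bits in the head page's
 * page->private; the real helpers are introduced earlier in the series.
 */
static inline bool hugetlb_test_page_flag(struct page *page,
					  enum hugetlb_page_flags flag)
{
	return test_bit(flag, &page->private);
}

static inline void hugetlb_set_page_flag(struct page *page,
					 enum hugetlb_page_flags flag)
{
	set_bit(flag, &page->private);
}

static inline void hugetlb_clear_page_flag(struct page *page,
					   enum hugetlb_page_flags flag)
{
	clear_bit(flag, &page->private);
}

With helpers of this shape, the conversions below (e.g.
hugetlb_set_page_flag(page, HP_Temporary) in alloc_migrate_huge_page())
are mechanical replacements for the old page[2].mapping based tests.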
 include/linux/hugetlb.h |  5 +++++
 mm/hugetlb.c            | 36 +++++++-----------------------------
 2 files changed, 12 insertions(+), 29 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e7157cf9967f..166825c85875 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -483,10 +483,15 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  * HP_Migratable - Set after a newly allocated page is added to the page
  *     cache and/or page tables.  Indicates the page is a candidate for
  *     migration.
+ * HP_Temporary - Set on a page that is temporarily allocated from the buddy
+ *     allocator.  Typically used for migration target pages when no pages
+ *     are available in the pool.  The hugetlb free page path will
+ *     immediately free pages with this flag set back to the buddy allocator.
  */
 enum hugetlb_page_flags {
        HP_Restore_Reserve = 0,
        HP_Migratable,
+       HP_Temporary,
 };
 
 #ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 31e896c70ba0..53e9168a97bd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1353,28 +1353,6 @@ struct hstate *size_to_hstate(unsigned long size)
        return NULL;
 }
 
-/*
- * Internal hugetlb specific page flag. Do not use outside of the hugetlb
- * code
- */
-static inline bool PageHugeTemporary(struct page *page)
-{
-       if (!PageHuge(page))
-               return false;
-
-       return (unsigned long)page[2].mapping == -1U;
-}
-
-static inline void SetPageHugeTemporary(struct page *page)
-{
-       page[2].mapping = (void *)-1U;
-}
-
-static inline void ClearPageHugeTemporary(struct page *page)
-{
-       page[2].mapping = NULL;
-}
-
 static void __free_huge_page(struct page *page)
 {
        /*
@@ -1422,9 +1400,9 @@ static void __free_huge_page(struct page *page)
        if (restore_reserve)
                h->resv_huge_pages++;
 
-       if (PageHugeTemporary(page)) {
+       if (hugetlb_test_page_flag(page, HP_Temporary)) {
                list_del(&page->lru);
-               ClearPageHugeTemporary(page);
+               hugetlb_clear_page_flag(page, HP_Temporary);
                update_and_free_page(h, page);
        } else if (h->surplus_huge_pages_node[nid]) {
                /* remove the page from active list */
@@ -1863,7 +1841,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
         * codeflow
         */
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
-               SetPageHugeTemporary(page);
+               hugetlb_set_page_flag(page, HP_Temporary);
                spin_unlock(&hugetlb_lock);
                put_page(page);
                return NULL;
@@ -1894,7 +1872,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
         * We do not account these pages as surplus because they are only
         * temporary and will be released properly on the last reference
         */
-       SetPageHugeTemporary(page);
+       hugetlb_set_page_flag(page, HP_Temporary);
 
        return page;
 }
@@ -5608,12 +5586,12 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
         * here as well otherwise the global surplus count will not match
         * the per-node's.
         */
-       if (PageHugeTemporary(newpage)) {
+       if (hugetlb_test_page_flag(newpage, HP_Temporary)) {
                int old_nid = page_to_nid(oldpage);
                int new_nid = page_to_nid(newpage);
 
-               SetPageHugeTemporary(oldpage);
-               ClearPageHugeTemporary(newpage);
+               hugetlb_set_page_flag(oldpage, HP_Temporary);
+               hugetlb_clear_page_flag(newpage, HP_Temporary);
 
                spin_lock(&hugetlb_lock);
                if (h->surplus_huge_pages_node[old_nid]) {
-- 
2.29.2
