To add mTHP support to khugepaged, we will often need to check whether a
given order is (or is not) the PMD order. Several places in the kernel
already perform this check, so let's introduce a simple helper to keep the
code clean and readable.
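
The helper, added to include/linux/huge_mm.h below, is just a thin wrapper
around the existing comparison:

	static inline bool is_pmd_order(unsigned int order)
	{
		return order == HPAGE_PMD_ORDER;
	}

so call sites read is_pmd_order(order) rather than open-coding
order == HPAGE_PMD_ORDER.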

Reviewed-by: Lorenzo Stoakes <[email protected]>
Suggested-by: Lorenzo Stoakes <[email protected]>
Signed-off-by: Nico Pache <[email protected]>
---
 include/linux/huge_mm.h | 5 +++++
 mm/huge_memory.c        | 2 +-
 mm/khugepaged.c         | 4 ++--
 mm/mempolicy.c          | 2 +-
 mm/page_alloc.c         | 2 +-
 5 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a4d9f964dfde..bd7f0e1d8094 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -771,6 +771,11 @@ static inline bool pmd_is_huge(pmd_t pmd)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+static inline bool is_pmd_order(unsigned int order)
+{
+       return order == HPAGE_PMD_ORDER;
+}
+
 static inline int split_folio_to_list_to_order(struct folio *folio,
                struct list_head *list, int new_order)
 {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 44ff8a648afd..5eae85818635 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -4097,7 +4097,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
                i_mmap_unlock_read(mapping);
 out:
        xas_destroy(&xas);
-       if (old_order == HPAGE_PMD_ORDER)
+       if (is_pmd_order(old_order))
                count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
        count_mthp_stat(old_order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
        return ret;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index fba6aea5bea6..b85d00670d14 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2000,7 +2000,7 @@ static enum scan_result collapse_file(struct mm_struct *mm, unsigned long addr,
                 * we locked the first folio, then a THP might be there already.
                 * This will be discovered on the first iteration.
                 */
-               if (folio_order(folio) == HPAGE_PMD_ORDER &&
+               if (is_pmd_order(folio_order(folio)) &&
                    folio->index == start) {
                        /* Maybe PMD-mapped */
                        result = SCAN_PTE_MAPPED_HUGEPAGE;
@@ -2327,7 +2327,7 @@ static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm, unsigned
                        continue;
                }
 
-               if (folio_order(folio) == HPAGE_PMD_ORDER &&
+               if (is_pmd_order(folio_order(folio)) &&
                    folio->index == start) {
                        /* Maybe PMD-mapped */
                        result = SCAN_PTE_MAPPED_HUGEPAGE;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dbd48502ac24..3802e52b01fc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2450,7 +2450,7 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
 
        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
            /* filter "hugepage" allocation, unless from alloc_pages() */
-           order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) {
+           is_pmd_order(order) && ilx != NO_INTERLEAVE_INDEX) {
                /*
                 * For hugepage allocation and non-interleave policy which
                 * allows the current node (or other explicitly preferred
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e4104973e22f..e8a6d0d27b92 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -719,7 +719,7 @@ static inline bool pcp_allowed_order(unsigned int order)
        if (order <= PAGE_ALLOC_COSTLY_ORDER)
                return true;
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (order == HPAGE_PMD_ORDER)
+       if (is_pmd_order(order))
                return true;
 #endif
        return false;
-- 
2.52.0
