Prepare hugetlbfs to also recognize swap special ptes, just like the uffd-wp
special swap ptes.

Signed-off-by: Peter Xu <pet...@redhat.com>
---
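Note for reviewers: the helpers added below mirror is_swap_pte() and
pte_has_swap_entry(), only with huge_pte_none() in place of pte_none().  As an
illustration of how the two predicates classify the non-present hugetlb pte
cases, here is a minimal userspace sketch (not kernel code; the toy_pte enum
and the stubbed helpers are hypothetical stand-ins for the real arch ones,
and the two predicates are modelled on the ones this patch adds):

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of a hugetlb pte: just enough state to exercise the checks.
 * These names are made up for this example only.
 */
enum toy_pte {
	TOY_PTE_NONE,			/* huge_pte_none() == true */
	TOY_PTE_PRESENT,		/* pte_present() == true */
	TOY_PTE_MIGRATION,		/* real swap entry (e.g. migration) */
	TOY_PTE_UFFD_WP_SPECIAL,	/* swap special pte, no swap entry */
};

static bool huge_pte_none(enum toy_pte p)       { return p == TOY_PTE_NONE; }
static bool pte_present(enum toy_pte p)         { return p == TOY_PTE_PRESENT; }
static bool is_swap_special_pte(enum toy_pte p) { return p == TOY_PTE_UFFD_WP_SPECIAL; }

/* Non-none and non-present: either a real swap entry or a special pte. */
static bool is_huge_swap_pte(enum toy_pte p)
{
	return !huge_pte_none(p) && !pte_present(p);
}

/* Only real swap entries pass; swap special ptes are filtered out. */
static bool huge_pte_has_swap_entry(enum toy_pte p)
{
	return is_huge_swap_pte(p) && !is_swap_special_pte(p);
}

int main(void)
{
	enum toy_pte cases[] = { TOY_PTE_NONE, TOY_PTE_PRESENT,
				 TOY_PTE_MIGRATION, TOY_PTE_UFFD_WP_SPECIAL };
	const char *names[] = { "none", "present", "migration", "uffd-wp special" };

	for (int i = 0; i < 4; i++)
		printf("%-16s is_huge_swap_pte=%d huge_pte_has_swap_entry=%d\n",
		       names[i], is_huge_swap_pte(cases[i]),
		       huge_pte_has_swap_entry(cases[i]));
	return 0;
}

The last case is the point of the second predicate: a uffd-wp special pte is
non-present and non-none, but carries no swap entry, so
is_hugetlb_entry_migration()/is_hugetlb_entry_hwpoisoned() must not hand it
to pte_to_swp_entry().
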
 mm/hugetlb.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dd2acb8b3f0f..16a07f41880e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -82,6 +82,25 @@ struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
 
+/*
+ * These are sister versions of is_swap_pte() and pte_has_swap_entry().  We
+ * need standalone ones because huge_pte_none() is handled differently from
+ * pte_none().  For more information, please refer to comments above
+ * is_swap_pte() and pte_has_swap_entry().
+ *
+ * Here we directly reuse the pte-level swap special ptes, for example
+ * pte_swp_uffd_wp_special().  For hugetlbfs such a pte simply covers a
+ * huge page instead of a small page.
+ */
+static inline bool is_huge_swap_pte(pte_t pte)
+{
+       return !huge_pte_none(pte) && !pte_present(pte);
+}
+static inline bool huge_pte_has_swap_entry(pte_t pte)
+{
+       return is_huge_swap_pte(pte) && !is_swap_special_pte(pte);
+}
+
 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
 {
        bool free = (spool->count == 0) && (spool->used_hpages == 0);
@@ -3710,7 +3729,7 @@ bool is_hugetlb_entry_migration(pte_t pte)
 {
        swp_entry_t swp;
 
-       if (huge_pte_none(pte) || pte_present(pte))
+       if (!huge_pte_has_swap_entry(pte))
                return false;
        swp = pte_to_swp_entry(pte);
        if (is_migration_entry(swp))
@@ -3723,7 +3742,7 @@ static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
 {
        swp_entry_t swp;
 
-       if (huge_pte_none(pte) || pte_present(pte))
+       if (!huge_pte_has_swap_entry(pte))
                return false;
        swp = pte_to_swp_entry(pte);
        if (is_hwpoison_entry(swp))
-- 
2.26.2
