From: Alex Shi <[email protected]>

Since we have the ptdesc struct now, it is better to use it to replace
pgtable_t, aka 'struct page *'. It's also a preparation for returning a
ptdesc pointer from the pte_alloc_one series of functions.

Signed-off-by: Alex Shi <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: Andrew Morton <[email protected]>
---
 mm/huge_memory.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a331d4504d52..236e1582d97e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1369,15 +1369,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        struct page *src_page;
        struct folio *src_folio;
        pmd_t pmd;
-       pgtable_t pgtable = NULL;
+       struct ptdesc *ptdesc = NULL;
        int ret = -ENOMEM;
 
        /* Skip if can be re-fill on fault */
        if (!vma_is_anonymous(dst_vma))
                return 0;
 
-       pgtable = pte_alloc_one(dst_mm);
-       if (unlikely(!pgtable))
+       ptdesc = page_ptdesc(pte_alloc_one(dst_mm));
+       if (unlikely(!ptdesc))
                goto out;
 
        dst_ptl = pmd_lock(dst_mm, dst_pmd);
@@ -1404,7 +1404,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                }
                add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
                mm_inc_nr_ptes(dst_mm);
-               pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+               pgtable_trans_huge_deposit(dst_mm, dst_pmd, ptdesc_page(ptdesc));
                if (!userfaultfd_wp(dst_vma))
                        pmd = pmd_swp_clear_uffd_wp(pmd);
                set_pmd_at(dst_mm, addr, dst_pmd, pmd);
@@ -1414,7 +1414,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 #endif
 
        if (unlikely(!pmd_trans_huge(pmd))) {
-               pte_free(dst_mm, pgtable);
+               pte_free(dst_mm, ptdesc_page(ptdesc));
                goto out_unlock;
        }
        /*
@@ -1440,7 +1440,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
                /* Page maybe pinned: split and retry the fault on PTEs. */
                folio_put(src_folio);
-               pte_free(dst_mm, pgtable);
+               pte_free(dst_mm, ptdesc_page(ptdesc));
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
                __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
@@ -1449,7 +1449,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 out_zero_page:
        mm_inc_nr_ptes(dst_mm);
-       pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
+       pgtable_trans_huge_deposit(dst_mm, dst_pmd, ptdesc_page(ptdesc));
        pmdp_set_wrprotect(src_mm, addr, src_pmd);
        if (!userfaultfd_wp(dst_vma))
                pmd = pmd_clear_uffd_wp(pmd);
-- 
2.43.0


Reply via email to