From: Alex Shi <[email protected]>

Since we have the ptdesc struct now, better to use it to replace
pgtable_t, aka 'struct page *'.
This is a preparation for returning a ptdesc pointer from the
pte_alloc_one series of functions.

Signed-off-by: Alex Shi <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: Christian Brauner <[email protected]>
Cc: Alexander Viro <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Dan Williams <[email protected]>
---
 fs/dax.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index becb4a6920c6..6f7cea248206 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1206,7 +1206,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        unsigned long pmd_addr = vmf->address & PMD_MASK;
        struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = mapping->host;
-       pgtable_t pgtable = NULL;
+       struct ptdesc *ptdesc = NULL;
        struct folio *zero_folio;
        spinlock_t *ptl;
        pmd_t pmd_entry;
@@ -1222,8 +1222,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                                  DAX_PMD | DAX_ZERO_PAGE);
 
        if (arch_needs_pgtable_deposit()) {
-               pgtable = pte_alloc_one(vma->vm_mm);
-               if (!pgtable)
+               ptdesc = page_ptdesc(pte_alloc_one(vma->vm_mm));
+               if (!ptdesc)
                        return VM_FAULT_OOM;
        }
 
@@ -1233,8 +1233,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
                goto fallback;
        }
 
-       if (pgtable) {
-               pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
+       if (ptdesc) {
+               pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, ptdesc_page(ptdesc));
                mm_inc_nr_ptes(vma->vm_mm);
        }
        pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
@@ -1245,8 +1245,8 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
        return VM_FAULT_NOPAGE;
 
 fallback:
-       if (pgtable)
-               pte_free(vma->vm_mm, pgtable);
+       if (ptdesc)
+               pte_free(vma->vm_mm, ptdesc_page(ptdesc));
        trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
        return VM_FAULT_FALLBACK;
 }
-- 
2.43.0


Reply via email to