Add orig_pmd to struct vm_fault so that the "orig_pmd" parameter passed
around by the huge page fault paths can be removed, just as the PTE
fault path already does with orig_pte.
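
For example, a caller that previously read the PMD value and threaded
it through as an argument now stashes it in the fault structure,
mirroring how orig_pte is handled (an illustrative sketch of the
pattern, condensed from the hunks below, not verbatim kernel code):

	/* before: value captured locally and passed explicitly */
	pmd_t orig_pmd = *vmf.pmd;
	ret = do_huge_pmd_wp_page(&vmf, orig_pmd);

	/* after: value carried in struct vm_fault, like orig_pte */
	vmf.orig_pmd = *vmf.pmd;
	ret = do_huge_pmd_wp_page(&vmf);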

Signed-off-by: Yang Shi <shy828...@gmail.com>
---
 include/linux/huge_mm.h |  9 ++++-----
 include/linux/mm.h      |  1 +
 mm/huge_memory.c        |  9 ++++++---
 mm/memory.c             | 26 +++++++++++++-------------
 4 files changed, 24 insertions(+), 21 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ba973efcd369..5650db25a49d 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -11,7 +11,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *vma);
-void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+void huge_pmd_set_accessed(struct vm_fault *vmf);
 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                  struct vm_area_struct *vma);
@@ -24,7 +24,7 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
 }
 #endif
 
-vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmd,
                                   unsigned int flags);
@@ -286,7 +286,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t *pud, int flags, struct dev_pagemap **pgmap);
 
-vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
 
 extern struct page *huge_zero_page;
 
@@ -432,8 +432,7 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
        return NULL;
 }
 
-static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
-               pmd_t orig_pmd)
+static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 {
        return 0;
 }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8ba434287387..899f55d46fba 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -528,6 +528,7 @@ struct vm_fault {
                                         * the 'address'
                                         */
        pte_t orig_pte;                 /* Value of PTE at the time of fault */
+       pmd_t orig_pmd;                 /* Value of PMD at the time of fault */
 
        struct page *cow_page;          /* Page handler may use for COW fault */
        struct page *page;              /* ->fault handlers should return a
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ae907a9c2050..53f3843ce72a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1252,11 +1252,12 @@ void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
 }
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
-void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
+void huge_pmd_set_accessed(struct vm_fault *vmf)
 {
        pmd_t entry;
        unsigned long haddr;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
+       pmd_t orig_pmd = vmf->orig_pmd;
 
        vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
@@ -1273,11 +1274,12 @@ void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
        spin_unlock(vmf->ptl);
 }
 
-vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+       pmd_t orig_pmd = vmf->orig_pmd;
 
        vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
        VM_BUG_ON_VMA(!vma->anon_vma, vma);
@@ -1413,9 +1415,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 }
 
 /* NUMA hinting page fault entry point for trans huge pmds */
-vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
+       pmd_t pmd = vmf->orig_pmd;
        struct anon_vma *anon_vma = NULL;
        struct page *page;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
diff --git a/mm/memory.c b/mm/memory.c
index 5efa07fb6cdc..33be5811ac65 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4193,12 +4193,12 @@ static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
 }
 
 /* `inline' is required to avoid gcc 4.1.2 build error */
-static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
+static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
 {
        if (vma_is_anonymous(vmf->vma)) {
-               if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
+               if (userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
                        return handle_userfault(vmf, VM_UFFD_WP);
-               return do_huge_pmd_wp_page(vmf, orig_pmd);
+               return do_huge_pmd_wp_page(vmf);
        }
        if (vmf->vma->vm_ops->huge_fault) {
                vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
@@ -4425,26 +4425,26 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
                if (!(ret & VM_FAULT_FALLBACK))
                        return ret;
        } else {
-               pmd_t orig_pmd = *vmf.pmd;
+               vmf.orig_pmd = *vmf.pmd;
 
                barrier();
-               if (unlikely(is_swap_pmd(orig_pmd))) {
+               if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
                        VM_BUG_ON(thp_migration_supported() &&
-                                         !is_pmd_migration_entry(orig_pmd));
-                       if (is_pmd_migration_entry(orig_pmd))
+                                         !is_pmd_migration_entry(vmf.orig_pmd));
+                       if (is_pmd_migration_entry(vmf.orig_pmd))
                                pmd_migration_entry_wait(mm, vmf.pmd);
                        return 0;
                }
-               if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
-                       if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
-                               return do_huge_pmd_numa_page(&vmf, orig_pmd);
+               if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
+                       if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
+                               return do_huge_pmd_numa_page(&vmf);
 
-                       if (dirty && !pmd_write(orig_pmd)) {
-                               ret = wp_huge_pmd(&vmf, orig_pmd);
+                       if (dirty && !pmd_write(vmf.orig_pmd)) {
+                               ret = wp_huge_pmd(&vmf);
                                if (!(ret & VM_FAULT_FALLBACK))
                                        return ret;
                        } else {
-                               huge_pmd_set_accessed(&vmf, orig_pmd);
+                               huge_pmd_set_accessed(&vmf);
                                return 0;
                        }
                }
-- 
2.26.2
