Let's reduce the code duplication and factor out the non-pte/pmd-related
magic into vm_normal_page_pfn().
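
For reference, a hypothetical caller sketch (not part of this patch; the
function name is made up) showing how both levels now funnel into the
shared helper:

	static void example_handle_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t pte)
	{
		/*
		 * vm_normal_page() handles pte_special() and then defers
		 * to vm_normal_page_pfn(); vm_normal_page_pmd() does the
		 * same for pmd_special().
		 */
		struct page *page = vm_normal_page(vma, addr, pte);

		if (!page)
			return;	/* special: pfnmap, zero folio, ... */
		/* ... operate on page_folio(page) ... */
	}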

To keep it simpler, check the pfn against both zero folios. We could
optimize this, but as it's only for the !CONFIG_ARCH_HAS_PTE_SPECIAL
case, it's not a compelling micro-optimization.
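
A hypothetical way to optimize (deliberately not done here) would be to
tell the helper which zero folio to filter; the "pmd_level" parameter
below is made up for illustration:

	/*
	 * Hypothetical micro-optimization, not part of this patch: let
	 * each caller filter only its own zero folio.
	 */
	if (pmd_level ? is_huge_zero_pfn(pfn) : is_zero_pfn(pfn))
		return NULL;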

With CONFIG_ARCH_HAS_PTE_SPECIAL we don't have to check anything else,
really.

It's a good question whether we can even hit the
!CONFIG_ARCH_HAS_PTE_SPECIAL scenario in the PMD case in practice: but
it doesn't really matter, as it's now all unified in
vm_normal_page_pfn().

While at it, add a check that pmd_special() is really only set where we
would expect it.
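
Spelled out, the invariant that the new warning asserts is roughly the
following (hypothetical helper name, for illustration only):

	/*
	 * pmd_special() is only expected for huge pfnmaps
	 * (VM_PFNMAP/VM_MIXEDMAP) or the huge zero folio.
	 */
	static inline bool pmd_special_expected(struct vm_area_struct *vma,
			unsigned long pfn)
	{
		return (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) ||
		       is_huge_zero_pfn(pfn);
	}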

No functional change intended.

Signed-off-by: David Hildenbrand <[email protected]>
---
 mm/memory.c | 104 +++++++++++++++++++++++-----------------------------
 1 file changed, 46 insertions(+), 58 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index b6c069f4ad11f..3d3fa01cd217e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -536,6 +536,46 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 }
 
+/* Called only if the page table entry is not marked special. */
+static inline struct page *vm_normal_page_pfn(struct vm_area_struct *vma,
+               unsigned long addr, unsigned long pfn)
+{
+       /*
+        * With CONFIG_ARCH_HAS_PTE_SPECIAL, any special page table mappings
+        * (incl. shared zero folios) are marked accordingly.
+        */
+       if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
+               goto normal_page;
+
+       if (unlikely(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))) {
+               if (vma->vm_flags & VM_MIXEDMAP) {
+                       /* If it has a "struct page", it's "normal". */
+                       if (!pfn_valid(pfn))
+                               return NULL;
+               } else {
+                       unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
+
+                       /* Only CoW'ed anon folios are "normal". */
+                       if (pfn == vma->vm_pgoff + off)
+                               return NULL;
+                       if (!is_cow_mapping(vma->vm_flags))
+                               return NULL;
+               }
+       }
+
+       if (is_zero_pfn(pfn) || is_huge_zero_pfn(pfn))
+               return NULL;
+
+normal_page:
+       /*
+        * NOTE! We still have PageReserved() pages in the page tables.
+        * For example, VDSO mappings can cause them to exist.
+        */
+       VM_WARN_ON_ONCE(!pfn_valid(pfn));
+       VM_WARN_ON_ONCE(is_zero_pfn(pfn) || is_huge_zero_pfn(pfn));
+       return pfn_to_page(pfn);
+}
+
 /*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
  *
@@ -591,9 +631,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 {
        unsigned long pfn = pte_pfn(pte);
 
-       if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
-               if (likely(!pte_special(pte)))
-                       goto out;
+       if (unlikely(pte_special(pte))) {
                if (vma->vm_ops && vma->vm_ops->find_special_page)
                        return vma->vm_ops->find_special_page(vma, addr);
                if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
@@ -604,34 +642,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
-
-       /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
-
-       if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
-               if (vma->vm_flags & VM_MIXEDMAP) {
-                       if (!pfn_valid(pfn))
-                               return NULL;
-               } else {
-                       unsigned long off;
-                       off = (addr - vma->vm_start) >> PAGE_SHIFT;
-                       if (pfn == vma->vm_pgoff + off)
-                               return NULL;
-                       if (!is_cow_mapping(vma->vm_flags))
-                               return NULL;
-               }
-       }
-
-       if (is_zero_pfn(pfn))
-               return NULL;
-
-       /*
-        * NOTE! We still have PageReserved() pages in the page tables.
-        * eg. VDSO mappings can cause them to exist.
-        */
-out:
-       VM_WARN_ON_ONCE(!pfn_valid(pfn));
-       VM_WARN_ON_ONCE(is_zero_pfn(pfn));
-       return pfn_to_page(pfn);
+       return vm_normal_page_pfn(vma, addr, pfn);
 }
 
 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
@@ -650,35 +661,12 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
 {
        unsigned long pfn = pmd_pfn(pmd);
 
-       /* Currently it's only used for huge pfnmaps */
-       if (unlikely(pmd_special(pmd)))
+       if (unlikely(pmd_special(pmd))) {
+               VM_WARN_ON_ONCE(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) &&
+                               !is_huge_zero_pfn(pfn));
                return NULL;
-
-       if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
-               if (vma->vm_flags & VM_MIXEDMAP) {
-                       if (!pfn_valid(pfn))
-                               return NULL;
-                       goto out;
-               } else {
-                       unsigned long off;
-                       off = (addr - vma->vm_start) >> PAGE_SHIFT;
-                       if (pfn == vma->vm_pgoff + off)
-                               return NULL;
-                       if (!is_cow_mapping(vma->vm_flags))
-                               return NULL;
-               }
        }
-
-       if (is_huge_zero_pfn(pfn))
-               return NULL;
-
-       /*
-        * NOTE! We still have PageReserved() pages in the page tables.
-        * eg. VDSO mappings can cause them to exist.
-        */
-out:
-       VM_WARN_ON_ONCE(!pfn_valid(pfn));
-       return pfn_to_page(pfn);
+       return vm_normal_page_pfn(vma, addr, pfn);
 }
 
 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
-- 
2.49.0

