Let's factor out copying of the present PTE into a new __copy_present_pte() helper, to prepare for further changes.

Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 mm/memory.c | 60 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 32 insertions(+), 28 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 7e1f4849463aa..2aa2051ee51d3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -930,6 +930,29 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
        return 0;
 }
 
+static inline void __copy_present_pte(struct vm_area_struct *dst_vma,
+               struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
+               pte_t pte, unsigned long addr)
+{
+       struct mm_struct *src_mm = src_vma->vm_mm;
+
+       /* If it's a COW mapping, write protect it in both processes. */
+       if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
+               ptep_set_wrprotect(src_mm, addr, src_pte);
+               pte = pte_wrprotect(pte);
+       }
+
+       /* If it's a shared mapping, mark it clean in the child. */
+       if (src_vma->vm_flags & VM_SHARED)
+               pte = pte_mkclean(pte);
+       pte = pte_mkold(pte);
+
+       if (!userfaultfd_wp(dst_vma))
+               pte = pte_clear_uffd_wp(pte);
+
+       set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+}
+
 /*
  * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
  * is required to copy this pte.
@@ -939,16 +962,16 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
                 struct folio **prealloc)
 {
-       struct mm_struct *src_mm = src_vma->vm_mm;
-       unsigned long vm_flags = src_vma->vm_flags;
        pte_t pte = ptep_get(src_pte);
        struct page *page;
        struct folio *folio;
 
        page = vm_normal_page(src_vma, addr, pte);
-       if (page)
-               folio = page_folio(page);
-       if (page && folio_test_anon(folio)) {
+       if (unlikely(!page))
+               goto copy_pte;
+
+       folio = page_folio(page);
+       if (folio_test_anon(folio)) {
                /*
                 * If this page may have been pinned by the parent process,
                 * copy the page immediately for the child so that we'll always
@@ -963,34 +986,15 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                                                 addr, rss, prealloc, page);
                }
                rss[MM_ANONPAGES]++;
-       } else if (page) {
+               VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
+       } else {
                folio_get(folio);
                folio_dup_file_rmap_pte(folio, page);
                rss[mm_counter_file(page)]++;
        }
 
-       /*
-        * If it's a COW mapping, write protect it both
-        * in the parent and the child
-        */
-       if (is_cow_mapping(vm_flags) && pte_write(pte)) {
-               ptep_set_wrprotect(src_mm, addr, src_pte);
-               pte = pte_wrprotect(pte);
-       }
-       VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));
-
-       /*
-        * If it's a shared mapping, mark it clean in
-        * the child
-        */
-       if (vm_flags & VM_SHARED)
-               pte = pte_mkclean(pte);
-       pte = pte_mkold(pte);
-
-       if (!userfaultfd_wp(dst_vma))
-               pte = pte_clear_uffd_wp(pte);
-
-       set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+copy_pte:
+       __copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, pte, addr);
        return 0;
 }
 
-- 
2.43.0

Reply via email to