On 2/3/21 1:08 PM, Peter Xu wrote:
> This is the last missing piece of the COW-during-fork effort for the case
> where pinned pages are found.  See 70e806e4e645 ("mm: Do early cow for
> pinned pages during fork() for ptes", 2020-09-27) for more information;
> we do the same thing here, but for hugetlb mappings rather than ptes.
> 
> Signed-off-by: Peter Xu <pet...@redhat.com>
> ---
>  mm/hugetlb.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++----
>  1 file changed, 71 insertions(+), 5 deletions(-)
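
For context, the copy-or-share decision below hinges on
page_needs_cow_for_dma(), introduced earlier in this series.  IIRC it
reads roughly as follows (paraphrased from memory, not the exact tree
state):

	static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
						  struct page *page)
	{
		/* Only private (COW) mappings may need an early copy */
		if (!is_cow_mapping(vma->vm_flags))
			return false;
		/* Cheap mm-wide hint: was anything in this mm ever pinned? */
		if (!atomic_read(&vma->vm_mm->has_pinned))
			return false;
		/* May false-positive; that only costs one extra copy */
		return page_maybe_dma_pinned(page);
	}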
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 9e6ea96bf33b..931bf1a81c16 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -3734,11 +3734,27 @@ static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
>               return false;
>  }
>  
> +static void
> +hugetlb_copy_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
> +               struct page *old_page, struct page *new_page)
> +{
> +     struct hstate *h = hstate_vma(vma);
> +     unsigned int psize = pages_per_huge_page(h);
> +
> +     copy_user_huge_page(new_page, old_page, addr, vma, psize);
> +     __SetPageUptodate(new_page);
> +     ClearPagePrivate(new_page);
> +     set_page_huge_active(new_page);
> +     set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
> +     hugepage_add_new_anon_rmap(new_page, vma, addr);
> +     hugetlb_count_add(psize, vma->vm_mm);
> +}
> +
>  int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
>                           struct vm_area_struct *vma)
>  {
>       pte_t *src_pte, *dst_pte, entry, dst_entry;
> -     struct page *ptepage;
> +     struct page *ptepage, *prealloc = NULL;
>       unsigned long addr;
>       int cow;
>       struct hstate *h = hstate_vma(vma);
> @@ -3787,7 +3803,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
>               dst_entry = huge_ptep_get(dst_pte);
>               if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
>                       continue;
> -
> +again:
>               dst_ptl = huge_pte_lock(h, dst, dst_pte);
>               src_ptl = huge_pte_lockptr(h, src, src_pte);
>               spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
> @@ -3816,6 +3832,54 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
>                       }
>                       set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
>               } else {
> +                     entry = huge_ptep_get(src_pte);
> +                     ptepage = pte_page(entry);
> +                     get_page(ptepage);
> +
> +                     if (unlikely(page_needs_cow_for_dma(vma, ptepage))) {
> +                             /* This is very possibly a pinned huge page */
> +                             if (!prealloc) {
> +                                     /*
> +                                      * Preallocate the huge page without
> +                                      * tons of locks since we could sleep.
> +                                      * Note: we can't use any reservation
> +                                      * because the page will be exclusively
> +                                      * owned by the child later.
> +                                      */
> +                                     put_page(ptepage);
> +                                     spin_unlock(src_ptl);
> +                                     spin_unlock(dst_ptl);
> +                                     prealloc = alloc_huge_page(vma, addr, 0);

One quick question:

The comment says we can't use any reservation, and I agree.  However, the
alloc_huge_page call has 0 as the avoid_reserve argument.  Shouldn't that
be !0 to avoid reserves?
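
I.e., something like the below (untested; any non-zero value would do
for avoid_reserve):

	/* Don't consume the VMA's reservation for the child's private copy */
	prealloc = alloc_huge_page(vma, addr, 1);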

-- 
Mike Kravetz

> +                                     if (!prealloc) {
> +                                             /*
> +                                              * hugetlb_cow() seems to be
> +                                              * more careful here than us.
> +                                              * However for fork() we could
> +                                              * be strict not only because
> +                                              * no one should be referencing
> +                                              * the child mm yet, but also
> +                                              * if resources are rare we'd
> +                                              * better simply fail the
> +                                              * fork() even earlier.
> +                                              */
> +                                             ret = -ENOMEM;
> +                                             break;
> +                                     }
> +                                     goto again;
> +                             }
> +                             /*
> +                              * We have page preallocated so that we can do
> +                              * the copy right now.
> +                              */
> +                             hugetlb_copy_page(vma, dst_pte, addr, ptepage,
> +                                               prealloc);
> +                             put_page(ptepage);
> +                             spin_unlock(src_ptl);
> +                             spin_unlock(dst_ptl);
> +                             prealloc = NULL;
> +                             continue;
> +                     }
> +
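
Side note for readers: the goto-again dance above is the usual
"allocate outside the lock, then revalidate" pattern.  alloc_huge_page()
can sleep, so it must not be called under the pte locks, and once the
locks are dropped the entry may change under us.  Schematically, with
made-up names:

	again:
		spin_lock(ptl);
		if (needs_early_copy(pte) && !prealloc) {
			spin_unlock(ptl);
			prealloc = allocate();	/* may sleep */
			if (!prealloc)
				return -ENOMEM;	/* fail the fork early */
			goto again;		/* pte may have changed */
		}
		/* ... copy and/or map under the lock from here ... */
		spin_unlock(ptl);
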
>                       if (cow) {
>                               /*
>                                * No need to notify as we are downgrading page
> @@ -3826,9 +3890,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
>                                */
>                               huge_ptep_set_wrprotect(src, addr, src_pte);
>                       }
> -                     entry = huge_ptep_get(src_pte);
> -                     ptepage = pte_page(entry);
> -                     get_page(ptepage);
> +
>                       page_dup_rmap(ptepage, true);
>                       set_huge_pte_at(dst, addr, dst_pte, entry);
>                       hugetlb_count_add(pages_per_huge_page(h), dst);
> @@ -3842,6 +3904,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
>       else
>               i_mmap_unlock_read(mapping);
>  
> +     /* Free the preallocated page if not used at last */
> +     if (prealloc)
> +             put_page(prealloc);
> +
>       return ret;
>  }
>  
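
To make the user-visible semantics concrete, below is a minimal
userspace sketch of the scenario being protected (illustrative only: it
assumes 2MB huge pages are configured via vm.nr_hugepages, and in the
real workload the parent's buffer would additionally be pinned by a
driver through pin_user_pages(), which is what makes the early copy
necessary):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/mman.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	#define HPAGE_SZ (2UL << 20)	/* one 2MB huge page */

	int main(void)
	{
		char *buf = mmap(NULL, HPAGE_SZ, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
				 -1, 0);
		int status;
		pid_t pid;

		if (buf == MAP_FAILED) {
			perror("mmap(MAP_HUGETLB)");
			return 1;
		}
		memset(buf, 0xaa, HPAGE_SZ);	/* fault the huge page in */

		pid = fork();
		if (pid < 0) {
			perror("fork");
			return 1;
		}
		if (pid == 0) {
			/* the child must see the pre-fork snapshot */
			_exit(buf[0] == (char)0xaa ? 0 : 1);
		}
		buf[0] = 0x55;			/* parent writes after fork */
		waitpid(pid, &status, 0);
		printf("child saw snapshot: %s\n",
		       WEXITSTATUS(status) == 0 ? "yes" : "no");
		munmap(buf, HPAGE_SZ);
		return 0;
	}

Either way the child must observe the pre-fork contents; the point of
the patch is that, when the parent's page is pinned, this is achieved by
copying at fork() time rather than by write-protecting a page a DMA
engine may still be writing to.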
