On Wed, Mar 20, 2019 at 10:06:28AM +0800, Peter Xu wrote:
> This allows uffd-wp to handle pages that are also write-protected
> for COW.
> 
> For example, a uffd write-protected PTE may also be write-protected
> for other reasons, such as COW or the zero page.  When that happens,
> we can't simply set the write bit in the PTE, since that would change
> the content seen by every other reference to the page.  Instead, we
> should do the COW first if necessary, then handle the uffd-wp fault.
> 
> To correctly copy the page, we'll also need to carry over the
> _PAGE_UFFD_WP bit if it was set in the original PTE.
> 
> For huge PMDs, we always split the huge PMD when we want to resolve
> an uffd-wp page fault on it.  That matches what we already do for
> general huge PMD write protection.  In that way, we reduce the huge
> PMD copy-on-write problem to the PTE copy-on-write case.
> 
> Signed-off-by: Peter Xu <[email protected]>

This one has a bug, see below.


> ---
>  mm/memory.c   |  5 +++-
>  mm/mprotect.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++---
>  2 files changed, 65 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index e7a4b9650225..b8a4c0bab461 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2291,7 +2291,10 @@ vm_fault_t wp_page_copy(struct vm_fault *vmf)
>               }
>               flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
>               entry = mk_pte(new_page, vma->vm_page_prot);
> -             entry = maybe_mkwrite(pte_mkdirty(entry), vma);
> +             if (pte_uffd_wp(vmf->orig_pte))
> +                     entry = pte_mkuffd_wp(entry);
> +             else
> +                     entry = maybe_mkwrite(pte_mkdirty(entry), vma);
>               /*
>                * Clear the pte entry and flush it first, before updating the
>                * pte with the new entry. This will avoid a race condition
> diff --git a/mm/mprotect.c b/mm/mprotect.c
> index 9d4433044c21..855dddb07ff2 100644
> --- a/mm/mprotect.c
> +++ b/mm/mprotect.c
> @@ -73,18 +73,18 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
>       flush_tlb_batched_pending(vma->vm_mm);
>       arch_enter_lazy_mmu_mode();
>       do {
> +retry_pte:
>               oldpte = *pte;
>               if (pte_present(oldpte)) {
>                       pte_t ptent;
>                       bool preserve_write = prot_numa && pte_write(oldpte);
> +                     struct page *page;
>  
>                       /*
>                        * Avoid trapping faults against the zero or KSM
>                        * pages. See similar comment in change_huge_pmd.
>                        */
>                       if (prot_numa) {
> -                             struct page *page;
> -
>                               page = vm_normal_page(vma, addr, oldpte);
>                               if (!page || PageKsm(page))
>                                       continue;
> @@ -114,6 +114,54 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
>                                       continue;
>                       }
>  
> +                     /*
> +                      * Detect whether we'll need to COW before
> +                      * resolving an uffd-wp fault.  Note that this
> +                      * includes detection of the zero page (where
> +                      * page==NULL)
> +                      */
> +                     if (uffd_wp_resolve) {
> +                             /* If the fault is resolved already, skip */
> +                             if (!pte_uffd_wp(*pte))
> +                                     continue;
> +                             page = vm_normal_page(vma, addr, oldpte);
> +                             if (!page || page_mapcount(page) > 1) {
> +                                     struct vm_fault vmf = {
> +                                             .vma = vma,
> +                                             .address = addr & PAGE_MASK,
> +                                             .page = page,
> +                                             .orig_pte = oldpte,
> +                                             .pmd = pmd,
> +                                             /* pte and ptl not needed */
> +                                     };
> +                                     vm_fault_t ret;
> +
> +                                     if (page)
> +                                             get_page(page);
> +                                     arch_leave_lazy_mmu_mode();
> +                                     pte_unmap_unlock(pte, ptl);
> +                                     ret = wp_page_copy(&vmf);
> +                                     /* PTE is changed, or OOM */
> +                                     if (ret == 0)
> +                                             /* It's done by others */
> +                                             continue;

This is wrong: if ret == 0 you still need to remap the PTE before
continuing, otherwise you will move on to the next PTE without
holding the page table lock for the directory.  So the ret == 0
case must be handled after the pte_offset_map_lock() /
arch_enter_lazy_mmu_mode() calls below.

Sorry, I should have caught that in the previous review.
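
Something along these lines is what I have in mind (untested sketch,
just to illustrate the control flow; it reuses the variables and the
retry_pte label from your patch context):

    ret = wp_page_copy(&vmf);
    /* ret is 0 (resolved by others), VM_FAULT_WRITE (copied), or error */
    if (WARN_ON(ret != 0 && ret != VM_FAULT_WRITE))
            return pages;
    /*
     * wp_page_copy() ran with the PTL dropped, so re-take the lock
     * and re-enter lazy MMU mode before touching any more PTEs,
     * even when we are about to skip this one.
     */
    pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    arch_enter_lazy_mmu_mode();
    if (ret == 0)
            /* It's done by others, safe to skip now that we hold ptl */
            continue;
    if (!pte_present(*pte))
            /*
             * This PTE could have been modified after COW before
             * we took the lock; retry this PTE.
             */
            goto retry_pte;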


> +                                     else if (WARN_ON(ret != VM_FAULT_WRITE))
> +                                             return pages;
> +                                     pte = pte_offset_map_lock(vma->vm_mm,
> +                                                               pmd, addr,
> +                                                               &ptl);
> +                                     arch_enter_lazy_mmu_mode();
> +                                     if (!pte_present(*pte))
> +                                             /*
> +                                              * This PTE could have been
> +                                              * modified after COW
> +                                              * before we have taken the
> +                                              * lock; retry this PTE
> +                                              */
> +                                             goto retry_pte;
> +                             }
> +                     }
> +
>                       ptent = ptep_modify_prot_start(mm, addr, pte);
>                       ptent = pte_modify(ptent, newprot);
>                       if (preserve_write)

>       unsigned long pages = 0;
>       unsigned long nr_huge_updates = 0;
>       struct mmu_notifier_range range;
> +     bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
>  
>       range.start = 0;
>  
> @@ -202,7 +251,16 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
>               }
>  
>               if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
> -                     if (next - addr != HPAGE_PMD_SIZE) {
> +                     /*
> +                      * When resolving a userfaultfd write
> +                      * protection fault, it's not easy to identify
> +                      * whether a THP is shared with others and
> +                      * whether we'll need to do copy-on-write, so
> +                      * just split it always for now to simplify the
> +                      * procedure.  That is also the policy for
> +                      * general THP write protection in af9e4d5f2de2.
> +                      */
> +                     if (next - addr != HPAGE_PMD_SIZE || uffd_wp_resolve) {

Just a nitpick: can you please add () around next - addr, ie:

if ((next - addr) != HPAGE_PMD_SIZE || uffd_wp_resolve) {

I know it is not needed, but each time I bump into this I have to
scratch my head for a second to remember the operator precedence
rules :)

>                               __split_huge_pmd(vma, pmd, addr, false, NULL);
>                       } else {
>                               int nr_ptes = change_huge_pmd(vma, pmd, addr,
> -- 
> 2.17.1
> 
