On Fri, Sep 20, 2019 at 12:12:04AM +0800, Jia He wrote:
> @@ -2152,7 +2163,29 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
>        */
>       if (unlikely(!src)) {
>               void *kaddr = kmap_atomic(dst);
> -             void __user *uaddr = (void __user *)(va & PAGE_MASK);
> +             void __user *uaddr = (void __user *)(addr & PAGE_MASK);
> +             pte_t entry;
> +
> +             /* On architectures with software "accessed" bits, we would
> +              * take a double page fault, so mark it accessed here.
> +              */
> +             if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
> +                     spin_lock(vmf->ptl);
> +                     if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
> +                             entry = pte_mkyoung(vmf->orig_pte);
> +                             if (ptep_set_access_flags(vma, addr,
> +                                                       vmf->pte, entry, 0))
> +                                     update_mmu_cache(vma, addr, vmf->pte);
> +                     } else {
> +                             /* Other thread has already handled the fault
> +                              * and we don't need to do anything. If it's
> +                              * not the case, the fault will be triggered
> +                              * again on the same address.
> +                              */
> +                             return -1;
> +                     }
> +                     spin_unlock(vmf->ptl);

Returning with the spinlock held doesn't normally go very well ;).
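Something along these lines would at least keep the lock/unlock
balanced (just a sketch against the quoted hunk; the -1 return
convention and the surrounding cow_user_page() context are the
patch's own, untested here):

		} else {
			/* Other thread has already handled the fault
			 * and we don't need to do anything. If it's
			 * not the case, the fault will be triggered
			 * again on the same address.
			 */
			spin_unlock(vmf->ptl);	/* drop ptl on the error path too */
			return -1;
		}
		spin_unlock(vmf->ptl);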

-- 
Catalin
