Makes sense to me.

ok kettenis@

> On 06/30/2022 4:05 PM Martin Pieuchot <m...@openbsd.org> wrote:
> 
>  
> Diff below uses two tricks to make uvm_pagermapin/out() faster and less
> likely to fail in OOM situations.
> 
> These functions are used to map buffers when swapping pages in/out and
> when faulting on mmapped files.  robert@ even measured a 75% improvement
> when populating pages related to files that aren't yet in the buffer
> cache.
> 
> The first trick is to use the direct map when available.  I'm doing this
> for single pages only, but km_alloc(9) does the same for single-segment
> allocations.  uvm_io() only maps one page at a time for the moment, so
> this should be enough.
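
For anyone reading along: on amd64 pmap_map_direct() boils down to
address arithmetic, which is why this fast path cannot fail.  A
minimal sketch (sketch_map_direct() is a made-up name, and
PMAP_DIRECT_MAP is the amd64 macro; other __HAVE_PMAP_DIRECT
architectures differ in the details):

	vaddr_t
	sketch_map_direct(struct vm_page *pg)
	{
		paddr_t pa = VM_PAGE_TO_PHYS(pg);

		/*
		 * The direct map keeps all of physical memory mapped
		 * at a constant offset, so "mapping" a page is pure
		 * arithmetic: no PTE insertion, no page-table
		 * allocation, no way to run out of resources.
		 */
		return PMAP_DIRECT_MAP(pa);
	}

pmap_unmap_direct() is the same arithmetic in reverse, so there is no
transient kernel mapping to tear down either.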
> 
> The second trick is to use pmap_kenter_pa(), which doesn't fail and is
> faster.
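
The contract change in the loop is worth spelling out: with
PMAP_CANFAIL, pmap_enter(9) may fail when resources such as page-table
pages cannot be allocated, forcing the caller to unwind, while
pmap_kenter_pa(9) enters an unmanaged kernel mapping and must never
fail.  A rough sketch of the two contracts (va, pg and prot stand in
for the loop variables in the diff below; this is not the diff
itself):

	/* Old contract: may fail, partial mappings must be undone. */
	if (pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pg),
	    prot, PMAP_WIRED | PMAP_CANFAIL | prot) != 0) {
		/* unwind with pmap_remove() and bail out */
	}

	/* New contract: cannot fail; TLB work is batched in pmap_update(). */
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), prot);
	pmap_update(pmap_kernel());

	/* The matching removal for kenter'd mappings is pmap_kremove(9). */
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());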
> 
> With these changes the "freeze" happening on my server when entering many
> pages into swap in an OOM situation is much shorter, and the machine
> becomes responsive again quickly.
> 
> ok?
> 
> Index: uvm/uvm_pager.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_pager.c,v
> retrieving revision 1.81
> diff -u -p -r1.81 uvm_pager.c
> --- uvm/uvm_pager.c   28 Jun 2022 19:07:40 -0000      1.81
> +++ uvm/uvm_pager.c   30 Jun 2022 13:34:46 -0000
> @@ -258,6 +258,16 @@ uvm_pagermapin(struct vm_page **pps, int
>       vsize_t size;
>       struct vm_page *pp;
>  
> +#ifdef __HAVE_PMAP_DIRECT
> +     /* use direct mappings for single page */
> +     if (npages == 1) {
> +             KASSERT(pps[0]);
> +             KASSERT(pps[0]->pg_flags & PG_BUSY);
> +             kva = pmap_map_direct(pps[0]);
> +             return kva;
> +     }
> +#endif
> +
>       prot = PROT_READ;
>       if (flags & UVMPAGER_MAPIN_READ)
>               prot |= PROT_WRITE;
> @@ -273,14 +283,7 @@ uvm_pagermapin(struct vm_page **pps, int
>               pp = *pps++;
>               KASSERT(pp);
>               KASSERT(pp->pg_flags & PG_BUSY);
> -             /* Allow pmap_enter to fail. */
> -             if (pmap_enter(pmap_kernel(), cva, VM_PAGE_TO_PHYS(pp),
> -                 prot, PMAP_WIRED | PMAP_CANFAIL | prot) != 0) {
> -                     pmap_remove(pmap_kernel(), kva, cva);
> -                     pmap_update(pmap_kernel());
> -                     uvm_pseg_release(kva);
> -                     return 0;
> -             }
> +             pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
>       }
>       pmap_update(pmap_kernel());
>       return kva;
> @@ -294,8 +297,15 @@ uvm_pagermapin(struct vm_page **pps, int
>  void
>  uvm_pagermapout(vaddr_t kva, int npages)
>  {
> +#ifdef __HAVE_PMAP_DIRECT
> +     /* use direct mappings for single page */
> +     if (npages == 1) {
> +             pmap_unmap_direct(kva);
> +             return;
> +     }
> +#endif
>  
> -     pmap_remove(pmap_kernel(), kva, kva + ((vsize_t)npages << PAGE_SHIFT));
> +     pmap_kremove(kva, (vsize_t)npages << PAGE_SHIFT);
>       pmap_update(pmap_kernel());
>       uvm_pseg_release(kva);
>  }
