Re: Merge swap-backed and object-backed inactive lists
> Date: Mon, 2 May 2022 18:39:17 +0200 > From: Martin Pieuchot > > Let's simplify the existing logic and use a single list for inactive > pages. uvmpd_scan_inactive() already does a lot of check if it finds > a page which is swap-backed. This will be improved in a next change. So NetBSD did this in 2001. The commit message doesn't say why though. The simplification is nice and the diff does what it says. > ok? ok kettenis@ > Index: uvm/uvm.h > === > RCS file: /cvs/src/sys/uvm/uvm.h,v > retrieving revision 1.68 > diff -u -p -r1.68 uvm.h > --- uvm/uvm.h 24 Nov 2020 13:49:09 - 1.68 > +++ uvm/uvm.h 2 May 2022 16:32:16 - > @@ -53,8 +53,7 @@ struct uvm { > > /* vm_page queues */ > struct pglist page_active; /* [Q] allocated pages, in use */ > - struct pglist page_inactive_swp;/* [Q] pages inactive (reclaim/free) */ > - struct pglist page_inactive_obj;/* [Q] pages inactive (reclaim/free) */ > + struct pglist page_inactive;/* [Q] pages inactive (reclaim/free) */ > /* Lock order: pageqlock, then fpageqlock. */ > struct mutex pageqlock; /* [] lock for active/inactive page q */ > struct mutex fpageqlock;/* [] lock for free page q + pdaemon */ > Index: uvm/uvm_map.c > === > RCS file: /cvs/src/sys/uvm/uvm_map.c,v > retrieving revision 1.290 > diff -u -p -r1.290 uvm_map.c > --- uvm/uvm_map.c 12 Mar 2022 08:11:07 - 1.290 > +++ uvm/uvm_map.c 2 May 2022 16:32:16 - > @@ -3281,8 +3281,7 @@ uvm_page_printit(struct vm_page *pg, boo > (*pr)(" >>> page not found in uvm_pmemrange <<<\n"); > pgl = NULL; > } else if (pg->pg_flags & PQ_INACTIVE) { > - pgl = (pg->pg_flags & PQ_SWAPBACKED) ? 
> - &uvm.page_inactive_swp : &uvm.page_inactive_obj; > + pgl = &uvm.page_inactive; > } else if (pg->pg_flags & PQ_ACTIVE) { > pgl = &uvm.page_active; > } else { > Index: uvm/uvm_page.c > === > RCS file: /cvs/src/sys/uvm/uvm_page.c,v > retrieving revision 1.164 > diff -u -p -r1.164 uvm_page.c > --- uvm/uvm_page.c28 Apr 2022 09:59:28 - 1.164 > +++ uvm/uvm_page.c2 May 2022 16:32:16 - > @@ -185,8 +185,7 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr >*/ > > TAILQ_INIT(&uvm.page_active); > - TAILQ_INIT(&uvm.page_inactive_swp); > - TAILQ_INIT(&uvm.page_inactive_obj); > + TAILQ_INIT(&uvm.page_inactive); > mtx_init(&uvm.pageqlock, IPL_VM); > mtx_init(&uvm.fpageqlock, IPL_VM); > uvm_pmr_init(); > @@ -994,10 +993,7 @@ uvm_pageclean(struct vm_page *pg) > uvmexp.active--; > } > if (pg->pg_flags & PQ_INACTIVE) { > - if (pg->pg_flags & PQ_SWAPBACKED) > - TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq); > - else > - TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq); > + TAILQ_REMOVE(&uvm.page_inactive, pg, pageq); > flags_to_clear |= PQ_INACTIVE; > uvmexp.inactive--; > } > @@ -1253,10 +1249,7 @@ uvm_pagewire(struct vm_page *pg) > uvmexp.active--; > } > if (pg->pg_flags & PQ_INACTIVE) { > - if (pg->pg_flags & PQ_SWAPBACKED) > - TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq); > - else > - TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq); > + TAILQ_REMOVE(&uvm.page_inactive, pg, pageq); > atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE); > uvmexp.inactive--; > } > @@ -1304,10 +1297,7 @@ uvm_pagedeactivate(struct vm_page *pg) > } > if ((pg->pg_flags & PQ_INACTIVE) == 0) { > KASSERT(pg->wire_count == 0); > - if (pg->pg_flags & PQ_SWAPBACKED) > - TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq); > - else > - TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq); > + TAILQ_INSERT_TAIL(&uvm.page_inactive, pg, pageq); > atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE); > uvmexp.inactive++; > pmap_clear_reference(pg); > @@ -1335,10 +1325,7 @@ uvm_pageactivate(struct vm_page *pg) > 
MUTEX_ASSERT_LOCKED(&uvm.pageqlock); > > if (pg->pg_flags & PQ_INACTIVE) { > - if (pg->pg_flags & PQ_SWAPBACKED) > - TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq); > - else > - TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq); > + TAILQ_REMOVE(&uvm.page_inactive, pg, pageq); > atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE); >
Re: Merge swap-backed and object-backed inactive lists
On Mon, 02 May 2022 18:39:17 +0200, Martin Pieuchot wrote: > Let's simplify the existing logic and use a single list for inactive > pages. uvmpd_scan_inactive() already does a lot of checks if it finds > a page which is swap-backed. This will be improved in an upcoming change. That looks fine to me. Calling uvmpd_scan_inactive() a single time (instead of up to three times!) seems like a win. - todd
Merge swap-backed and object-backed inactive lists
Let's simplify the existing logic and use a single list for inactive pages. uvmpd_scan_inactive() already does a lot of check if it finds a page which is swap-backed. This will be improved in a next change. ok? Index: uvm/uvm.h === RCS file: /cvs/src/sys/uvm/uvm.h,v retrieving revision 1.68 diff -u -p -r1.68 uvm.h --- uvm/uvm.h 24 Nov 2020 13:49:09 - 1.68 +++ uvm/uvm.h 2 May 2022 16:32:16 - @@ -53,8 +53,7 @@ struct uvm { /* vm_page queues */ struct pglist page_active; /* [Q] allocated pages, in use */ - struct pglist page_inactive_swp;/* [Q] pages inactive (reclaim/free) */ - struct pglist page_inactive_obj;/* [Q] pages inactive (reclaim/free) */ + struct pglist page_inactive;/* [Q] pages inactive (reclaim/free) */ /* Lock order: pageqlock, then fpageqlock. */ struct mutex pageqlock; /* [] lock for active/inactive page q */ struct mutex fpageqlock;/* [] lock for free page q + pdaemon */ Index: uvm/uvm_map.c === RCS file: /cvs/src/sys/uvm/uvm_map.c,v retrieving revision 1.290 diff -u -p -r1.290 uvm_map.c --- uvm/uvm_map.c 12 Mar 2022 08:11:07 - 1.290 +++ uvm/uvm_map.c 2 May 2022 16:32:16 - @@ -3281,8 +3281,7 @@ uvm_page_printit(struct vm_page *pg, boo (*pr)(" >>> page not found in uvm_pmemrange <<<\n"); pgl = NULL; } else if (pg->pg_flags & PQ_INACTIVE) { - pgl = (pg->pg_flags & PQ_SWAPBACKED) ? 
- &uvm.page_inactive_swp : &uvm.page_inactive_obj; + pgl = &uvm.page_inactive; } else if (pg->pg_flags & PQ_ACTIVE) { pgl = &uvm.page_active; } else { Index: uvm/uvm_page.c === RCS file: /cvs/src/sys/uvm/uvm_page.c,v retrieving revision 1.164 diff -u -p -r1.164 uvm_page.c --- uvm/uvm_page.c 28 Apr 2022 09:59:28 - 1.164 +++ uvm/uvm_page.c 2 May 2022 16:32:16 - @@ -185,8 +185,7 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr */ TAILQ_INIT(&uvm.page_active); - TAILQ_INIT(&uvm.page_inactive_swp); - TAILQ_INIT(&uvm.page_inactive_obj); + TAILQ_INIT(&uvm.page_inactive); mtx_init(&uvm.pageqlock, IPL_VM); mtx_init(&uvm.fpageqlock, IPL_VM); uvm_pmr_init(); @@ -994,10 +993,7 @@ uvm_pageclean(struct vm_page *pg) uvmexp.active--; } if (pg->pg_flags & PQ_INACTIVE) { - if (pg->pg_flags & PQ_SWAPBACKED) - TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq); - else - TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq); + TAILQ_REMOVE(&uvm.page_inactive, pg, pageq); flags_to_clear |= PQ_INACTIVE; uvmexp.inactive--; } @@ -1253,10 +1249,7 @@ uvm_pagewire(struct vm_page *pg) uvmexp.active--; } if (pg->pg_flags & PQ_INACTIVE) { - if (pg->pg_flags & PQ_SWAPBACKED) - TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq); - else - TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq); + TAILQ_REMOVE(&uvm.page_inactive, pg, pageq); atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE); uvmexp.inactive--; } @@ -1304,10 +1297,7 @@ uvm_pagedeactivate(struct vm_page *pg) } if ((pg->pg_flags & PQ_INACTIVE) == 0) { KASSERT(pg->wire_count == 0); - if (pg->pg_flags & PQ_SWAPBACKED) - TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq); - else - TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq); + TAILQ_INSERT_TAIL(&uvm.page_inactive, pg, pageq); atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE); uvmexp.inactive++; pmap_clear_reference(pg); @@ -1335,10 +1325,7 @@ uvm_pageactivate(struct vm_page *pg) MUTEX_ASSERT_LOCKED(&uvm.pageqlock); if (pg->pg_flags & PQ_INACTIVE) { - if (pg->pg_flags & PQ_SWAPBACKED) - 
TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq); - else - TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq); + TAILQ_REMOVE(&uvm.page_inactive, pg, pageq); atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE); uvmexp.inactive--; } Index: uvm/uvm_pdaemon.c === RCS file: /cvs/src/sys/uvm/uvm_pdaemon.c,v retrieving revision 1.97 diff -u -p -r1.97 uvm_pdaemon.c --- uvm/uvm_pdaemon.c 30 Apr 2022 1