Let's simplify the existing logic and use a single list for inactive pages. uvmpd_scan_inactive() already does a lot of checks when it finds a swap-backed page; those checks will be improved in an upcoming change.
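To illustrate what the unified queue amounts to, here is a minimal userland sketch built on the <sys/queue.h> TAILQ macros. struct page, page_deactivate() and page_activate() are simplified stand-ins for the kernel's vm_page handling, not the real UVM API; the point is that with one pglist there is no PQ_SWAPBACKED test on any enqueue/dequeue path:

    #include <sys/queue.h>
    #include <stdio.h>

    /* Hypothetical stand-in for struct vm_page; only the queue
     * linkage and an "inactive" flag are modeled here. */
    struct page {
            TAILQ_ENTRY(page) pageq;
            int inactive;           /* stand-in for PQ_INACTIVE */
    };

    TAILQ_HEAD(pglist, page);

    /* One inactive queue, as in the diff below, instead of separate
     * swap-backed and object-backed queues. */
    struct pglist page_inactive = TAILQ_HEAD_INITIALIZER(page_inactive);

    /* Analogous to uvm_pagedeactivate(): insert at the tail so the
     * pagedaemon scans the least recently deactivated pages first. */
    void
    page_deactivate(struct page *pg)
    {
            if (!pg->inactive) {
                    TAILQ_INSERT_TAIL(&page_inactive, pg, pageq);
                    pg->inactive = 1;
            }
    }

    /* Analogous to uvm_pageactivate(): a page leaving the inactive
     * state comes off the single queue, no swap-backed distinction. */
    void
    page_activate(struct page *pg)
    {
            if (pg->inactive) {
                    TAILQ_REMOVE(&page_inactive, pg, pageq);
                    pg->inactive = 0;
            }
    }

    int
    main(void)
    {
            struct page a = { .inactive = 0 }, b = { .inactive = 0 };

            page_deactivate(&a);
            page_deactivate(&b);
            page_activate(&a);
            printf("inactive: a=%d b=%d\n", a.inactive, b.inactive);
            return 0;
    }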
ok?

Index: uvm/uvm.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm.h,v
retrieving revision 1.68
diff -u -p -r1.68 uvm.h
--- uvm/uvm.h	24 Nov 2020 13:49:09 -0000	1.68
+++ uvm/uvm.h	2 May 2022 16:32:16 -0000
@@ -53,8 +53,7 @@ struct uvm {
 
 	/* vm_page queues */
 	struct pglist page_active;	/* [Q] allocated pages, in use */
-	struct pglist page_inactive_swp;/* [Q] pages inactive (reclaim/free) */
-	struct pglist page_inactive_obj;/* [Q] pages inactive (reclaim/free) */
+	struct pglist page_inactive;	/* [Q] pages inactive (reclaim/free) */
 	/* Lock order: pageqlock, then fpageqlock. */
 	struct mutex pageqlock;		/* [] lock for active/inactive page q */
 	struct mutex fpageqlock;	/* [] lock for free page q + pdaemon */
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.290
diff -u -p -r1.290 uvm_map.c
--- uvm/uvm_map.c	12 Mar 2022 08:11:07 -0000	1.290
+++ uvm/uvm_map.c	2 May 2022 16:32:16 -0000
@@ -3281,8 +3281,7 @@ uvm_page_printit(struct vm_page *pg, boo
 			(*pr)(" >>> page not found in uvm_pmemrange <<<\n");
 			pgl = NULL;
 		} else if (pg->pg_flags & PQ_INACTIVE) {
-			pgl = (pg->pg_flags & PQ_SWAPBACKED) ?
-			    &uvm.page_inactive_swp : &uvm.page_inactive_obj;
+			pgl = &uvm.page_inactive;
 		} else if (pg->pg_flags & PQ_ACTIVE) {
 			pgl = &uvm.page_active;
 		} else {
Index: uvm/uvm_page.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_page.c,v
retrieving revision 1.164
diff -u -p -r1.164 uvm_page.c
--- uvm/uvm_page.c	28 Apr 2022 09:59:28 -0000	1.164
+++ uvm/uvm_page.c	2 May 2022 16:32:16 -0000
@@ -185,8 +185,7 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr
 	 */
 
 	TAILQ_INIT(&uvm.page_active);
-	TAILQ_INIT(&uvm.page_inactive_swp);
-	TAILQ_INIT(&uvm.page_inactive_obj);
+	TAILQ_INIT(&uvm.page_inactive);
 	mtx_init(&uvm.pageqlock, IPL_VM);
 	mtx_init(&uvm.fpageqlock, IPL_VM);
 	uvm_pmr_init();
@@ -994,10 +993,7 @@ uvm_pageclean(struct vm_page *pg)
 		uvmexp.active--;
 	}
 	if (pg->pg_flags & PQ_INACTIVE) {
-		if (pg->pg_flags & PQ_SWAPBACKED)
-			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
-		else
-			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+		TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
 		flags_to_clear |= PQ_INACTIVE;
 		uvmexp.inactive--;
 	}
@@ -1253,10 +1249,7 @@ uvm_pagewire(struct vm_page *pg)
 		uvmexp.active--;
 	}
 	if (pg->pg_flags & PQ_INACTIVE) {
-		if (pg->pg_flags & PQ_SWAPBACKED)
-			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
-		else
-			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+		TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
 		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
 		uvmexp.inactive--;
 	}
@@ -1304,10 +1297,7 @@ uvm_pagedeactivate(struct vm_page *pg)
 	}
 	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
 		KASSERT(pg->wire_count == 0);
-		if (pg->pg_flags & PQ_SWAPBACKED)
-			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
-		else
-			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
+		TAILQ_INSERT_TAIL(&uvm.page_inactive, pg, pageq);
 		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
 		uvmexp.inactive++;
 		pmap_clear_reference(pg);
@@ -1335,10 +1325,7 @@ uvm_pageactivate(struct vm_page *pg)
 	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
 
 	if (pg->pg_flags & PQ_INACTIVE) {
-		if (pg->pg_flags & PQ_SWAPBACKED)
-			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
-		else
-			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
+		TAILQ_REMOVE(&uvm.page_inactive, pg, pageq);
 		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
 		uvmexp.inactive--;
 	}
Index: uvm/uvm_pdaemon.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_pdaemon.c,v
retrieving revision 1.97
diff -u -p -r1.97 uvm_pdaemon.c
--- uvm/uvm_pdaemon.c	30 Apr 2022 17:58:43 -0000	1.97
+++ uvm/uvm_pdaemon.c	2 May 2022 16:32:16 -0000
@@ -396,13 +396,6 @@ uvmpd_scan_inactive(struct pglist *pglst
 	int dirtyreacts;
 
 	/*
-	 * note: we currently keep swap-backed pages on a separate inactive
-	 * list from object-backed pages. however, merging the two lists
-	 * back together again hasn't been ruled out. thus, we keep our
-	 * swap cluster in "swpps" rather than in pps (allows us to mix
-	 * clustering types in the event of a mixed inactive queue).
-	 */
-	/*
 	 * swslot is non-zero if we are building a swap cluster. we want
 	 * to stay in the loop while we have a page to scan or we have
 	 * a swap-cluster to build.
@@ -881,7 +874,6 @@ uvmpd_scan(void)
 	struct uvm_object *uobj;
 	struct vm_anon *anon;
 	struct rwlock *slock;
-	boolean_t got_it;
 
 	MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
 
@@ -917,14 +909,8 @@ uvmpd_scan(void)
 	 * alternate starting queue between swap and object based on the
 	 * low bit of uvmexp.pdrevs (which we bump by one each call).
 	 */
-	got_it = FALSE;
-	pages_freed = uvmexp.pdfreed;	/* XXX - int */
-	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
-		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
-	if (!got_it)
-		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
-	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
-		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
+	pages_freed = uvmexp.pdfreed;
+	(void) uvmpd_scan_inactive(&uvm.page_inactive);
 	pages_freed = uvmexp.pdfreed - pages_freed;
 
 	/*
@@ -1069,8 +1055,7 @@ uvmpd_drop(struct pglist *pglst)
 
 void
 uvmpd_hibernate(void)
 {
-	uvmpd_drop(&uvm.page_inactive_swp);
-	uvmpd_drop(&uvm.page_inactive_obj);
+	uvmpd_drop(&uvm.page_inactive);
 	uvmpd_drop(&uvm.page_active);
 }
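To make the removed policy concrete: the starting-queue alternation that the uvmpd_scan() hunk above deletes boils down to a few lines. Below is a self-contained userland sketch of that old behaviour; pdrevs and nswapdev mimic the uvmexp counters, and scan_swp()/scan_obj() are made-up stubs standing in for uvmpd_scan_inactive() on the two old queues. With a single inactive queue the whole dance collapses into one call:

    #include <stdio.h>

    /* Stand-ins for the kernel counters the old policy consulted. */
    static int pdrevs;              /* pagedaemon revolutions */
    static int nswapdev = 1;        /* configured swap devices */

    /* Stub scanners; return nonzero if a page was freed. */
    static int scan_swp(void) { puts("scan swap-backed queue"); return 0; }
    static int scan_obj(void) { puts("scan object-backed queue"); return 1; }

    /* The old policy: alternate the starting queue on the low bit of
     * pdrevs, and fall through to the other queue if the first scan
     * freed nothing. */
    static void
    old_scan(void)
    {
            int got_it = 0;

            if ((pdrevs & 1) != 0 && nswapdev != 0)
                    got_it = scan_swp();
            if (!got_it)
                    got_it = scan_obj();
            if (!got_it && (pdrevs & 1) == 0 && nswapdev != 0)
                    (void)scan_swp();
            pdrevs++;
    }

    int
    main(void)
    {
            old_scan();     /* pdrevs even: object queue first */
            old_scan();     /* pdrevs odd: swap queue first */
            return 0;
    }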