Author: markj
Date: Tue Dec 10 18:14:50 2019
New Revision: 355586
URL: https://svnweb.freebsd.org/changeset/base/355586

Log:
  Introduce vm_page_astate.
  
  This is a 32-bit structure embedded in each vm_page, consisting mostly
  of page queue state.  The use of a structure makes it easy to store a
  snapshot of a page's queue state in a stack variable and use cmpset
  loops to update that state without requiring the page lock.
  
  This change merely adds the structure and updates references to atomic
  state fields.  No functional change intended.
  
  Reviewed by:  alc, jeff, kib
  Sponsored by: Netflix, Intel
  Differential Revision:        https://reviews.freebsd.org/D22650

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/amd64/include/pmap.h
  head/sys/arm/include/pmap.h
  head/sys/arm64/arm64/pmap.c
  head/sys/arm64/include/pmap.h
  head/sys/dev/virtio/balloon/virtio_balloon.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/include/pmap.h
  head/sys/mips/include/pmap.h
  head/sys/mips/mips/pmap.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/include/pmap.h
  head/sys/riscv/include/pmap.h
  head/sys/riscv/riscv/pmap.c
  head/sys/sparc64/include/pmap.h
  head/sys/vm/memguard.c
  head/sys/vm/swap_pager.c
  head/sys/vm/vm_mmap.c
  head/sys/vm/vm_object.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h
  head/sys/vm/vm_pageout.c
  head/sys/vm/vm_swapout.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/amd64/amd64/pmap.c Tue Dec 10 18:14:50 2019        (r355586)
@@ -6104,7 +6104,7 @@ retry:
                            ("pmap_enter: no PV entry for %#lx", va));
                        if ((newpte & PG_MANAGED) == 0)
                                free_pv_entry(pmap, pv);
-                       if ((om->aflags & PGA_WRITEABLE) != 0 &&
+                       if ((om->a.flags & PGA_WRITEABLE) != 0 &&
                            TAILQ_EMPTY(&om->md.pv_list) &&
                            ((om->flags & PG_FICTITIOUS) != 0 ||
                            TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
@@ -7297,7 +7297,7 @@ pmap_remove_pages(pmap_t pmap)
                                        pvh->pv_gen++;
                                        if (TAILQ_EMPTY(&pvh->pv_list)) {
                                                for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
-                                                       if ((mt->aflags & PGA_WRITEABLE) != 0 &&
+                                                       if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
                                                            TAILQ_EMPTY(&mt->md.pv_list))
                                                                vm_page_aflag_clear(mt, PGA_WRITEABLE);
                                        }
@@ -7315,7 +7315,7 @@ pmap_remove_pages(pmap_t pmap)
                                        pmap_resident_count_dec(pmap, 1);
                                        TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
                                        m->md.pv_gen++;
-                                       if ((m->aflags & PGA_WRITEABLE) != 0 &&
+                                       if ((m->a.flags & PGA_WRITEABLE) != 0 &&
                                            TAILQ_EMPTY(&m->md.pv_list) &&
                                            (m->flags & PG_FICTITIOUS) == 0) {
                                                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));

Modified: head/sys/amd64/include/pmap.h
==============================================================================
--- head/sys/amd64/include/pmap.h       Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/amd64/include/pmap.h       Tue Dec 10 18:14:50 2019        (r355586)
@@ -415,8 +415,8 @@ extern int pmap_pcid_enabled;
 extern int invpcid_works;
 
 #define        pmap_page_get_memattr(m)        ((vm_memattr_t)(m)->md.pat_mode)
-#define        pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
-#define        pmap_unmapbios(va, sz)  pmap_unmapdev((va), (sz))
+#define        pmap_page_is_write_mapped(m)    (((m)->a.flags & PGA_WRITEABLE) != 0)
+#define        pmap_unmapbios(va, sz)          pmap_unmapdev((va), (sz))
 
 struct thread;
 

Modified: head/sys/arm/include/pmap.h
==============================================================================
--- head/sys/arm/include/pmap.h Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/arm/include/pmap.h Tue Dec 10 18:14:50 2019        (r355586)
@@ -47,7 +47,7 @@ extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
 void *pmap_kenter_temporary(vm_paddr_t, int);
-#define        pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
+#define        pmap_page_is_write_mapped(m)    (((m)->a.flags & PGA_WRITEABLE) != 0)
 void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
 
 void *pmap_mapdev(vm_paddr_t, vm_size_t);

Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/arm64/arm64/pmap.c Tue Dec 10 18:14:50 2019        (r355586)
@@ -3415,7 +3415,7 @@ havel3:
                        pv = pmap_pvh_remove(&om->md, pmap, va);
                        if ((m->oflags & VPO_UNMANAGED) != 0)
                                free_pv_entry(pmap, pv);
-                       if ((om->aflags & PGA_WRITEABLE) != 0 &&
+                       if ((om->a.flags & PGA_WRITEABLE) != 0 &&
                            TAILQ_EMPTY(&om->md.pv_list) &&
                            ((om->flags & PG_FICTITIOUS) != 0 ||
                            TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
@@ -4486,7 +4486,7 @@ pmap_remove_pages(pmap_t pmap)
                                        pvh->pv_gen++;
                                        if (TAILQ_EMPTY(&pvh->pv_list)) {
                                                for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
-                                                       if ((mt->aflags & PGA_WRITEABLE) != 0 &&
+                                                       if ((mt->a.flags & PGA_WRITEABLE) != 0 &&
                                                            TAILQ_EMPTY(&mt->md.pv_list))
                                                                vm_page_aflag_clear(mt, PGA_WRITEABLE);
                                        }
@@ -4508,7 +4508,7 @@ pmap_remove_pages(pmap_t pmap)
                                        TAILQ_REMOVE(&m->md.pv_list, pv,
                                            pv_next);
                                        m->md.pv_gen++;
-                                       if ((m->aflags & PGA_WRITEABLE) != 0 &&
+                                       if ((m->a.flags & PGA_WRITEABLE) != 0 &&
                                            TAILQ_EMPTY(&m->md.pv_list) &&
                                            (m->flags & PG_FICTITIOUS) == 0) {
                                                pvh = pa_to_pvh(

Modified: head/sys/arm64/include/pmap.h
==============================================================================
--- head/sys/arm64/include/pmap.h       Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/arm64/include/pmap.h       Tue Dec 10 18:14:50 2019        (r355586)
@@ -53,7 +53,7 @@
 #endif
 
 #define        pmap_page_get_memattr(m)        ((m)->md.pv_memattr)
-#define        pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
+#define        pmap_page_is_write_mapped(m)    (((m)->a.flags & PGA_WRITEABLE) != 0)
 void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
 
 /*

Modified: head/sys/dev/virtio/balloon/virtio_balloon.c
==============================================================================
--- head/sys/dev/virtio/balloon/virtio_balloon.c        Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/dev/virtio/balloon/virtio_balloon.c        Tue Dec 10 18:14:50 2019        (r355586)
@@ -332,7 +332,7 @@ vtballoon_inflate(struct vtballoon_softc *sc, int npag
                sc->vtballoon_page_frames[i] =
                    VM_PAGE_TO_PHYS(m) >> VIRTIO_BALLOON_PFN_SHIFT;
 
-               KASSERT(m->queue == PQ_NONE,
+               KASSERT(m->a.queue == PQ_NONE,
                    ("%s: allocated page %p on queue", __func__, m));
                TAILQ_INSERT_TAIL(&sc->vtballoon_pages, m, plinks.q);
        }

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c   Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/i386/i386/pmap.c   Tue Dec 10 18:14:50 2019        (r355586)
@@ -3783,7 +3783,7 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, v
                            ("pmap_enter: no PV entry for %#x", va));
                        if ((newpte & PG_MANAGED) == 0)
                                free_pv_entry(pmap, pv);
-                       if ((om->aflags & PGA_WRITEABLE) != 0 &&
+                       if ((om->a.flags & PGA_WRITEABLE) != 0 &&
                            TAILQ_EMPTY(&om->md.pv_list) &&
                            ((om->flags & PG_FICTITIOUS) != 0 ||
                            TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))

Modified: head/sys/i386/include/pmap.h
==============================================================================
--- head/sys/i386/include/pmap.h        Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/i386/include/pmap.h        Tue Dec 10 18:14:50 2019        (r355586)
@@ -239,7 +239,7 @@ extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
 #define        pmap_page_get_memattr(m)        ((vm_memattr_t)(m)->md.pat_mode)
-#define        pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
+#define        pmap_page_is_write_mapped(m)    (((m)->a.flags & PGA_WRITEABLE) != 0)
 #define        pmap_unmapbios(va, sz)  pmap_unmapdev((va), (sz))
 
 static inline int

Modified: head/sys/mips/include/pmap.h
==============================================================================
--- head/sys/mips/include/pmap.h        Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/mips/include/pmap.h        Tue Dec 10 18:14:50 2019        (r355586)
@@ -164,7 +164,7 @@ extern vm_offset_t virtual_end;
 
 #define        pmap_page_get_memattr(m) (((m)->md.pv_flags & PV_MEMATTR_MASK) >> PV_MEMATTR_SHIFT)
 #define        pmap_page_is_mapped(m)  (!TAILQ_EMPTY(&(m)->md.pv_list))
-#define        pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
+#define        pmap_page_is_write_mapped(m)    (((m)->a.flags & PGA_WRITEABLE) != 0)
 
 void pmap_bootstrap(void);
 void *pmap_mapdev(vm_paddr_t, vm_size_t);

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c   Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/mips/mips/pmap.c   Tue Dec 10 18:14:50 2019        (r355586)
@@ -2158,7 +2158,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
                        pv = pmap_pvh_remove(&om->md, pmap, va);
                        if (!pte_test(&newpte, PTE_MANAGED))
                                free_pv_entry(pmap, pv);
-                       if ((om->aflags & PGA_WRITEABLE) != 0 &&
+                       if ((om->a.flags & PGA_WRITEABLE) != 0 &&
                            TAILQ_EMPTY(&om->md.pv_list))
                                vm_page_aflag_clear(om, PGA_WRITEABLE);
                }
@@ -3223,7 +3223,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t
                 * determine if the address is MINCORE_REFERENCED.
                 */
                m = PHYS_TO_VM_PAGE(pa);
-               if ((m->aflags & PGA_REFERENCED) != 0)
+               if ((m->a.flags & PGA_REFERENCED) != 0)
                        val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
        }
        if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c      Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/powerpc/aim/mmu_oea.c      Tue Dec 10 18:14:50 2019        (r355586)
@@ -1906,7 +1906,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
                moea_pvo_remove(pvo, -1);
                PMAP_UNLOCK(pmap);
        }
-       if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
+       if ((m->a.flags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
                moea_attr_clear(m, PTE_CHG);
                vm_page_dirty(m);
        }

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/powerpc/aim/mmu_oea64.c    Tue Dec 10 18:14:50 2019        (r355586)
@@ -1493,7 +1493,7 @@ out:
         * Flush the page from the instruction cache if this page is
         * mapped executable and cacheable.
         */
-       if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
+       if (pmap != kernel_pmap && (m->a.flags & PGA_EXECUTABLE) == 0 &&
            (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
                vm_page_aflag_set(m, PGA_EXECUTABLE);
                moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
@@ -2254,7 +2254,8 @@ moea64_pvo_protect(mmu_t mmu,  pmap_t pm, struct pvo_e
        if (refchg < 0)
                refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
 
-       if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
+       if (pm != kernel_pmap && pg != NULL &&
+           (pg->a.flags & PGA_EXECUTABLE) == 0 &&
            (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
                if ((pg->oflags & VPO_UNMANAGED) == 0)
                        vm_page_aflag_set(pg, PGA_EXECUTABLE);
@@ -2468,7 +2469,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
                
        }
        KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
-       KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
+       KASSERT((m->a.flags & PGA_WRITEABLE) == 0, ("Page still writable"));
        PV_PAGE_UNLOCK(m);
 
        /* Clean up UMA allocations */

Modified: head/sys/powerpc/include/pmap.h
==============================================================================
--- head/sys/powerpc/include/pmap.h     Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/powerpc/include/pmap.h     Tue Dec 10 18:14:50 2019        (r355586)
@@ -249,7 +249,7 @@ extern      struct pmap kernel_pmap_store;
 #define        PMAP_TRYLOCK(pmap)      mtx_trylock(&(pmap)->pm_mtx)
 #define        PMAP_UNLOCK(pmap)       mtx_unlock(&(pmap)->pm_mtx)
 
-#define        pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
+#define        pmap_page_is_write_mapped(m)    (((m)->a.flags & PGA_WRITEABLE) != 0)
 
 void           pmap_bootstrap(vm_offset_t, vm_offset_t);
 void           pmap_kenter(vm_offset_t va, vm_paddr_t pa);

Modified: head/sys/riscv/include/pmap.h
==============================================================================
--- head/sys/riscv/include/pmap.h       Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/riscv/include/pmap.h       Tue Dec 10 18:14:50 2019        (r355586)
@@ -54,7 +54,7 @@
 #endif
 
 #define        pmap_page_get_memattr(m)        ((m)->md.pv_memattr)
-#define        pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
+#define        pmap_page_is_write_mapped(m)    (((m)->a.flags & PGA_WRITEABLE) != 0)
 void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
 
 /*

Modified: head/sys/riscv/riscv/pmap.c
==============================================================================
--- head/sys/riscv/riscv/pmap.c Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/riscv/riscv/pmap.c Tue Dec 10 18:14:50 2019        (r355586)
@@ -2832,7 +2832,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
                            ("pmap_enter: no PV entry for %#lx", va));
                        if ((new_l3 & PTE_SW_MANAGED) == 0)
                                free_pv_entry(pmap, pv);
-                       if ((om->aflags & PGA_WRITEABLE) != 0 &&
+                       if ((om->a.flags & PGA_WRITEABLE) != 0 &&
                            TAILQ_EMPTY(&om->md.pv_list) &&
                            ((om->flags & PG_FICTITIOUS) != 0 ||
                            TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
@@ -3586,7 +3586,7 @@ pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entr
                if (TAILQ_EMPTY(&pvh->pv_list)) {
                        for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
                                if (TAILQ_EMPTY(&mt->md.pv_list) &&
-                                   (mt->aflags & PGA_WRITEABLE) != 0)
+                                   (mt->a.flags & PGA_WRITEABLE) != 0)
                                        vm_page_aflag_clear(mt, PGA_WRITEABLE);
                }
                mpte = pmap_remove_pt_page(pmap, pv->pv_va);
@@ -3604,7 +3604,7 @@ pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entr
                TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
                m->md.pv_gen++;
                if (TAILQ_EMPTY(&m->md.pv_list) &&
-                   (m->aflags & PGA_WRITEABLE) != 0) {
+                   (m->a.flags & PGA_WRITEABLE) != 0) {
                        pvh = pa_to_pvh(m->phys_addr);
                        if (TAILQ_EMPTY(&pvh->pv_list))
                                vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -4138,7 +4138,7 @@ pmap_clear_modify(vm_page_t m)
         * If the object containing the page is locked and the page is not
         * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
         */
-       if ((m->aflags & PGA_WRITEABLE) == 0)
+       if ((m->a.flags & PGA_WRITEABLE) == 0)
                return;
        pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
            pa_to_pvh(VM_PAGE_TO_PHYS(m));

Modified: head/sys/sparc64/include/pmap.h
==============================================================================
--- head/sys/sparc64/include/pmap.h     Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/sparc64/include/pmap.h     Tue Dec 10 18:14:50 2019        (r355586)
@@ -82,7 +82,7 @@ struct pmap {
 #define        PMAP_UNLOCK(pmap)       mtx_unlock(&(pmap)->pm_mtx)
 
 #define        pmap_page_get_memattr(m)        VM_MEMATTR_DEFAULT
-#define        pmap_page_is_write_mapped(m)    (((m)->aflags & PGA_WRITEABLE) != 0)
+#define        pmap_page_is_write_mapped(m)    (((m)->a.flags & PGA_WRITEABLE) != 0)
 #define        pmap_page_set_memattr(m, ma)    (void)0
 
 void   pmap_bootstrap(u_int cpu_impl);

Modified: head/sys/vm/memguard.c
==============================================================================
--- head/sys/vm/memguard.c      Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/vm/memguard.c      Tue Dec 10 18:14:50 2019        (r355586)
@@ -262,7 +262,7 @@ v2sizep(vm_offset_t va)
        if (pa == 0)
                panic("MemGuard detected double-free of %p", (void *)va);
        p = PHYS_TO_VM_PAGE(pa);
-       KASSERT(vm_page_wired(p) && p->queue == PQ_NONE,
+       KASSERT(vm_page_wired(p) && p->a.queue == PQ_NONE,
            ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
        return (&p->plinks.memguard.p);
 }
@@ -277,7 +277,7 @@ v2sizev(vm_offset_t va)
        if (pa == 0)
                panic("MemGuard detected double-free of %p", (void *)va);
        p = PHYS_TO_VM_PAGE(pa);
-       KASSERT(vm_page_wired(p) && p->queue == PQ_NONE,
+       KASSERT(vm_page_wired(p) && p->a.queue == PQ_NONE,
            ("MEMGUARD: Expected wired page %p in vtomgfifo!", p));
        return (&p->plinks.memguard.v);
 }

Modified: head/sys/vm/swap_pager.c
==============================================================================
--- head/sys/vm/swap_pager.c    Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/vm/swap_pager.c    Tue Dec 10 18:14:50 2019        (r355586)
@@ -1669,7 +1669,7 @@ swp_pager_force_dirty(vm_page_t m)
        vm_page_dirty(m);
 #ifdef INVARIANTS
        vm_page_lock(m);
-       if (!vm_page_wired(m) && m->queue == PQ_NONE)
+       if (!vm_page_wired(m) && m->a.queue == PQ_NONE)
                panic("page %p is neither wired nor queued", m);
        vm_page_unlock(m);
 #endif

Modified: head/sys/vm/vm_mmap.c
==============================================================================
--- head/sys/vm/vm_mmap.c       Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/vm/vm_mmap.c       Tue Dec 10 18:14:50 2019        (r355586)
@@ -931,9 +931,9 @@ retry:
                                 * and set PGA_REFERENCED before the call to
                                 * pmap_is_referenced(). 
                                 */
-                               if ((m->aflags & PGA_REFERENCED) != 0 ||
+                               if ((m->a.flags & PGA_REFERENCED) != 0 ||
                                    pmap_is_referenced(m) ||
-                                   (m->aflags & PGA_REFERENCED) != 0)
+                                   (m->a.flags & PGA_REFERENCED) != 0)
                                        mincoreinfo |= MINCORE_REFERENCED_OTHER;
                        }
                        if (object != NULL)

Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c     Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/vm/vm_object.c     Tue Dec 10 18:14:50 2019        (r355586)
@@ -897,7 +897,7 @@ vm_object_page_remove_write(vm_page_t p, int flags, bo
         * nosync page, skip it.  Note that the object flags were not
         * cleared in this case so we do not have to set them.
         */
-       if ((flags & OBJPC_NOSYNC) != 0 && (p->aflags & PGA_NOSYNC) != 0) {
+       if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) {
                *allclean = FALSE;
                return (FALSE);
        } else {
@@ -2472,9 +2472,9 @@ sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
                         * sysctl is only meant to give an
                         * approximation of the system anyway.
                         */
-                       if (m->queue == PQ_ACTIVE)
+                       if (m->a.queue == PQ_ACTIVE)
                                kvo->kvo_active++;
-                       else if (m->queue == PQ_INACTIVE)
+                       else if (m->a.queue == PQ_INACTIVE)
                                kvo->kvo_inactive++;
                }
 

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c       Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/vm/vm_page.c       Tue Dec 10 18:14:50 2019        (r355586)
@@ -436,9 +436,9 @@ vm_page_init_marker(vm_page_t marker, int queue, uint1
 
        bzero(marker, sizeof(*marker));
        marker->flags = PG_MARKER;
-       marker->aflags = aflags;
+       marker->a.flags = aflags;
        marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
-       marker->queue = queue;
+       marker->a.queue = queue;
 }
 
 static void
@@ -508,9 +508,9 @@ vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segi
        m->object = NULL;
        m->ref_count = 0;
        m->busy_lock = VPB_UNBUSIED;
-       m->flags = m->aflags = 0;
+       m->flags = m->a.flags = 0;
        m->phys_addr = pa;
-       m->queue = PQ_NONE;
+       m->a.queue = PQ_NONE;
        m->psind = 0;
        m->segind = segind;
        m->order = VM_NFREEORDER;
@@ -1265,7 +1265,7 @@ vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_mem
                goto memattr;
        }
        m->phys_addr = paddr;
-       m->queue = PQ_NONE;
+       m->a.queue = PQ_NONE;
        /* Fictitious pages don't use "segind". */
        m->flags = PG_FICTITIOUS;
        /* Fictitious pages don't use "order" or "pool". */
@@ -2002,7 +2002,7 @@ found:
        if ((req & VM_ALLOC_NODUMP) != 0)
                flags |= PG_NODUMP;
        m->flags = flags;
-       m->aflags = 0;
+       m->a.flags = 0;
        m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
            VPO_UNMANAGED : 0;
        m->busy_lock = VPB_UNBUSIED;
@@ -2018,7 +2018,7 @@ found:
                vm_wire_add(1);
                m->ref_count = 1;
        }
-       m->act_count = 0;
+       m->a.act_count = 0;
 
        if (object != NULL) {
                if (vm_page_insert_after(m, object, pindex, mpred)) {
@@ -2212,12 +2212,12 @@ found:
                        memattr = object->memattr;
        }
        for (m = m_ret; m < &m_ret[npages]; m++) {
-               m->aflags = 0;
+               m->a.flags = 0;
                m->flags = (m->flags | PG_NODUMP) & flags;
                m->busy_lock = busy_lock;
                if ((req & VM_ALLOC_WIRED) != 0)
                        m->ref_count = 1;
-               m->act_count = 0;
+               m->a.act_count = 0;
                m->oflags = oflags;
                if (object != NULL) {
                        if (vm_page_insert_after(m, object, pindex, mpred)) {
@@ -2260,9 +2260,10 @@ vm_page_alloc_check(vm_page_t m)
 {
 
        KASSERT(m->object == NULL, ("page %p has object", m));
-       KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
+       KASSERT(m->a.queue == PQ_NONE &&
+           (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
            ("page %p has unexpected queue %d, flags %#x",
-           m, m->queue, (m->aflags & PGA_QUEUE_STATE_MASK)));
+           m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
        KASSERT(m->ref_count == 0, ("page %p has references", m));
        KASSERT(!vm_page_busied(m), ("page %p is busy", m));
        KASSERT(m->dirty == 0, ("page %p is dirty", m));
@@ -2336,7 +2337,7 @@ again:
        /*
         * Initialize the page.  Only the PG_ZERO flag is inherited.
         */
-       m->aflags = 0;
+       m->a.flags = 0;
        flags = 0;
        if ((req & VM_ALLOC_ZERO) != 0)
                flags = PG_ZERO;
@@ -2744,7 +2745,7 @@ retry:
                                         * and dequeued.  Finally, change "m"
                                         * as if vm_page_free() was called.
                                         */
-                                       m_new->aflags = m->aflags &
+                                       m_new->a.flags = m->a.flags &
                                            ~PGA_QUEUE_STATE_MASK;
                                        KASSERT(m_new->oflags == VPO_UNMANAGED,
                                            ("page %p is managed", m_new));
@@ -3216,7 +3217,7 @@ vm_page_pagequeue(vm_page_t m)
 
        uint8_t queue;
 
-       if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
+       if ((queue = atomic_load_8(&m->a.queue)) == PQ_NONE)
                return (NULL);
        return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
 }
@@ -3231,11 +3232,11 @@ vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_pa
        vm_pagequeue_assert_locked(pq);
 
        /*
-        * The page daemon is allowed to set m->queue = PQ_NONE without
+        * The page daemon is allowed to set m->a.queue = PQ_NONE without
         * the page queue lock held.  In this case it is about to free the page,
         * which must not have any queue state.
         */
-       qflags = atomic_load_16(&m->aflags);
+       qflags = atomic_load_16(&m->a.flags);
        KASSERT(pq == vm_page_pagequeue(m) ||
            (qflags & PGA_QUEUE_STATE_MASK) == 0,
            ("page %p doesn't belong to queue %p but has aflags %#x",
@@ -3261,7 +3262,7 @@ vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_pa
                 * first.
                 */
                if ((qflags & PGA_REQUEUE_HEAD) != 0) {
-                       KASSERT(m->queue == PQ_INACTIVE,
+                       KASSERT(m->a.queue == PQ_INACTIVE,
                            ("head enqueue not supported for page %p", m));
                        vmd = vm_pagequeue_domain(m);
                        TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
@@ -3285,7 +3286,7 @@ vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_
 
        for (i = 0; i < bq->bq_cnt; i++) {
                m = bq->bq_pa[i];
-               if (__predict_false(m->queue != queue))
+               if (__predict_false(m->a.queue != queue))
                        continue;
                vm_pqbatch_process_page(pq, m);
        }
@@ -3297,7 +3298,7 @@ vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_
  *
  *     Enqueue a page in the specified page queue's batched work queue.
  *     The caller must have encoded the requested operation in the page
- *     structure's aflags field.
+ *     structure's a.flags field.
  */
 void
 vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
@@ -3333,12 +3334,12 @@ vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
         * or the page is being freed, a different thread cannot be concurrently
         * enqueuing the page.
         */
-       if (__predict_true(m->queue == queue))
+       if (__predict_true(m->a.queue == queue))
                vm_pqbatch_process_page(pq, m);
        else {
-               KASSERT(m->queue == PQ_NONE,
+               KASSERT(m->a.queue == PQ_NONE,
                    ("invalid queue transition for page %p", m));
-               KASSERT((m->aflags & PGA_ENQUEUED) == 0,
+               KASSERT((m->a.flags & PGA_ENQUEUED) == 0,
                    ("page %p is enqueued with invalid queue index", m));
        }
        vm_pagequeue_unlock(pq);
@@ -3394,7 +3395,7 @@ static void
 vm_page_dequeue_complete(vm_page_t m)
 {
 
-       m->queue = PQ_NONE;
+       m->a.queue = PQ_NONE;
        atomic_thread_fence_rel();
        vm_page_aflag_clear(m, PGA_QUEUE_STATE_MASK);
 }
@@ -3446,10 +3447,10 @@ vm_page_dequeue_deferred_free(vm_page_t m)
        KASSERT(m->ref_count == 0, ("page %p has references", m));
 
        for (;;) {
-               if ((m->aflags & PGA_DEQUEUE) != 0)
+               if ((m->a.flags & PGA_DEQUEUE) != 0)
                        return;
                atomic_thread_fence_acq();
-               if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
+               if ((queue = atomic_load_8(&m->a.queue)) == PQ_NONE)
                        return;
                if (vm_page_pqstate_cmpset(m, queue, queue, PGA_DEQUEUE,
                    PGA_DEQUEUE)) {
@@ -3483,7 +3484,7 @@ vm_page_dequeue(vm_page_t m)
                         * vm_page_dequeue_complete().  Ensure that all queue
                         * state is cleared before we return.
                         */
-                       aflags = atomic_load_16(&m->aflags);
+                       aflags = atomic_load_16(&m->a.flags);
                        if ((aflags & PGA_QUEUE_STATE_MASK) == 0)
                                return;
                        KASSERT((aflags & PGA_DEQUEUE) != 0,
@@ -3506,11 +3507,11 @@ vm_page_dequeue(vm_page_t m)
        }
        KASSERT(pq == vm_page_pagequeue(m),
            ("%s: page %p migrated directly between queues", __func__, m));
-       KASSERT((m->aflags & PGA_DEQUEUE) != 0 ||
+       KASSERT((m->a.flags & PGA_DEQUEUE) != 0 ||
            mtx_owned(vm_page_lockptr(m)),
            ("%s: queued unlocked page %p", __func__, m));
 
-       if ((m->aflags & PGA_ENQUEUED) != 0)
+       if ((m->a.flags & PGA_ENQUEUED) != 0)
                vm_pagequeue_remove(pq, m);
        vm_page_dequeue_complete(m);
        vm_pagequeue_unlock(pq);
@@ -3525,13 +3526,14 @@ vm_page_enqueue(vm_page_t m, uint8_t queue)
 {
 
        vm_page_assert_locked(m);
-       KASSERT(m->queue == PQ_NONE && (m->aflags & PGA_QUEUE_STATE_MASK) == 0,
+       KASSERT(m->a.queue == PQ_NONE &&
+           (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
            ("%s: page %p is already enqueued", __func__, m));
        KASSERT(m->ref_count > 0,
            ("%s: page %p does not carry any references", __func__, m));
 
-       m->queue = queue;
-       if ((m->aflags & PGA_REQUEUE) == 0)
+       m->a.queue = queue;
+       if ((m->a.flags & PGA_REQUEUE) == 0)
                vm_page_aflag_set(m, PGA_REQUEUE);
        vm_page_pqbatch_submit(m, queue);
 }
@@ -3553,9 +3555,9 @@ vm_page_requeue(vm_page_t m)
        KASSERT(m->ref_count > 0,
            ("%s: page %p does not carry any references", __func__, m));
 
-       if ((m->aflags & PGA_REQUEUE) == 0)
+       if ((m->a.flags & PGA_REQUEUE) == 0)
                vm_page_aflag_set(m, PGA_REQUEUE);
-       vm_page_pqbatch_submit(m, atomic_load_8(&m->queue));
+       vm_page_pqbatch_submit(m, atomic_load_8(&m->a.queue));
 }
 
 /*
@@ -3584,7 +3586,7 @@ vm_page_swapqueue(vm_page_t m, uint8_t oldq, uint8_t n
         * queue lock is acquired, so we must verify that we hold the correct
         * lock before proceeding.
         */
-       if (__predict_false(m->queue != oldq)) {
+       if (__predict_false(m->a.queue != oldq)) {
                vm_pagequeue_unlock(pq);
                return;
        }
@@ -3595,7 +3597,7 @@ vm_page_swapqueue(vm_page_t m, uint8_t oldq, uint8_t n
         * Therefore we must remove the page from the queue now in anticipation
         * of a successful commit, and be prepared to roll back.
         */
-       if (__predict_true((m->aflags & PGA_ENQUEUED) != 0)) {
+       if (__predict_true((m->a.flags & PGA_ENQUEUED) != 0)) {
                next = TAILQ_NEXT(m, plinks.q);
                TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
                vm_page_aflag_clear(m, PGA_ENQUEUED);
@@ -3658,10 +3660,10 @@ vm_page_free_prep(vm_page_t m)
        if ((m->oflags & VPO_UNMANAGED) == 0) {
                KASSERT(!pmap_page_is_mapped(m),
                    ("vm_page_free_prep: freeing mapped page %p", m));
-               KASSERT((m->aflags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
+               KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
                    ("vm_page_free_prep: mapping flags set in page %p", m));
        } else {
-               KASSERT(m->queue == PQ_NONE,
+               KASSERT(m->a.queue == PQ_NONE,
                    ("vm_page_free_prep: unmanaged page %p is queued", m));
        }
        VM_CNT_INC(v_tfree);
@@ -3694,7 +3696,7 @@ vm_page_free_prep(vm_page_t m)
        if ((m->flags & PG_FICTITIOUS) != 0) {
                KASSERT(m->ref_count == 1,
                    ("fictitious page %p is referenced", m));
-               KASSERT(m->queue == PQ_NONE,
+               KASSERT(m->a.queue == PQ_NONE,
                    ("fictitious page %p is queued", m));
                return (false);
        }
@@ -3955,8 +3957,8 @@ vm_page_mvqueue(vm_page_t m, const uint8_t nqueue)
                vm_page_requeue(m);
        }
 
-       if (nqueue == PQ_ACTIVE && m->act_count < ACT_INIT)
-               m->act_count = ACT_INIT;
+       if (nqueue == PQ_ACTIVE && m->a.act_count < ACT_INIT)
+               m->a.act_count = ACT_INIT;
 }
 
 /*
@@ -3998,9 +4000,9 @@ _vm_page_deactivate_noreuse(vm_page_t m)
 
        if (!vm_page_inactive(m)) {
                vm_page_dequeue(m);
-               m->queue = PQ_INACTIVE;
+               m->a.queue = PQ_INACTIVE;
        }
-       if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
+       if ((m->a.flags & PGA_REQUEUE_HEAD) == 0)
                vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
        vm_page_pqbatch_submit(m, PQ_INACTIVE);
 }
@@ -5102,7 +5104,7 @@ DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
     "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref %u\n"
     "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
            m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
-           m->queue, m->ref_count, m->aflags, m->oflags,
-           m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
+           m->a.queue, m->ref_count, m->a.flags, m->oflags,
+           m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty);
 }
 #endif /* DDB */

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h       Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/vm/vm_page.h       Tue Dec 10 18:14:50 2019        (r355586)
@@ -215,6 +215,15 @@ typedef uint32_t vm_page_bits_t;
 typedef uint64_t vm_page_bits_t;
 #endif
 
+typedef union vm_page_astate {
+       struct {
+               uint16_t flags;
+               uint8_t queue;
+               uint8_t act_count;
+       };
+       uint32_t _bits;
+} vm_page_astate_t;
+
 struct vm_page {
        union {
                TAILQ_ENTRY(vm_page) q; /* page queue or free list (Q) */
@@ -237,9 +246,7 @@ struct vm_page {
        struct md_page md;              /* machine dependent stuff */
        u_int ref_count;                /* page references (A) */
        volatile u_int busy_lock;       /* busy owners lock */
-       uint16_t aflags;                /* atomic flags (A) */
-       uint8_t queue;                  /* page queue index (Q) */
-       uint8_t act_count;              /* page usage count (P) */
+       union vm_page_astate a;         /* state accessed atomically */
        uint8_t order;                  /* index of the buddy queue (F) */
        uint8_t pool;                   /* vm_phys freepool index (F) */
        uint8_t flags;                  /* page PG_* flags (P) */
@@ -755,19 +762,19 @@ void vm_page_assert_pga_writeable(vm_page_t m, uint16_
  * destinations.  In order that we can easily use a 32-bit operation, we
  * require that the aflags field be 32-bit aligned.
  */
-_Static_assert(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0,
+_Static_assert(offsetof(struct vm_page, a.flags) % sizeof(uint32_t) == 0,
     "aflags field is not 32-bit aligned");
 
 /*
  * We want to be able to update the aflags and queue fields atomically in
  * the same operation.
  */
-_Static_assert(offsetof(struct vm_page, aflags) / sizeof(uint32_t) ==
-    offsetof(struct vm_page, queue) / sizeof(uint32_t),
+_Static_assert(offsetof(struct vm_page, a.flags) / sizeof(uint32_t) ==
+    offsetof(struct vm_page, a.queue) / sizeof(uint32_t),
     "aflags and queue fields do not belong to the same 32-bit word");
-_Static_assert(offsetof(struct vm_page, queue) % sizeof(uint32_t) == 2,
+_Static_assert(offsetof(struct vm_page, a.queue) % sizeof(uint32_t) == 2,
     "queue field is at an unexpected offset");
-_Static_assert(sizeof(((struct vm_page *)NULL)->queue) == 1,
+_Static_assert(sizeof(((struct vm_page *)NULL)->a.queue) == 1,
     "queue field has an unexpected size");
 
 #if BYTE_ORDER == LITTLE_ENDIAN
@@ -798,7 +805,7 @@ vm_page_aflag_clear(vm_page_t m, uint16_t bits)
         * atomic update.  Parallel non-atomic updates to the other fields
         * within this word are handled properly by the atomic update.
         */
-       addr = (void *)&m->aflags;
+       addr = (void *)&m->a.flags;
        val = bits << VM_PAGE_AFLAG_SHIFT;
        atomic_clear_32(addr, val);
 }
@@ -818,7 +825,7 @@ vm_page_aflag_set(vm_page_t m, uint16_t bits)
         * atomic update.  Parallel non-atomic updates to the other fields
         * within this word are handled properly by the atomic update.
         */
-       addr = (void *)&m->aflags;
+       addr = (void *)&m->a.flags;
        val = bits << VM_PAGE_AFLAG_SHIFT;
        atomic_set_32(addr, val);
 }
@@ -843,7 +850,7 @@ vm_page_pqstate_cmpset(vm_page_t m, uint32_t oldq, uin
        qsmask = ((PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD) <<
            VM_PAGE_AFLAG_SHIFT) | VM_PAGE_QUEUE_MASK;
 
-       addr = (void *)&m->aflags;
+       addr = (void *)&m->a.flags;
        oval = atomic_load_32(addr);
        do {
                if ((oval & fflags) != 0)
@@ -918,10 +925,10 @@ vm_page_queue(vm_page_t m)
 
        vm_page_assert_locked(m);
 
-       if ((m->aflags & PGA_DEQUEUE) != 0)
+       if ((m->a.flags & PGA_DEQUEUE) != 0)
                return (PQ_NONE);
        atomic_thread_fence_acq();
-       return (m->queue);
+       return (m->a.queue);
 }
 
 static inline bool
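
  For context (not part of the diff): a hedged sketch of how a caller drives
  the cmpset helper shown above.  It is condensed from the
  vm_page_dequeue_deferred_free() hunk in vm_page.c earlier in this mail; the
  function name below is made up for the sketch, and the action taken after a
  successful cmpset is only summarized, since that part is not visible in the
  hunk.

  	/*
  	 * Condensed caller pattern (sketch): request a deferred dequeue only
  	 * if the page still sits in the queue that was sampled, retrying on
  	 * concurrent changes to the atomic state word.
  	 */
  	static void
  	page_request_dequeue(vm_page_t m)
  	{
  		uint8_t queue;
  
  		for (;;) {
  			if ((m->a.flags & PGA_DEQUEUE) != 0)
  				return;
  			atomic_thread_fence_acq();
  			if ((queue = atomic_load_8(&m->a.queue)) == PQ_NONE)
  				return;
  			if (vm_page_pqstate_cmpset(m, queue, queue,
  			    PGA_DEQUEUE, PGA_DEQUEUE)) {
  				/* Queue index was still "queue" and PGA_DEQUEUE
  				 * was clear; the flag is now set in a single
  				 * 32-bit operation. */
  				return;
  			}
  		}
  	}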

Modified: head/sys/vm/vm_pageout.c
==============================================================================
--- head/sys/vm/vm_pageout.c    Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/vm/vm_pageout.c    Tue Dec 10 18:14:50 2019        (r355586)
@@ -218,7 +218,7 @@ vm_pageout_init_scan(struct scan_state *ss, struct vm_
 {
 
        vm_pagequeue_assert_locked(pq);
-       KASSERT((marker->aflags & PGA_ENQUEUED) == 0,
+       KASSERT((marker->a.flags & PGA_ENQUEUED) == 0,
            ("marker %p already enqueued", marker));
 
        if (after == NULL)
@@ -242,7 +242,7 @@ vm_pageout_end_scan(struct scan_state *ss)
 
        pq = ss->pq;
        vm_pagequeue_assert_locked(pq);
-       KASSERT((ss->marker->aflags & PGA_ENQUEUED) != 0,
+       KASSERT((ss->marker->a.flags & PGA_ENQUEUED) != 0,
            ("marker %p not enqueued", ss->marker));
 
        TAILQ_REMOVE(&pq->pq_pl, ss->marker, plinks.q);
@@ -271,7 +271,7 @@ vm_pageout_collect_batch(struct scan_state *ss, const 
        marker = ss->marker;
        pq = ss->pq;
 
-       KASSERT((marker->aflags & PGA_ENQUEUED) != 0,
+       KASSERT((marker->a.flags & PGA_ENQUEUED) != 0,
            ("marker %p not enqueued", ss->marker));
 
        vm_pagequeue_lock(pq);
@@ -280,7 +280,7 @@ vm_pageout_collect_batch(struct scan_state *ss, const 
            m = n, ss->scanned++) {
                n = TAILQ_NEXT(m, plinks.q);
                if ((m->flags & PG_MARKER) == 0) {
-                       KASSERT((m->aflags & PGA_ENQUEUED) != 0,
+                       KASSERT((m->a.flags & PGA_ENQUEUED) != 0,
                            ("page %p not enqueued", m));
                        KASSERT((m->flags & PG_FICTITIOUS) == 0,
                            ("Fictitious page %p cannot be in page queue", m));
@@ -472,7 +472,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, 
                KASSERT(vm_page_all_valid(mc[i]),
                    ("vm_pageout_flush: partially invalid page %p index %d/%d",
                        mc[i], i, count));
-               KASSERT((mc[i]->aflags & PGA_WRITEABLE) == 0,
+               KASSERT((mc[i]->a.flags & PGA_WRITEABLE) == 0,
                    ("vm_pageout_flush: writeable page %p", mc[i]));
                vm_page_busy_downgrade(mc[i]);
        }
@@ -766,7 +766,7 @@ recheck:
                 * A requeue was requested, so this page gets a second
                 * chance.
                 */
-               if ((m->aflags & PGA_REQUEUE) != 0) {
+               if ((m->a.flags & PGA_REQUEUE) != 0) {
                        vm_page_pqbatch_submit(m, queue);
                        continue;
                }
@@ -848,7 +848,7 @@ recheck:
                            ("page %p is mapped", m));
                        act_delta = 0;
                }
-               if ((m->aflags & PGA_REFERENCED) != 0) {
+               if ((m->a.flags & PGA_REFERENCED) != 0) {
                        vm_page_aflag_clear(m, PGA_REFERENCED);
                        act_delta++;
                }
@@ -865,7 +865,7 @@ recheck:
                                 * be returned prematurely to the inactive
                                 * queue.
                                 */
-                               m->act_count += act_delta + ACT_ADVANCE;
+                               m->a.act_count += act_delta + ACT_ADVANCE;
 
                                /*
                                 * If this was a background laundering, count
@@ -1302,7 +1302,7 @@ act_scan:
                        act_delta = pmap_ts_referenced(m);
                else
                        act_delta = 0;
-               if ((m->aflags & PGA_REFERENCED) != 0) {
+               if ((m->a.flags & PGA_REFERENCED) != 0) {
                        vm_page_aflag_clear(m, PGA_REFERENCED);
                        act_delta++;
                }
@@ -1311,13 +1311,13 @@ act_scan:
                 * Advance or decay the act_count based on recent usage.
                 */
                if (act_delta != 0) {
-                       m->act_count += ACT_ADVANCE + act_delta;
-                       if (m->act_count > ACT_MAX)
-                               m->act_count = ACT_MAX;
+                       m->a.act_count += ACT_ADVANCE + act_delta;
+                       if (m->a.act_count > ACT_MAX)
+                               m->a.act_count = ACT_MAX;
                } else
-                       m->act_count -= min(m->act_count, ACT_DECLINE);
+                       m->a.act_count -= min(m->a.act_count, ACT_DECLINE);
 
-               if (m->act_count == 0) {
+               if (m->a.act_count == 0) {
                        /*
                         * When not short for inactive pages, let dirty pages go
                         * through the inactive queue before moving to the
@@ -1372,14 +1372,14 @@ vm_pageout_reinsert_inactive_page(struct scan_state *s
 {
        struct vm_domain *vmd;
 
-       if (m->queue != PQ_INACTIVE || (m->aflags & PGA_ENQUEUED) != 0)
+       if (m->a.queue != PQ_INACTIVE || (m->a.flags & PGA_ENQUEUED) != 0)
                return (0);
        vm_page_aflag_set(m, PGA_ENQUEUED);
-       if ((m->aflags & PGA_REQUEUE_HEAD) != 0) {
+       if ((m->a.flags & PGA_REQUEUE_HEAD) != 0) {
                vmd = vm_pagequeue_domain(m);
                TAILQ_INSERT_BEFORE(&vmd->vmd_inacthead, m, plinks.q);
                vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
-       } else if ((m->aflags & PGA_REQUEUE) != 0) {
+       } else if ((m->a.flags & PGA_REQUEUE) != 0) {
                TAILQ_INSERT_TAIL(&ss->pq->pq_pl, m, plinks.q);
                vm_page_aflag_clear(m, PGA_REQUEUE | PGA_REQUEUE_HEAD);
        } else
@@ -1458,7 +1458,7 @@ vm_pageout_scan_inactive(struct vm_domain *vmd, int sh
        /*
         * Start scanning the inactive queue for pages that we can free.  The
         * scan will stop when we reach the target or we have scanned the
-        * entire queue.  (Note that m->act_count is not used to make
+        * entire queue.  (Note that m->a.act_count is not used to make
         * decisions for the inactive queue, only for the active queue.)
         */
        marker = &vmd->vmd_markers[PQ_INACTIVE];
@@ -1488,7 +1488,7 @@ recheck:
                 * dropped, or a requeue was requested.  This page gets a second
                 * chance.
                 */
-               if ((m->aflags & (PGA_ENQUEUED | PGA_REQUEUE |
+               if ((m->a.flags & (PGA_ENQUEUED | PGA_REQUEUE |
                    PGA_REQUEUE_HEAD)) != 0)
                        goto reinsert;
 
@@ -1579,7 +1579,7 @@ recheck:
                            ("page %p is mapped", m));
                        act_delta = 0;
                }
-               if ((m->aflags & PGA_REFERENCED) != 0) {
+               if ((m->a.flags & PGA_REFERENCED) != 0) {
                        vm_page_aflag_clear(m, PGA_REFERENCED);
                        act_delta++;
                }
@@ -1596,7 +1596,7 @@ recheck:
                                 * be returned prematurely to the inactive
                                 * queue.
                                 */
-                               m->act_count += act_delta + ACT_ADVANCE;
+                               m->a.act_count += act_delta + ACT_ADVANCE;
                                continue;
                        } else if ((object->flags & OBJ_DEAD) == 0) {
                                vm_page_xunbusy(m);
@@ -1636,9 +1636,9 @@ free_page:
                         * requests, we can safely disassociate the page
                         * from the inactive queue.
                         */
-                       KASSERT((m->aflags & PGA_QUEUE_STATE_MASK) == 0,
+                       KASSERT((m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
                            ("page %p has queue state", m));
-                       m->queue = PQ_NONE;
+                       m->a.queue = PQ_NONE;
                        vm_page_free(m);
                        page_shortage--;
                        continue;

Modified: head/sys/vm/vm_swapout.c
==============================================================================
--- head/sys/vm/vm_swapout.c    Tue Dec 10 14:35:38 2019        (r355585)
+++ head/sys/vm/vm_swapout.c    Tue Dec 10 18:14:50 2019        (r355586)
@@ -224,31 +224,31 @@ vm_swapout_object_deactivate_pages(pmap_t pmap, vm_obj
                        }
                        act_delta = pmap_ts_referenced(p);
                        vm_page_lock(p);
-                       if ((p->aflags & PGA_REFERENCED) != 0) {
+                       if ((p->a.flags & PGA_REFERENCED) != 0) {
                                if (act_delta == 0)
                                        act_delta = 1;
                                vm_page_aflag_clear(p, PGA_REFERENCED);
                        }
                        if (!vm_page_active(p) && act_delta != 0) {
                                vm_page_activate(p);
-                               p->act_count += act_delta;
+                               p->a.act_count += act_delta;
                        } else if (vm_page_active(p)) {
                                /*
                                 * The page daemon does not requeue pages
                                 * after modifying their activation count.
                                 */
                                if (act_delta == 0) {
-                                       p->act_count -= min(p->act_count,
+                                       p->a.act_count -= min(p->a.act_count,
                                            ACT_DECLINE);
-                                       if (!remove_mode && p->act_count == 0) {
+                                       if (!remove_mode && p->a.act_count == 0) {
                                                (void)vm_page_try_remove_all(p);
                                                vm_page_deactivate(p);
                                        }
                                } else {
                                        vm_page_activate(p);
-                                       if (p->act_count < ACT_MAX -
+                                       if (p->a.act_count < ACT_MAX -
                                            ACT_ADVANCE)
-                                               p->act_count += ACT_ADVANCE;
+                                               p->a.act_count += ACT_ADVANCE;
                                }
                        } else if (vm_page_inactive(p))
                                (void)vm_page_try_remove_all(p);