The branch main has been updated by adrian:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=19a18bb750e8ed88207664b4959e433b7e68e926

commit 19a18bb750e8ed88207664b4959e433b7e68e926
Author:     Adrian Chadd <[email protected]>
AuthorDate: 2026-02-22 02:22:46 +0000
Commit:     Adrian Chadd <[email protected]>
CommitDate: 2026-02-22 02:22:46 +0000

    powerpc64: initial conversion of oea64 to rwlocks
    
    Convert the oea64 mmu code to rwlocks.
    
    Reviewed by:    jhibbits
    Differential Revision:  https://reviews.freebsd.org/D54936
---
 sys/powerpc/aim/mmu_oea64.c | 50 +++++++++++++++++++++++++++++----------------
 1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 7dfda5776a37..22442c59fd30 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -123,7 +123,7 @@ uintptr_t moea64_get_unique_vsid(void);
  */
 
 #define PV_LOCK_COUNT  MAXCPU
-static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
+static struct rwlock __exclusive_cache_line pv_lock[PV_LOCK_COUNT];
 
 #define        PV_LOCK_SHIFT   HPT_SP_SHIFT
 #define        pa_index(pa)    ((pa) >> PV_LOCK_SHIFT)
@@ -138,11 +138,17 @@ static struct mtx_padalign pv_lock[PV_LOCK_COUNT];
 #else
 #define PV_LOCK_IDX(pa)        (pa_index(pa) % PV_LOCK_COUNT)
 #endif
-#define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[PV_LOCK_IDX(pa)]))
-#define PV_LOCK(pa)            mtx_lock(PV_LOCKPTR(pa))
-#define PV_UNLOCK(pa)          mtx_unlock(PV_LOCKPTR(pa))
-#define PV_LOCKASSERT(pa)      mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
-#define PV_PAGE_LOCK(m)                PV_LOCK(VM_PAGE_TO_PHYS(m))
+#define PV_LOCKPTR(pa) ((struct rwlock *)(&pv_lock[PV_LOCK_IDX(pa)]))
+
+#define PV_WR_LOCK(pa)         rw_wlock(PV_LOCKPTR(pa))
+#define PV_RD_LOCK(pa)         rw_rlock(PV_LOCKPTR(pa))
+#define PV_UNLOCK(pa)          rw_unlock(PV_LOCKPTR(pa))
+#define PV_LOCKASSERT(pa)      rw_assert(PV_LOCKPTR(pa), RA_LOCKED)
+#define PV_LOCK_RD_ASSERT(pa)  rw_assert(PV_LOCKPTR(pa), RA_RLOCKED)
+#define PV_LOCK_WR_ASSERT(pa)  rw_assert(PV_LOCKPTR(pa), RA_WLOCKED)
+
+#define PV_PAGE_WR_LOCK(m)     PV_WR_LOCK(VM_PAGE_TO_PHYS(m))
+#define PV_PAGE_RD_LOCK(m)     PV_RD_LOCK(VM_PAGE_TO_PHYS(m))
 #define PV_PAGE_UNLOCK(m)      PV_UNLOCK(VM_PAGE_TO_PHYS(m))
 #define PV_PAGE_LOCKASSERT(m)  PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
 
@@ -500,7 +506,7 @@ static struct pvo_head *
 vm_page_to_pvoh(vm_page_t m)
 {
 
-       mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
+       rw_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), RA_LOCKED);
        return (&m->md.mdpg_pvoh);
 }
 
@@ -1028,7 +1034,7 @@ moea64_mid_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
         */
        mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
        for (i = 0; i < PV_LOCK_COUNT; i++)
-               mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);
+               rw_init(&pv_lock[i], "pv lock");
 
        /*
         * Initialise the bootstrap pvo pool.
@@ -1644,7 +1650,7 @@ moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
                pvo->pvo_vaddr |= PVO_MANAGED;
        }
 
-       PV_LOCK(pa);
+       PV_WR_LOCK(pa);
        PMAP_LOCK(pmap);
        if (pvo->pvo_pmap == NULL)
                init_pvo_entry(pvo, pmap, va);
@@ -2023,7 +2029,7 @@ moea64_remove_write(vm_page_t m)
                return;
 
        powerpc_sync();
-       PV_PAGE_LOCK(m);
+       PV_PAGE_WR_LOCK(m);
        refchg = 0;
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                pmap = pvo->pvo_pmap;
@@ -2096,7 +2102,7 @@ moea64_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 
        lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
 
-       PV_PAGE_LOCK(m);
+       PV_PAGE_WR_LOCK(m);
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                pmap = pvo->pvo_pmap;
                PMAP_LOCK(pmap);
@@ -2355,7 +2361,7 @@ moea64_page_exists_quick(pmap_t pmap, vm_page_t m)
            ("moea64_page_exists_quick: page %p is not managed", m));
        loops = 0;
        rv = false;
-       PV_PAGE_LOCK(m);
+       PV_PAGE_RD_LOCK(m);
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
                        rv = true;
@@ -2390,7 +2396,7 @@ moea64_page_wired_mappings(vm_page_t m)
        count = 0;
        if ((m->oflags & VPO_UNMANAGED) != 0)
                return (count);
-       PV_PAGE_LOCK(m);
+       PV_PAGE_RD_LOCK(m);
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
                if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
                        count++;
@@ -2760,7 +2766,7 @@ moea64_remove_all(vm_page_t m)
 
        LIST_INIT(&freequeue);
 
-       PV_PAGE_LOCK(m);
+       PV_PAGE_WR_LOCK(m);
        LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
                pmap = pvo->pvo_pmap;
                PMAP_LOCK(pmap);
@@ -2983,7 +2989,7 @@ moea64_pvo_remove_from_page(struct pvo_entry *pvo)
        if (pvo->pvo_vaddr & PVO_MANAGED)
                pg = PHYS_TO_VM_PAGE(PVO_PADDR(pvo));
 
-       PV_LOCK(PVO_PADDR(pvo));
+       PV_WR_LOCK(PVO_PADDR(pvo));
        moea64_pvo_remove_from_page_locked(pvo, pg);
        PV_UNLOCK(PVO_PADDR(pvo));
 }
@@ -3024,7 +3030,7 @@ moea64_query_bit(vm_page_t m, uint64_t ptebit)
         */
        rv = false;
        powerpc_sync();
-       PV_PAGE_LOCK(m);
+       PV_PAGE_RD_LOCK(m);
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                if (PVO_IS_SP(pvo)) {
                        ret = moea64_sp_query(pvo, ptebit);
@@ -3084,7 +3090,7 @@ moea64_clear_bit(vm_page_t m, u_int64_t ptebit)
         * For each pvo entry, clear the pte's ptebit.
         */
        count = 0;
-       PV_PAGE_LOCK(m);
+       PV_PAGE_WR_LOCK(m);
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                if (PVO_IS_SP(pvo)) {
                        if ((ret = moea64_sp_clear(pvo, m, ptebit)) != -1) {
@@ -3690,7 +3696,7 @@ moea64_sp_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
                }
        }
 
-       PV_LOCK(spa);
+       PV_WR_LOCK(spa);
        PMAP_LOCK(pmap);
 
        /* Note: moea64_remove_locked() also clears cached REF/CHG bits. */
@@ -4094,6 +4100,8 @@ moea64_sp_query_locked(struct pvo_entry *pvo, uint64_t ptebit)
        pmap_t pmap;
        struct pvo_entry *sp;
 
+       PV_LOCKASSERT(PVO_PADDR(pvo));
+
        pmap = pvo->pvo_pmap;
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
@@ -4126,12 +4134,18 @@ moea64_sp_query_locked(struct pvo_entry *pvo, uint64_t ptebit)
        return (refchg);
 }
 
+/*
+ * Note: this assumes the vm_page represented by the given pvo
+ * is at least read locked.
+ */
 static int64_t
 moea64_sp_query(struct pvo_entry *pvo, uint64_t ptebit)
 {
        int64_t refchg;
        pmap_t pmap;
 
+       PV_LOCKASSERT(PVO_PADDR(pvo));
+
        pmap = pvo->pvo_pmap;
        PMAP_LOCK(pmap);
 

Reply via email to