Author: kib
Date: Wed Sep 28 15:01:20 2011
New Revision: 225841
URL: http://svn.freebsd.org/changeset/base/225841
Log:
  Remove locking of the vm page queues from several pmaps, which only
  protected the dirty mask updates.  The dirty mask updates are handled
  by atomics after r225840.

  Submitted by:	alc
  Tested by:	flo (sparc64)
  MFC after:	2 weeks

Modified:
  head/sys/ia64/ia64/pmap.c
  head/sys/powerpc/booke/pmap.c
  head/sys/sparc64/sparc64/pmap.c

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c	Wed Sep 28 14:57:50 2011	(r225840)
+++ head/sys/ia64/ia64/pmap.c	Wed Sep 28 15:01:20 2011	(r225841)
@@ -1486,7 +1486,6 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
 	if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
 		panic("pmap_protect: unaligned addresses");
 
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	oldpmap = pmap_switch(pmap);
 	for ( ; sva < eva; sva += PAGE_SIZE) {
@@ -1514,7 +1513,6 @@ pmap_protect(pmap_t pmap, vm_offset_t sv
 			pmap_pte_prot(pmap, pte, prot);
 			pmap_invalidate_page(sva);
 		}
-	vm_page_unlock_queues();
 	pmap_switch(oldpmap);
 	PMAP_UNLOCK(pmap);
 }

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Wed Sep 28 14:57:50 2011	(r225840)
+++ head/sys/powerpc/booke/pmap.c	Wed Sep 28 15:01:20 2011	(r225841)
@@ -1918,7 +1918,6 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap
 	if (prot & VM_PROT_WRITE)
 		return;
 
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
 	for (va = sva; va < eva; va += PAGE_SIZE) {
 		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
@@ -1941,7 +1940,6 @@ mmu_booke_protect(mmu_t mmu, pmap_t pmap
 		}
 	}
 	PMAP_UNLOCK(pmap);
-	vm_page_unlock_queues();
 }
 
 /*

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c	Wed Sep 28 14:57:50 2011	(r225840)
+++ head/sys/sparc64/sparc64/pmap.c	Wed Sep 28 15:01:20 2011	(r225841)
@@ -1423,6 +1423,7 @@ pmap_protect_tte(struct pmap *pm, struct
 	u_long data;
 	vm_page_t m;
 
+	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
 	if ((data & (TD_PV | TD_W)) == (TD_PV | TD_W)) {
 		m = PHYS_TO_VM_PAGE(TD_PA(data));
@@ -1451,7 +1452,6 @@ pmap_protect(pmap_t pm, vm_offset_t sva,
 	if (prot & VM_PROT_WRITE)
 		return;
 
-	vm_page_lock_queues();
 	PMAP_LOCK(pm);
 	if (eva - sva > PMAP_TSB_THRESH) {
 		tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
@@ -1463,7 +1463,6 @@ pmap_protect(pmap_t pm, vm_offset_t sva,
 		tlb_range_demap(pm, sva, eva - 1);
 	}
 	PMAP_UNLOCK(pm);
-	vm_page_unlock_queues();
 }
 
 /*
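
The rationale in the log is that the page queues lock in these pmap_protect()
paths was serializing nothing but the vm_page dirty-mask updates, and r225840
made those updates atomic.  A minimal C11 sketch of that locking-to-atomics
pattern, using hypothetical names (page_queues_mtx, pg_dirty,
page_dirty_locked, page_dirty_atomic) rather than the real vm_page or pmap
code, could look like this:

#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical stand-ins for the vm_page dirty-mask machinery. */
static pthread_mutex_t page_queues_mtx = PTHREAD_MUTEX_INITIALIZER;

struct page_locked {
	unsigned int pg_dirty;		/* protected by page_queues_mtx */
};

struct page_atomic {
	atomic_uint pg_dirty;		/* updated with atomic RMW operations */
};

/* Before: every dirty-mask update takes the global page-queues lock. */
static void
page_dirty_locked(struct page_locked *m, unsigned int bits)
{

	pthread_mutex_lock(&page_queues_mtx);
	m->pg_dirty |= bits;
	pthread_mutex_unlock(&page_queues_mtx);
}

/*
 * After: an atomic OR is sufficient on its own, so callers in the
 * pmap_protect() style need no page-queues locking around it.
 */
static void
page_dirty_atomic(struct page_atomic *m, unsigned int bits)
{

	atomic_fetch_or(&m->pg_dirty, bits);
}

Once the update itself is atomic, dropping the
vm_page_lock_queues()/vm_page_unlock_queues() pair around these paths removes
a global serialization point without changing what the dirty mask ends up
containing.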