Author: alc
Date: Wed May 26 18:00:44 2010
New Revision: 208574
URL: http://svn.freebsd.org/changeset/base/208574

Log:
  Push down page queues lock acquisition in pmap_enter_object() and
  pmap_is_referenced().  Eliminate the corresponding page queues lock
  acquisitions from vm_map_pmap_enter() and mincore(), respectively.  In
  mincore(), this allows some additional cases to complete without ever
  acquiring the page queues lock.
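  
  As an illustrative sketch of this push-down (names taken from the
  diffs below, bodies elided), the caller's lock acquisition moves into
  the pmap function:
  
        /* Before: the caller acquires the page queues lock. */
        vm_page_lock_queues();
        pmap_enter_object(pmap, start, end, m_start, prot);
        vm_page_unlock_queues();
  
        /* After: pmap_enter_object() acquires it internally. */
        void
        pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
            vm_page_t m_start, vm_prot_t prot)
        {
                vm_page_lock_queues();
                PMAP_LOCK(pmap);
                /* ... enter read-only mappings for each page ... */
                vm_page_unlock_queues();
                PMAP_UNLOCK(pmap);
        }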
  
  Assert that the page is managed in pmap_is_referenced().
  
  On powerpc/aim, push down the page queues lock acquisition from
  moea*_is_modified() and moea*_is_referenced() into moea*_query_bit().
  Again, this will allow some additional cases to complete without ever
  acquiring the page queues lock.
  
  Reorder a few statements in vm_page_dontneed() so that a race can't lead
  to an old reference persisting.  This scenario is described in detail by a
  comment.
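  
  To make the race concrete, an illustrative interleaving under the old
  ordering (vm_page_dontneed() on the left, a concurrent pmap operation
  such as pmap_remove() on the right):
  
        vm_page_flag_clear(m, PG_REFERENCED);
                                        /* pmap op clears a reference
                                           bit and transfers it: */
                                        vm_page_flag_set(m, PG_REFERENCED);
        pmap_clear_reference(m);
  
  PG_REFERENCED ends up set on the basis of a reference that predates
  vm_page_dontneed(), so the page daemon would immediately reactivate
  the page.  Performing pmap_clear_reference() first closes the window.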
  
  Correct a spelling error in vm_page_dontneed().
  
  Assert that the object is locked in vm_page_clear_dirty(), and restrict the
  page queues lock assertion to just those cases in which the page is
  currently writeable.
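  
  Concretely, vm_page_clear_dirty() now begins (as committed below):
  
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if ((m->flags & PG_WRITEABLE) != 0)
                mtx_assert(&vm_page_queue_mtx, MA_OWNED);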
  
  Add object locking to vnode_pager_generic_putpages().  This was the one
  and only place where vm_page_clear_dirty() was being called without the
  object being locked.
  
  Eliminate an unnecessary vm_page_lock() around vnode_pager_setsize()'s call
  to vm_page_clear_dirty().
  
  Change vnode_pager_generic_putpages() to the modern style of function
  definition.  Also, change the name of one of the parameters to follow
  virtual memory system naming conventions.
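  
  For reference, the definition goes from K&R style to an ANSI C
  prototype, and the page array is renamed from "m" to "ma" (both forms
  appear in the vnode_pager.c diff below):
  
        /* Old (K&R style): */
        int
        vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
                struct vnode *vp;
                vm_page_t *m;
                int bytecount;
                int flags;
                int *rtvals;
        {
  
        /* New (ANSI prototype style): */
        int
        vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma,
            int bytecount, int flags, int *rtvals)
        {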
  
  Reviewed by:  kib

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/arm/arm/pmap.c
  head/sys/i386/i386/pmap.c
  head/sys/i386/xen/pmap.c
  head/sys/ia64/ia64/pmap.c
  head/sys/mips/mips/pmap.c
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/sparc64/sparc64/pmap.c
  head/sys/sun4v/sun4v/pmap.c
  head/sys/vm/vm_map.c
  head/sys/vm/vm_mmap.c
  head/sys/vm/vm_page.c
  head/sys/vm/vnode_pager.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/amd64/amd64/pmap.c Wed May 26 18:00:44 2010        (r208574)
@@ -3389,6 +3389,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
        psize = atop(end - start);
        mpte = NULL;
        m = m_start;
+       vm_page_lock_queues();
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                va = start + ptoa(diff);
@@ -3402,6 +3403,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
                            mpte);
                m = TAILQ_NEXT(m, listq);
        }
+       vm_page_unlock_queues();
        PMAP_UNLOCK(pmap);
 }
 
@@ -4209,12 +4211,15 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
 boolean_t
 pmap_is_referenced(vm_page_t m)
 {
+       boolean_t rv;
 
-       if (m->flags & PG_FICTITIOUS)
-               return (FALSE);
-       if (pmap_is_referenced_pvh(&m->md))
-               return (TRUE);
-       return (pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("pmap_is_referenced: page %p is not managed", m));
+       vm_page_lock_queues();
+       rv = pmap_is_referenced_pvh(&m->md) ||
+           pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+       vm_page_unlock_queues();
+       return (rv);
 }
 
 /*

Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c     Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/arm/arm/pmap.c     Wed May 26 18:00:44 2010        (r208574)
@@ -3589,12 +3589,14 @@ pmap_enter_object(pmap_t pmap, vm_offset
 
        psize = atop(end - start);
        m = m_start;
+       vm_page_lock_queues();
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
                    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
                m = TAILQ_NEXT(m, listq);
        }
+       vm_page_unlock_queues();
        PMAP_UNLOCK(pmap);
 }
 
@@ -4521,8 +4523,9 @@ boolean_t
 pmap_is_referenced(vm_page_t m)
 {
 
-       return ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
-           (m->md.pvh_attrs & PVF_REF) != 0);
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("pmap_is_referenced: page %p is not managed", m));
+       return ((m->md.pvh_attrs & PVF_REF) != 0);
 }
 
 /*

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c   Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/i386/i386/pmap.c   Wed May 26 18:00:44 2010        (r208574)
@@ -3519,6 +3519,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
        psize = atop(end - start);
        mpte = NULL;
        m = m_start;
+       vm_page_lock_queues();
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                va = start + ptoa(diff);
@@ -3532,6 +3533,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
                            mpte);
                m = TAILQ_NEXT(m, listq);
        }
+       vm_page_unlock_queues();
        PMAP_UNLOCK(pmap);
 }
 
@@ -4377,12 +4379,15 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
 boolean_t
 pmap_is_referenced(vm_page_t m)
 {
+       boolean_t rv;
 
-       if (m->flags & PG_FICTITIOUS)
-               return (FALSE);
-       if (pmap_is_referenced_pvh(&m->md))
-               return (TRUE);
-       return (pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("pmap_is_referenced: page %p is not managed", m));
+       vm_page_lock_queues();
+       rv = pmap_is_referenced_pvh(&m->md) ||
+           pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+       vm_page_unlock_queues();
+       return (rv);
 }
 
 /*

Modified: head/sys/i386/xen/pmap.c
==============================================================================
--- head/sys/i386/xen/pmap.c    Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/i386/xen/pmap.c    Wed May 26 18:00:44 2010        (r208574)
@@ -2901,6 +2901,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
            
        mpte = NULL;
        m = m_start;
+       vm_page_lock_queues();
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                mpte = pmap_enter_quick_locked(&mclp, &count, pmap, start + ptoa(diff), m,
@@ -2917,7 +2918,7 @@ pmap_enter_object(pmap_t pmap, vm_offset
                error = HYPERVISOR_multicall(mcl, count);
                KASSERT(error == 0, ("bad multicall %d", error));
        }
-       
+       vm_page_unlock_queues();
        PMAP_UNLOCK(pmap);
 }
 
@@ -3734,11 +3735,11 @@ pmap_is_referenced(vm_page_t m)
        pmap_t pmap;
        boolean_t rv;
 
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("pmap_is_referenced: page %p is not managed", m));
        rv = FALSE;
-       if (m->flags & PG_FICTITIOUS)
-               return (rv);
+       vm_page_lock_queues();
        sched_pin();
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
@@ -3751,6 +3752,7 @@ pmap_is_referenced(vm_page_t m)
        if (*PMAP1)
                PT_SET_MA(PADDR1, 0);
        sched_unpin();
+       vm_page_unlock_queues();
        return (rv);
 }
 

Modified: head/sys/ia64/ia64/pmap.c
==============================================================================
--- head/sys/ia64/ia64/pmap.c   Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/ia64/ia64/pmap.c   Wed May 26 18:00:44 2010        (r208574)
@@ -1625,12 +1625,14 @@ pmap_enter_object(pmap_t pmap, vm_offset
        VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
        psize = atop(end - start);
        m = m_start;
+       vm_page_lock_queues();
        PMAP_LOCK(pmap);
        oldpmap = pmap_switch(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
                m = TAILQ_NEXT(m, listq);
        }
+       vm_page_unlock_queues();
        pmap_switch(oldpmap);
        PMAP_UNLOCK(pmap);
 }
@@ -2041,9 +2043,10 @@ pmap_is_referenced(vm_page_t m)
        pv_entry_t pv;
        boolean_t rv;
 
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("pmap_is_referenced: page %p is not managed", m));
        rv = FALSE;
-       if (m->flags & PG_FICTITIOUS)
-               return (rv);
+       vm_page_lock_queues();
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                PMAP_LOCK(pv->pv_pmap);
                oldpmap = pmap_switch(pv->pv_pmap);
@@ -2055,6 +2058,7 @@ pmap_is_referenced(vm_page_t m)
                if (rv)
                        break;
        }
+       vm_page_unlock_queues();
        return (rv);
 }
 

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c   Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/mips/mips/pmap.c   Wed May 26 18:00:44 2010        (r208574)
@@ -2130,12 +2130,14 @@ pmap_enter_object(pmap_t pmap, vm_offset
        psize = atop(end - start);
        mpte = NULL;
        m = m_start;
+       vm_page_lock_queues();
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
                    prot, mpte);
                m = TAILQ_NEXT(m, listq);
        }
+       vm_page_unlock_queues();
        PMAP_UNLOCK(pmap);
 }
 
@@ -2671,8 +2673,9 @@ boolean_t
 pmap_is_referenced(vm_page_t m)
 {
 
-       return ((m->flags & PG_FICTITIOUS) == 0 &&
-           (m->md.pv_flags & PV_TABLE_REF) != 0);
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("pmap_is_referenced: page %p is not managed", m));
+       return ((m->md.pv_flags & PV_TABLE_REF) != 0);
 }
 
 /*

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c      Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/powerpc/aim/mmu_oea.c      Wed May 26 18:00:44 2010        (r208574)
@@ -1197,12 +1197,14 @@ moea_enter_object(mmu_t mmu, pmap_t pm, 
 
        psize = atop(end - start);
        m = m_start;
+       vm_page_lock_queues();
        PMAP_LOCK(pm);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                moea_enter_locked(pm, start + ptoa(diff), m, prot &
                    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
                m = TAILQ_NEXT(m, listq);
        }
+       vm_page_unlock_queues();
        PMAP_UNLOCK(pm);
 }
 
@@ -1282,15 +1284,14 @@ boolean_t
 moea_is_referenced(mmu_t mmu, vm_page_t m)
 {
 
-       if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-               return (FALSE);
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("moea_is_referenced: page %p is not managed", m));
        return (moea_query_bit(m, PTE_REF));
 }
 
 boolean_t
 moea_is_modified(mmu_t mmu, vm_page_t m)
 {
-       boolean_t rv;
 
        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
            ("moea_is_modified: page %p is not managed", m));
@@ -1304,10 +1305,7 @@ moea_is_modified(mmu_t mmu, vm_page_t m)
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->flags & PG_WRITEABLE) == 0)
                return (FALSE);
-       vm_page_lock_queues();
-       rv = moea_query_bit(m, PTE_CHG);
-       vm_page_unlock_queues();
-       return (rv);
+       return (moea_query_bit(m, PTE_CHG));
 }
 
 void
@@ -2268,6 +2266,7 @@ moea_query_bit(vm_page_t m, int ptebit)
        if (moea_attr_fetch(m) & ptebit)
                return (TRUE);
 
+       vm_page_lock_queues();
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                MOEA_PVO_CHECK(pvo);    /* sanity check */
 
@@ -2278,6 +2277,7 @@ moea_query_bit(vm_page_t m, int ptebit)
                if (pvo->pvo_pte.pte.pte_lo & ptebit) {
                        moea_attr_save(m, ptebit);
                        MOEA_PVO_CHECK(pvo);    /* sanity check */
+                       vm_page_unlock_queues();
                        return (TRUE);
                }
        }
@@ -2303,11 +2303,13 @@ moea_query_bit(vm_page_t m, int ptebit)
                        if (pvo->pvo_pte.pte.pte_lo & ptebit) {
                                moea_attr_save(m, ptebit);
                                MOEA_PVO_CHECK(pvo);    /* sanity check */
+                               vm_page_unlock_queues();
                                return (TRUE);
                        }
                }
        }
 
+       vm_page_unlock_queues();
        return (FALSE);
 }
 

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/powerpc/aim/mmu_oea64.c    Wed May 26 18:00:44 2010        (r208574)
@@ -1330,12 +1330,14 @@ moea64_enter_object(mmu_t mmu, pmap_t pm
 
        psize = atop(end - start);
        m = m_start;
+       vm_page_lock_queues();
        PMAP_LOCK(pm);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                moea64_enter_locked(pm, start + ptoa(diff), m, prot &
                    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
                m = TAILQ_NEXT(m, listq);
        }
+       vm_page_unlock_queues();
        PMAP_UNLOCK(pm);
 }
 
@@ -1477,15 +1479,14 @@ boolean_t
 moea64_is_referenced(mmu_t mmu, vm_page_t m)
 {
 
-       if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-               return (FALSE);
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("moea64_is_referenced: page %p is not managed", m));
        return (moea64_query_bit(m, PTE_REF));
 }
 
 boolean_t
 moea64_is_modified(mmu_t mmu, vm_page_t m)
 {
-       boolean_t rv;
 
        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
            ("moea64_is_modified: page %p is not managed", m));
@@ -1499,10 +1500,7 @@ moea64_is_modified(mmu_t mmu, vm_page_t 
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->flags & PG_WRITEABLE) == 0)
                return (FALSE);
-       vm_page_lock_queues();
-       rv = moea64_query_bit(m, LPTE_CHG);
-       vm_page_unlock_queues();
-       return (rv);
+       return (moea64_query_bit(m, LPTE_CHG));
 }
 
 void
@@ -2394,7 +2392,7 @@ moea64_query_bit(vm_page_t m, u_int64_t 
        if (moea64_attr_fetch(m) & ptebit)
                return (TRUE);
 
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       vm_page_lock_queues();
 
        LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
                MOEA_PVO_CHECK(pvo);    /* sanity check */
@@ -2406,6 +2404,7 @@ moea64_query_bit(vm_page_t m, u_int64_t 
                if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
                        moea64_attr_save(m, ptebit);
                        MOEA_PVO_CHECK(pvo);    /* sanity check */
+                       vm_page_unlock_queues();
                        return (TRUE);
                }
        }
@@ -2433,12 +2432,14 @@ moea64_query_bit(vm_page_t m, u_int64_t 
 
                                moea64_attr_save(m, ptebit);
                                MOEA_PVO_CHECK(pvo);    /* sanity check */
+                               vm_page_unlock_queues();
                                return (TRUE);
                        }
                }
                UNLOCK_TABLE();
        }
 
+       vm_page_unlock_queues();
        return (FALSE);
 }
 

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c       Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/powerpc/booke/pmap.c       Wed May 26 18:00:44 2010        (r208574)
@@ -1711,12 +1711,14 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t
 
        psize = atop(end - start);
        m = m_start;
+       vm_page_lock_queues();
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
                    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
                m = TAILQ_NEXT(m, listq);
        }
+       vm_page_unlock_queues();
        PMAP_UNLOCK(pmap);
 }
 
@@ -2209,19 +2211,22 @@ mmu_booke_is_referenced(mmu_t mmu, vm_pa
        pv_entry_t pv;
        boolean_t rv;
 
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("mmu_booke_is_referenced: page %p is not managed", m));
        rv = FALSE;
-       if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-               return (rv);
+       vm_page_lock_queues();
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
-                   PTE_ISVALID(pte))
-                       rv = PTE_ISREFERENCED(pte) ? TRUE : FALSE;
+                   PTE_ISVALID(pte)) {
+                       if (PTE_ISREFERENCED(pte))
+                               rv = TRUE;
+               }
                PMAP_UNLOCK(pv->pv_pmap);
                if (rv)
                        break;
        }
+       vm_page_unlock_queues();
        return (rv);
 }
 

Modified: head/sys/sparc64/sparc64/pmap.c
==============================================================================
--- head/sys/sparc64/sparc64/pmap.c     Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/sparc64/sparc64/pmap.c     Wed May 26 18:00:44 2010        (r208574)
@@ -1492,12 +1492,14 @@ pmap_enter_object(pmap_t pm, vm_offset_t
 
        psize = atop(end - start);
        m = m_start;
+       vm_page_lock_queues();
        PMAP_LOCK(pm);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                pmap_enter_locked(pm, start + ptoa(diff), m, prot &
                    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
                m = TAILQ_NEXT(m, listq);
        }
+       vm_page_unlock_queues();
        PMAP_UNLOCK(pm);
 }
 
@@ -1947,17 +1949,22 @@ boolean_t
 pmap_is_referenced(vm_page_t m)
 {
        struct tte *tp;
+       boolean_t rv;
 
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-       if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
-               return (FALSE);
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("pmap_is_referenced: page %p is not managed", m));
+       rv = FALSE;
+       vm_page_lock_queues();
        TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
                if ((tp->tte_data & TD_PV) == 0)
                        continue;
-               if ((tp->tte_data & TD_REF) != 0)
-                       return (TRUE);
+               if ((tp->tte_data & TD_REF) != 0) {
+                       rv = TRUE;
+                       break;
+               }
        }
-       return (FALSE);
+       vm_page_unlock_queues();
+       return (rv);
 }
 
 void

Modified: head/sys/sun4v/sun4v/pmap.c
==============================================================================
--- head/sys/sun4v/sun4v/pmap.c Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/sun4v/sun4v/pmap.c Wed May 26 18:00:44 2010        (r208574)
@@ -1221,11 +1221,13 @@ pmap_enter_object(pmap_t pmap, vm_offset
         VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
         psize = atop(end - start);
         m = m_start;
+       vm_page_lock_queues();
         PMAP_LOCK(pmap);
         while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
                 m = TAILQ_NEXT(m, listq);
         }
+       vm_page_unlock_queues();
         PMAP_UNLOCK(pmap);
 }
 
@@ -1642,8 +1644,14 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
 boolean_t
 pmap_is_referenced(vm_page_t m)
 {
+       boolean_t rv;
 
-       return (tte_get_phys_bit(m, VTD_REF));
+       KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+           ("pmap_is_referenced: page %p is not managed", m));
+       vm_page_lock_queues();
+       rv = tte_get_phys_bit(m, VTD_REF);
+       vm_page_unlock_queues();
+       return (rv);
 }
 
 /*

Modified: head/sys/vm/vm_map.c
==============================================================================
--- head/sys/vm/vm_map.c        Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/vm/vm_map.c        Wed May 26 18:00:44 2010        (r208574)
@@ -1726,7 +1726,6 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
        vm_offset_t start;
        vm_page_t p, p_start;
        vm_pindex_t psize, tmpidx;
-       boolean_t are_queues_locked;
 
        if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
                return;
@@ -1748,7 +1747,6 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
                psize = object->size - pindex;
        }
 
-       are_queues_locked = FALSE;
        start = 0;
        p_start = NULL;
 
@@ -1782,25 +1780,14 @@ vm_map_pmap_enter(vm_map_t map, vm_offse
                                p_start = p;
                        }
                } else if (p_start != NULL) {
-                       if (!are_queues_locked) {
-                               are_queues_locked = TRUE;
-                               vm_page_lock_queues();
-                       }
                        pmap_enter_object(map->pmap, start, addr +
                            ptoa(tmpidx), p_start, prot);
                        p_start = NULL;
                }
        }
-       if (p_start != NULL) {
-               if (!are_queues_locked) {
-                       are_queues_locked = TRUE;
-                       vm_page_lock_queues();
-               }
+       if (p_start != NULL)
                pmap_enter_object(map->pmap, start, addr + ptoa(psize),
                    p_start, prot);
-       }
-       if (are_queues_locked)
-               vm_page_unlock_queues();
 unlock_return:
        VM_OBJECT_UNLOCK(object);
 }

Modified: head/sys/vm/vm_mmap.c
==============================================================================
--- head/sys/vm/vm_mmap.c       Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/vm/vm_mmap.c       Wed May 26 18:00:44 2010        (r208574)
@@ -912,11 +912,18 @@ RestartScan:
                                        vm_page_dirty(m);
                                if (m->dirty != 0)
                                        mincoreinfo |= MINCORE_MODIFIED_OTHER;
-                               vm_page_lock_queues();
+                               /*
+                                * The first test for PG_REFERENCED is an
+                                * optimization.  The second test is
+                                * required because a concurrent pmap
+                                * operation could clear the last reference
+                                * and set PG_REFERENCED before the call to
+                                * pmap_is_referenced(). 
+                                */
                                if ((m->flags & PG_REFERENCED) != 0 ||
-                                   pmap_is_referenced(m))
+                                   pmap_is_referenced(m) ||
+                                   (m->flags & PG_REFERENCED) != 0)
                                        mincoreinfo |= MINCORE_REFERENCED_OTHER;
-                               vm_page_unlock_queues();
                        }
                        if (object != NULL)
                                VM_OBJECT_UNLOCK(object);

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c       Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/vm/vm_page.c       Wed May 26 18:00:44 2010        (r208574)
@@ -1894,7 +1894,7 @@ vm_page_dontneed(vm_page_t m)
        PCPU_INC(dnweight);
 
        /*
-        * occassionally leave the page alone
+        * Occasionally leave the page alone.
         */
        if ((dnw & 0x01F0) == 0 ||
            VM_PAGE_INQUEUE2(m, PQ_INACTIVE)) {
@@ -1906,11 +1906,18 @@ vm_page_dontneed(vm_page_t m)
        /*
         * Clear any references to the page.  Otherwise, the page daemon will
         * immediately reactivate the page.
+        *
+        * Perform the pmap_clear_reference() first.  Otherwise, a concurrent
+        * pmap operation, such as pmap_remove(), could clear a reference in
+        * the pmap and set PG_REFERENCED on the page before the
+        * pmap_clear_reference() had completed.  Consequently, the page would
+        * appear referenced based upon an old reference that occurred before
+        * this function ran.
         */
+       pmap_clear_reference(m);
        vm_page_lock_queues();
        vm_page_flag_clear(m, PG_REFERENCED);
        vm_page_unlock_queues();
-       pmap_clear_reference(m);
 
        if (m->dirty == 0 && pmap_is_modified(m))
                vm_page_dirty(m);
@@ -2142,7 +2149,9 @@ void
 vm_page_clear_dirty(vm_page_t m, int base, int size)
 {
 
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+       if ((m->flags & PG_WRITEABLE) != 0)
+               mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        m->dirty &= ~vm_page_bits(base, size);
 }
 

Modified: head/sys/vm/vnode_pager.c
==============================================================================
--- head/sys/vm/vnode_pager.c   Wed May 26 17:30:19 2010        (r208573)
+++ head/sys/vm/vnode_pager.c   Wed May 26 18:00:44 2010        (r208574)
@@ -429,11 +429,9 @@ vnode_pager_setsize(vp, nsize)
                         * bits.  This would prevent bogus_page
                         * replacement from working properly.
                         */
-                       vm_page_lock(m);
                        vm_page_lock_queues();
                        vm_page_clear_dirty(m, base, PAGE_SIZE - base);
                        vm_page_unlock_queues();
-                       vm_page_unlock(m);
                } else if ((nsize & PAGE_MASK) &&
                    __predict_false(object->cache != NULL)) {
                        vm_page_cache_free(object, OFF_TO_IDX(nsize),
@@ -1071,15 +1069,12 @@ vnode_pager_putpages(object, m, count, s
  * then delayed.
  */
 int
-vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
-       struct vnode *vp;
-       vm_page_t *m;
-       int bytecount;
-       int flags;
-       int *rtvals;
+vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
+    int flags, int *rtvals)
 {
        int i;
        vm_object_t object;
+       vm_page_t m;
        int count;
 
        int maxsize, ncount;
@@ -1098,9 +1093,9 @@ vnode_pager_generic_putpages(vp, m, byte
        for (i = 0; i < count; i++)
                rtvals[i] = VM_PAGER_AGAIN;
 
-       if ((int64_t)m[0]->pindex < 0) {
+       if ((int64_t)ma[0]->pindex < 0) {
                printf("vnode_pager_putpages: attempt to write meta-data!!! -- 
0x%lx(%lx)\n",
-                       (long)m[0]->pindex, (u_long)m[0]->dirty);
+                   (long)ma[0]->pindex, (u_long)ma[0]->dirty);
                rtvals[0] = VM_PAGER_BAD;
                return VM_PAGER_BAD;
        }
@@ -1108,7 +1103,7 @@ vnode_pager_generic_putpages(vp, m, byte
        maxsize = count * PAGE_SIZE;
        ncount = count;
 
-       poffset = IDX_TO_OFF(m[0]->pindex);
+       poffset = IDX_TO_OFF(ma[0]->pindex);
 
        /*
         * If the page-aligned write is larger then the actual file we
@@ -1122,6 +1117,7 @@ vnode_pager_generic_putpages(vp, m, byte
         * We do not under any circumstances truncate the valid bits, as
         * this will screw up bogus page replacement.
         */
+       VM_OBJECT_LOCK(object);
        if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
                if (object->un_pager.vnp.vnp_size > poffset) {
                        int pgoff;
@@ -1129,12 +1125,19 @@ vnode_pager_generic_putpages(vp, m, byte
                        maxsize = object->un_pager.vnp.vnp_size - poffset;
                        ncount = btoc(maxsize);
                        if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
-                               vm_page_lock(m[ncount - 1]);
-                               vm_page_lock_queues();
-                               vm_page_clear_dirty(m[ncount - 1], pgoff,
-                                       PAGE_SIZE - pgoff);
-                               vm_page_unlock_queues();
-                               vm_page_unlock(m[ncount - 1]);
+                               /*
+                                * If the object is locked and the following
+                                * conditions hold, then the page's dirty
+                                * field cannot be concurrently changed by a
+                                * pmap operation.
+                                */
+                               m = ma[ncount - 1];
+                               KASSERT(m->busy > 0,
+               ("vnode_pager_generic_putpages: page %p is not busy", m));
+                               KASSERT((m->flags & PG_WRITEABLE) == 0,
+               ("vnode_pager_generic_putpages: page %p is not read-only", m));
+                               vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
+                                   pgoff);
                        }
                } else {
                        maxsize = 0;
@@ -1146,6 +1149,7 @@ vnode_pager_generic_putpages(vp, m, byte
                        }
                }
        }
+       VM_OBJECT_UNLOCK(object);
 
        /*
         * pageouts are already clustered, use IO_ASYNC t o force a bawrite()
@@ -1182,7 +1186,7 @@ vnode_pager_generic_putpages(vp, m, byte
        if (auio.uio_resid) {
                if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
                        printf("vnode_pager_putpages: residual I/O %zd at 
%lu\n",
-                           auio.uio_resid, (u_long)m[0]->pindex);
+                           auio.uio_resid, (u_long)ma[0]->pindex);
        }
        for (i = 0; i < ncount; i++) {
                rtvals[i] = VM_PAGER_OK;