Author: alc
Date: Wed Jul 18 05:21:34 2012
New Revision: 238561
URL: http://svn.freebsd.org/changeset/base/238561

Log:
  Move what remains of vm/vm_contig.c into vm/vm_pageout.c, where similar
  code resides.  Rename vm_contig_grow_cache() to vm_pageout_grow_cache().
  
  Reviewed by:  kib

Deleted:
  head/sys/vm/vm_contig.c
Modified:
  head/sys/conf/files
  head/sys/mips/mips/pmap.c
  head/sys/vm/vm_kern.c
  head/sys/vm/vm_pageout.c
  head/sys/vm/vm_pageout.h

Modified: head/sys/conf/files
==============================================================================
--- head/sys/conf/files Wed Jul 18 04:52:37 2012        (r238560)
+++ head/sys/conf/files Wed Jul 18 05:21:34 2012        (r238561)
@@ -3551,7 +3551,6 @@ vm/sg_pager.c                     standard
 vm/swap_pager.c                        standard
 vm/uma_core.c                  standard
 vm/uma_dbg.c                   standard
-vm/vm_contig.c                 standard
 vm/memguard.c                  optional DEBUG_MEMGUARD
 vm/vm_fault.c                  standard
 vm/vm_glue.c                   standard

Modified: head/sys/mips/mips/pmap.c
==============================================================================
--- head/sys/mips/mips/pmap.c   Wed Jul 18 04:52:37 2012        (r238560)
+++ head/sys/mips/mips/pmap.c   Wed Jul 18 05:21:34 2012        (r238561)
@@ -1034,9 +1034,9 @@ pmap_grow_direct_page_cache()
 {
 
 #ifdef __mips_n64
-       vm_contig_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
+       vm_pageout_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
 #else
-       vm_contig_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
+       vm_pageout_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
 #endif
 }
 

Modified: head/sys/vm/vm_kern.c
==============================================================================
--- head/sys/vm/vm_kern.c       Wed Jul 18 04:52:37 2012        (r238560)
+++ head/sys/vm/vm_kern.c       Wed Jul 18 05:21:34 2012        (r238561)
@@ -239,7 +239,7 @@ retry:
                        VM_OBJECT_UNLOCK(object);
                        if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
                                vm_map_unlock(map);
-                               vm_contig_grow_cache(tries, low, high);
+                               vm_pageout_grow_cache(tries, low, high);
                                vm_map_lock(map);
                                VM_OBJECT_LOCK(object);
                                tries++;
@@ -313,7 +313,7 @@ retry:
                VM_OBJECT_UNLOCK(object);
                if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
                        vm_map_unlock(map);
-                       vm_contig_grow_cache(tries, low, high);
+                       vm_pageout_grow_cache(tries, low, high);
                        vm_map_lock(map);
                        VM_OBJECT_LOCK(object);
                        tries++;

Modified: head/sys/vm/vm_pageout.c
==============================================================================
--- head/sys/vm/vm_pageout.c    Wed Jul 18 04:52:37 2012        (r238560)
+++ head/sys/vm/vm_pageout.c    Wed Jul 18 05:21:34 2012        (r238561)
@@ -209,11 +209,14 @@ int vm_page_max_wired;            /* XXX max # of 
 SYSCTL_INT(_vm, OID_AUTO, max_wired,
        CTLFLAG_RW, &vm_page_max_wired, 0, "System-wide limit to wired page count");
 
 
+static boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
+static boolean_t vm_pageout_launder(int, int, vm_paddr_t, vm_paddr_t);
 #if !defined(NO_SWAPPING)
 static void vm_pageout_map_deactivate_pages(vm_map_t, long);
 static void vm_pageout_object_deactivate_pages(pmap_t, vm_object_t, long);
 static void vm_req_vmdaemon(int req);
 #endif
+static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
 static void vm_pageout_page_stats(void);
 
 /*
@@ -247,7 +250,7 @@ vm_pageout_init_marker(vm_page_t marker,
  * This function depends on both the lock portion of struct vm_object
  * and normal struct vm_page being type stable.
  */
-boolean_t
+static boolean_t
 vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
 {
        struct vm_page marker;
@@ -286,7 +289,7 @@ vm_pageout_fallback_object_lock(vm_page_
  *
  * This function depends on normal struct vm_page being type stable.
  */
-boolean_t
+static boolean_t
 vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
 {
        struct vm_page marker;
@@ -558,6 +561,138 @@ vm_pageout_flush(vm_page_t *mc, int coun
        return (numpagedout);
 }
 
+static boolean_t
+vm_pageout_launder(int queue, int tries, vm_paddr_t low, vm_paddr_t high)
+{
+       struct mount *mp;
+       struct vnode *vp;
+       vm_object_t object;
+       vm_paddr_t pa;
+       vm_page_t m, m_tmp, next;
+       int vfslocked;
+
+       vm_page_lock_queues();
+       TAILQ_FOREACH_SAFE(m, &vm_page_queues[queue].pl, pageq, next) {
+               KASSERT(m->queue == queue,
+                   ("vm_pageout_launder: page %p's queue is not %d", m,
+                   queue));
+               if ((m->flags & PG_MARKER) != 0)
+                       continue;
+               pa = VM_PAGE_TO_PHYS(m);
+               if (pa < low || pa + PAGE_SIZE > high)
+                       continue;
+               if (!vm_pageout_page_lock(m, &next) || m->hold_count != 0) {
+                       vm_page_unlock(m);
+                       continue;
+               }
+               object = m->object;
+               if (!VM_OBJECT_TRYLOCK(object) &&
+                   (!vm_pageout_fallback_object_lock(m, &next) ||
+                   m->hold_count != 0)) {
+                       vm_page_unlock(m);
+                       VM_OBJECT_UNLOCK(object);
+                       continue;
+               }
+               if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
+                       if (tries == 0) {
+                               vm_page_unlock(m);
+                               VM_OBJECT_UNLOCK(object);
+                               continue;
+                       }
+                       vm_page_sleep(m, "vpctw0");
+                       VM_OBJECT_UNLOCK(object);
+                       return (FALSE);
+               }
+               vm_page_test_dirty(m);
+               if (m->dirty == 0)
+                       pmap_remove_all(m);
+               if (m->dirty != 0) {
+                       vm_page_unlock(m);
+                       if (tries == 0 || (object->flags & OBJ_DEAD) != 0) {
+                               VM_OBJECT_UNLOCK(object);
+                               continue;
+                       }
+                       if (object->type == OBJT_VNODE) {
+                               vm_page_unlock_queues();
+                               vp = object->handle;
+                               vm_object_reference_locked(object);
+                               VM_OBJECT_UNLOCK(object);
+                               (void)vn_start_write(vp, &mp, V_WAIT);
+                               vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+                               vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+                               VM_OBJECT_LOCK(object);
+                               vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
+                               VM_OBJECT_UNLOCK(object);
+                               VOP_UNLOCK(vp, 0);
+                               VFS_UNLOCK_GIANT(vfslocked);
+                               vm_object_deallocate(object);
+                               vn_finished_write(mp);
+                               return (TRUE);
+                       } else if (object->type == OBJT_SWAP ||
+                           object->type == OBJT_DEFAULT) {
+                               vm_page_unlock_queues();
+                               m_tmp = m;
+                               vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC,
+                                   0, NULL, NULL);
+                               VM_OBJECT_UNLOCK(object);
+                               return (TRUE);
+                       }
+               } else {
+                       vm_page_cache(m);
+                       vm_page_unlock(m);
+               }
+               VM_OBJECT_UNLOCK(object);
+       }
+       vm_page_unlock_queues();
+       return (FALSE);
+}
+
+/*
+ * Increase the number of cached pages.  The specified value, "tries",
+ * determines which categories of pages are cached:
+ *
+ *  0: All clean, inactive pages within the specified physical address range
+ *     are cached.  Will not sleep.
+ *  1: The vm_lowmem handlers are called.  All inactive pages within
+ *     the specified physical address range are cached.  May sleep.
+ *  2: The vm_lowmem handlers are called.  All inactive and active pages
+ *     within the specified physical address range are cached.  May sleep.
+ */
+void
+vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high)
+{
+       int actl, actmax, inactl, inactmax;
+
+       if (tries > 0) {
+               /*
+                * Decrease registered cache sizes.  The vm_lowmem handlers
+                * may acquire locks and/or sleep, so they can only be invoked
+                * when "tries" is greater than zero.
+                */
+               EVENTHANDLER_INVOKE(vm_lowmem, 0);
+
+               /*
+                * We do this explicitly after the caches have been drained
+                * above.
+                */
+               uma_reclaim();
+       }
+       inactl = 0;
+       inactmax = cnt.v_inactive_count;
+       actl = 0;
+       actmax = tries < 2 ? 0 : cnt.v_active_count;
+again:
+       if (inactl < inactmax && vm_pageout_launder(PQ_INACTIVE, tries, low,
+           high)) {
+               inactl++;
+               goto again;
+       }
+       if (actl < actmax && vm_pageout_launder(PQ_ACTIVE, tries, low, high)) {
+               actl++;
+               goto again;
+       }
+}
+
 #if !defined(NO_SWAPPING)
 /*
  *     vm_pageout_object_deactivate_pages

Modified: head/sys/vm/vm_pageout.h
==============================================================================
--- head/sys/vm/vm_pageout.h    Wed Jul 18 04:52:37 2012        (r238560)
+++ head/sys/vm/vm_pageout.h    Wed Jul 18 05:21:34 2012        (r238561)
@@ -101,10 +101,8 @@ extern void vm_wait(void);
 extern void vm_waitpfault(void);
 
 #ifdef _KERNEL
-boolean_t vm_pageout_fallback_object_lock(vm_page_t, vm_page_t *);
 int vm_pageout_flush(vm_page_t *, int, int, int, int *, boolean_t *);
+void vm_pageout_grow_cache(int, vm_paddr_t, vm_paddr_t);
 void vm_pageout_oom(int shortage);
-boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
-void vm_contig_grow_cache(int, vm_paddr_t, vm_paddr_t);
 #endif
 #endif /* _VM_VM_PAGEOUT_H_ */
_______________________________________________
svn-src-head@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscribe@freebsd.org"

Reply via email to