Author: kib
Date: Wed Sep 13 19:11:52 2017
New Revision: 323559
URL: https://svnweb.freebsd.org/changeset/base/323559

Log:
  Split vm_page_free_toq() into two parts, preparation vm_page_free_prep()
  and insertion into the phys allocator free queues vm_page_free_phys().
  Also provide a wrapper vm_page_free_phys_pglist() for batched free.
  
  Reviewed by:  alc, markj
  Tested by:    mjg (previous version)
  Sponsored by: The FreeBSD Foundation
  MFC after:    1 week

Modified:
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c       Wed Sep 13 19:03:59 2017        (r323558)
+++ head/sys/vm/vm_page.c       Wed Sep 13 19:11:52 2017        (r323559)
@@ -163,6 +163,7 @@ static uma_zone_t fakepg_zone;
 static void vm_page_alloc_check(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(uint8_t queue, vm_page_t m);
+static void vm_page_free_phys(vm_page_t m);
 static void vm_page_free_wakeup(void);
 static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
@@ -2402,13 +2403,7 @@ unlock:
                mtx_lock(&vm_page_queue_free_mtx);
                do {
                        SLIST_REMOVE_HEAD(&free, plinks.s.ss);
-                       vm_phys_freecnt_adj(m, 1);
-#if VM_NRESERVLEVEL > 0
-                       if (!vm_reserv_free_page(m))
-#else
-                       if (true)
-#endif
-                               vm_phys_free_pages(m, 0);
+                       vm_page_free_phys(m);
                } while ((m = SLIST_FIRST(&free)) != NULL);
                vm_page_free_wakeup();
                mtx_unlock(&vm_page_queue_free_mtx);
@@ -2770,15 +2765,18 @@ vm_page_free_wakeup(void)
 }
 
 /*
- *     vm_page_free_toq:
+ *     vm_page_free_prep:
  *
- *     Returns the given page to the free list,
- *     disassociating it with any VM object.
+ *     Prepares the given page to be put on the free list,
+ *     disassociating it from any VM object. The caller may return
+ *     the page to the free list only if this function returns true.
  *
- *     The object must be locked.  The page must be locked if it is managed.
+ *     The object must be locked.  The page must be locked if it is
+ *     managed.  For a queued managed page, the pagequeue_locked
+ *     argument specifies whether the page queue is already locked.
  */
-void
-vm_page_free_toq(vm_page_t m)
+bool
+vm_page_free_prep(vm_page_t m, bool pagequeue_locked)
 {
 
        if ((m->oflags & VPO_UNMANAGED) == 0) {
@@ -2799,16 +2797,20 @@ vm_page_free_toq(vm_page_t m)
         * callback routine until after we've put the page on the
         * appropriate free queue.
         */
-       vm_page_remque(m);
+       if (m->queue != PQ_NONE) {
+               if (pagequeue_locked)
+                       vm_page_dequeue_locked(m);
+               else
+                       vm_page_dequeue(m);
+       }
        vm_page_remove(m);
 
        /*
         * If fictitious remove object association and
         * return, otherwise delay object association removal.
         */
-       if ((m->flags & PG_FICTITIOUS) != 0) {
-               return;
-       }
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               return (false);
 
        m->valid = 0;
        vm_page_undirty(m);
@@ -2820,28 +2822,66 @@ vm_page_free_toq(vm_page_t m)
                KASSERT((m->flags & PG_UNHOLDFREE) == 0,
                    ("vm_page_free: freeing PG_UNHOLDFREE page %p", m));
                m->flags |= PG_UNHOLDFREE;
-       } else {
-               /*
-                * Restore the default memory attribute to the page.
-                */
-               if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
-                       pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
+               return (false);
+       }
 
-               /*
-                * Insert the page into the physical memory allocator's free
-                * page queues.
-                */
-               mtx_lock(&vm_page_queue_free_mtx);
-               vm_phys_freecnt_adj(m, 1);
+       /*
+        * Restore the default memory attribute to the page.
+        */
+       if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
+               pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
+
+       return (true);
+}
+
+/*
+ * Insert the page into the physical memory allocator's free page
+ * queues.  This is the last step to free a page.
+ */
+static void
+vm_page_free_phys(vm_page_t m)
+{
+
+       mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+
+       vm_phys_freecnt_adj(m, 1);
 #if VM_NRESERVLEVEL > 0
-               if (!vm_reserv_free_page(m))
-#else
-               if (TRUE)
+       if (!vm_reserv_free_page(m))
 #endif
-                       vm_phys_free_pages(m, 0);
-               vm_page_free_wakeup();
-               mtx_unlock(&vm_page_queue_free_mtx);
-       }
+               vm_phys_free_pages(m, 0);
+}
+
+void
+vm_page_free_phys_pglist(struct pglist *tq)
+{
+       vm_page_t m;
+
+       mtx_lock(&vm_page_queue_free_mtx);
+       TAILQ_FOREACH(m, tq, listq)
+               vm_page_free_phys(m);
+       vm_page_free_wakeup();
+       mtx_unlock(&vm_page_queue_free_mtx);
+}
+
+/*
+ *     vm_page_free_toq:
+ *
+ *     Returns the given page to the free list, disassociating it
+ *     from any VM object.
+ *
+ *     The object must be locked.  The page must be locked if it is
+ *     managed.
+ */
+void
+vm_page_free_toq(vm_page_t m)
+{
+
+       if (!vm_page_free_prep(m, false))
+               return;
+       mtx_lock(&vm_page_queue_free_mtx);
+       vm_page_free_phys(m);
+       vm_page_free_wakeup();
+       mtx_unlock(&vm_page_queue_free_mtx);
 }
 
 /*

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h       Wed Sep 13 19:03:59 2017        (r323558)
+++ head/sys/vm/vm_page.h       Wed Sep 13 19:11:52 2017        (r323559)
@@ -483,6 +483,8 @@ void vm_page_deactivate_noreuse(vm_page_t);
 void vm_page_dequeue(vm_page_t m);
 void vm_page_dequeue_locked(vm_page_t m);
 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
+void vm_page_free_phys_pglist(struct pglist *tq);
+bool vm_page_free_prep(vm_page_t m, bool pagequeue_locked);
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
_______________________________________________
svn-src-head@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscribe@freebsd.org"

Reply via email to