Author: markj
Date: Wed Jul  5 19:24:53 2017
New Revision: 320693
URL: https://svnweb.freebsd.org/changeset/base/320693

Log:
  MFS r320605, r320610: MFC r303052, r309017 (by alc):
  Omit v_cache_count when computing the number of free pages, since its
  value is always 0.
  
  Approved by:  re (gjb, kib)

Modified:
  releng/11.1/sys/cddl/compat/opensolaris/sys/kmem.h
  releng/11.1/sys/compat/linprocfs/linprocfs.c
  releng/11.1/sys/fs/tmpfs/tmpfs_subr.c
  releng/11.1/sys/sys/vmmeter.h
  releng/11.1/sys/vm/swap_pager.c
  releng/11.1/sys/vm/vm_meter.c
  releng/11.1/sys/vm/vm_page.c
  releng/11.1/sys/vm/vm_pageout.c
  releng/11.1/sys/vm/vnode_pager.c
Directory Properties:
  releng/11.1/   (props changed)

Modified: releng/11.1/sys/cddl/compat/opensolaris/sys/kmem.h
==============================================================================
--- releng/11.1/sys/cddl/compat/opensolaris/sys/kmem.h  Wed Jul  5 19:24:38 
2017        (r320692)
+++ releng/11.1/sys/cddl/compat/opensolaris/sys/kmem.h  Wed Jul  5 19:24:53 
2017        (r320693)
@@ -77,7 +77,7 @@ void kmem_reap(void);
 int kmem_debugging(void);
 void *calloc(size_t n, size_t s);
 
-#define        freemem                         (vm_cnt.v_free_count + vm_cnt.v_cache_count)
+#define        freemem                         vm_cnt.v_free_count
 #define        minfree                         vm_cnt.v_free_min
 #define        heap_arena                      kmem_arena
 #define        kmem_alloc(size, kmflags)       zfs_kmem_alloc((size), (kmflags))

Modified: releng/11.1/sys/compat/linprocfs/linprocfs.c
==============================================================================
--- releng/11.1/sys/compat/linprocfs/linprocfs.c        Wed Jul  5 19:24:38 
2017        (r320692)
+++ releng/11.1/sys/compat/linprocfs/linprocfs.c        Wed Jul  5 19:24:53 
2017        (r320693)
@@ -176,7 +176,7 @@ linprocfs_domeminfo(PFS_FILL_ARGS)
         * like unstaticizing it just for linprocfs's sake.
         */
        buffers = 0;
-       cached = vm_cnt.v_cache_count * PAGE_SIZE;
+       cached = vm_cnt.v_inactive_count * PAGE_SIZE;
 
        sbuf_printf(sb,
            "MemTotal: %9lu kB\n"

Modified: releng/11.1/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- releng/11.1/sys/fs/tmpfs/tmpfs_subr.c       Wed Jul  5 19:24:38 2017        
(r320692)
+++ releng/11.1/sys/fs/tmpfs/tmpfs_subr.c       Wed Jul  5 19:24:53 2017        
(r320693)
@@ -100,8 +100,7 @@ tmpfs_mem_avail(void)
 {
        vm_ooffset_t avail;
 
-       avail = swap_pager_avail + vm_cnt.v_free_count + vm_cnt.v_cache_count -
-           tmpfs_pages_reserved;
+       avail = swap_pager_avail + vm_cnt.v_free_count - tmpfs_pages_reserved;
        if (__predict_false(avail < 0))
                avail = 0;
        return (avail);

Modified: releng/11.1/sys/sys/vmmeter.h
==============================================================================
--- releng/11.1/sys/sys/vmmeter.h       Wed Jul  5 19:24:38 2017        
(r320692)
+++ releng/11.1/sys/sys/vmmeter.h       Wed Jul  5 19:24:53 2017        
(r320693)
@@ -118,7 +118,7 @@ struct vmmeter {
 
 extern struct vmmeter vm_cnt;
 
-extern int vm_pageout_wakeup_thresh;
+extern u_int vm_pageout_wakeup_thresh;
 
 /*
  * Return TRUE if we are under our severe low-free-pages threshold
@@ -130,8 +130,7 @@ static inline int
 vm_page_count_severe(void)
 {
 
-       return (vm_cnt.v_free_severe > vm_cnt.v_free_count +
-           vm_cnt.v_cache_count);
+       return (vm_cnt.v_free_severe > vm_cnt.v_free_count);
 }
 
 /*
@@ -147,7 +146,7 @@ static inline int
 vm_page_count_min(void)
 {
 
-       return (vm_cnt.v_free_min > vm_cnt.v_free_count + vm_cnt.v_cache_count);
+       return (vm_cnt.v_free_min > vm_cnt.v_free_count);
 }
 
 /*
@@ -158,8 +157,7 @@ static inline int
 vm_page_count_target(void)
 {
 
-       return (vm_cnt.v_free_target > vm_cnt.v_free_count +
-           vm_cnt.v_cache_count);
+       return (vm_cnt.v_free_target > vm_cnt.v_free_count);
 }
 
 /*
@@ -170,8 +168,7 @@ static inline int
 vm_paging_target(void)
 {
 
-       return (vm_cnt.v_free_target - (vm_cnt.v_free_count +
-           vm_cnt.v_cache_count));
+       return (vm_cnt.v_free_target - vm_cnt.v_free_count);
 }
 
 /*
@@ -181,8 +178,7 @@ static inline int
 vm_paging_needed(void)
 {
 
-       return (vm_cnt.v_free_count + vm_cnt.v_cache_count <
-           (u_int)vm_pageout_wakeup_thresh);
+       return (vm_cnt.v_free_count < vm_pageout_wakeup_thresh);
 }
 
 /*

Modified: releng/11.1/sys/vm/swap_pager.c
==============================================================================
--- releng/11.1/sys/vm/swap_pager.c     Wed Jul  5 19:24:38 2017        
(r320692)
+++ releng/11.1/sys/vm/swap_pager.c     Wed Jul  5 19:24:53 2017        
(r320693)
@@ -2282,10 +2282,8 @@ swapoff_one(struct swdevt *sp, struct ucred *cred)
         * of data we will have to page back in, plus an epsilon so
         * the system doesn't become critically low on swap space.
         */
-       if (vm_cnt.v_free_count + vm_cnt.v_cache_count + swap_pager_avail <
-           nblks + nswap_lowat) {
+       if (vm_cnt.v_free_count + swap_pager_avail < nblks + nswap_lowat)
                return (ENOMEM);
-       }
 
        /*
         * Prevent further allocations on this device.

Modified: releng/11.1/sys/vm/vm_meter.c
==============================================================================
--- releng/11.1/sys/vm/vm_meter.c       Wed Jul  5 19:24:38 2017        
(r320692)
+++ releng/11.1/sys/vm/vm_meter.c       Wed Jul  5 19:24:53 2017        
(r320693)
@@ -204,7 +204,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
                }
        }
        mtx_unlock(&vm_object_list_mtx);
-       total.t_free = vm_cnt.v_free_count + vm_cnt.v_cache_count;
+       total.t_free = vm_cnt.v_free_count;
        return (sysctl_handle_opaque(oidp, &total, sizeof(total), req));
 }
 

Modified: releng/11.1/sys/vm/vm_page.c
==============================================================================
--- releng/11.1/sys/vm/vm_page.c        Wed Jul  5 19:24:38 2017        
(r320692)
+++ releng/11.1/sys/vm/vm_page.c        Wed Jul  5 19:24:53 2017        
(r320693)
@@ -1561,11 +1561,11 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, 
         * for the request class.
         */
        mtx_lock(&vm_page_queue_free_mtx);
-       if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
+       if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
            (req_class == VM_ALLOC_SYSTEM &&
-           vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
+           vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
            (req_class == VM_ALLOC_INTERRUPT &&
-           vm_cnt.v_free_count + vm_cnt.v_cache_count > 0)) {
+           vm_cnt.v_free_count > 0)) {
                /*
                 * Can we allocate the page from a reservation?
                 */
@@ -1752,11 +1752,11 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t p
         * below the lower bound for the allocation class?
         */
        mtx_lock(&vm_page_queue_free_mtx);
-       if (vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
-           vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
-           vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
-           vm_cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
-           vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages)) {
+       if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved ||
+           (req_class == VM_ALLOC_SYSTEM &&
+           vm_cnt.v_free_count >= npages + vm_cnt.v_interrupt_free_min) ||
+           (req_class == VM_ALLOC_INTERRUPT &&
+           vm_cnt.v_free_count >= npages)) {
                /*
                 * Can we allocate the pages from a reservation?
                 */
@@ -1916,11 +1916,11 @@ vm_page_alloc_freelist(int flind, int req)
         * Do not allocate reserved pages unless the req has asked for it.
         */
        mtx_lock(&vm_page_queue_free_mtx);
-       if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
+       if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
            (req_class == VM_ALLOC_SYSTEM &&
-           vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
+           vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
            (req_class == VM_ALLOC_INTERRUPT &&
-           vm_cnt.v_free_count + vm_cnt.v_cache_count > 0))
+           vm_cnt.v_free_count > 0))
                m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
        else {
                mtx_unlock(&vm_page_queue_free_mtx);
@@ -2448,7 +2448,7 @@ vm_page_reclaim_contig(int req, u_long npages, vm_padd
         * Return if the number of free pages cannot satisfy the requested
         * allocation.
         */
-       count = vm_cnt.v_free_count + vm_cnt.v_cache_count;
+       count = vm_cnt.v_free_count;
        if (count < npages + vm_cnt.v_free_reserved || (count < npages +
            vm_cnt.v_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
            (count < npages && req_class == VM_ALLOC_INTERRUPT))
@@ -2731,7 +2731,7 @@ vm_page_free_wakeup(void)
         * some free.
         */
        if (vm_pageout_pages_needed &&
-           vm_cnt.v_cache_count + vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) {
+           vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) {
                wakeup(&vm_pageout_pages_needed);
                vm_pageout_pages_needed = 0;
        }
@@ -3570,8 +3570,8 @@ vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits
 
 DB_SHOW_COMMAND(page, vm_page_print_page_info)
 {
+
        db_printf("vm_cnt.v_free_count: %d\n", vm_cnt.v_free_count);
-       db_printf("vm_cnt.v_cache_count: %d\n", vm_cnt.v_cache_count);
        db_printf("vm_cnt.v_inactive_count: %d\n", vm_cnt.v_inactive_count);
        db_printf("vm_cnt.v_active_count: %d\n", vm_cnt.v_active_count);
        db_printf("vm_cnt.v_laundry_count: %d\n", vm_cnt.v_laundry_count);
@@ -3586,8 +3586,7 @@ DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
 {
        int dom;
 
-       db_printf("pq_free %d pq_cache %d\n",
-           vm_cnt.v_free_count, vm_cnt.v_cache_count);
+       db_printf("pq_free %d\n", vm_cnt.v_free_count);
        for (dom = 0; dom < vm_ndomains; dom++) {
                db_printf(
            "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d\n",

Modified: releng/11.1/sys/vm/vm_pageout.c
==============================================================================
--- releng/11.1/sys/vm/vm_pageout.c     Wed Jul  5 19:24:38 2017        
(r320692)
+++ releng/11.1/sys/vm/vm_pageout.c     Wed Jul  5 19:24:53 2017        
(r320693)
@@ -159,7 +159,7 @@ SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, k
 #define        VM_INACT_SCAN_RATE      2
 
 int vm_pageout_deficit;                /* Estimated number of pages deficit */
-int vm_pageout_wakeup_thresh;
+u_int vm_pageout_wakeup_thresh;
 static int vm_pageout_oom_seq = 12;
 bool vm_pageout_wanted;                /* Event on which pageout daemon sleeps */
 bool vm_pages_needed;          /* Are threads waiting for free pages? */

Modified: releng/11.1/sys/vm/vnode_pager.c
==============================================================================
--- releng/11.1/sys/vm/vnode_pager.c    Wed Jul  5 19:24:38 2017        
(r320692)
+++ releng/11.1/sys/vm/vnode_pager.c    Wed Jul  5 19:24:53 2017        
(r320693)
@@ -1124,8 +1124,7 @@ vnode_pager_putpages(vm_object_t object, vm_page_t *m,
         * daemon up.  This should be probably be addressed XXX.
         */
 
-       if (vm_cnt.v_free_count + vm_cnt.v_cache_count <
-           vm_cnt.v_pageout_free_min)
+       if (vm_cnt.v_free_count < vm_cnt.v_pageout_free_min)
                flags |= VM_PAGER_PUT_SYNC;
 
        /*
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscr...@freebsd.org"

Reply via email to