Module Name:    src
Committed By:   ad
Date:           Thu Jun 11 22:21:05 UTC 2020

Modified Files:
        src/sys/compat/linux/common: linux_misc.c
        src/sys/compat/linux32/common: linux32_sysinfo.c
        src/sys/kern: subr_cpu.c vfs_vnode.c vfs_vnops.c
        src/sys/miscfs/procfs: procfs_linux.c
        src/sys/sys: cpu_data.h
        src/sys/uvm: uvm_loan.c uvm_meter.c uvm_page.c uvm_pdaemon.c
            uvm_pdpolicy_clock.c uvm_stat.c
        src/usr.bin/vmstat: vmstat.c

Log Message:
Counter tweaks:

- Don't need to count anonpages+filepages any more; clean+unknown+dirty for
  each kind of page can be summed to get the totals.

- Track the number of free pages with a counter so that it's one less thing
  for the allocator to do, which opens up further options there.

- Remove cpu_count_sync_one().  It has no users and doesn't save a whole lot.
  For the cheap option, give cpu_count_sync() a boolean parameter indicating
  that a cached value is okay, and rate limit the updates for cached values
  to hz.


To generate a diff of this commit:
cvs rdiff -u -r1.250 -r1.251 src/sys/compat/linux/common/linux_misc.c
cvs rdiff -u -r1.12 -r1.13 src/sys/compat/linux32/common/linux32_sysinfo.c
cvs rdiff -u -r1.14 -r1.15 src/sys/kern/subr_cpu.c
cvs rdiff -u -r1.123 -r1.124 src/sys/kern/vfs_vnode.c
cvs rdiff -u -r1.212 -r1.213 src/sys/kern/vfs_vnops.c
cvs rdiff -u -r1.85 -r1.86 src/sys/miscfs/procfs/procfs_linux.c
cvs rdiff -u -r1.50 -r1.51 src/sys/sys/cpu_data.h
cvs rdiff -u -r1.103 -r1.104 src/sys/uvm/uvm_loan.c
cvs rdiff -u -r1.78 -r1.79 src/sys/uvm/uvm_meter.c
cvs rdiff -u -r1.239 -r1.240 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.128 -r1.129 src/sys/uvm/uvm_pdaemon.c
cvs rdiff -u -r1.38 -r1.39 src/sys/uvm/uvm_pdpolicy_clock.c
cvs rdiff -u -r1.44 -r1.45 src/sys/uvm/uvm_stat.c
cvs rdiff -u -r1.239 -r1.240 src/usr.bin/vmstat/vmstat.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/compat/linux/common/linux_misc.c
diff -u src/sys/compat/linux/common/linux_misc.c:1.250 src/sys/compat/linux/common/linux_misc.c:1.251
--- src/sys/compat/linux/common/linux_misc.c:1.250	Thu Jun 11 19:20:46 2020
+++ src/sys/compat/linux/common/linux_misc.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: linux_misc.c,v 1.250 2020/06/11 19:20:46 ad Exp $	*/
+/*	$NetBSD: linux_misc.c,v 1.251 2020/06/11 22:21:05 ad Exp $	*/
 
 /*-
  * Copyright (c) 1995, 1998, 1999, 2008 The NetBSD Foundation, Inc.
@@ -57,7 +57,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: linux_misc.c,v 1.250 2020/06/11 19:20:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux_misc.c,v 1.251 2020/06/11 22:21:05 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -1347,6 +1347,7 @@ linux_sys_sysinfo(struct lwp *l, const s
 	} */
 	struct linux_sysinfo si;
 	struct loadavg *la;
+	int64_t filepg;
 
 	memset(&si, 0, sizeof(si));
 	si.uptime = time_uptime;
@@ -1355,9 +1356,14 @@ linux_sys_sysinfo(struct lwp *l, const s
 	si.loads[1] = la->ldavg[1] * LINUX_SYSINFO_LOADS_SCALE / la->fscale;
 	si.loads[2] = la->ldavg[2] * LINUX_SYSINFO_LOADS_SCALE / la->fscale;
 	si.totalram = ctob((u_long)physmem);
+	/* uvm_availmem() may sync the counters. */
 	si.freeram = (u_long)uvm_availmem(true) * uvmexp.pagesize;
+	filepg = cpu_count_get(CPU_COUNT_FILECLEAN) +
+	    cpu_count_get(CPU_COUNT_FILEDIRTY) +
+	    cpu_count_get(CPU_COUNT_FILEUNKNOWN) -
+	    cpu_count_get(CPU_COUNT_EXECPAGES);
 	si.sharedram = 0;	/* XXX */
-	si.bufferram = (u_long)uvmexp.filepages * uvmexp.pagesize;
+	si.bufferram = (u_long)(filepg * uvmexp.pagesize);
 	si.totalswap = (u_long)uvmexp.swpages * uvmexp.pagesize;
 	si.freeswap = 
 	    (u_long)(uvmexp.swpages - uvmexp.swpginuse) * uvmexp.pagesize;

Index: src/sys/compat/linux32/common/linux32_sysinfo.c
diff -u src/sys/compat/linux32/common/linux32_sysinfo.c:1.12 src/sys/compat/linux32/common/linux32_sysinfo.c:1.13
--- src/sys/compat/linux32/common/linux32_sysinfo.c:1.12	Thu Jun 11 19:20:46 2020
+++ src/sys/compat/linux32/common/linux32_sysinfo.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: linux32_sysinfo.c,v 1.12 2020/06/11 19:20:46 ad Exp $ */
+/*	$NetBSD: linux32_sysinfo.c,v 1.13 2020/06/11 22:21:05 ad Exp $ */
 
 /*-
  * Copyright (c) 2006 Emmanuel Dreyfus, all rights reserved.
@@ -33,7 +33,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: linux32_sysinfo.c,v 1.12 2020/06/11 19:20:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: linux32_sysinfo.c,v 1.13 2020/06/11 22:21:05 ad Exp $");
 
 #include <sys/types.h>
 #include <sys/param.h>
@@ -69,6 +69,7 @@ linux32_sys_sysinfo(struct lwp *l, const
 	} */
 	struct linux32_sysinfo si;
 	struct loadavg *la;
+	int64_t filepg;
 
 	memset(&si, 0, sizeof(si));
 	si.uptime = time_uptime;
@@ -77,9 +78,14 @@ linux32_sys_sysinfo(struct lwp *l, const
 	si.loads[1] = la->ldavg[1] * LINUX_SYSINFO_LOADS_SCALE / la->fscale;
 	si.loads[2] = la->ldavg[2] * LINUX_SYSINFO_LOADS_SCALE / la->fscale;
 	si.totalram = ctob((u_long)physmem);
+	/* uvm_availmem() may sync the counters. */
 	si.freeram = (u_long)uvm_availmem(true) * uvmexp.pagesize;
+	filepg = cpu_count_get(CPU_COUNT_FILECLEAN) +
+	    cpu_count_get(CPU_COUNT_FILEDIRTY) +
+	    cpu_count_get(CPU_COUNT_FILEUNKNOWN) -
+	    cpu_count_get(CPU_COUNT_EXECPAGES);
 	si.sharedram = 0;	/* XXX */
-	si.bufferram = (u_long)uvmexp.filepages * uvmexp.pagesize;
+	si.bufferram = (u_long)(filepg * uvmexp.pagesize);
 	si.totalswap = (u_long)uvmexp.swpages * uvmexp.pagesize;
 	si.freeswap = 
 	    (u_long)(uvmexp.swpages - uvmexp.swpginuse) * uvmexp.pagesize;

Index: src/sys/kern/subr_cpu.c
diff -u src/sys/kern/subr_cpu.c:1.14 src/sys/kern/subr_cpu.c:1.15
--- src/sys/kern/subr_cpu.c:1.14	Thu Mar 26 19:23:18 2020
+++ src/sys/kern/subr_cpu.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_cpu.c,v 1.14 2020/03/26 19:23:18 ad Exp $	*/
+/*	$NetBSD: subr_cpu.c,v 1.15 2020/06/11 22:21:05 ad Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019, 2020
@@ -61,9 +61,10 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_cpu.c,v 1.14 2020/03/26 19:23:18 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_cpu.c,v 1.15 2020/06/11 22:21:05 ad Exp $");
 
 #include <sys/param.h>
+#include <sys/atomic.h>
 #include <sys/systm.h>
 #include <sys/sched.h>
 #include <sys/conf.h>
@@ -445,70 +446,56 @@ cpu_count(enum cpu_count idx, int64_t de
 
 /*
  * Fetch fresh sum total for all counts.  Expensive - don't call often.
+ *
+ * If poll is true, the caller is okay with less recent values (but no
+ * more than 1/hz seconds old).  Where this is called very often that
+ * should be the case.
+ *
+ * This should be reasonably quick so that any value collected isn't
+ * totally out of whack, and it can also be called from interrupt context,
+ * so go to splvm() while summing the counters.  It's tempting to use a spin
+ * mutex here but this routine is called from DDB.
  */
 void
-cpu_count_sync_all(void)
+cpu_count_sync(bool poll)
 {
 	CPU_INFO_ITERATOR cii;
 	struct cpu_info *ci;
 	int64_t sum[CPU_COUNT_MAX], *ptr;
+	static int lasttick;
+	int curtick, s;
 	enum cpu_count i;
-	int s;
 
 	KASSERT(sizeof(ci->ci_counts) == sizeof(cpu_counts));
 
-	if (__predict_true(mp_online)) {
-		memset(sum, 0, sizeof(sum));
-		/*
-		 * We want this to be reasonably quick, so any value we get
-		 * isn't totally out of whack, so don't let the current LWP
-		 * get preempted.
-		 */
-		s = splvm();
-		curcpu()->ci_counts[CPU_COUNT_SYNC_ALL]++;
-		for (CPU_INFO_FOREACH(cii, ci)) {
-			ptr = ci->ci_counts;
-			for (i = 0; i < CPU_COUNT_MAX; i += 8) {
-				sum[i+0] += ptr[i+0];
-				sum[i+1] += ptr[i+1];
-				sum[i+2] += ptr[i+2];
-				sum[i+3] += ptr[i+3];
-				sum[i+4] += ptr[i+4];
-				sum[i+5] += ptr[i+5];
-				sum[i+6] += ptr[i+6];
-				sum[i+7] += ptr[i+7];
-			}
-			KASSERT(i == CPU_COUNT_MAX);
-		}
-		memcpy(cpu_counts, sum, sizeof(cpu_counts));
-		splx(s);
-	} else {
+	if (__predict_false(!mp_online)) {
 		memcpy(cpu_counts, curcpu()->ci_counts, sizeof(cpu_counts));
+		return;
 	}
-}
 
-/*
- * Fetch a fresh sum total for one single count.  Expensive - don't call often.
- */
-int64_t
-cpu_count_sync(enum cpu_count count)
-{
-	CPU_INFO_ITERATOR cii;
-	struct cpu_info *ci;
-	int64_t sum;
-	int s;
-
-	if (__predict_true(mp_online)) {
-		s = splvm();
-		curcpu()->ci_counts[CPU_COUNT_SYNC_ONE]++;
-		sum = 0;
-		for (CPU_INFO_FOREACH(cii, ci)) {
-			sum += ci->ci_counts[count];
-		}
+	s = splvm();
+	curtick = getticks();
+	if (poll && atomic_load_acquire(&lasttick) == curtick) {
 		splx(s);
-	} else {
-		/* XXX Early boot, iterator might not be available. */
-		sum = curcpu()->ci_counts[count];
+		return;
 	}
-	return cpu_counts[count] = sum;
+	memset(sum, 0, sizeof(sum));
+	curcpu()->ci_counts[CPU_COUNT_SYNC]++;
+	for (CPU_INFO_FOREACH(cii, ci)) {
+		ptr = ci->ci_counts;
+		for (i = 0; i < CPU_COUNT_MAX; i += 8) {
+			sum[i+0] += ptr[i+0];
+			sum[i+1] += ptr[i+1];
+			sum[i+2] += ptr[i+2];
+			sum[i+3] += ptr[i+3];
+			sum[i+4] += ptr[i+4];
+			sum[i+5] += ptr[i+5];
+			sum[i+6] += ptr[i+6];
+			sum[i+7] += ptr[i+7];
+		}
+		KASSERT(i == CPU_COUNT_MAX);
+	}
+	memcpy(cpu_counts, sum, sizeof(cpu_counts));
+	atomic_store_release(&lasttick, curtick);
+	splx(s);
 }

Index: src/sys/kern/vfs_vnode.c
diff -u src/sys/kern/vfs_vnode.c:1.123 src/sys/kern/vfs_vnode.c:1.124
--- src/sys/kern/vfs_vnode.c:1.123	Tue May 26 18:38:37 2020
+++ src/sys/kern/vfs_vnode.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_vnode.c,v 1.123 2020/05/26 18:38:37 ad Exp $	*/
+/*	$NetBSD: vfs_vnode.c,v 1.124 2020/06/11 22:21:05 ad Exp $	*/
 
 /*-
  * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
@@ -148,7 +148,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.123 2020/05/26 18:38:37 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.124 2020/06/11 22:21:05 ad Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_pax.h"
@@ -900,7 +900,6 @@ vrelel(vnode_t *vp, int flags, int lktyp
 		if ((vp->v_iflag & VI_EXECMAP) != 0 &&
 		    vp->v_uobj.uo_npages != 0) {
 			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
-			cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
 		}
 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
 		vp->v_vflag &= ~VV_MAPPED;
@@ -1710,7 +1709,6 @@ vcache_reclaim(vnode_t *vp)
 	mutex_enter(vp->v_interlock);
 	if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
 		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
-		cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
 	}
 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
 	vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */

Index: src/sys/kern/vfs_vnops.c
diff -u src/sys/kern/vfs_vnops.c:1.212 src/sys/kern/vfs_vnops.c:1.213
--- src/sys/kern/vfs_vnops.c:1.212	Sat May 23 23:42:43 2020
+++ src/sys/kern/vfs_vnops.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: vfs_vnops.c,v 1.212 2020/05/23 23:42:43 ad Exp $	*/
+/*	$NetBSD: vfs_vnops.c,v 1.213 2020/06/11 22:21:05 ad Exp $	*/
 
 /*-
  * Copyright (c) 2009 The NetBSD Foundation, Inc.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.212 2020/05/23 23:42:43 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.213 2020/06/11 22:21:05 ad Exp $");
 
 #include "veriexec.h"
 
@@ -342,7 +342,6 @@ vn_markexec(struct vnode *vp)
 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
 	mutex_enter(vp->v_interlock);
 	if ((vp->v_iflag & VI_EXECMAP) == 0) {
-		cpu_count(CPU_COUNT_FILEPAGES, -vp->v_uobj.uo_npages);
 		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
 		vp->v_iflag |= VI_EXECMAP;
 	}
@@ -372,7 +371,6 @@ vn_marktext(struct vnode *vp)
 		return (ETXTBSY);
 	}
 	if ((vp->v_iflag & VI_EXECMAP) == 0) {
-		cpu_count(CPU_COUNT_FILEPAGES, -vp->v_uobj.uo_npages);
 		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
 	}
 	vp->v_iflag |= (VI_TEXT | VI_EXECMAP);

Index: src/sys/miscfs/procfs/procfs_linux.c
diff -u src/sys/miscfs/procfs/procfs_linux.c:1.85 src/sys/miscfs/procfs/procfs_linux.c:1.86
--- src/sys/miscfs/procfs/procfs_linux.c:1.85	Thu Jun 11 19:20:46 2020
+++ src/sys/miscfs/procfs/procfs_linux.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*      $NetBSD: procfs_linux.c,v 1.85 2020/06/11 19:20:46 ad Exp $      */
+/*      $NetBSD: procfs_linux.c,v 1.86 2020/06/11 22:21:05 ad Exp $      */
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: procfs_linux.c,v 1.85 2020/06/11 19:20:46 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: procfs_linux.c,v 1.86 2020/06/11 22:21:05 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -142,10 +142,15 @@ procfs_domeminfo(struct lwp *curl, struc
 
 	bf = malloc(LBFSZ, M_TEMP, M_WAITOK);
 
-	cpu_count_sync_all();
+	/* uvm_availmem() will sync the counters if needed. */
 	freepg = (long)uvm_availmem(true);
-	filepg = (long)cpu_count_get(CPU_COUNT_FILEPAGES);
-	anonpg = (long)cpu_count_get(CPU_COUNT_ANONPAGES);
+	filepg = (long)(cpu_count_get(CPU_COUNT_FILECLEAN) +
+	    cpu_count_get(CPU_COUNT_FILEDIRTY) + 
+	    cpu_count_get(CPU_COUNT_FILEUNKNOWN) -
+	    cpu_count_get(CPU_COUNT_EXECPAGES));
+	anonpg = (long)(cpu_count_get(CPU_COUNT_ANONCLEAN) +
+	    cpu_count_get(CPU_COUNT_ANONDIRTY) + 
+	    cpu_count_get(CPU_COUNT_ANONUNKNOWN));
 	execpg = (long)cpu_count_get(CPU_COUNT_EXECPAGES);
 
 	len = snprintf(bf, LBFSZ,
@@ -296,7 +301,7 @@ procfs_docpustat(struct lwp *curl, struc
 		i += 1;
 	}
 
-	cpu_count_sync_all();
+	cpu_count_sync(true);
 
 	struct timeval btv;
 	getmicroboottime(&btv);

Index: src/sys/sys/cpu_data.h
diff -u src/sys/sys/cpu_data.h:1.50 src/sys/sys/cpu_data.h:1.51
--- src/sys/sys/cpu_data.h:1.50	Sun Mar 22 18:32:42 2020
+++ src/sys/sys/cpu_data.h	Thu Jun 11 22:21:05 2020
@@ -1,7 +1,7 @@
-/*	$NetBSD: cpu_data.h,v 1.50 2020/03/22 18:32:42 ad Exp $	*/
+/*	$NetBSD: cpu_data.h,v 1.51 2020/06/11 22:21:05 ad Exp $	*/
 
 /*-
- * Copyright (c) 2004, 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2004, 2006, 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -55,22 +55,22 @@ enum cpu_count {
 	CPU_COUNT_FORKS,
 	CPU_COUNT_FORKS_PPWAIT,
 	CPU_COUNT_FORKS_SHAREVM,
-	CPU_COUNT_ANONPAGES,		/* 8 */
-	CPU_COUNT_COLORHIT,
+	CPU_COUNT_COLORHIT,		/* 8 */
 	CPU_COUNT_COLORMISS,
+	CPU_COUNT_PGA_ZEROHIT,
+	CPU_COUNT_PGA_ZEROMISS,
 	CPU_COUNT_CPUHIT,
 	CPU_COUNT_CPUMISS,
-	CPU_COUNT_EXECPAGES,
-	CPU_COUNT_FILEPAGES,
-	CPU_COUNT_PGA_ZEROHIT,
-	CPU_COUNT_PGA_ZEROMISS,		/* 16 */
+	CPU_COUNT_FREEPAGES,
 	CPU_COUNT_ZEROPAGES,
-	CPU_COUNT_PAGEINS,
-	CPU_COUNT_SYNC_ONE,
-	CPU_COUNT_SYNC_ALL,
+	CPU_COUNT_PAGEINS,		/* 16 */
+	CPU_COUNT_FLTUP,
+	CPU_COUNT_FLTNOUP,
 	CPU_COUNT_FLTPGWAIT,
 	CPU_COUNT_FLTRELCK,
 	CPU_COUNT_FLTRELCKOK,
+	CPU_COUNT__UNUSED1,
+	CPU_COUNT__UNUSED2,
 	CPU_COUNT_NFAULT,		/* 24 */
 	CPU_COUNT_FLT_ACOW,
 	CPU_COUNT_FLT_ANON,
@@ -93,8 +93,8 @@ enum cpu_count {
 	CPU_COUNT_FILEUNKNOWN,
 	CPU_COUNT_FILECLEAN,
 	CPU_COUNT_FILEDIRTY,
-	CPU_COUNT_FLTUP,
-	CPU_COUNT_FLTNOUP,
+	CPU_COUNT_EXECPAGES,
+	CPU_COUNT_SYNC,
 	CPU_COUNT_MAX			/* 48 */
 };
 
@@ -249,8 +249,6 @@ cpu_count_get(enum cpu_count idx)
 }
 
 void	cpu_count(enum cpu_count, int64_t);
-int64_t	cpu_count_get(enum cpu_count);
-int64_t	cpu_count_sync(enum cpu_count);
-void	cpu_count_sync_all(void);
+void	cpu_count_sync(bool);
 
 #endif /* _SYS_CPU_DATA_H_ */

Index: src/sys/uvm/uvm_loan.c
diff -u src/sys/uvm/uvm_loan.c:1.103 src/sys/uvm/uvm_loan.c:1.104
--- src/sys/uvm/uvm_loan.c:1.103	Wed May 20 18:37:50 2020
+++ src/sys/uvm/uvm_loan.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_loan.c,v 1.103 2020/05/20 18:37:50 ad Exp $	*/
+/*	$NetBSD: uvm_loan.c,v 1.104 2020/06/11 22:21:05 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.103 2020/05/20 18:37:50 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.104 2020/06/11 22:21:05 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -1219,9 +1219,7 @@ uvm_loanbreak_anon(struct vm_anon *anon,
 
 	/* done! */
 	kpreempt_disable();
-	if (uobj != NULL) {
-		CPU_COUNT(CPU_COUNT_ANONPAGES, 1);
-	} else {
+	if (uobj == NULL) {
 		CPU_COUNT(CPU_COUNT_ANONUNKNOWN + oldstatus, -1);
 	}
 	CPU_COUNT(CPU_COUNT_ANONDIRTY, 1);

Index: src/sys/uvm/uvm_meter.c
diff -u src/sys/uvm/uvm_meter.c:1.78 src/sys/uvm/uvm_meter.c:1.79
--- src/sys/uvm/uvm_meter.c:1.78	Thu Jun 11 19:20:47 2020
+++ src/sys/uvm/uvm_meter.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_meter.c,v 1.78 2020/06/11 19:20:47 ad Exp $	*/
+/*	$NetBSD: uvm_meter.c,v 1.79 2020/06/11 22:21:05 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.78 2020/06/11 19:20:47 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.79 2020/06/11 22:21:05 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -97,17 +97,16 @@ sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
 	struct uvmexp_sysctl u;
 	int active, inactive;
 
-	cpu_count_sync_all();
 	uvm_estimatepageable(&active, &inactive);
 
 	memset(&u, 0, sizeof(u));
 
-	/* Entries here are in order of uvmexp_sysctl, not uvmexp */
+	/* uvm_availmem() will sync the counters if old. */
+	u.free = uvm_availmem(true);
 	u.pagesize = uvmexp.pagesize;
 	u.pagemask = uvmexp.pagemask;
 	u.pageshift = uvmexp.pageshift;
 	u.npages = uvmexp.npages;
-	u.free = uvm_availmem(true);
 	u.active = active;
 	u.inactive = inactive;
 	u.paging = uvmexp.paging;
@@ -170,16 +169,13 @@ sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
 	u.pdpageouts = uvmexp.pdpageouts;
 	u.pdpending = uvmexp.pdpending;
 	u.pddeact = uvmexp.pddeact;
-	u.anonpages = cpu_count_get(CPU_COUNT_ANONPAGES);
-	u.filepages = cpu_count_get(CPU_COUNT_FILEPAGES);
 	u.execpages = cpu_count_get(CPU_COUNT_EXECPAGES);
 	u.colorhit = cpu_count_get(CPU_COUNT_COLORHIT);
 	u.colormiss = cpu_count_get(CPU_COUNT_COLORMISS);
 	u.ncolors = uvmexp.ncolors;
 	u.bootpages = uvmexp.bootpages;
 	u.poolpages = pool_totalpages();
-	u.countsyncone = cpu_count_get(CPU_COUNT_SYNC_ONE);
-	u.countsyncall = cpu_count_get(CPU_COUNT_SYNC_ALL);
+	u.countsyncall = cpu_count_get(CPU_COUNT_SYNC);
 	u.anonunknown = cpu_count_get(CPU_COUNT_ANONUNKNOWN);
 	u.anonclean = cpu_count_get(CPU_COUNT_ANONCLEAN);
 	u.anondirty = cpu_count_get(CPU_COUNT_ANONDIRTY);
@@ -188,6 +184,8 @@ sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
 	u.filedirty = cpu_count_get(CPU_COUNT_FILEDIRTY);
 	u.fltup = cpu_count_get(CPU_COUNT_FLTUP);
 	u.fltnoup = cpu_count_get(CPU_COUNT_FLTNOUP);
+	u.anonpages = u.anonclean + u.anondirty + u.anonunknown;
+	u.filepages = u.fileclean + u.filedirty + u.fileunknown - u.execpages;
 
 	node = *rnode;
 	node.sysctl_data = &u;
@@ -455,8 +453,7 @@ void
 uvm_update_uvmexp(void)
 {
 
-	cpu_count_sync_all();
-
+	/* uvm_availmem() will sync the counters if old. */
 	uvmexp.free = (int)uvm_availmem(true);
 	uvmexp.zeropages = (int)cpu_count_get(CPU_COUNT_ZEROPAGES);
 	uvmexp.cpuhit = (int)cpu_count_get(CPU_COUNT_CPUHIT);
@@ -491,8 +488,13 @@ uvm_update_uvmexp(void)
 	uvmexp.flt_obj = (int)cpu_count_get(CPU_COUNT_FLT_OBJ);
 	uvmexp.flt_prcopy = (int)cpu_count_get(CPU_COUNT_FLT_PRCOPY);
 	uvmexp.flt_przero = (int)cpu_count_get(CPU_COUNT_FLT_PRZERO);
-	uvmexp.anonpages = (int)cpu_count_get(CPU_COUNT_ANONPAGES);
-	uvmexp.filepages = (int)cpu_count_get(CPU_COUNT_FILEPAGES);
+	uvmexp.anonpages = (int)(cpu_count_get(CPU_COUNT_ANONCLEAN) +
+	    cpu_count_get(CPU_COUNT_ANONDIRTY) +
+	    cpu_count_get(CPU_COUNT_ANONUNKNOWN));
+    	uvmexp.filepages = (int)(cpu_count_get(CPU_COUNT_FILECLEAN) +
+	    cpu_count_get(CPU_COUNT_FILEDIRTY) +
+	    cpu_count_get(CPU_COUNT_FILEUNKNOWN) -
+	    cpu_count_get(CPU_COUNT_EXECPAGES));
 	uvmexp.execpages = (int)cpu_count_get(CPU_COUNT_EXECPAGES);
 	uvmexp.colorhit = (int)cpu_count_get(CPU_COUNT_COLORHIT);
 	uvmexp.colormiss = (int)cpu_count_get(CPU_COUNT_COLORMISS);

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.239 src/sys/uvm/uvm_page.c:1.240
--- src/sys/uvm/uvm_page.c:1.239	Thu Jun 11 19:20:47 2020
+++ src/sys/uvm/uvm_page.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.239 2020/06/11 19:20:47 ad Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.240 2020/06/11 22:21:05 ad Exp $	*/
 
 /*-
  * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.239 2020/06/11 19:20:47 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.240 2020/06/11 22:21:05 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
@@ -230,19 +230,13 @@ uvm_pageinsert_object(struct uvm_object 
 				vholdl(vp);
 				mutex_exit(vp->v_interlock);
 			}
-			kpreempt_disable();
 			if (UVM_OBJ_IS_VTEXT(uobj)) {
-				CPU_COUNT(CPU_COUNT_EXECPAGES, 1);
-			} else {
-				CPU_COUNT(CPU_COUNT_FILEPAGES, 1);
+				cpu_count(CPU_COUNT_EXECPAGES, 1);
 			}
-			CPU_COUNT(CPU_COUNT_FILEUNKNOWN + status, 1);
+			cpu_count(CPU_COUNT_FILEUNKNOWN + status, 1);
 		} else {
-			kpreempt_disable();
-			CPU_COUNT(CPU_COUNT_ANONPAGES, 1);
-			CPU_COUNT(CPU_COUNT_ANONUNKNOWN + status, 1);
+			cpu_count(CPU_COUNT_ANONUNKNOWN + status, 1);
 		}
-		kpreempt_enable();
 	}
 	pg->flags |= PG_TABLED;
 	uobj->uo_npages++;
@@ -293,19 +287,13 @@ uvm_pageremove_object(struct uvm_object 
 				holdrelel(vp);
 				mutex_exit(vp->v_interlock);
 			}
-			kpreempt_disable();
 			if (UVM_OBJ_IS_VTEXT(uobj)) {
-				CPU_COUNT(CPU_COUNT_EXECPAGES, -1);
-			} else {
-				CPU_COUNT(CPU_COUNT_FILEPAGES, -1);
+				cpu_count(CPU_COUNT_EXECPAGES, -1);
 			}
-			CPU_COUNT(CPU_COUNT_FILEUNKNOWN + status, -1);
+			cpu_count(CPU_COUNT_FILEUNKNOWN + status, -1);
 		} else {
-			kpreempt_disable();
-			CPU_COUNT(CPU_COUNT_ANONPAGES, -1);
-			CPU_COUNT(CPU_COUNT_ANONUNKNOWN + status, -1);
+			cpu_count(CPU_COUNT_ANONUNKNOWN + status, -1);
 		}
-		kpreempt_enable();
 	}
 	uobj->uo_npages--;
 	pg->flags &= ~PG_TABLED;
@@ -1015,22 +1003,28 @@ uvm_cpu_attach(struct cpu_info *ci)
  * uvm_availmem: fetch the total amount of free memory in pages.  this can
  * have a detrimental effect on performance due to false sharing; don't call
  * unless needed.
+ *
+ * some users can request the amount of free memory so often that it begins
+ * to impact upon performance.  if calling frequently and an inexact value
+ * is okay, call with cached = true.
  */
 
 int
 uvm_availmem(bool cached)
 {
-	struct pgfreelist *pgfl;
-	int fl, b, fpages;
+	int64_t fp;
 
-	fpages = 0;
-	for (fl = 0; fl < VM_NFREELIST; fl++) {
-		pgfl = &uvm.page_free[fl];
-		for (b = 0; b < uvm.bucketcount; b++) {
-			fpages += pgfl->pgfl_buckets[b]->pgb_nfree;
-		}
+	cpu_count_sync(cached);
+	if ((fp = cpu_count_get(CPU_COUNT_FREEPAGES)) < 0) {
+		/*
+		 * XXXAD could briefly go negative because it's impossible
+		 * to get a clean snapshot.  address this for other counters
+		 * used as running totals before NetBSD 10 although less
+		 * important for those.
+		 */
+		fp = 0;
 	}
-	return fpages;
+	return (int)fp;
 }
 
 /*
@@ -1290,6 +1284,7 @@ uvm_pagealloc_strat(struct uvm_object *o
 	 * if we have to zero the page
 	 */
 
+    	CPU_COUNT(CPU_COUNT_FREEPAGES, -1);
 	if (flags & UVM_PGA_ZERO) {
 		if (pg->flags & PG_ZERO) {
 		    	CPU_COUNT(CPU_COUNT_PGA_ZEROHIT, 1);
@@ -1303,7 +1298,6 @@ uvm_pagealloc_strat(struct uvm_object *o
 	    	CPU_COUNT(CPU_COUNT_ZEROPAGES, -1);
 	}
 	if (anon) {
-		CPU_COUNT(CPU_COUNT_ANONPAGES, 1);
 		CPU_COUNT(CPU_COUNT_ANONCLEAN, 1);
 	}
 	splx(s);
@@ -1547,8 +1541,9 @@ uvm_pagefree(struct vm_page *pg)
 			if ((pg->flags & PG_ANON) == 0) {
 				pg->loan_count--;
 			} else {
+				const unsigned status = uvm_pagegetdirty(pg);
 				pg->flags &= ~PG_ANON;
-				cpu_count(CPU_COUNT_ANONPAGES, -1);
+				cpu_count(CPU_COUNT_ANONUNKNOWN + status, -1);
 			}
 			pg->uanon->an_page = NULL;
 			pg->uanon = NULL;
@@ -1587,10 +1582,7 @@ uvm_pagefree(struct vm_page *pg)
 		const unsigned int status = uvm_pagegetdirty(pg);
 		pg->uanon->an_page = NULL;
 		pg->uanon = NULL;
-		kpreempt_disable();
-		CPU_COUNT(CPU_COUNT_ANONPAGES, -1);
-		CPU_COUNT(CPU_COUNT_ANONUNKNOWN + status, -1);
-		kpreempt_enable();
+		cpu_count(CPU_COUNT_ANONUNKNOWN + status, -1);
 	}
 
 	/*
@@ -1632,6 +1624,7 @@ uvm_pagefree(struct vm_page *pg)
 
 	/* Try to send the page to the per-CPU cache. */
 	s = splvm();
+    	CPU_COUNT(CPU_COUNT_FREEPAGES, 1);
 	if (pg->flags & PG_ZERO) {
 	    	CPU_COUNT(CPU_COUNT_ZEROPAGES, 1);
 	}

Index: src/sys/uvm/uvm_pdaemon.c
diff -u src/sys/uvm/uvm_pdaemon.c:1.128 src/sys/uvm/uvm_pdaemon.c:1.129
--- src/sys/uvm/uvm_pdaemon.c:1.128	Thu Jun 11 19:20:47 2020
+++ src/sys/uvm/uvm_pdaemon.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pdaemon.c,v 1.128 2020/06/11 19:20:47 ad Exp $	*/
+/*	$NetBSD: uvm_pdaemon.c,v 1.129 2020/06/11 22:21:05 ad Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.128 2020/06/11 19:20:47 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.129 2020/06/11 22:21:05 ad Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -952,6 +952,7 @@ uvm_reclaimable(void)
 	/*
 	 * file-backed pages can be reclaimed even when swap is full.
 	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
+	 * NB: filepages calculation does not exclude EXECPAGES - intentional.
 	 *
 	 * XXX assume the worst case, ie. all wired pages are file-backed.
 	 *
@@ -959,9 +960,10 @@ uvm_reclaimable(void)
 	 * XXX ie. pools, traditional buffer cache.
 	 */
 
-	cpu_count_sync_all();
-	filepages = (int)cpu_count_get(CPU_COUNT_FILEPAGES) +
-	    (int)cpu_count_get(CPU_COUNT_EXECPAGES) - uvmexp.wired;
+	cpu_count_sync(false);
+	filepages = (int)(cpu_count_get(CPU_COUNT_FILECLEAN) +
+	    cpu_count_get(CPU_COUNT_FILEUNKNOWN) +
+	    cpu_count_get(CPU_COUNT_FILEDIRTY) - uvmexp.wired);
 	uvm_estimatepageable(&active, &inactive);
 	if (filepages >= MIN((active + inactive) >> 4,
 	    5 * 1024 * 1024 >> PAGE_SHIFT)) {

Index: src/sys/uvm/uvm_pdpolicy_clock.c
diff -u src/sys/uvm/uvm_pdpolicy_clock.c:1.38 src/sys/uvm/uvm_pdpolicy_clock.c:1.39
--- src/sys/uvm/uvm_pdpolicy_clock.c:1.38	Thu Jun 11 19:20:47 2020
+++ src/sys/uvm/uvm_pdpolicy_clock.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.38 2020/06/11 19:20:47 ad Exp $	*/
+/*	$NetBSD: uvm_pdpolicy_clock.c,v 1.39 2020/06/11 22:21:05 ad Exp $	*/
 /*	NetBSD: uvm_pdaemon.c,v 1.72 2006/01/05 10:47:33 yamt Exp $	*/
 
 /*-
@@ -98,7 +98,7 @@
 #else /* defined(PDSIM) */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.38 2020/06/11 19:20:47 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdpolicy_clock.c,v 1.39 2020/06/11 22:21:05 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -193,13 +193,18 @@ uvmpdpol_scaninit(void)
 	/*
 	 * decide which types of pages we want to reactivate instead of freeing
 	 * to keep usage within the minimum and maximum usage limits.
+	 * uvm_availmem() will sync the counters.
 	 */
 
-	cpu_count_sync_all();
 	freepg = uvm_availmem(false);
-	anonpg = cpu_count_get(CPU_COUNT_ANONPAGES);
-	filepg = cpu_count_get(CPU_COUNT_FILEPAGES);
+	anonpg = cpu_count_get(CPU_COUNT_ANONCLEAN) +
+	    cpu_count_get(CPU_COUNT_ANONDIRTY) +
+	    cpu_count_get(CPU_COUNT_ANONUNKNOWN);
 	execpg = cpu_count_get(CPU_COUNT_EXECPAGES);
+	filepg = cpu_count_get(CPU_COUNT_FILECLEAN) +
+	    cpu_count_get(CPU_COUNT_FILEDIRTY) +
+	    cpu_count_get(CPU_COUNT_FILEUNKNOWN) -
+	    execpg;
 
 	mutex_enter(&s->lock);
 	t = s->s_active + s->s_inactive + freepg;

Index: src/sys/uvm/uvm_stat.c
diff -u src/sys/uvm/uvm_stat.c:1.44 src/sys/uvm/uvm_stat.c:1.45
--- src/sys/uvm/uvm_stat.c:1.44	Thu Jun 11 19:20:47 2020
+++ src/sys/uvm/uvm_stat.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_stat.c,v 1.44 2020/06/11 19:20:47 ad Exp $	 */
+/*	$NetBSD: uvm_stat.c,v 1.45 2020/06/11 22:21:05 ad Exp $	 */
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_stat.c,v 1.44 2020/06/11 19:20:47 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_stat.c,v 1.45 2020/06/11 22:21:05 ad Exp $");
 
 #include "opt_readahead.h"
 #include "opt_ddb.h"
@@ -55,23 +55,33 @@ void
 uvmexp_print(void (*pr)(const char *, ...)
     __attribute__((__format__(__printf__,1,2))))
 {
+	int64_t anonpg, execpg, filepg;
 	int active, inactive;
-	int poolpages;
+	int poolpages, freepg;
 
 	uvm_estimatepageable(&active, &inactive);
 	poolpages = pool_totalpages_locked();
 
-	cpu_count_sync_all();
+	/* this will sync all counters. */
+	freepg = uvm_availmem(false);
+
+	anonpg = cpu_count_get(CPU_COUNT_ANONCLEAN) +
+	    cpu_count_get(CPU_COUNT_ANONDIRTY) +
+	    cpu_count_get(CPU_COUNT_ANONUNKNOWN);
+	execpg = cpu_count_get(CPU_COUNT_EXECPAGES);
+	filepg = cpu_count_get(CPU_COUNT_FILECLEAN) +
+	    cpu_count_get(CPU_COUNT_FILEDIRTY) +
+	    cpu_count_get(CPU_COUNT_FILEUNKNOWN) -
+	    execpg;
+
 	(*pr)("Current UVM status:\n");
 	(*pr)("  pagesize=%d (0x%x), pagemask=0x%x, pageshift=%d, ncolors=%d\n",
 	    uvmexp.pagesize, uvmexp.pagesize, uvmexp.pagemask,
 	    uvmexp.pageshift, uvmexp.ncolors);
 	(*pr)("  %d VM pages: %d active, %d inactive, %d wired, %d free\n",
-	    uvmexp.npages, active, inactive, uvmexp.wired, uvm_availmem(false));
+	    uvmexp.npages, active, inactive, uvmexp.wired, freepg);
 	(*pr)("  pages  %" PRId64 " anon, %" PRId64 " file, %" PRId64 " exec\n",
-	    cpu_count_get(CPU_COUNT_ANONPAGES),
-	    cpu_count_get(CPU_COUNT_FILEPAGES),
-	    cpu_count_get(CPU_COUNT_EXECPAGES));
+	    anonpg, filepg, execpg);
 	(*pr)("  freemin=%d, free-target=%d, wired-max=%d\n",
 	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax);
 	(*pr)("  resv-pg=%d, resv-kernel=%d, zeropages=%" PRId64 "\n",

Index: src/usr.bin/vmstat/vmstat.c
diff -u src/usr.bin/vmstat/vmstat.c:1.239 src/usr.bin/vmstat/vmstat.c:1.240
--- src/usr.bin/vmstat/vmstat.c:1.239	Mon Mar 23 18:44:17 2020
+++ src/usr.bin/vmstat/vmstat.c	Thu Jun 11 22:21:05 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: vmstat.c,v 1.239 2020/03/23 18:44:17 ad Exp $ */
+/* $NetBSD: vmstat.c,v 1.240 2020/06/11 22:21:05 ad Exp $ */
 
 /*-
  * Copyright (c) 1998, 2000, 2001, 2007, 2019, 2020
@@ -71,7 +71,7 @@ __COPYRIGHT("@(#) Copyright (c) 1980, 19
 #if 0
 static char sccsid[] = "@(#)vmstat.c	8.2 (Berkeley) 3/1/95";
 #else
-__RCSID("$NetBSD: vmstat.c,v 1.239 2020/03/23 18:44:17 ad Exp $");
+__RCSID("$NetBSD: vmstat.c,v 1.240 2020/06/11 22:21:05 ad Exp $");
 #endif
 #endif /* not lint */
 
@@ -1090,8 +1090,7 @@ dosum(void)
 	(void)printf("%9" PRIu64 " pages found busy by daemon\n", uvmexp.pdbusy);
 	(void)printf("%9" PRIu64 " total pending pageouts\n", uvmexp.pdpending);
 	(void)printf("%9" PRIu64 " pages deactivated\n", uvmexp.pddeact);
-	(void)printf("%9" PRIu64 " per-cpu stats one synced\n", uvmexp.countsyncone);
-	(void)printf("%9" PRIu64 " per-cpu stats all synced\n", uvmexp.countsyncall);
+	(void)printf("%9" PRIu64 " per-cpu stats synced\n", uvmexp.countsyncall);
 	(void)printf("%9" PRIu64 " anon pages possibly dirty\n", uvmexp.anonunknown);
 	(void)printf("%9" PRIu64 " anon pages dirty\n", uvmexp.anondirty);
 	(void)printf("%9" PRIu64 " anon pages clean\n", uvmexp.anonclean);

Reply via email to