On 12/11/16(Sat) 15:52, patrick keshishian wrote:
> Ahh... seems the culprit is softclock_thread added 2016/09/22
> (kern/kern_timeout.c mpi@).

I'd suggest we simply skip kernel threads when calculating the load.

Since we're slowly moving code executed in software interrupt
context to kernel threads, this will keep the original behavior.
The per-CPU idle procs are kernel threads as well, so the explicit
spc_idleproc check becomes redundant and can go.
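
For reference, here is a minimal userland sketch of the fixed-point
exponential decay that uvm_loadav() applies every 5 seconds; FSHIFT,
FSCALE and the decay constants mirror sys/param.h and uvm_meter.c,
while the nrun samples are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define FSHIFT	11			/* bits right of the binary point */
#define FSCALE	(1 << FSHIFT)

typedef uint64_t fixpt_t;

/* exp(-5s/1min), exp(-5s/5min), exp(-5s/15min), in fixed point */
static const fixpt_t cexp[3] = {
	(fixpt_t)(0.9200444146293232 * FSCALE),
	(fixpt_t)(0.9834714538216174 * FSCALE),
	(fixpt_t)(0.9944598480048967 * FSCALE),
};

int
main(void)
{
	fixpt_t ldavg[3] = { 0, 0, 0 };
	int nrun[] = { 4, 4, 3, 0, 0, 1, 2, 2 };	/* made-up samples */
	unsigned i, t;

	for (t = 0; t < sizeof(nrun) / sizeof(nrun[0]); t++) {
		/* same update as the loop at the end of uvm_loadav() */
		for (i = 0; i < 3; i++)
			ldavg[i] = (cexp[i] * ldavg[i] +
			    nrun[t] * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
		printf("t=%3us load: %.2f %.2f %.2f\n", t * 5,
		    (double)ldavg[0] / FSCALE,
		    (double)ldavg[1] / FSCALE,
		    (double)ldavg[2] / FSCALE);
	}
	return 0;
}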

ok?

Index: uvm/uvm_meter.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_meter.c,v
retrieving revision 1.36
diff -u -p -r1.36 uvm_meter.c
--- uvm/uvm_meter.c     14 Mar 2015 03:38:53 -0000      1.36
+++ uvm/uvm_meter.c     14 Nov 2016 12:08:11 -0000
@@ -107,6 +107,9 @@ uvm_loadav(struct loadavg *avg)
        memset(nrun_cpu, 0, sizeof(nrun_cpu));
 
        LIST_FOREACH(p, &allproc, p_list) {
+               if (p->p_flag & P_SYSTEM)
+                       continue;
+
                switch (p->p_stat) {
                case SSLEEP:
                        if (p->p_priority > PZERO || p->p_slptime > 1)
@@ -114,8 +117,6 @@ uvm_loadav(struct loadavg *avg)
                /* FALLTHROUGH */
                case SRUN:
                case SONPROC:
-                       if (p == p->p_cpu->ci_schedstate.spc_idleproc)
-                               continue;
                case SIDL:
                        nrun++;
                        if (p->p_cpu)
@@ -136,7 +137,7 @@ uvm_loadav(struct loadavg *avg)
                spc->spc_ldavg = (cexp[0] * spc->spc_ldavg +
                    nrun_cpu[CPU_INFO_UNIT(ci)] * FSCALE *
                    (FSCALE - cexp[0])) >> FSHIFT;
-       }               
+       }
 }
 
 /*
