On Fri, Jul 28, 2023 at 07:36:41PM -0500, Scott Cheloha wrote:
> claudio@ notes that uvm_loadav() pointlessly walks the allproc list to
> recompute schedstate_percpu.spc_nrun for each CPU.
>
> We can just use the value instead of recomputing it.
Whoops, off-by-one.

The current load averaging code includes the running thread in the
nrun count if it is *not* the idle thread.

Index: uvm_meter.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_meter.c,v
retrieving revision 1.44
diff -u -p -r1.44 uvm_meter.c
--- uvm_meter.c	21 Jun 2023 21:16:21 -0000	1.44
+++ uvm_meter.c	31 Jul 2023 15:20:37 -0000
@@ -102,43 +102,29 @@ uvm_loadav(struct loadavg *avg)
 {
 	CPU_INFO_ITERATOR cii;
 	struct cpu_info *ci;
-	int i, nrun;
-	struct proc *p;
-	int nrun_cpu[MAXCPUS];
+	struct schedstate_percpu *spc;
+	u_int i, nrun = 0, nrun_cpu;
+	int s;
 
-	nrun = 0;
-	memset(nrun_cpu, 0, sizeof(nrun_cpu));
-	LIST_FOREACH(p, &allproc, p_list) {
-		switch (p->p_stat) {
-		case SSTOP:
-		case SSLEEP:
-			break;
-		case SRUN:
-		case SONPROC:
-			if (p == p->p_cpu->ci_schedstate.spc_idleproc)
-				continue;
-			/* FALLTHROUGH */
-		case SIDL:
-			nrun++;
-			if (p->p_cpu)
-				nrun_cpu[CPU_INFO_UNIT(p->p_cpu)]++;
-		}
+	SCHED_LOCK(s);
+	CPU_INFO_FOREACH(cii, ci) {
+		spc = &ci->ci_schedstate;
+		nrun_cpu = spc->spc_nrun;
+		if (ci->ci_curproc != spc->spc_idleproc)
+			nrun_cpu++;
+		if (nrun_cpu == 0)
+			continue;
+		spc->spc_ldavg = (cexp[0] * spc->spc_ldavg +
+		    nrun_cpu * FSCALE *
+		    (FSCALE - cexp[0])) >> FSHIFT;
+		nrun += nrun_cpu;
 	}
+	SCHED_UNLOCK(s);
 
 	for (i = 0; i < 3; i++) {
 		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
 		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
-	}
-
-	CPU_INFO_FOREACH(cii, ci) {
-		struct schedstate_percpu *spc = &ci->ci_schedstate;
-
-		if (nrun_cpu[CPU_INFO_UNIT(ci)] == 0)
-			continue;
-		spc->spc_ldavg = (cexp[0] * spc->spc_ldavg +
-		    nrun_cpu[CPU_INFO_UNIT(ci)] * FSCALE *
-		    (FSCALE - cexp[0])) >> FSHIFT;
 	}
 }