On Wed, Jul 19, 2023 at 05:09:04AM +0000, Mike Larkin wrote:
> On Tue, Jul 18, 2023 at 08:21:41AM -0500, Scott Cheloha wrote:
> > This patch moves the profil(2)- and GPROF-specific parts of
> > statclock() out into separate clock interrupt routines. The
> > profil(2) part moves into profclock() and is enabled/disabled as
> > needed during mi_switch(). The GPROF part moves into gmonclock() and
> > is enabled/disabled as needed via sysctl(2).
> >
> > Moving those parts out of statclock() eliminates the need for an
> > effective statclock frequency, so we can delete all the junk related
> > to that: psratio/psdiv/pscnt and the corresponding members of
> > schedstate_percpu, clockintr_setstatclockrate(), and a bunch of other
> > clockintr-internal code.
> >
> > In separate commits I have addressed:
> >
> > - General GPROF instability on amd64
> > - GPROF causing a crash during suspend/resume
> > - CTASSERT breakage on amd64 related to schedstate_percpu
> > changes in this patch
> >
> > This has been kicking around for over two months. Personally, I have
> > tested it on amd64, arm64, macppc, octeon, and sparc64.
> >
> > Compile- and boot-tests on other platforms (alpha, i386, luna88k,
> > riscv64, sh) would be appreciated, but the last time I asked for tests
> > I got zero reports back.
>
> i386 compiles and boots.

Great!

> as reported in separate mail, riscv64 doesn't compile.

I think we're missing a 'struct user' definition on riscv64. Can you
try this?
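
For anyone following along, both new routines share the same shape: a
per-CPU clockintr handler that re-arms itself with clockintr_advance()
and then does its sampling work.  A stripped-down sketch of that
pattern follows.  It is not code from the diff; the function names are
made up, and only the clockintr calls that appear in the patch are
used:

/*
 * Sketch only: the general shape of a self-rearming clockintr
 * callback in the style of profclock()/gmonclock() below.
 */
void
example_clock(struct clockintr *cl, void *cf)
{
	uint64_t count;

	/* Reschedule ourselves and learn how many periods have elapsed. */
	count = clockintr_advance(cl, profclock_period);

	/* ... charge "count" periods of profiling time here ... */
}

void
example_init_cpu(struct cpu_info *ci)
{
	struct clockintr *cl;

	cl = clockintr_establish(&ci->ci_queue, example_clock);
	if (cl == NULL)
		panic("%s: clockintr_establish", __func__);

	/* Offset each CPU's callback so they don't all expire at once. */
	clockintr_stagger(cl, profclock_period, CPU_INFO_UNIT(ci), MAXCPUS);

	/*
	 * Later, clockintr_advance(cl, profclock_period) starts (or
	 * restarts) the callback and clockintr_cancel(cl) stops it.
	 */
}

The real profclock() and gmonclock() additionally clamp the count
returned by clockintr_advance() to ULONG_MAX before charging it via
addupc_intr() or the gmon kcount[] buckets.
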
Index: kern/kern_clock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_clock.c,v
retrieving revision 1.108
diff -u -p -r1.108 kern_clock.c
--- kern/kern_clock.c 25 Apr 2023 00:58:47 -0000 1.108
+++ kern/kern_clock.c 19 Jul 2023 14:33:04 -0000
@@ -49,10 +49,6 @@
#include <sys/sched.h>
#include <sys/timetc.h>
-#if defined(GPROF) || defined(DDBPROF)
-#include <sys/gmon.h>
-#endif
-
#include "dt.h"
#if NDT > 0
#include <dev/dt/dtvar.h>
@@ -87,8 +83,6 @@ int schedhz;
int profhz;
int profprocs;
int ticks = INT_MAX - (15 * 60 * HZ);
-static int psdiv, pscnt; /* prof => stat divider */
-int psratio; /* ratio: prof / stat */
volatile unsigned long jiffies = ULONG_MAX - (10 * 60 * HZ);
@@ -99,16 +93,13 @@ void
initclocks(void)
{
/*
- * Set divisors to 1 (normal case) and let the machine-specific
- * code do its bit.
+ * Let the machine-specific code do its bit.
*/
- psdiv = pscnt = 1;
cpu_initclocks();
- /*
- * Compute profhz/stathz.
- */
- psratio = profhz / stathz;
+ KASSERT(profhz >= stathz && profhz <= 1000000000);
+ KASSERT(profhz % stathz == 0);
+ profclock_period = 1000000000 / profhz;
inittimecounter();
}
@@ -256,7 +247,6 @@ startprofclock(struct process *pr)
atomic_setbits_int(&pr->ps_flags, PS_PROFIL);
if (++profprocs == 1) {
s = splstatclock();
- psdiv = pscnt = psratio;
setstatclockrate(profhz);
splx(s);
}
@@ -275,7 +265,6 @@ stopprofclock(struct process *pr)
atomic_clearbits_int(&pr->ps_flags, PS_PROFIL);
if (--profprocs == 0) {
s = splstatclock();
- psdiv = pscnt = 1;
setstatclockrate(stathz);
splx(s);
}
@@ -289,35 +278,13 @@ stopprofclock(struct process *pr)
void
statclock(struct clockframe *frame)
{
-#if defined(GPROF) || defined(DDBPROF)
- struct gmonparam *g;
- u_long i;
-#endif
struct cpu_info *ci = curcpu();
struct schedstate_percpu *spc = &ci->ci_schedstate;
struct proc *p = curproc;
struct process *pr;
- /*
- * Notice changes in divisor frequency, and adjust clock
- * frequency accordingly.
- */
- if (spc->spc_psdiv != psdiv) {
- spc->spc_psdiv = psdiv;
- spc->spc_pscnt = psdiv;
- if (psdiv == 1) {
- setstatclockrate(stathz);
- } else {
- setstatclockrate(profhz);
- }
- }
-
if (CLKF_USERMODE(frame)) {
pr = p->p_p;
- if (pr->ps_flags & PS_PROFIL)
- addupc_intr(p, CLKF_PC(frame), 1);
- if (--spc->spc_pscnt > 0)
- return;
/*
* Came from user mode; CPU was in user state.
* If this process is being profiled record the tick.
@@ -328,23 +295,6 @@ statclock(struct clockframe *frame)
else
spc->spc_cp_time[CP_USER]++;
} else {
-#if defined(GPROF) || defined(DDBPROF)
- /*
- * Kernel statistics are just like addupc_intr, only easier.
- */
- g = ci->ci_gmon;
- if (g != NULL && g->state == GMON_PROF_ON) {
- i = CLKF_PC(frame) - g->lowpc;
- if (i < g->textsize) {
- i /= HISTFRACTION * sizeof(*g->kcount);
- g->kcount[i]++;
- }
- }
-#endif
- if (p != NULL && p->p_p->ps_flags & PS_PROFIL)
- addupc_intr(p, PROC_PC(p), 1);
- if (--spc->spc_pscnt > 0)
- return;
/*
* Came from kernel mode, so we were:
* - spinning on a lock
@@ -371,7 +321,6 @@ statclock(struct clockframe *frame)
spc->spc_cp_time[spc->spc_spinning ?
CP_SPIN : CP_IDLE]++;
}
- spc->spc_pscnt = psdiv;
if (p != NULL) {
p->p_cpticks++;
Index: kern/subr_prof.c
===================================================================
RCS file: /cvs/src/sys/kern/subr_prof.c,v
retrieving revision 1.35
diff -u -p -r1.35 subr_prof.c
--- kern/subr_prof.c 2 Jun 2023 17:44:29 -0000 1.35
+++ kern/subr_prof.c 19 Jul 2023 14:33:04 -0000
@@ -34,13 +34,16 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/atomic.h>
#include <sys/pledge.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/syscallargs.h>
+#include <sys/user.h>
+uint32_t profclock_period;
#if defined(GPROF) || defined(DDBPROF)
#include <sys/malloc.h>
@@ -60,6 +63,8 @@ u_int gmon_cpu_count; /* [K] number of
extern char etext[];
+void gmonclock(struct clockintr *, void *);
+
void
prof_init(void)
{
@@ -95,6 +100,14 @@ prof_init(void)
/* Allocate and initialize one profiling buffer per CPU. */
CPU_INFO_FOREACH(cii, ci) {
+ ci->ci_gmonclock = clockintr_establish(&ci->ci_queue,
+ gmonclock);
+ if (ci->ci_gmonclock == NULL) {
+ printf("%s: clockintr_establish gmonclock\n", __func__);
+ return;
+ }
+ clockintr_stagger(ci->ci_gmonclock, profclock_period,
+ CPU_INFO_UNIT(ci), MAXCPUS);
cp = km_alloc(round_page(size), &kv_any, &kp_zero, &kd_nowait);
if (cp == NULL) {
printf("No memory for profiling.\n");
@@ -124,8 +137,9 @@ prof_init(void)
}
int
-prof_state_toggle(struct gmonparam *gp, int oldstate)
+prof_state_toggle(struct cpu_info *ci, int oldstate)
{
+ struct gmonparam *gp = ci->ci_gmon;
int error = 0;
KERNEL_ASSERT_LOCKED();
@@ -145,6 +159,7 @@ prof_state_toggle(struct gmonparam *gp,
if (error == 0) {
if (++gmon_cpu_count == 1)
startprofclock(&process0);
+ clockintr_advance(ci->ci_gmonclock, profclock_period);
}
break;
default:
@@ -152,6 +167,7 @@ prof_state_toggle(struct gmonparam *gp,
gp->state = GMON_PROF_OFF;
/* FALLTHROUGH */
case GMON_PROF_OFF:
+ clockintr_cancel(ci->ci_gmonclock);
if (--gmon_cpu_count == 0)
stopprofclock(&process0);
#if !defined(GPROF)
@@ -201,7 +217,7 @@ sysctl_doprof(int *name, u_int namelen,
error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state);
if (error)
return (error);
- return (prof_state_toggle(gp, state));
+ return prof_state_toggle(ci, state);
case GPROF_COUNT:
return (sysctl_struct(oldp, oldlenp, newp, newlen,
gp->kcount, gp->kcountsize));
@@ -218,6 +234,31 @@ sysctl_doprof(int *name, u_int namelen,
}
/* NOTREACHED */
}
+
+void
+gmonclock(struct clockintr *cl, void *cf)
+{
+ uint64_t count;
+ struct clockframe *frame = cf;
+ struct gmonparam *g = curcpu()->ci_gmon;
+ u_long i;
+
+ count = clockintr_advance(cl, profclock_period);
+ if (count > ULONG_MAX)
+ count = ULONG_MAX;
+
+ /*
+ * Kernel statistics are just like addupc_intr(), only easier.
+ */
+ if (!CLKF_USERMODE(frame) && g != NULL && g->state == GMON_PROF_ON) {
+ i = CLKF_PC(frame) - g->lowpc;
+ if (i < g->textsize) {
+ i /= HISTFRACTION * sizeof(*g->kcount);
+ g->kcount[i] += (u_long)count;
+ }
+ }
+}
+
#endif /* GPROF || DDBPROF */
/*
@@ -247,6 +288,7 @@ sys_profil(struct proc *p, void *v, regi
return (EINVAL);
if (SCARG(uap, scale) == 0) {
stopprofclock(pr);
+ need_resched(curcpu());
return (0);
}
upp = &pr->ps_prof;
@@ -259,8 +301,29 @@ sys_profil(struct proc *p, void *v, regi
upp->pr_size = SCARG(uap, size);
startprofclock(pr);
splx(s);
+ need_resched(curcpu());
return (0);
+}
+
+void
+profclock(struct clockintr *cl, void *cf)
+{
+ uint64_t count;
+ struct clockframe *frame = cf;
+ struct proc *p = curproc;
+
+ count = clockintr_advance(cl, profclock_period);
+ if (count > ULONG_MAX)
+ count = ULONG_MAX;
+
+ if (CLKF_USERMODE(frame)) {
+ if (ISSET(p->p_p->ps_flags, PS_PROFIL))
+ addupc_intr(p, CLKF_PC(frame), (u_long)count);
+ } else {
+ if (p != NULL && ISSET(p->p_p->ps_flags, PS_PROFIL))
+ addupc_intr(p, PROC_PC(p), (u_long)count);
+ }
}
/*
Index: kern/kern_sched.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_sched.c,v
retrieving revision 1.79
diff -u -p -r1.79 kern_sched.c
--- kern/kern_sched.c 14 Jul 2023 07:07:08 -0000 1.79
+++ kern/kern_sched.c 19 Jul 2023 14:33:05 -0000
@@ -21,6 +21,8 @@
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/systm.h>
+#include <sys/clockintr.h>
+#include <sys/resourcevar.h>
#include <sys/task.h>
#include <sys/smr.h>
#include <sys/tracepoint.h>
@@ -85,6 +87,15 @@ sched_init_cpu(struct cpu_info *ci)
spc->spc_idleproc = NULL;
+ if (spc->spc_profclock == NULL) {
+ spc->spc_profclock = clockintr_establish(&ci->ci_queue,
+ profclock);
+ if (spc->spc_profclock == NULL)
+ panic("%s: clockintr_establish profclock", __func__);
+ clockintr_stagger(spc->spc_profclock, profclock_period,
+ CPU_INFO_UNIT(ci), MAXCPUS);
+ }
+
kthread_create_deferred(sched_kthreads_create, ci);
LIST_INIT(&spc->spc_deadproc);
@@ -213,6 +224,11 @@ sched_exit(struct proc *p)
nanouptime(&ts);
timespecsub(&ts, &spc->spc_runtime, &ts);
timespecadd(&p->p_rtime, &ts, &p->p_rtime);
+
+ if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
+ atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
+ clockintr_cancel(spc->spc_profclock);
+ }
LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);
Index: kern/sched_bsd.c
===================================================================
RCS file: /cvs/src/sys/kern/sched_bsd.c,v
retrieving revision 1.77
diff -u -p -r1.77 sched_bsd.c
--- kern/sched_bsd.c 11 Jul 2023 07:02:43 -0000 1.77
+++ kern/sched_bsd.c 19 Jul 2023 14:33:05 -0000
@@ -39,6 +39,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/clockintr.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
@@ -349,6 +350,12 @@ mi_switch(void)
/* add the time counts for this thread to the process's total */
tuagg_unlocked(pr, p);
+ /* Stop the profclock if it's running. */
+ if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
+ atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
+ clockintr_cancel(spc->spc_profclock);
+ }
+
/*
* Process is about to yield the CPU; clear the appropriate
* scheduling flags.
@@ -392,6 +399,14 @@ mi_switch(void)
* schedstate_percpu pointer.
*/
KASSERT(p->p_cpu == curcpu());
+
+ /* Start the profclock if profil(2) is enabled. */
+ if (ISSET(p->p_p->ps_flags, PS_PROFIL)) {
+ atomic_setbits_int(&p->p_cpu->ci_schedstate.spc_schedflags,
+ SPCF_PROFCLOCK);
+ clockintr_advance(p->p_cpu->ci_schedstate.spc_profclock,
+ profclock_period);
+ }
nanouptime(&p->p_cpu->ci_schedstate.spc_runtime);
Index: kern/kern_clockintr.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_clockintr.c,v
retrieving revision 1.27
diff -u -p -r1.27 kern_clockintr.c
--- kern/kern_clockintr.c 2 Jul 2023 19:02:27 -0000 1.27
+++ kern/kern_clockintr.c 19 Jul 2023 14:33:05 -0000
@@ -25,6 +25,7 @@
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
+#include <sys/resourcevar.h>
#include <sys/stdint.h>
#include <sys/sysctl.h>
#include <sys/time.h>
@@ -32,39 +33,23 @@
/*
* Protection for global variables in this file:
*
- * C Global clockintr configuration mutex (clockintr_mtx).
* I Immutable after initialization.
*/
-struct mutex clockintr_mtx = MUTEX_INITIALIZER(IPL_CLOCK);
-
u_int clockintr_flags; /* [I] global state + behavior flags */
uint32_t hardclock_period; /* [I] hardclock period (ns) */
uint32_t schedclock_period; /* [I] schedclock period (ns) */
-volatile u_int statclock_gen = 1; /* [C] statclock update generation */
-volatile uint32_t statclock_avg; /* [C] average statclock period (ns) */
-uint32_t statclock_min; /* [C] minimum statclock period (ns) */
-uint32_t statclock_mask; /* [C] set of allowed offsets */
-uint32_t stat_avg; /* [I] average stathz period (ns) */
-uint32_t stat_min; /* [I] set of allowed offsets */
-uint32_t stat_mask; /* [I] max offset from minimum (ns) */
-uint32_t prof_avg; /* [I] average profhz period (ns) */
-uint32_t prof_min; /* [I] minimum profhz period (ns) */
-uint32_t prof_mask; /* [I] set of allowed offsets */
+uint32_t statclock_avg; /* [I] average statclock period (ns) */
+uint32_t statclock_min; /* [I] minimum statclock period (ns) */
+uint32_t statclock_mask; /* [I] set of allowed offsets */
-uint64_t clockintr_advance(struct clockintr *, uint64_t);
-void clockintr_cancel(struct clockintr *);
void clockintr_cancel_locked(struct clockintr *);
-struct clockintr *clockintr_establish(struct clockintr_queue *,
- void (*)(struct clockintr *, void *));
uint64_t clockintr_expiration(const struct clockintr *);
void clockintr_hardclock(struct clockintr *, void *);
uint64_t clockintr_nsecuptime(const struct clockintr *);
void clockintr_schedclock(struct clockintr *, void *);
void clockintr_schedule(struct clockintr *, uint64_t);
void clockintr_schedule_locked(struct clockintr *, uint64_t);
-void clockintr_stagger(struct clockintr *, uint64_t, u_int, u_int);
void clockintr_statclock(struct clockintr *, void *);
-void clockintr_statvar_init(int, uint32_t *, uint32_t *, uint32_t *);
uint64_t clockqueue_next(const struct clockintr_queue *);
void clockqueue_reset_intrclock(struct clockintr_queue *);
uint64_t nsec_advance(uint64_t *, uint64_t, uint64_t);
@@ -75,6 +60,8 @@ uint64_t nsec_advance(uint64_t *, uint64
void
clockintr_init(u_int flags)
{
+ uint32_t half_avg, var;
+
KASSERT(CPU_IS_PRIMARY(curcpu()));
KASSERT(clockintr_flags == 0);
KASSERT(!ISSET(flags, ~CL_FLAG_MASK));
@@ -83,12 +70,22 @@ clockintr_init(u_int flags)
hardclock_period = 1000000000 / hz;
KASSERT(stathz >= 1 && stathz <= 1000000000);
- KASSERT(profhz >= stathz && profhz <= 1000000000);
- KASSERT(profhz % stathz == 0);
- clockintr_statvar_init(stathz, &stat_avg, &stat_min, &stat_mask);
- clockintr_statvar_init(profhz, &prof_avg, &prof_min, &prof_mask);
- SET(clockintr_flags, CL_STATCLOCK);
- clockintr_setstatclockrate(stathz);
+
+ /*
+ * Compute the average statclock() period. Then find var, the
+ * largest power of two such that var <= statclock_avg / 2.
+ */
+ statclock_avg = 1000000000 / stathz;
+ half_avg = statclock_avg / 2;
+ for (var = 1U << 31; var > half_avg; var /= 2)
+ continue;
+
+ /*
+ * Set a lower bound for the range using statclock_avg and var.
+ * The mask for that range is just (var - 1).
+ */
+ statclock_min = statclock_avg - (var / 2);
+ statclock_mask = var - 1;
KASSERT(schedhz >= 0 && schedhz <= 1000000000);
if (schedhz != 0)
@@ -479,70 +476,6 @@ clockintr_stagger(struct clockintr *cl,
mtx_leave(&cq->cq_mtx);
}
-/*
- * Compute the period (avg) for the given frequency and a range around
- * that period. The range is [min + 1, min + mask]. The range is used
- * during dispatch to choose a new pseudorandom deadline for each statclock
- * event.
- */
-void
-clockintr_statvar_init(int freq, uint32_t *avg, uint32_t *min, uint32_t *mask)
-{
- uint32_t half_avg, var;
-
- KASSERT(!ISSET(clockintr_flags, CL_INIT | CL_STATCLOCK));
- KASSERT(freq > 0 && freq <= 1000000000);
-
- /* Compute avg, the average period. */
- *avg = 1000000000 / freq;
-
- /* Find var, the largest power of two such that var <= avg / 2. */
- half_avg = *avg / 2;
- for (var = 1U << 31; var > half_avg; var /= 2)
- continue;
-
- /* Using avg and var, set a lower bound for the range. */
- *min = *avg - (var / 2);
-
- /* The mask is just (var - 1). */
- *mask = var - 1;
-}
-
-/*
- * Update the statclock_* variables according to the given frequency.
- * Must only be called after clockintr_statvar_init() initializes both
- * stathz_* and profhz_*.
- */
-void
-clockintr_setstatclockrate(int freq)
-{
- u_int ogen;
-
- KASSERT(ISSET(clockintr_flags, CL_STATCLOCK));
-
- mtx_enter(&clockintr_mtx);
-
- ogen = statclock_gen;
- statclock_gen = 0;
- membar_producer();
- if (freq == stathz) {
- statclock_avg = stat_avg;
- statclock_min = stat_min;
- statclock_mask = stat_mask;
- } else if (freq == profhz) {
- statclock_avg = prof_avg;
- statclock_min = prof_min;
- statclock_mask = prof_mask;
- } else {
- panic("%s: frequency is not stathz (%d) or profhz (%d): %d",
- __func__, stathz, profhz, freq);
- }
- membar_producer();
- statclock_gen = MAX(1, ogen + 1);
-
- mtx_leave(&clockintr_mtx);
-}
-
uint64_t
clockintr_nsecuptime(const struct clockintr *cl)
{
@@ -577,24 +510,16 @@ void
clockintr_statclock(struct clockintr *cl, void *frame)
{
uint64_t count, expiration, i, uptime;
- uint32_t mask, min, off;
- u_int gen;
+ uint32_t off;
if (ISSET(clockintr_flags, CL_RNDSTAT)) {
- do {
- gen = statclock_gen;
- membar_consumer();
- min = statclock_min;
- mask = statclock_mask;
- membar_consumer();
- } while (gen == 0 || gen != statclock_gen);
count = 0;
expiration = clockintr_expiration(cl);
uptime = clockintr_nsecuptime(cl);
while (expiration <= uptime) {
- while ((off = (random() & mask)) == 0)
+ while ((off = (random() & statclock_mask)) == 0)
continue;
- expiration += min + off;
+ expiration += statclock_min + off;
count++;
}
clockintr_schedule(cl, expiration);
Index: sys/resourcevar.h
===================================================================
RCS file: /cvs/src/sys/sys/resourcevar.h,v
retrieving revision 1.26
diff -u -p -r1.26 resourcevar.h
--- sys/resourcevar.h 25 Apr 2023 00:58:47 -0000 1.26
+++ sys/resourcevar.h 19 Jul 2023 14:33:05 -0000
@@ -60,8 +60,13 @@ do { \
#include <lib/libkern/libkern.h> /* for KASSERT() */
+struct clockintr;
+
+extern uint32_t profclock_period;
+
void addupc_intr(struct proc *, u_long, u_long);
void addupc_task(struct proc *, u_long, u_int);
+void profclock(struct clockintr *, void *);
void tuagg_unlocked(struct process *, struct proc *);
void tuagg(struct process *, struct proc *);
struct tusage;
Index: sys/sched.h
===================================================================
RCS file: /cvs/src/sys/sys/sched.h,v
retrieving revision 1.57
diff -u -p -r1.57 sched.h
--- sys/sched.h 25 Dec 2020 12:49:31 -0000 1.57
+++ sys/sched.h 19 Jul 2023 14:33:05 -0000
@@ -90,6 +90,7 @@
#define SCHED_NQS 32 /* 32 run queues. */
+struct clockintr;
struct smr_entry;
/*
@@ -105,8 +106,8 @@ struct schedstate_percpu {
u_int64_t spc_cp_time[CPUSTATES]; /* CPU state statistics */
u_char spc_curpriority; /* usrpri of curproc */
int spc_rrticks; /* ticks until roundrobin() */
- int spc_pscnt; /* prof/stat counter */
- int spc_psdiv; /* prof/stat divisor */
+
+ struct clockintr *spc_profclock; /* [o] profclock handle */
u_int spc_nrun; /* procs on the run queues */
fixpt_t spc_ldavg; /* shortest load avg. for this cpu */
@@ -137,6 +138,7 @@ struct cpustats {
#define SPCF_SWITCHCLEAR (SPCF_SEENRR|SPCF_SHOULDYIELD)
#define SPCF_SHOULDHALT 0x0004 /* CPU should be vacated */
#define SPCF_HALTED 0x0008 /* CPU has been halted */
+#define SPCF_PROFCLOCK 0x0010 /* profclock() was started */
#define SCHED_PPQ (128 / SCHED_NQS) /* priorities per queue */
#define NICE_WEIGHT 2 /* priorities per nice level */
Index: sys/clockintr.h
===================================================================
RCS file: /cvs/src/sys/sys/clockintr.h,v
retrieving revision 1.8
diff -u -p -r1.8 clockintr.h
--- sys/clockintr.h 15 Jun 2023 22:18:06 -0000 1.8
+++ sys/clockintr.h 19 Jul 2023 14:33:05 -0000
@@ -112,8 +112,7 @@ struct clockintr_queue {
/* Global state flags. */
#define CL_INIT 0x00000001 /* global init done */
-#define CL_STATCLOCK 0x00000002 /* statclock variables set */
-#define CL_STATE_MASK 0x00000003
+#define CL_STATE_MASK 0x00000001
/* Global behavior flags. */
#define CL_RNDSTAT 0x80000000 /* randomized statclock */
@@ -122,13 +121,17 @@ struct clockintr_queue {
void clockintr_cpu_init(const struct intrclock *);
int clockintr_dispatch(void *);
void clockintr_init(u_int);
-void clockintr_setstatclockrate(int);
void clockintr_trigger(void);
/*
* Kernel API
*/
+uint64_t clockintr_advance(struct clockintr *, uint64_t);
+void clockintr_cancel(struct clockintr *);
+struct clockintr *clockintr_establish(struct clockintr_queue *,
+ void (*)(struct clockintr *, void *));
+void clockintr_stagger(struct clockintr *, uint64_t, u_int, u_int);
void clockqueue_init(struct clockintr_queue *);
int sysctl_clockintr(int *, u_int, void *, size_t *, void *, size_t);
Index: arch/alpha/alpha/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/alpha/alpha/clock.c,v
retrieving revision 1.27
diff -u -p -r1.27 clock.c
--- arch/alpha/alpha/clock.c 4 Feb 2023 19:19:36 -0000 1.27
+++ arch/alpha/alpha/clock.c 19 Jul 2023 14:33:05 -0000
@@ -218,7 +218,6 @@ cpu_initclocks(void)
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
u_int
Index: arch/alpha/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/alpha/include/cpu.h,v
retrieving revision 1.69
diff -u -p -r1.69 cpu.h
--- arch/alpha/include/cpu.h 31 Jan 2023 15:18:53 -0000 1.69
+++ arch/alpha/include/cpu.h 19 Jul 2023 14:33:05 -0000
@@ -212,6 +212,7 @@ struct cpu_info {
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
Index: arch/amd64/isa/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/isa/clock.c,v
retrieving revision 1.39
diff -u -p -r1.39 clock.c
--- arch/amd64/isa/clock.c 4 Feb 2023 19:19:36 -0000 1.39
+++ arch/amd64/isa/clock.c 19 Jul 2023 14:33:05 -0000
@@ -519,7 +519,6 @@ setstatclockrate(int arg)
mc146818_write(NULL, MC_REGA,
MC_BASE_32_KHz | MC_RATE_1024_Hz);
}
- clockintr_setstatclockrate(arg);
}
void
Index: arch/amd64/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/cpu.h,v
retrieving revision 1.155
diff -u -p -r1.155 cpu.h
--- arch/amd64/include/cpu.h 4 Jul 2023 17:29:32 -0000 1.155
+++ arch/amd64/include/cpu.h 19 Jul 2023 14:33:05 -0000
@@ -208,6 +208,7 @@ struct cpu_info {
u_int64_t ci_hz_aperf;
#if defined(GPROF) || defined(DDBPROF)
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
u_int32_t ci_vmm_flags;
#define CI_VMM_VMX (1 << 0)
Index: arch/arm/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/arm/include/cpu.h,v
retrieving revision 1.62
diff -u -p -r1.62 cpu.h
--- arch/arm/include/cpu.h 17 Jan 2023 02:27:14 -0000 1.62
+++ arch/arm/include/cpu.h 19 Jul 2023 14:33:05 -0000
@@ -198,6 +198,7 @@ struct cpu_info {
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
Index: arch/arm/cortex/agtimer.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/cortex/agtimer.c,v
retrieving revision 1.17
diff -u -p -r1.17 agtimer.c
--- arch/arm/cortex/agtimer.c 4 Feb 2023 19:19:36 -0000 1.17
+++ arch/arm/cortex/agtimer.c 19 Jul 2023 14:33:05 -0000
@@ -288,7 +288,6 @@ agtimer_delay(u_int usecs)
void
agtimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
void
Index: arch/arm/cortex/amptimer.c
===================================================================
RCS file: /cvs/src/sys/arch/arm/cortex/amptimer.c,v
retrieving revision 1.16
diff -u -p -r1.16 amptimer.c
--- arch/arm/cortex/amptimer.c 4 Feb 2023 19:19:36 -0000 1.16
+++ arch/arm/cortex/amptimer.c 19 Jul 2023 14:33:05 -0000
@@ -343,7 +343,6 @@ amptimer_delay(u_int usecs)
void
amptimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
void
Index: arch/armv7/omap/dmtimer.c
===================================================================
RCS file: /cvs/src/sys/arch/armv7/omap/dmtimer.c,v
retrieving revision 1.18
diff -u -p -r1.18 dmtimer.c
--- arch/armv7/omap/dmtimer.c 4 Feb 2023 19:19:36 -0000 1.18
+++ arch/armv7/omap/dmtimer.c 19 Jul 2023 14:33:05 -0000
@@ -317,7 +317,6 @@ dmtimer_delay(u_int usecs)
void
dmtimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
Index: arch/armv7/omap/gptimer.c
===================================================================
RCS file: /cvs/src/sys/arch/armv7/omap/gptimer.c,v
retrieving revision 1.19
diff -u -p -r1.19 gptimer.c
--- arch/armv7/omap/gptimer.c 4 Feb 2023 19:19:36 -0000 1.19
+++ arch/armv7/omap/gptimer.c 19 Jul 2023 14:33:05 -0000
@@ -326,7 +326,6 @@ gptimer_delay(u_int usecs)
void
gptimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
Index: arch/armv7/sunxi/sxitimer.c
===================================================================
RCS file: /cvs/src/sys/arch/armv7/sunxi/sxitimer.c,v
retrieving revision 1.20
diff -u -p -r1.20 sxitimer.c
--- arch/armv7/sunxi/sxitimer.c 4 Feb 2023 19:19:36 -0000 1.20
+++ arch/armv7/sunxi/sxitimer.c 19 Jul 2023 14:33:06 -0000
@@ -299,7 +299,6 @@ sxitimer_delay(u_int usecs)
void
sxitimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
u_int
Index: arch/arm64/dev/agtimer.c
===================================================================
RCS file: /cvs/src/sys/arch/arm64/dev/agtimer.c,v
retrieving revision 1.22
diff -u -p -r1.22 agtimer.c
--- arch/arm64/dev/agtimer.c 4 Feb 2023 19:19:36 -0000 1.22
+++ arch/arm64/dev/agtimer.c 19 Jul 2023 14:33:06 -0000
@@ -354,7 +354,6 @@ agtimer_delay(u_int usecs)
void
agtimer_setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
void
Index: arch/arm64/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/arm64/include/cpu.h,v
retrieving revision 1.37
diff -u -p -r1.37 cpu.h
--- arch/arm64/include/cpu.h 13 Jul 2023 08:33:36 -0000 1.37
+++ arch/arm64/include/cpu.h 19 Jul 2023 14:33:06 -0000
@@ -172,6 +172,7 @@ struct cpu_info {
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
Index: arch/hppa/dev/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/hppa/dev/clock.c,v
retrieving revision 1.35
diff -u -p -r1.35 clock.c
--- arch/hppa/dev/clock.c 4 Feb 2023 19:19:36 -0000 1.35
+++ arch/hppa/dev/clock.c 19 Jul 2023 14:33:06 -0000
@@ -141,7 +141,6 @@ itmr_intr(void *v)
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
u_int
Index: arch/hppa/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/hppa/include/cpu.h,v
retrieving revision 1.99
diff -u -p -r1.99 cpu.h
--- arch/hppa/include/cpu.h 31 Jan 2023 15:18:54 -0000 1.99
+++ arch/hppa/include/cpu.h 19 Jul 2023 14:33:06 -0000
@@ -113,6 +113,7 @@ struct cpu_info {
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
Index: arch/i386/isa/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/i386/isa/clock.c,v
retrieving revision 1.64
diff -u -p -r1.64 clock.c
--- arch/i386/isa/clock.c 4 Feb 2023 19:19:36 -0000 1.64
+++ arch/i386/isa/clock.c 19 Jul 2023 14:33:06 -0000
@@ -663,7 +663,6 @@ setstatclockrate(int arg)
mc146818_write(NULL, MC_REGA,
MC_BASE_32_KHz | MC_RATE_1024_Hz);
}
- clockintr_setstatclockrate(arg);
}
void
Index: arch/i386/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/i386/include/cpu.h,v
retrieving revision 1.181
diff -u -p -r1.181 cpu.h
--- arch/i386/include/cpu.h 6 Dec 2022 01:56:44 -0000 1.181
+++ arch/i386/include/cpu.h 19 Jul 2023 14:33:06 -0000
@@ -168,6 +168,7 @@ struct cpu_info {
struct ksensor ci_sensor;
#if defined(GPROF) || defined(DDBPROF)
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
Index: arch/luna88k/luna88k/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/luna88k/luna88k/clock.c,v
retrieving revision 1.16
diff -u -p -r1.16 clock.c
--- arch/luna88k/luna88k/clock.c 6 Dec 2022 00:56:52 -0000 1.16
+++ arch/luna88k/luna88k/clock.c 19 Jul 2023 14:33:06 -0000
@@ -152,7 +152,6 @@ cpu_initclocks()
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
/*
Index: arch/m88k/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/m88k/include/cpu.h,v
retrieving revision 1.72
diff -u -p -r1.72 cpu.h
--- arch/m88k/include/cpu.h 31 Jan 2023 15:18:54 -0000 1.72
+++ arch/m88k/include/cpu.h 19 Jul 2023 14:33:06 -0000
@@ -177,6 +177,7 @@ struct cpu_info {
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
struct clockintr_queue ci_queue;
char ci_panicbuf[512];
Index: arch/macppc/macppc/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/macppc/macppc/clock.c,v
retrieving revision 1.54
diff -u -p -r1.54 clock.c
--- arch/macppc/macppc/clock.c 4 Feb 2023 23:17:05 -0000 1.54
+++ arch/macppc/macppc/clock.c 19 Jul 2023 14:33:06 -0000
@@ -234,7 +234,6 @@ delay(unsigned n)
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
u_int
Index: arch/powerpc/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/powerpc/include/cpu.h,v
retrieving revision 1.74
diff -u -p -r1.74 cpu.h
--- arch/powerpc/include/cpu.h 29 Nov 2022 00:58:05 -0000 1.74
+++ arch/powerpc/include/cpu.h 19 Jul 2023 14:33:06 -0000
@@ -89,6 +89,7 @@ struct cpu_info {
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
char ci_panicbuf[512];
};
Index: arch/mips64/mips64/mips64_machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/mips64/mips64/mips64_machdep.c,v
retrieving revision 1.41
diff -u -p -r1.41 mips64_machdep.c
--- arch/mips64/mips64/mips64_machdep.c 4 Feb 2023 19:19:36 -0000 1.41
+++ arch/mips64/mips64/mips64_machdep.c 19 Jul 2023 14:33:06 -0000
@@ -333,7 +333,6 @@ cpu_initclocks(void)
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
/*
Index: arch/mips64/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/mips64/include/cpu.h,v
retrieving revision 1.141
diff -u -p -r1.141 cpu.h
--- arch/mips64/include/cpu.h 11 Jan 2023 03:19:52 -0000 1.141
+++ arch/mips64/include/cpu.h 19 Jul 2023 14:33:06 -0000
@@ -200,6 +200,7 @@ struct cpu_info {
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
char ci_panicbuf[512];
};
Index: arch/powerpc64/powerpc64/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/powerpc64/powerpc64/clock.c,v
retrieving revision 1.10
diff -u -p -r1.10 clock.c
--- arch/powerpc64/powerpc64/clock.c 4 Feb 2023 23:20:54 -0000 1.10
+++ arch/powerpc64/powerpc64/clock.c 19 Jul 2023 14:33:06 -0000
@@ -141,7 +141,6 @@ decr_intr(struct trapframe *frame)
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
void
Index: arch/riscv64/riscv64/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/riscv64/riscv64/clock.c,v
retrieving revision 1.9
diff -u -p -r1.9 clock.c
--- arch/riscv64/riscv64/clock.c 4 Feb 2023 19:19:37 -0000 1.9
+++ arch/riscv64/riscv64/clock.c 19 Jul 2023 14:33:06 -0000
@@ -144,7 +144,6 @@ clock_intr(void *frame)
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
void
Index: arch/riscv64/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/riscv64/include/cpu.h,v
retrieving revision 1.15
diff -u -p -r1.15 cpu.h
--- arch/riscv64/include/cpu.h 19 Nov 2022 16:02:37 -0000 1.15
+++ arch/riscv64/include/cpu.h 19 Jul 2023 14:33:06 -0000
@@ -119,6 +119,7 @@ struct cpu_info {
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
char ci_panicbuf[512];
Index: arch/sh/sh/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/sh/sh/clock.c,v
retrieving revision 1.14
diff -u -p -r1.14 clock.c
--- arch/sh/sh/clock.c 10 Apr 2023 04:21:20 -0000 1.14
+++ arch/sh/sh/clock.c 19 Jul 2023 14:33:06 -0000
@@ -203,7 +203,6 @@ sh_clock_get_pclock(void)
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
u_int
Index: arch/sh/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/sh/include/cpu.h,v
retrieving revision 1.34
diff -u -p -r1.34 cpu.h
--- arch/sh/include/cpu.h 6 Dec 2022 01:19:35 -0000 1.34
+++ arch/sh/include/cpu.h 19 Jul 2023 14:33:06 -0000
@@ -68,6 +68,7 @@ struct cpu_info {
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
int ci_want_resched;
Index: arch/sparc64/sparc64/clock.c
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/sparc64/clock.c,v
retrieving revision 1.77
diff -u -p -r1.77 clock.c
--- arch/sparc64/sparc64/clock.c 28 Apr 2023 18:27:55 -0000 1.77
+++ arch/sparc64/sparc64/clock.c 19 Jul 2023 14:33:06 -0000
@@ -576,7 +576,6 @@ cpu_initclocks(void)
void
setstatclockrate(int newhz)
{
- clockintr_setstatclockrate(newhz);
}
/*
Index: arch/sparc64/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/sparc64/include/cpu.h,v
retrieving revision 1.101
diff -u -p -r1.101 cpu.h
--- arch/sparc64/include/cpu.h 13 Jan 2023 03:22:18 -0000 1.101
+++ arch/sparc64/include/cpu.h 19 Jul 2023 14:33:06 -0000
@@ -165,6 +165,7 @@ struct cpu_info {
#endif
#ifdef GPROF
struct gmonparam *ci_gmon;
+ struct clockintr *ci_gmonclock;
#endif
char ci_panicbuf[512];
};
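
One more note on the kern_clockintr.c change: with
clockintr_setstatclockrate() gone, the randomized statclock range is
computed exactly once, in clockintr_init().  If it helps review, here
is a small standalone program that mirrors that arithmetic; stathz =
100 is just an example value, not what any particular platform uses:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int stathz = 100;			/* example value only */
	uint32_t avg, half_avg, var, min, mask;

	/* Average statclock period in nanoseconds. */
	avg = 1000000000 / stathz;		/* 10000000 */

	/* Largest power of two no greater than half the average. */
	half_avg = avg / 2;
	for (var = 1U << 31; var > half_avg; var /= 2)
		continue;			/* 4194304 */

	/* Lower bound and offset mask for the randomized period. */
	min = avg - (var / 2);			/* 7902848 */
	mask = var - 1;				/* 4194303 */

	printf("avg %u min %u mask %u\n", avg, min, mask);
	return 0;
}

Each pseudorandom statclock period then falls in [min + 1, min + mask],
which averages out to exactly avg, so the long-run statclock rate is
unchanged; only the stathz/profhz switching machinery goes away.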