On 05/08/23(Sat) 17:17, Scott Cheloha wrote:
> This is the next piece of the clock interrupt reorganization patch
> series.

The round robin logic is here to make sure a process doesn't hog a CPU.
The period at which we tell a process it should yield doesn't have to be
tied to the hardclock period.  We want to be sure a process doesn't run
more than 100ms at a time.

Is the priority of this new clock interrupt the same as the hardclock's?

I don't understand what clockintr_advance() is doing.  Maybe you could
write a manual page for it?  I'm afraid we could now wait up to 200ms?
And what does a `count' of 2 mean?
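Here is how I read it from the diff alone; a sketch with made-up names,
not the real kern_clockintr.c code, so please correct me if this is wrong:

	/*
	 * Sketch only (hypothetical names): push the expiration forward
	 * by whole periods until it lies in the future and return how
	 * many periods were consumed.
	 */
	static uint64_t
	advance_sketch(uint64_t *expiration, uint64_t period, uint64_t now)
	{
		uint64_t count = 0;

		while (*expiration <= now) {
			*expiration += period;
			count++;
		}
		/*
		 * count == 1 would be the normal case; count >= 2 would
		 * mean at least one full interval was missed, hence
		 * SPCF_SHOULDYIELD right away?
		 */
		return count;
	}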

Same question for clockintr_stagger().
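My guess is that it just offsets each CPU's first expiration so they
don't all fire at the same instant; again a sketch with hypothetical
names, inferred from the clockintr_stagger(..., multiplier, MAXCPUS)
call in the diff:

	/*
	 * Sketch only: CPU `n' out of `count' CPUs starts its handle
	 * n/count of a period late.
	 */
	static void
	stagger_sketch(uint64_t *expiration, uint64_t period, u_int n,
	    u_int count)
	{
		*expiration = period / count * n;
	}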

Can we get rid of `hardclock_period' here and use a variable set to 100ms?
This should be tested on alpha, which has a hz of 1024, but I'd argue this
is an improvement.
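Something along these lines; `roundrobin_period' is a made-up name, but
like hardclock_period it would be in nanoseconds:

	/* Hypothetical: a fixed 100ms roundrobin period, independent of hz. */
	uint64_t roundrobin_period = 100 * 1000000;	/* nsec */

and then in roundrobin():

	count = clockintr_advance(cl, roundrobin_period);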

> This patch removes the roundrobin() call from hardclock() and makes
> roundrobin() an independent clock interrupt.
> 
> - Revise roundrobin() to make it a valid clock interrupt callback.
>   It remains periodic.  It still runs at one tenth of the hardclock
>   frequency.
> 
> - Account for multiple expirations in roundrobin().  If two or more
>   intervals have elapsed we set SPCF_SHOULDYIELD immediately.
> 
>   This preserves existing behavior: hardclock() is called multiple
>   times during clockintr_hardclock() if clock interrupts are blocked
>   for long enough.
> 
> - Each schedstate_percpu has its own roundrobin() handle, spc_roundrobin.
>   spc_roundrobin is established during sched_init_cpu(), staggered during
>   the first clockintr_cpu_init() call, and advanced during 
> clockintr_cpu_init().
>   Expirations during suspend/resume are discarded.
> 
> - spc_rrticks and rrticks_init are now useless.  Delete them.
> 
> ok?
> 
> Also, yes, I see the growing pile of scheduler-controlled clock
> interrupt handles.  My current plan is to move the setup code at the
> end of clockintr_cpu_init() to a different routine, maybe something
> like "sched_start_cpu()".  On the primary CPU you'd call it immediately
> after cpu_initclocks().  On secondary CPUs you'd call it at the end of
> cpu_hatch() just before cpu_switchto().
> 
> In any case, we will need to find a home for that code someplace.  It
> can't stay in clockintr_cpu_init() forever.
> 
> Index: kern/sched_bsd.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/sched_bsd.c,v
> retrieving revision 1.79
> diff -u -p -r1.79 sched_bsd.c
> --- kern/sched_bsd.c  5 Aug 2023 20:07:55 -0000       1.79
> +++ kern/sched_bsd.c  5 Aug 2023 22:15:25 -0000
> @@ -56,7 +56,6 @@
>  
>  
>  int  lbolt;                  /* once a second sleep address */
> -int  rrticks_init;           /* # of hardclock ticks per roundrobin() */
>  
>  #ifdef MULTIPROCESSOR
>  struct __mp_lock sched_lock;
> @@ -69,21 +68,23 @@ uint32_t          decay_aftersleep(uint32_t, uin
>   * Force switch among equal priority processes every 100ms.
>   */
>  void
> -roundrobin(struct cpu_info *ci)
> +roundrobin(struct clockintr *cl, void *cf)
>  {
> +     struct cpu_info *ci = curcpu();
>       struct schedstate_percpu *spc = &ci->ci_schedstate;
> +     uint64_t count;
>  
> -     spc->spc_rrticks = rrticks_init;
> +     count = clockintr_advance(cl, hardclock_period * 10);
>  
>       if (ci->ci_curproc != NULL) {
> -             if (spc->spc_schedflags & SPCF_SEENRR) {
> +             if (spc->spc_schedflags & SPCF_SEENRR || count >= 2) {
>                       /*
>                        * The process has already been through a roundrobin
>                        * without switching and may be hogging the CPU.
>                        * Indicate that the process should yield.
>                        */
>                       atomic_setbits_int(&spc->spc_schedflags,
> -                         SPCF_SHOULDYIELD);
> +                         SPCF_SEENRR | SPCF_SHOULDYIELD);
>               } else {
>                       atomic_setbits_int(&spc->spc_schedflags,
>                           SPCF_SEENRR);
> @@ -695,8 +696,6 @@ scheduler_start(void)
>        * its job.
>        */
>       timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
> -
> -     rrticks_init = hz / 10;
>       schedcpu(&schedcpu_to);
>  
>  #ifndef SMALL_KERNEL
> Index: kern/kern_sched.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/kern_sched.c,v
> retrieving revision 1.84
> diff -u -p -r1.84 kern_sched.c
> --- kern/kern_sched.c 5 Aug 2023 20:07:55 -0000       1.84
> +++ kern/kern_sched.c 5 Aug 2023 22:15:25 -0000
> @@ -102,6 +102,12 @@ sched_init_cpu(struct cpu_info *ci)
>               if (spc->spc_profclock == NULL)
>                       panic("%s: clockintr_establish profclock", __func__);
>       }
> +     if (spc->spc_roundrobin == NULL) {
> +             spc->spc_roundrobin = clockintr_establish(&ci->ci_queue,
> +                 roundrobin);
> +             if (spc->spc_roundrobin == NULL)
> +                     panic("%s: clockintr_establish roundrobin", __func__);
> +     }
>  
>       kthread_create_deferred(sched_kthreads_create, ci);
>  
> Index: kern/kern_clockintr.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/kern_clockintr.c,v
> retrieving revision 1.30
> diff -u -p -r1.30 kern_clockintr.c
> --- kern/kern_clockintr.c     5 Aug 2023 20:07:55 -0000       1.30
> +++ kern/kern_clockintr.c     5 Aug 2023 22:15:25 -0000
> @@ -204,6 +204,11 @@ clockintr_cpu_init(const struct intrcloc
>               clockintr_stagger(spc->spc_profclock, profclock_period,
>                   multiplier, MAXCPUS);
>       }
> +     if (spc->spc_roundrobin->cl_expiration == 0) {
> +             clockintr_stagger(spc->spc_roundrobin, hardclock_period,
> +                 multiplier, MAXCPUS);
> +     }
> +     clockintr_advance(spc->spc_roundrobin, hardclock_period * 10);
>  
>       if (reset_cq_intrclock)
>               SET(cq->cq_flags, CQ_INTRCLOCK);
> Index: kern/kern_clock.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/kern_clock.c,v
> retrieving revision 1.111
> diff -u -p -r1.111 kern_clock.c
> --- kern/kern_clock.c 5 Aug 2023 20:07:55 -0000       1.111
> +++ kern/kern_clock.c 5 Aug 2023 22:15:25 -0000
> @@ -113,9 +113,6 @@ hardclock(struct clockframe *frame)
>  {
>       struct cpu_info *ci = curcpu();
>  
> -     if (--ci->ci_schedstate.spc_rrticks <= 0)
> -             roundrobin(ci);
> -
>  #if NDT > 0
>       DT_ENTER(profile, NULL);
>       if (CPU_IS_PRIMARY(ci))
> Index: sys/sched.h
> ===================================================================
> RCS file: /cvs/src/sys/sys/sched.h,v
> retrieving revision 1.60
> diff -u -p -r1.60 sched.h
> --- sys/sched.h       5 Aug 2023 20:07:56 -0000       1.60
> +++ sys/sched.h       5 Aug 2023 22:15:25 -0000
> @@ -105,10 +105,10 @@ struct schedstate_percpu {
>       u_int spc_schedticks;           /* ticks for schedclock() */
>       u_int64_t spc_cp_time[CPUSTATES]; /* CPU state statistics */
>       u_char spc_curpriority;         /* usrpri of curproc */
> -     int spc_rrticks;                /* ticks until roundrobin() */
>  
>       struct clockintr *spc_itimer;   /* [o] itimer_update handle */
>       struct clockintr *spc_profclock; /* [o] profclock handle */
> +     struct clockintr *spc_roundrobin; /* [o] roundrobin handle */
>  
>       u_int spc_nrun;                 /* procs on the run queues */
>  
> @@ -150,11 +150,11 @@ extern int rrticks_init;                /* ticks per r
>  
>  struct proc;
>  void schedclock(struct proc *);
> -struct cpu_info;
> -void roundrobin(struct cpu_info *);
> +void roundrobin(struct clockintr *, void *);
>  void scheduler_start(void);
>  void userret(struct proc *p);
>  
> +struct cpu_info;
>  void sched_init_cpu(struct cpu_info *);
>  void sched_idle(void *);
>  void sched_exit(struct proc *);
