On Thu, Oct 04, 2012 at 11:42:45PM +0200, Gregor Best wrote:
> @@ -222,14 +230,13 @@
> setrunqueue(struct proc *p)
> {
> struct schedstate_percpu *spc;
> - int queue = p->p_priority >> 2;
>
> SCHED_ASSERT_LOCKED();
> spc = &p->p_cpu->ci_schedstate;
> spc->spc_nrun++;
>
> - TAILQ_INSERT_TAIL(&spc->spc_qs[queue], p, p_runq);
> - spc->spc_whichqs |= (1 << queue);
> + KASSERT(!RB_FIND(prochead, &spc->spc_runq, p));
> + RB_INSERT(prochead, &spc->spc_runq, p);
> cpuset_add(&sched_queued_cpus, p->p_cpu);
>
> if (cpuset_isset(&sched_idle_cpus, p->p_cpu))
> @@ -240,38 +247,29 @@
> remrunqueue(struct proc *p)
> {
> struct schedstate_percpu *spc;
> - int queue = p->p_priority >> 2;
>
> SCHED_ASSERT_LOCKED();
> spc = &p->p_cpu->ci_schedstate;
> spc->spc_nrun--;
>
> - TAILQ_REMOVE(&spc->spc_qs[queue], p, p_runq);
> - if (TAILQ_EMPTY(&spc->spc_qs[queue])) {
> - spc->spc_whichqs &= ~(1 << queue);
> - if (spc->spc_whichqs == 0)
> - cpuset_del(&sched_queued_cpus, p->p_cpu);
> - }
> + KASSERT(RB_REMOVE(prochead, &spc->spc_runq, p));
> + if (RB_EMPTY(&spc->spc_runq))
> + cpuset_del(&sched_queued_cpus, p->p_cpu);
> }
>
This change is unclear to me; AFAIU, it removes the mechanism
that makes processes wake up with a priority depending on what
they are blocked on.
For instance, processes waking up from poll(2) or audio read/write
won't be prioritized any longer. If so, this would hurt audio and
other interactive processes but would improve cpu-intensive
bloatware.
I haven't tested this, though.
> Index: kern/sched_bsd.c
> ===================================================================
> RCS file: /cvs/src/sys/kern/sched_bsd.c,v
> retrieving revision 1.30
> diff -u -r1.30 sched_bsd.c
> --- kern/sched_bsd.c 9 Jul 2012 17:27:32 -0000 1.30
> +++ kern/sched_bsd.c 4 Oct 2012 21:27:58 -0000
> @@ -77,20 +77,18 @@
>
> timeout_set(&schedcpu_to, schedcpu, &schedcpu_to);
>
> - rrticks_init = hz / 10;
> + rrticks_init = hz / 20;
This change is unrelated to the rest, isn't it?
-- Alexandre