On Thu, Jan 19, 2017 at 10:17:03AM -0500, Steven Rostedt wrote:
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 154fd68..e2c6d3b 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -3259,13 +3259,15 @@ static inline struct task_struct *
>  pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
>  {
>  	const struct sched_class *class = &fair_sched_class;
> +	const struct sched_class *idle_class = &idle_sched_class;
>  	struct task_struct *p;
>  
>  	/*
>  	 * Optimization: we know that if all tasks are in
>  	 * the fair class we can call that function directly:
>  	 */
> -	if (likely(prev->sched_class == class &&
> +	if (likely((prev->sched_class == class ||
> +		    prev->sched_class == idle_class) &&
>  		   rq->nr_running == rq->cfs.h_nr_running)) {
OK, so I hate this patch because it makes the condition more complex, and
while staring at what it does for code generation I couldn't for the life
of me figure out why we care about prev->sched_class to begin with.

(we used to, but the current code not so much)

So I simply removed that entire clause, like below, and lo and behold, the
system booted...

Could you give it a spin to see if anything comes apart?

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 49ce1cb..51ca21e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3321,15 +3321,14 @@ static inline void schedule_debug(struct task_struct *prev)
 static inline struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-	const struct sched_class *class = &fair_sched_class;
+	const struct sched_class *class;
 	struct task_struct *p;
 
 	/*
 	 * Optimization: we know that if all tasks are in
 	 * the fair class we can call that function directly:
	 */
-	if (likely(prev->sched_class == class &&
-		   rq->nr_running == rq->cfs.h_nr_running)) {
+	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;