> > > @@ -4323,6 +4340,10 @@ bool __sched yield_to(struct task_struct *p,
> > bool preempt)
> > > rq = this_rq();
> > >
> > > again:
> > > + /* optimistic test to avoid taking locks */
> > > + if (!__yield_to_candidate(curr, p))
> > > + goto out_irq;
> > > +
>
> So add
* Peter Zijlstra [2012-09-10 18:03:55]:
> On Mon, 2012-09-10 at 08:16 -0500, Andrew Theurer wrote:
> > > > @@ -4856,8 +4859,6 @@ again:
> > > > if (curr->sched_class != p->sched_class)
> > > > goto out;
> > > >
> > > > - if (task_running(p_rq, p) || p->state)
> > > > -
>
> Signed-off-by: Andrew Theurer
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index fbf1fd0..c767915 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -4844,6 +4844,9 @@ bool __sched yield_to(struct task_struct *p, bool
> preempt)
>
> again:
> p_rq = task_rq(p);
> On 12/07/12 21:18, Raghavendra K T wrote:
> > +#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
> [...]
> > + struct {
> > + bool cpu_relax_intercepted;
> > + bool dy_eligible;
> > + } ple;
> > +#endif
> [...]
> > }
> > vcpu->run = page_address(page);
> > + vcpu->ple.cpu_relax_intercepted = false;