On Wed, Aug 06, 2014 at 12:06:19PM +0400, Kirill Tkhai wrote:
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -331,9 +331,13 @@ static inline struct rq *__task_rq_lock(struct 
> task_struct *p)
>       lockdep_assert_held(&p->pi_lock);
>  
>       for (;;) {
> +             while (unlikely(task_migrating(p)))
> +                     cpu_relax();
> +
>               rq = task_rq(p);
>               raw_spin_lock(&rq->lock);
> -             if (likely(rq == task_rq(p)))
> +             if (likely(rq == task_rq(p) &&
> +                        !task_migrating(p)))
>                       return rq;
>               raw_spin_unlock(&rq->lock);
>       }
> @@ -349,10 +353,14 @@ static struct rq *task_rq_lock(struct task_struct *p, 
> unsigned long *flags)
>       struct rq *rq;
>  
>       for (;;) {
> +             while (unlikely(task_migrating(p)))
> +                     cpu_relax();
> +
>               raw_spin_lock_irqsave(&p->pi_lock, *flags);
>               rq = task_rq(p);
>               raw_spin_lock(&rq->lock);
> -             if (likely(rq == task_rq(p)))
> +             if (likely(rq == task_rq(p) &&
> +                        !task_migrating(p)))
>                       return rq;
>               raw_spin_unlock(&rq->lock);
>               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);

I know I suggested that; but I changed it like the below. The advantage
is not having two task_migrating() tests on the likely path.

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -331,15 +331,15 @@ static inline struct rq *__task_rq_lock(
        lockdep_assert_held(&p->pi_lock);
 
        for (;;) {
-               while (unlikely(task_migrating(p)))
-                       cpu_relax();
-
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p) &&
                           !task_migrating(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
+
+               while (unlikely(task_migrating(p)))
+                       cpu_relax();
        }
 }
 
@@ -353,9 +353,6 @@ static struct rq *task_rq_lock(struct ta
        struct rq *rq;
 
        for (;;) {
-               while (unlikely(task_migrating(p)))
-                       cpu_relax();
-
                raw_spin_lock_irqsave(&p->pi_lock, *flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
@@ -364,6 +361,9 @@ static struct rq *task_rq_lock(struct ta
                        return rq;
                raw_spin_unlock(&rq->lock);
                raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+               while (unlikely(task_migrating(p)))
+                       cpu_relax();
        }
 }
 

Attachment: pgpYDfn4KSY7r.pgp
Description: PGP signature

Reply via email to