Future patches will emit warnings if rq_clock() is called before
update_rq_clock() inside a rq_pin_lock()/rq_unpin_lock() pair.

Since there is only one caller of idle_balance(), we can push the
unpin/repin there.
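
Roughly, the change moves the unpin/repin bracket from the caller into
idle_balance() itself. A minimal, compilable user-space sketch of that
hand-off is below; the struct layout, stub bodies and the caller name
(pick_next_task_idle_path) are stand-ins for illustration only, not the
kernel implementation -- only the call shape mirrors the hunks that follow.

    #include <stdio.h>

    struct rq { int cpu; };
    struct rq_flags { int pinned; };   /* stand-in for the pin cookie */

    /* Stand-ins: the real helpers track lockdep pin state on rq->lock. */
    static void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
    {
            (void)rq;
            rf->pinned = 0;
    }

    static void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
    {
            (void)rq;
            rf->pinned = 1;
    }

    /* After the patch, idle_balance() owns the unpin/repin itself ... */
    static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
    {
            rq_unpin_lock(this_rq, rf);  /* unpin before dropping rq->lock */
            /* ... pull tasks from other CPUs here ... */
            rq_repin_lock(this_rq, rf);  /* repin before returning */
            return 0;
    }

    /* ... so the sole caller no longer brackets the call: */
    static int pick_next_task_idle_path(struct rq *rq, struct rq_flags *rf)
    {
            /* was: rq_unpin_lock(); idle_balance(rq); rq_repin_lock(); */
            return idle_balance(rq, rf);
    }

    int main(void)
    {
            struct rq rq = { .cpu = 0 };
            struct rq_flags rf = { .pinned = 1 };

            printf("pulled %d tasks\n", pick_next_task_idle_path(&rq, &rf));
            return 0;
    }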

Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Mike Galbraith <umgwanakikb...@gmail.com>
Cc: Mel Gorman <mgor...@techsingularity.net>
Signed-off-by: Matt Fleming <m...@codeblueprint.co.uk>
---
 kernel/sched/fair.c | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f0032827fb79..df9a5b16e1df 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3232,7 +3232,7 @@ static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
        return cfs_rq->avg.load_avg;
 }
 
-static int idle_balance(struct rq *this_rq);
+static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
 
 #else /* CONFIG_SMP */
 
@@ -3261,7 +3261,7 @@ attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
-static inline int idle_balance(struct rq *rq)
+static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
 {
        return 0;
 }
@@ -5825,15 +5825,8 @@ simple:
        return p;
 
 idle:
-       /*
-        * This is OK, because current is on_cpu, which avoids it being picked
-        * for load-balance and preemption/IRQs are still disabled avoiding
-        * further scheduler activity on it and we're being very careful to
-        * re-start the picking loop.
-        */
-       rq_unpin_lock(rq, rf);
-       new_tasks = idle_balance(rq);
-       rq_repin_lock(rq, rf);
+       new_tasks = idle_balance(rq, rf);
+
        /*
         * Because idle_balance() releases (and re-acquires) rq->lock, it is
         * possible for any higher priority task to appear. In that case we
@@ -7767,7 +7760,7 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static int idle_balance(struct rq *this_rq)
+static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 {
        unsigned long next_balance = jiffies + HZ;
        int this_cpu = this_rq->cpu;
@@ -7781,6 +7774,14 @@ static int idle_balance(struct rq *this_rq)
         */
        this_rq->idle_stamp = rq_clock(this_rq);
 
+       /*
+        * This is OK, because current is on_cpu, which avoids it being picked
+        * for load-balance and preemption/IRQs are still disabled avoiding
+        * further scheduler activity on it and we're being very careful to
+        * re-start the picking loop.
+        */
+       rq_unpin_lock(this_rq, rf);
+
        if (this_rq->avg_idle < sysctl_sched_migration_cost ||
            !this_rq->rd->overload) {
                rcu_read_lock();
@@ -7858,6 +7859,8 @@ out:
        if (pulled_task)
                this_rq->idle_stamp = 0;
 
+       rq_repin_lock(this_rq, rf);
+
        return pulled_task;
 }
 
-- 
2.9.3
