From: Mike Galbraith <[email protected]>

commit 8b911acdf08477c059d1c36c21113ab1696c612b upstream.

Don't bother with selection when the current cpu is idle.  Recent load
balancing changes also make it no longer necessary to check wake_affine()
success before returning the selected sibling, so we now always use it.

Signed-off-by: Mike Galbraith <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
LKML-Reference: <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Signed-off-by: Paul Gortmaker <[email protected]>
---
 kernel/sched_fair.c |   14 ++++++++++----
 1 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 94993ac..ae4d842 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1454,7 +1454,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        int new_cpu = cpu;
-       int want_affine = 0;
+       int want_affine = 0, cpu_idle = !current->pid;
        int want_sd = 1;
        int sync = wake_flags & WF_SYNC;
 
@@ -1512,13 +1512,15 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                         * If there's an idle sibling in this domain, make that
                         * the wake_affine target instead of the current cpu.
                         */
-                       if (tmp->flags & SD_SHARE_PKG_RESOURCES)
+                       if (!cpu_idle && tmp->flags & SD_SHARE_PKG_RESOURCES)
                                target = select_idle_sibling(p, tmp, target);
 
                        if (target >= 0) {
                                if (tmp->flags & SD_WAKE_AFFINE) {
                                        affine_sd = tmp;
                                        want_affine = 0;
+                                       if (target != cpu)
+                                               cpu_idle = 1;
                                }
                                cpu = target;
                        }
@@ -1534,6 +1536,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                        sd = tmp;
        }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
        if (sched_feat(LB_SHARES_UPDATE)) {
                /*
                 * Pick the largest domain to update shares over
@@ -1550,9 +1553,12 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
                        raw_spin_lock(&rq->lock);
                }
        }
+#endif
 
-       if (affine_sd && wake_affine(affine_sd, p, sync))
-               return cpu;
+       if (affine_sd) {
+		if (cpu_idle || cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+                       return cpu;
+       }
 
        while (sd) {
                int load_idx = sd->forkexec_idx;
-- 
1.7.3.3

_______________________________________________
stable mailing list
[email protected]
http://linux.kernel.org/mailman/listinfo/stable

Reply via email to