 kernel/sched/fair.c | 80 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 43 insertions(+), 37 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 96e2b18b6283..25817cff72c4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2632,53 +2632,53 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 /*
  * Try and locate an idle CPU in the sched_domain.
  */
-static int select_idle_sibling(struct task_struct *p, int target)
+static int select_idle_sibling(struct task_struct *p, int target, struct sched_domain *affine)
 {
-	int cpu = smp_processor_id();
-	int prev_cpu = task_cpu(p);
-	struct sched_domain *sd;
-	struct sched_group *sg;
+	struct sched_domain *sd, *llc_sd;
 	int i;
 
 	/*
 	 * If the task is going to be woken-up on this cpu and if it is
 	 * already idle, then it is the right target.
 	 */
-	if (target == cpu && idle_cpu(cpu))
-		return cpu;
-
-	/*
-	 * If the task is going to be woken-up on the cpu where it previously
-	 * ran and if it is currently idle, then it the right target.
-	 */
-	if (target == prev_cpu && idle_cpu(prev_cpu))
-		return prev_cpu;
+	if (idle_cpu(target))
+		return target;
 
 	/*
 	 * Otherwise, iterate the domains and find an eligible idle cpu.
 	 */
-	sd = rcu_dereference(per_cpu(sd_llc, target));
-	for_each_lower_domain(sd) {
-		sg = sd->groups;
-		do {
-			if (!cpumask_intersects(sched_group_cpus(sg),
-						tsk_cpus_allowed(p)))
-				goto next;
+	llc_sd = rcu_dereference(per_cpu(sd_llc, target));
+	for_each_domain(target, sd) {
+		for_each_cpu(i, sched_domain_span(sd)) {
+			if (!cpumask_test_cpu(i, tsk_cpus_allowed(p)))
+				continue;
+			if (!idle_cpu(i))
+				continue;
+			return i;
+		}
+		/* Don't iterate past the last level cache domain */
+		if (sd == llc_sd)
+			break;
+		/* Don't iterate past the affinity level */
+		if (sd == affine)
+			break;
+	}
+	return -1;
+}
 
-			for_each_cpu(i, sched_group_cpus(sg)) {
-				if (!idle_cpu(i))
-					goto next;
-			}
+/*
+ * For synchronous wake-ups: is the currently running
+ * process the only pending process of this CPU runqueue?
+ */
+static inline int single_running(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
 
-			target = cpumask_first_and(sched_group_cpus(sg),
-					tsk_cpus_allowed(p));
-			goto done;
-next:
-			sg = sg->next;
-		} while (sg != sd->groups);
-	}
-done:
-	return target;
+#ifdef CONFIG_SMP
+	if (!llist_empty(&rq->wake_list))
+		return 0;
+#endif
+	return rq->nr_running <= 1;
 }
 
 /*
@@ -2759,11 +2759,17 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	}
 
 	if (affine_sd) {
-		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync)) {
 			prev_cpu = cpu;
+			if (sync && single_running(cpu)) {
+				new_cpu = cpu;
+				goto unlock;
+			}
+		}
 
-		new_cpu = select_idle_sibling(p, prev_cpu);
-		goto unlock;
+		new_cpu = select_idle_sibling(p, prev_cpu, affine_sd);
+		if (new_cpu >= 0)
+			goto unlock;
 	}
 
 	while (sd) {
