Linus,

Please pull the latest sched-urgent-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched-urgent-for-linus

   # HEAD: 46123355af729514e6fa8b8a9dd1e645e61a6466 sched/fair: Fix nuisance kernel-doc warning

Three fixes:

 - fix a suspend/resume cpusets bug
 - fix a !CONFIG_NUMA_BALANCING bug
 - fix a kernel-doc warning

 Thanks,

        Ingo

------------------>
Peter Zijlstra (2):
      sched/fair: Fix wake_affine_llc() balancing rules
      sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs

Randy Dunlap (1):
      sched/fair: Fix nuisance kernel-doc warning


 include/linux/cpuset.h |  6 ++++++
 kernel/cgroup/cpuset.c | 16 +++++++++++++++-
 kernel/power/process.c |  5 ++++-
 kernel/sched/core.c    |  7 +++----
 kernel/sched/fair.c    |  4 ++--
 5 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e74655d941b7..a1e6a33a4b03 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -51,7 +51,9 @@ static inline void cpuset_dec(void)
 
 extern int cpuset_init(void);
 extern void cpuset_init_smp(void);
+extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
+extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -164,11 +166,15 @@ static inline bool cpusets_enabled(void) { return false; }
 static inline int cpuset_init(void) { return 0; }
 static inline void cpuset_init_smp(void) {}
 
+static inline void cpuset_force_rebuild(void) { }
+
 static inline void cpuset_update_active_cpus(void)
 {
        partition_sched_domains(1, NULL, NULL);
 }
 
+static inline void cpuset_wait_for_hotplug(void) { }
+
 static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
 {
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 2f4039bafebb..0513ee39698b 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2267,6 +2267,13 @@ static void cpuset_hotplug_update_tasks(struct cpuset 
*cs)
        mutex_unlock(&cpuset_mutex);
 }
 
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+       force_rebuild = true;
+}
+
 /**
  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
  *
@@ -2341,8 +2348,10 @@ static void cpuset_hotplug_workfn(struct work_struct 
*work)
        }
 
        /* rebuild sched domains if cpus_allowed has changed */
-       if (cpus_updated)
+       if (cpus_updated || force_rebuild) {
+               force_rebuild = false;
                rebuild_sched_domains();
+       }
 }
 
 void cpuset_update_active_cpus(void)
@@ -2355,6 +2364,11 @@ void cpuset_update_active_cpus(void)
        schedule_work(&cpuset_hotplug_work);
 }
 
+void cpuset_wait_for_hotplug(void)
+{
+       flush_work(&cpuset_hotplug_work);
+}
+
 /*
  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
  * Call this routine anytime after node_states[N_MEMORY] changes.
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 78672d324a6e..50f25cb370c6 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -20,8 +20,9 @@
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
 #include <trace/events/power.h>
+#include <linux/cpuset.h>
 
-/* 
+/*
  * Timeout for stopping processes
  */
 unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -202,6 +203,8 @@ void thaw_processes(void)
        __usermodehelper_set_disable_depth(UMH_FREEZING);
        thaw_workqueues();
 
+       cpuset_wait_for_hotplug();
+
        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                /* No other threads should have PF_SUSPEND_TASK set */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6d2c7ff9ba98..136a76d80dbf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5556,16 +5556,15 @@ static void cpuset_cpu_active(void)
                 * operation in the resume sequence, just build a single sched
                 * domain, ignoring cpusets.
                 */
-               num_cpus_frozen--;
-               if (likely(num_cpus_frozen)) {
-                       partition_sched_domains(1, NULL, NULL);
+               partition_sched_domains(1, NULL, NULL);
+               if (--num_cpus_frozen)
                        return;
-               }
                /*
                 * This is the last CPU online operation. So fall through and
                 * restore the original sched domains by considering the
                 * cpuset configurations.
                 */
+               cpuset_force_rebuild();
        }
        cpuset_update_active_cpus();
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8d5868771cb3..8415d1ec2b84 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5435,7 +5435,7 @@ wake_affine_llc(struct sched_domain *sd, struct 
task_struct *p,
                return false;
 
        /* if this cache has capacity, come here */
-       if (this_stats.has_capacity && this_stats.nr_running < 
prev_stats.nr_running+1)
+       if (this_stats.has_capacity && this_stats.nr_running+1 < 
prev_stats.nr_running)
                return true;
 
        /*
@@ -7719,7 +7719,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, 
struct sd_lb_stats *sd
  * number.
  *
  * Return: 1 when packing is required and a task should be moved to
- * this CPU.  The amount of the imbalance is returned in *imbalance.
+ * this CPU.  The amount of the imbalance is returned in env->imbalance.
  *
  * @env: The load balancing environment.
  * @sds: Statistics of the sched_domain which is to be packed

Reply via email to