From: Frederic Weisbecker <[email protected]>

Issue an IPI to restart the tick on a CPU that belongs
to a cpuset when its nohz flag gets cleared.

Signed-off-by: Frederic Weisbecker <[email protected]>
Cc: Alessio Igor Bogani <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Avi Kivity <[email protected]>
Cc: Chris Metcalf <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Daniel Lezcano <[email protected]>
Cc: Geoff Levand <[email protected]>
Cc: Gilad Ben Yossef <[email protected]>
Cc: Hakan Akkan <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Kevin Hilman <[email protected]>
Cc: Max Krasnyansky <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Stephen Hemminger <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Sven-Thorsten Dietrich <[email protected]>
Cc: Thomas Gleixner <[email protected]>
---
 include/linux/cpuset.h   |    2 ++
 kernel/cpuset.c          |   25 +++++++++++++++++++++++--
 kernel/time/tick-sched.c |    8 ++++++++
 3 files changed, 33 insertions(+), 2 deletions(-)

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 7e7eb41..631968b 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -260,6 +260,8 @@ static inline bool cpuset_adaptive_nohz(void)
         */
        return cpuset_cpu_adaptive_nohz(smp_processor_id());
 }
+
+extern void cpuset_exit_nohz_interrupt(void *unused);
 #else
 static inline bool cpuset_cpu_adaptive_nohz(int cpu) { return false; }
 static inline bool cpuset_adaptive_nohz(void) { return false; }
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6319d8e..1b67e5b 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1200,6 +1200,14 @@ static void cpuset_change_flag(struct task_struct *tsk,
 
 DEFINE_PER_CPU(atomic_t, cpu_adaptive_nohz_ref);
 
+static void cpu_exit_nohz(int cpu)
+{
+       preempt_disable();
+       smp_call_function_single(cpu, cpuset_exit_nohz_interrupt,
+                                NULL, true);
+       preempt_enable();
+}
+
 static void update_nohz_cpus(struct cpuset *old_cs, struct cpuset *cs)
 {
        int cpu;
@@ -1211,9 +1219,22 @@ static void update_nohz_cpus(struct cpuset *old_cs, struct cpuset *cs)
        for_each_cpu(cpu, cs->cpus_allowed) {
                atomic_t *ref = &per_cpu(cpu_adaptive_nohz_ref, cpu);
                if (is_adaptive_nohz(cs))
-                       atomic_inc(ref);
+                       val = atomic_inc_return(ref);
                else
-                       atomic_dec(ref);
+                       val = atomic_dec_return(ref);
+
+               if (!val) {
+                       /*
+                        * The update to cpu_adaptive_nohz_ref must be
+                        * visible right away. So that once we restart the tick
+                        * from the IPI, it won't be stopped again due to cache
+                        * update lag.
+                        * FIXME: We probably need more to ensure this value is really
+                        * visible right away.
+                        */
+                       smp_mb();
+                       cpu_exit_nohz(cpu);
+               }
        }
 }
 #else
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 0a5e650..de7de68 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -884,6 +884,14 @@ void tick_nohz_check_adaptive(void)
        }
 }
 
+void cpuset_exit_nohz_interrupt(void *unused)
+{
+       struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+
+       if (ts->tick_stopped && !is_idle_task(current))
+               tick_nohz_restart_adaptive();
+}
+
 void tick_nohz_post_schedule(void)
 {
        struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
-- 
1.7.10.4


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to