[PATCH 2/2] nohz: Move full nohz kick to its own IPI

2014-04-03 Thread Frederic Weisbecker
Now that we have smp_queue_function_single() which can be used to
safely queue IPIs when interrupts are disabled and without worrying
about concurrent callers, lets use it for the full dynticks kick to
notify a CPU that it's exiting single task mode.

This unbloats a bit the scheduler IPI that the nohz code was abusing
for its cool "callable anywhere/anytime" properties.

Reviewed-by: Paul E. McKenney 
Cc: Andrew Morton 
Cc: Ingo Molnar 
Cc: Jens Axboe 
Cc: Kevin Hilman 
Cc: Paul E. McKenney 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Signed-off-by: Frederic Weisbecker 
---
 include/linux/tick.h |  2 ++
 kernel/sched/core.c  |  5 +
 kernel/sched/sched.h |  2 +-
 kernel/time/tick-sched.c | 21 +
 4 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/include/linux/tick.h b/include/linux/tick.h
index b84773c..9d3fcc2 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -182,6 +182,7 @@ static inline bool tick_nohz_full_cpu(int cpu)
 extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
 extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
@@ -190,6 +191,7 @@ static inline bool tick_nohz_full_enabled(void) { return 
false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void __tick_nohz_full_check(void) { }
 static inline void tick_nohz_full_kick(void) { }
+static inline void tick_nohz_full_kick_cpu(int cpu) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9cae286..e4b344e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1499,9 +1499,7 @@ void scheduler_ipi(void)
 */
preempt_fold_need_resched();
 
-   if (llist_empty(&this_rq()->wake_list)
-   && !tick_nohz_full_cpu(smp_processor_id())
-   && !got_nohz_idle_kick())
+   if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
return;
 
/*
@@ -1518,7 +1516,6 @@ void scheduler_ipi(void)
 * somewhat pessimize the simple resched case.
 */
irq_enter();
-   tick_nohz_full_check();
sched_ttwu_pending();
 
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9007f2..4771063 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1225,7 +1225,7 @@ static inline void inc_nr_running(struct rq *rq)
if (tick_nohz_full_cpu(rq->cpu)) {
/* Order rq->nr_running write against the IPI */
smp_wmb();
-   smp_send_reschedule(rq->cpu);
+   tick_nohz_full_kick_cpu(rq->cpu);
}
}
 #endif
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9f8af69..582d3f6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,27 @@ void tick_nohz_full_kick(void)
irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
 }
 
+static void nohz_full_kick_queue(struct queue_single_data *qsd)
+{
+   __tick_nohz_full_check();
+}
+
+static DEFINE_PER_CPU(struct queue_single_data, nohz_full_kick_qsd) = {
+   .func = nohz_full_kick_queue,
+};
+
+void tick_nohz_full_kick_cpu(int cpu)
+{
+   if (!tick_nohz_full_cpu(cpu))
+   return;
+
+   if (cpu == smp_processor_id()) {
+   irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+   } else {
+   smp_queue_function_single(cpu, &per_cpu(nohz_full_kick_qsd, cpu));
+   }
+}
+
 static void nohz_full_kick_ipi(void *info)
 {
__tick_nohz_full_check();
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 2/2] nohz: Move full nohz kick to its own IPI

2014-04-03 Thread Frederic Weisbecker
Now that we have smp_queue_function_single() which can be used to
safely queue IPIs when interrupts are disabled and without worrying
about concurrent callers, lets use it for the full dynticks kick to
notify a CPU that it's exiting single task mode.

This unbloats a bit the scheduler IPI that the nohz code was abusing
for its cool "callable anywhere/anytime" properties.

Reviewed-by: Paul E. McKenney paul...@linux.vnet.ibm.com
Cc: Andrew Morton a...@linux-foundation.org
Cc: Ingo Molnar mi...@kernel.org
Cc: Jens Axboe ax...@fb.com
Cc: Kevin Hilman khil...@linaro.org
Cc: Paul E. McKenney paul...@linux.vnet.ibm.com
Cc: Peter Zijlstra pet...@infradead.org
Cc: Thomas Gleixner t...@linutronix.de
Signed-off-by: Frederic Weisbecker fweis...@gmail.com
---
 include/linux/tick.h |  2 ++
 kernel/sched/core.c  |  5 +
 kernel/sched/sched.h |  2 +-
 kernel/time/tick-sched.c | 21 +
 4 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/include/linux/tick.h b/include/linux/tick.h
index b84773c..9d3fcc2 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -182,6 +182,7 @@ static inline bool tick_nohz_full_cpu(int cpu)
 extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
 extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
@@ -190,6 +191,7 @@ static inline bool tick_nohz_full_enabled(void) { return 
false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void __tick_nohz_full_check(void) { }
 static inline void tick_nohz_full_kick(void) { }
+static inline void tick_nohz_full_kick_cpu(int cpu) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9cae286..e4b344e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1499,9 +1499,7 @@ void scheduler_ipi(void)
 */
preempt_fold_need_resched();
 
-   if (llist_empty(&this_rq()->wake_list)
-   && !tick_nohz_full_cpu(smp_processor_id())
-   && !got_nohz_idle_kick())
+   if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
return;
 
/*
@@ -1518,7 +1516,6 @@ void scheduler_ipi(void)
 * somewhat pessimize the simple resched case.
 */
irq_enter();
-   tick_nohz_full_check();
sched_ttwu_pending();
 
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9007f2..4771063 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1225,7 +1225,7 @@ static inline void inc_nr_running(struct rq *rq)
	if (tick_nohz_full_cpu(rq->cpu)) {
		/* Order rq->nr_running write against the IPI */
		smp_wmb();
-		smp_send_reschedule(rq->cpu);
+		tick_nohz_full_kick_cpu(rq->cpu);
}
}
 #endif
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9f8af69..582d3f6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,27 @@ void tick_nohz_full_kick(void)
	irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
 }
 
+static void nohz_full_kick_queue(struct queue_single_data *qsd)
+{
+   __tick_nohz_full_check();
+}
+
+static DEFINE_PER_CPU(struct queue_single_data, nohz_full_kick_qsd) = {
+   .func = nohz_full_kick_queue,
+};
+
+void tick_nohz_full_kick_cpu(int cpu)
+{
+   if (!tick_nohz_full_cpu(cpu))
+   return;
+
+   if (cpu == smp_processor_id()) {
+   irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+   } else {
+   smp_queue_function_single(cpu, &per_cpu(nohz_full_kick_qsd, cpu));
+   }
+}
+
 static void nohz_full_kick_ipi(void *info)
 {
__tick_nohz_full_check();
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 2/2] nohz: Move full nohz kick to its own IPI

2014-04-02 Thread Frederic Weisbecker
Now that we have smp_queue_function_single() which can be used to
safely queue IPIs when interrupts are disabled and without worrying
about concurrent callers, lets use it for the full dynticks kick to
notify a CPU that it's exiting single task mode.

This unbloats a bit the scheduler IPI that the nohz code was abusing
for its cool "callable anywhere/anytime" properties.

Cc: Andrew Morton 
Cc: Ingo Molnar 
Cc: Jens Axboe 
Cc: Kevin Hilman 
Cc: Paul E. McKenney 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Signed-off-by: Frederic Weisbecker 
---
 include/linux/tick.h |  2 ++
 kernel/sched/core.c  |  5 +
 kernel/sched/sched.h |  2 +-
 kernel/time/tick-sched.c | 21 +
 4 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/include/linux/tick.h b/include/linux/tick.h
index b84773c..9d3fcc2 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -182,6 +182,7 @@ static inline bool tick_nohz_full_cpu(int cpu)
 extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
 extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
@@ -190,6 +191,7 @@ static inline bool tick_nohz_full_enabled(void) { return 
false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void __tick_nohz_full_check(void) { }
 static inline void tick_nohz_full_kick(void) { }
+static inline void tick_nohz_full_kick_cpu(int cpu) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9cae286..e4b344e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1499,9 +1499,7 @@ void scheduler_ipi(void)
 */
preempt_fold_need_resched();
 
-   if (llist_empty(&this_rq()->wake_list)
-   && !tick_nohz_full_cpu(smp_processor_id())
-   && !got_nohz_idle_kick())
+   if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
return;
 
/*
@@ -1518,7 +1516,6 @@ void scheduler_ipi(void)
 * somewhat pessimize the simple resched case.
 */
irq_enter();
-   tick_nohz_full_check();
sched_ttwu_pending();
 
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9007f2..4771063 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1225,7 +1225,7 @@ static inline void inc_nr_running(struct rq *rq)
if (tick_nohz_full_cpu(rq->cpu)) {
/* Order rq->nr_running write against the IPI */
smp_wmb();
-   smp_send_reschedule(rq->cpu);
+   tick_nohz_full_kick_cpu(rq->cpu);
}
}
 #endif
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9f8af69..582d3f6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,27 @@ void tick_nohz_full_kick(void)
irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
 }
 
+static void nohz_full_kick_queue(struct queue_single_data *qsd)
+{
+   __tick_nohz_full_check();
+}
+
+static DEFINE_PER_CPU(struct queue_single_data, nohz_full_kick_qsd) = {
+   .func = nohz_full_kick_queue,
+};
+
+void tick_nohz_full_kick_cpu(int cpu)
+{
+   if (!tick_nohz_full_cpu(cpu))
+   return;
+
+   if (cpu == smp_processor_id()) {
+   irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+   } else {
+   smp_queue_function_single(cpu, &per_cpu(nohz_full_kick_qsd, cpu));
+   }
+}
+
 static void nohz_full_kick_ipi(void *info)
 {
__tick_nohz_full_check();
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 2/2] nohz: Move full nohz kick to its own IPI

2014-04-02 Thread Frederic Weisbecker
Now that we have smp_queue_function_single() which can be used to
safely queue IPIs when interrupts are disabled and without worrying
about concurrent callers, lets use it for the full dynticks kick to
notify a CPU that it's exiting single task mode.

This unbloats a bit the scheduler IPI that the nohz code was abusing
for its cool "callable anywhere/anytime" properties.

Cc: Andrew Morton 
Cc: Ingo Molnar 
Cc: Jens Axboe 
Cc: Kevin Hilman 
Cc: Paul E. McKenney 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Signed-off-by: Frederic Weisbecker 
---
 include/linux/tick.h |  2 ++
 kernel/sched/core.c  |  5 +
 kernel/sched/sched.h |  2 +-
 kernel/time/tick-sched.c | 20 
 4 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/include/linux/tick.h b/include/linux/tick.h
index b84773c..9d3fcc2 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -182,6 +182,7 @@ static inline bool tick_nohz_full_cpu(int cpu)
 extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
 extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
@@ -190,6 +191,7 @@ static inline bool tick_nohz_full_enabled(void) { return 
false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void __tick_nohz_full_check(void) { }
 static inline void tick_nohz_full_kick(void) { }
+static inline void tick_nohz_full_kick_cpu(int cpu) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9cae286..e4b344e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1499,9 +1499,7 @@ void scheduler_ipi(void)
 */
preempt_fold_need_resched();
 
-   if (llist_empty(&this_rq()->wake_list)
-   && !tick_nohz_full_cpu(smp_processor_id())
-   && !got_nohz_idle_kick())
+   if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
return;
 
/*
@@ -1518,7 +1516,6 @@ void scheduler_ipi(void)
 * somewhat pessimize the simple resched case.
 */
irq_enter();
-   tick_nohz_full_check();
sched_ttwu_pending();
 
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9007f2..4771063 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1225,7 +1225,7 @@ static inline void inc_nr_running(struct rq *rq)
if (tick_nohz_full_cpu(rq->cpu)) {
/* Order rq->nr_running write against the IPI */
smp_wmb();
-   smp_send_reschedule(rq->cpu);
+   tick_nohz_full_kick_cpu(rq->cpu);
}
}
 #endif
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9f8af69..33a0043 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,26 @@ void tick_nohz_full_kick(void)
irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
 }
 
+static DEFINE_PER_CPU(struct queue_single_data, nohz_full_kick_qsd);
+
+static void nohz_full_kick_queue(struct queue_single_data *qsd)
+{
+   __tick_nohz_full_check();
+}
+
+void tick_nohz_full_kick_cpu(int cpu)
+{
+   if (!tick_nohz_full_cpu(cpu))
+   return;
+
+   if (cpu == smp_processor_id()) {
+   irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+   } else {
+   smp_queue_function_single(cpu, nohz_full_kick_queue,
+ &per_cpu(nohz_full_kick_qsd, cpu));
+   }
+}
+
 static void nohz_full_kick_ipi(void *info)
 {
__tick_nohz_full_check();
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 2/2] nohz: Move full nohz kick to its own IPI

2014-04-02 Thread Frederic Weisbecker
Now that we have smp_queue_function_single() which can be used to
safely queue IPIs when interrupts are disabled and without worrying
about concurrent callers, lets use it for the full dynticks kick to
notify a CPU that it's exiting single task mode.

This unbloats a bit the scheduler IPI that the nohz code was abusing
for its cool "callable anywhere/anytime" properties.

Cc: Andrew Morton a...@linux-foundation.org
Cc: Ingo Molnar mi...@kernel.org
Cc: Jens Axboe jens.ax...@oracle.com
Cc: Kevin Hilman khil...@linaro.org
Cc: Paul E. McKenney paul...@linux.vnet.ibm.com
Cc: Peter Zijlstra pet...@infradead.org
Cc: Thomas Gleixner t...@linutronix.de
Signed-off-by: Frederic Weisbecker fweis...@gmail.com
---
 include/linux/tick.h |  2 ++
 kernel/sched/core.c  |  5 +
 kernel/sched/sched.h |  2 +-
 kernel/time/tick-sched.c | 20 
 4 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/include/linux/tick.h b/include/linux/tick.h
index b84773c..9d3fcc2 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -182,6 +182,7 @@ static inline bool tick_nohz_full_cpu(int cpu)
 extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
 extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
@@ -190,6 +191,7 @@ static inline bool tick_nohz_full_enabled(void) { return 
false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void __tick_nohz_full_check(void) { }
 static inline void tick_nohz_full_kick(void) { }
+static inline void tick_nohz_full_kick_cpu(int cpu) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9cae286..e4b344e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1499,9 +1499,7 @@ void scheduler_ipi(void)
 */
preempt_fold_need_resched();
 
-   if (llist_empty(&this_rq()->wake_list)
-   && !tick_nohz_full_cpu(smp_processor_id())
-   && !got_nohz_idle_kick())
+   if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
return;
 
/*
@@ -1518,7 +1516,6 @@ void scheduler_ipi(void)
 * somewhat pessimize the simple resched case.
 */
irq_enter();
-   tick_nohz_full_check();
sched_ttwu_pending();
 
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9007f2..4771063 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1225,7 +1225,7 @@ static inline void inc_nr_running(struct rq *rq)
	if (tick_nohz_full_cpu(rq->cpu)) {
		/* Order rq->nr_running write against the IPI */
		smp_wmb();
-		smp_send_reschedule(rq->cpu);
+		tick_nohz_full_kick_cpu(rq->cpu);
}
}
 #endif
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9f8af69..33a0043 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,26 @@ void tick_nohz_full_kick(void)
	irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
 }
 
+static DEFINE_PER_CPU(struct queue_single_data, nohz_full_kick_qsd);
+
+static void nohz_full_kick_queue(struct queue_single_data *qsd)
+{
+   __tick_nohz_full_check();
+}
+
+void tick_nohz_full_kick_cpu(int cpu)
+{
+   if (!tick_nohz_full_cpu(cpu))
+   return;
+
+   if (cpu == smp_processor_id()) {
+   irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+   } else {
+   smp_queue_function_single(cpu, nohz_full_kick_queue,
+ &per_cpu(nohz_full_kick_qsd, cpu));
+   }
+}
+
 static void nohz_full_kick_ipi(void *info)
 {
__tick_nohz_full_check();
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 2/2] nohz: Move full nohz kick to its own IPI

2014-04-02 Thread Frederic Weisbecker
Now that we have smp_queue_function_single() which can be used to
safely queue IPIs when interrupts are disabled and without worrying
about concurrent callers, lets use it for the full dynticks kick to
notify a CPU that it's exiting single task mode.

This unbloats a bit the scheduler IPI that the nohz code was abusing
for its cool "callable anywhere/anytime" properties.

Cc: Andrew Morton a...@linux-foundation.org
Cc: Ingo Molnar mi...@kernel.org
Cc: Jens Axboe jens.ax...@oracle.com
Cc: Kevin Hilman khil...@linaro.org
Cc: Paul E. McKenney paul...@linux.vnet.ibm.com
Cc: Peter Zijlstra pet...@infradead.org
Cc: Thomas Gleixner t...@linutronix.de
Signed-off-by: Frederic Weisbecker fweis...@gmail.com
---
 include/linux/tick.h |  2 ++
 kernel/sched/core.c  |  5 +
 kernel/sched/sched.h |  2 +-
 kernel/time/tick-sched.c | 21 +
 4 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/include/linux/tick.h b/include/linux/tick.h
index b84773c..9d3fcc2 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -182,6 +182,7 @@ static inline bool tick_nohz_full_cpu(int cpu)
 extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
 extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
@@ -190,6 +191,7 @@ static inline bool tick_nohz_full_enabled(void) { return 
false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void __tick_nohz_full_check(void) { }
 static inline void tick_nohz_full_kick(void) { }
+static inline void tick_nohz_full_kick_cpu(int cpu) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9cae286..e4b344e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1499,9 +1499,7 @@ void scheduler_ipi(void)
 */
preempt_fold_need_resched();
 
-   if (llist_empty(&this_rq()->wake_list)
-   && !tick_nohz_full_cpu(smp_processor_id())
-   && !got_nohz_idle_kick())
+   if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
return;
 
/*
@@ -1518,7 +1516,6 @@ void scheduler_ipi(void)
 * somewhat pessimize the simple resched case.
 */
irq_enter();
-   tick_nohz_full_check();
sched_ttwu_pending();
 
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9007f2..4771063 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1225,7 +1225,7 @@ static inline void inc_nr_running(struct rq *rq)
	if (tick_nohz_full_cpu(rq->cpu)) {
		/* Order rq->nr_running write against the IPI */
		smp_wmb();
-		smp_send_reschedule(rq->cpu);
+		tick_nohz_full_kick_cpu(rq->cpu);
}
}
 #endif
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9f8af69..582d3f6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,27 @@ void tick_nohz_full_kick(void)
	irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
 }
 
+static void nohz_full_kick_queue(struct queue_single_data *qsd)
+{
+   __tick_nohz_full_check();
+}
+
+static DEFINE_PER_CPU(struct queue_single_data, nohz_full_kick_qsd) = {
+   .func = nohz_full_kick_queue,
+};
+
+void tick_nohz_full_kick_cpu(int cpu)
+{
+   if (!tick_nohz_full_cpu(cpu))
+   return;
+
+   if (cpu == smp_processor_id()) {
+   irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+   } else {
+   smp_queue_function_single(cpu, &per_cpu(nohz_full_kick_qsd, cpu));
+   }
+}
+
 static void nohz_full_kick_ipi(void *info)
 {
__tick_nohz_full_check();
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/