[patch 3/3] nohz: tick_nohz_kick_task: only IPI if remote task is running

2021-01-28  Marcelo Tosatti
If the task is not running, run_posix_cpu_timers() has nothing
to elapse, so spare the IPI in that case.

Suggested-by: Peter Zijlstra 
Signed-off-by: Marcelo Tosatti 
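
For reference, this is roughly how the kick path reads with the patch
applied, reassembled from the hunks below. The tail of the function (the
tick_nohz_full_kick_cpu() call and preempt_enable()) is not part of the
diff context and is assumed here, so treat this as a sketch rather than a
literal copy of tick-sched.c:

static void tick_nohz_kick_task(struct task_struct *tsk)
{
	int cpu = task_cpu(tsk);

	/*
	 * New early exit: a task that is not queued will observe the
	 * updated tick dependency from tick_nohz_task_switch() once it
	 * is scheduled back in, so the IPI can be skipped.
	 */
	if (!task_on_rq(tsk))
		return;

	preempt_disable();
	if (cpu_online(cpu))
		/* assumed tail, not shown in the hunk context below */
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}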

Index: linux-2.6/kernel/sched/core.c
===================================================================
--- linux-2.6.orig/kernel/sched/core.c
+++ linux-2.6/kernel/sched/core.c
@@ -9182,3 +9182,9 @@ void call_trace_sched_update_nr_running(
 {
 trace_sched_update_nr_running_tp(rq, count);
 }
+
+bool task_on_rq(struct task_struct *p)
+{
+   return p->on_rq == TASK_ON_RQ_QUEUED;
+}
+
Index: linux-2.6/include/linux/sched.h
===================================================================
--- linux-2.6.orig/include/linux/sched.h
+++ linux-2.6/include/linux/sched.h
@@ -232,6 +232,8 @@ extern void io_schedule_finish(int token
 extern long io_schedule_timeout(long timeout);
 extern void io_schedule(void);
 
+extern bool task_on_rq(struct task_struct *p);
+
 /**
  * struct prev_cputime - snapshot of system and user cputime
  * @utime: time spent in user mode
Index: linux-2.6/kernel/time/tick-sched.c
===================================================================
--- linux-2.6.orig/kernel/time/tick-sched.c
+++ linux-2.6/kernel/time/tick-sched.c
@@ -324,8 +324,6 @@ void tick_nohz_full_kick_cpu(int cpu)
 
 static void tick_nohz_kick_task(struct task_struct *tsk)
 {
-   int cpu = task_cpu(tsk);
-
/*
 * If the task concurrently migrates to another cpu,
 * we guarantee it sees the new tick dependency upon
@@ -340,6 +338,23 @@ static void tick_nohz_kick_task(struct t
 *   tick_nohz_task_switch()            smp_mb() (atomic_fetch_or())
 *     LOAD p->tick_dep_mask            LOAD p->cpu
 */
+   int cpu = task_cpu(tsk);
+
+   /*
+    * If the task is not running, run_posix_cpu_timers()
+    * has nothing to elapse, so the IPI can be spared in
+    * that case.
+    *
+    * activate_task()                      STORE p->tick_dep_mask
+    *   STORE p->task_on_rq
+    * __schedule() (switch to task 'p')    smp_mb() (atomic_fetch_or())
+    *   LOCK rq->lock                      LOAD p->task_on_rq
+    *   smp_mb__after_spin_lock()
+    *   tick_nohz_task_switch()
+    *     LOAD p->tick_dep_mask
+    */
+   if (!task_on_rq(tsk))
+       return;
 
preempt_disable();
if (cpu_online(cpu))
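
To make the ordering comment above concrete: it is the classic
store-buffering pattern with a full barrier on each side, which forbids
the case where the kicker misses p->task_on_rq *and* the task misses
p->tick_dep_mask. Below is a small, self-contained user-space analogy
using C11 atomics and pthreads; it is not kernel code and all names in it
are illustrative only. The "kicker" thread stands for the
atomic_fetch_or() on tick_dep_mask followed by the task_on_rq() check,
the "target" thread for activate_task() plus smp_mb__after_spin_lock()
before the tick_dep_mask load in tick_nohz_task_switch():

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int tick_dep_mask;	/* stands for p->tick_dep_mask */
static atomic_int on_rq;		/* stands for p->on_rq */
static int kicker_saw_on_rq, target_saw_dep;

static void *kicker(void *arg)
{
	(void)arg;
	/* STORE tick_dep_mask; atomic_fetch_or() implies a full barrier */
	atomic_store_explicit(&tick_dep_mask, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	/* LOAD on_rq: decides whether the IPI is sent */
	kicker_saw_on_rq = atomic_load_explicit(&on_rq, memory_order_relaxed);
	return NULL;
}

static void *target(void *arg)
{
	(void)arg;
	/* activate_task(): STORE on_rq */
	atomic_store_explicit(&on_rq, 1, memory_order_relaxed);
	/* __schedule(): LOCK rq->lock + smp_mb__after_spin_lock() */
	atomic_thread_fence(memory_order_seq_cst);
	/* tick_nohz_task_switch(): LOAD tick_dep_mask */
	target_saw_dep = atomic_load_explicit(&tick_dep_mask,
					      memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, kicker, NULL);
	pthread_create(&b, NULL, target, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/*
	 * The barriers forbid both sides missing each other's store:
	 * a task skipped by the kicker is guaranteed to see the new
	 * dependency when it next switches in.
	 */
	assert(kicker_saw_on_rq || target_saw_dep);
	printf("kicker saw on_rq=%d, target saw tick_dep_mask=%d\n",
	       kicker_saw_on_rq, target_saw_dep);
	return 0;
}

Build with "cc -pthread" and run; the assertion should never trigger.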



