The whole of ttwu_stat() is guarded by a single schedstat_enabled()
check, so there is absolutely no point in issuing another static_branch
for every single schedstat_inc() in there.
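
Since ttwu_stat() bails out early when schedstats are disabled, the
rest of the function runs with stats known-enabled and can use the new
unguarded __schedstat_inc() variant. A minimal sketch of the resulting
pattern (the early-out itself is context outside this hunk):

	static void
	ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
	{
		struct rq *rq;

		if (!schedstat_enabled())
			return;

		rq = this_rq();
		/* stats known enabled: no per-callsite static branch */
		__schedstat_inc(rq->ttwu_count);
		...
	}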

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 kernel/sched/core.c  |   16 ++++++++--------
 kernel/sched/stats.h |    2 ++
 2 files changed, 10 insertions(+), 8 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1630,16 +1630,16 @@ ttwu_stat(struct task_struct *p, int cpu
 
 #ifdef CONFIG_SMP
        if (cpu == rq->cpu) {
-               schedstat_inc(rq->ttwu_local);
-               schedstat_inc(p->se.statistics.nr_wakeups_local);
+               __schedstat_inc(rq->ttwu_local);
+               __schedstat_inc(p->se.statistics.nr_wakeups_local);
        } else {
                struct sched_domain *sd;
 
-               schedstat_inc(p->se.statistics.nr_wakeups_remote);
+               __schedstat_inc(p->se.statistics.nr_wakeups_remote);
                rcu_read_lock();
                for_each_domain(rq->cpu, sd) {
                        if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-                               schedstat_inc(sd->ttwu_wake_remote);
+                               __schedstat_inc(sd->ttwu_wake_remote);
                                break;
                        }
                }
@@ -1647,14 +1647,14 @@ ttwu_stat(struct task_struct *p, int cpu
        }
 
        if (wake_flags & WF_MIGRATED)
-               schedstat_inc(p->se.statistics.nr_wakeups_migrate);
+               __schedstat_inc(p->se.statistics.nr_wakeups_migrate);
 #endif /* CONFIG_SMP */
 
-       schedstat_inc(rq->ttwu_count);
-       schedstat_inc(p->se.statistics.nr_wakeups);
+       __schedstat_inc(rq->ttwu_count);
+       __schedstat_inc(p->se.statistics.nr_wakeups);
 
        if (wake_flags & WF_SYNC)
-               schedstat_inc(p->se.statistics.nr_wakeups_sync);
+               __schedstat_inc(p->se.statistics.nr_wakeups_sync);
 }
 
static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -31,6 +31,7 @@ rq_sched_info_dequeued(struct rq *rq, un
                rq->rq_sched_info.run_delay += delta;
 }
#define schedstat_enabled()            static_branch_unlikely(&sched_schedstats)
+#define __schedstat_inc(var)           do { var++; } while (0)
#define schedstat_inc(var)             do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)                do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)                do { if (schedstat_enabled()) { var = (val); } } while (0)
@@ -48,6 +49,7 @@ static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {}
 #define schedstat_enabled()            0
+#define __schedstat_inc(var)           do { } while (0)
 #define schedstat_inc(var)             do { } while (0)
 #define schedstat_add(var, amt)                do { } while (0)
 #define schedstat_set(var, val)                do { } while (0)

