sched_clock_running may be read every time sched_clock_cpu() is called. Yet, this variable is updated only twice during boot and never changes again, so it is better to make it a static key.
Signed-off-by: Pavel Tatashin <pasha.tatas...@oracle.com>
---
 kernel/sched/clock.c | 16 ++++++++--------
 kernel/sched/debug.c |  2 --
 2 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 4d07b785d566..07e54073d6af 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -66,7 +66,7 @@ unsigned long long __weak sched_clock(void)
 }
 EXPORT_SYMBOL_GPL(sched_clock);

-__read_mostly int sched_clock_running;
+static DEFINE_STATIC_KEY_FALSE(sched_clock_running);

 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 /*
@@ -190,7 +190,7 @@ void clear_sched_clock_stable(void)

 	smp_mb(); /* matches sched_clock_init_late() */

-	if (sched_clock_running == 2)
+	if (static_key_count(&sched_clock_running.key) == 2)
 		__clear_sched_clock_stable();
 }

@@ -203,7 +203,7 @@ void __init sched_clock_init(void)
 {
 	unsigned long flags;

-	sched_clock_running = 1;
+	static_branch_inc(&sched_clock_running);

 	/* Adjust __gtod_offset for contigious transition from early clock */
 	local_irq_save(flags);
@@ -217,7 +217,7 @@ void __init sched_clock_init(void)
  */
 static int __init sched_clock_init_late(void)
 {
-	sched_clock_running = 2;
+	static_branch_inc(&sched_clock_running);
 	/*
 	 * Ensure that it is impossible to not do a static_key update.
 	 *
@@ -362,7 +362,7 @@ u64 sched_clock_cpu(int cpu)
 	if (sched_clock_stable())
 		return sched_clock() + __sched_clock_offset;

-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return sched_clock();

 	preempt_disable_notrace();
@@ -385,7 +385,7 @@ void sched_clock_tick(void)
 	if (sched_clock_stable())
 		return;

-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return;

 	lockdep_assert_irqs_disabled();
@@ -444,13 +444,13 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

 void __init sched_clock_init(void)
 {
-	sched_clock_running = 1;
+	static_branch_inc(&sched_clock_running);
 	generic_sched_clock_init();
 }

 u64 sched_clock_cpu(int cpu)
 {
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return 0;

 	return sched_clock();
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e593b4118578..b0212f489a33 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -623,8 +623,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 #undef PU
 }

-extern __read_mostly int sched_clock_running;
-
 static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
--
2.18.0
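
For reference, the boot-time state machine this patch preserves can be sketched in plain user-space C. This is only an analogy, not the kernel API: the helpers below stand in for static_branch_inc()/static_key_count() using an ordinary counter, since real jump labels need kernel support, and the clock value is a made-up constant.

	/*
	 * NOTE: user-space analogy only, not the kernel API.  The helpers
	 * below stand in for static_branch_inc()/static_key_count() with a
	 * plain counter; the clock reading is an arbitrary constant.
	 */
	#include <stdio.h>

	static int sched_clock_running;		/* models the static key's enable count */

	static void static_branch_inc(int *key)	/* stand-in for the kernel helper */
	{
		(*key)++;
	}

	static int static_key_count(int *key)	/* stand-in for the kernel helper */
	{
		return *key;
	}

	static unsigned long long sched_clock_cpu(void)
	{
		/* Before sched_clock_init() the count is 0: report 0, as the patch does. */
		if (!static_key_count(&sched_clock_running))
			return 0;
		return 42;			/* pretend clock reading */
	}

	int main(void)
	{
		printf("early read:     %llu\n", sched_clock_cpu());	/* 0 */

		static_branch_inc(&sched_clock_running);		/* sched_clock_init() */
		printf("after init:     %llu\n", sched_clock_cpu());	/* 42 */

		static_branch_inc(&sched_clock_running);		/* sched_clock_init_late() */
		printf("late init seen: %d\n",
		       static_key_count(&sched_clock_running) == 2);	/* 1 */
		return 0;
	}

The reason the patch keeps a counting key (two static_branch_inc() calls) instead of a plain enable/disable is visible in clear_sched_clock_stable(): the "== 2" check only fires once sched_clock_init_late() has run, matching the old "sched_clock_running = 2" state.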