Commit-ID:  f5efc6fad63f5533a6083e95286920d5753e52bf
Gitweb:     http://git.kernel.org/tip/f5efc6fad63f5533a6083e95286920d5753e52bf
Author:     Peter Zijlstra (Intel) <peterz@infradead.org>
AuthorDate: Tue, 18 Apr 2017 19:05:04 +0200
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 20 Apr 2017 13:08:57 +0200
jump_label: Provide static_key_slow_inc_cpuslocked()

Provide static_key_slow_inc_cpuslocked(), a variant that doesn't
take cpu_hotplug_lock().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: jbaron@akamai.com
Link: http://lkml.kernel.org/r/20170418103422.636958...@infradead.org

---
 include/linux/jump_label.h |  3 +++
 kernel/jump_label.c        | 21 +++++++++++++++++----
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 2afd74b..7d07f0b 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -158,6 +158,7 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
 					     enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
 extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_inc_cpuslocked(struct static_key *key);
 extern void static_key_slow_dec(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 extern int static_key_count(struct static_key *key);
@@ -213,6 +214,8 @@ static inline void static_key_slow_inc(struct static_key *key)
 	atomic_inc(&key->enabled);
 }
 
+#define static_key_slow_inc_cpuslocked static_key_slow_inc
+
 static inline void static_key_slow_dec(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE();
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index f3afe07..308b12e 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -101,7 +101,7 @@ void static_key_disable(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-void static_key_slow_inc(struct static_key *key)
+void __static_key_slow_inc(struct static_key *key)
 {
 	int v, v1;
 
@@ -130,7 +130,6 @@ void static_key_slow_inc(struct static_key *key)
 	 * the all CPUs, for that to be serialized against CPU hot-plug
 	 * we need to avoid CPUs coming online.
 	 */
-	get_online_cpus();
 	jump_label_lock();
 	if (atomic_read(&key->enabled) == 0) {
 		atomic_set(&key->enabled, -1);
@@ -140,10 +139,22 @@ void static_key_slow_inc(struct static_key *key)
 		atomic_inc(&key->enabled);
 	}
 	jump_label_unlock();
+}
+
+void static_key_slow_inc(struct static_key *key)
+{
+	get_online_cpus();
+	__static_key_slow_inc(key);
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
+void static_key_slow_inc_cpuslocked(struct static_key *key)
+{
+	__static_key_slow_inc(key);
+}
+EXPORT_SYMBOL_GPL(static_key_slow_inc_cpuslocked);
+
 static void __static_key_slow_dec(struct static_key *key,
 		unsigned long rate_limit, struct delayed_work *work)
 {
@@ -154,7 +165,6 @@ static void __static_key_slow_dec(struct static_key *key,
 	 * returns is unbalanced, because all other static_key_slow_inc()
 	 * instances block while the update is in progress.
 	 */
-	get_online_cpus();
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
@@ -168,20 +178,23 @@ static void __static_key_slow_dec(struct static_key *key,
 		jump_label_update(key);
 	}
 	jump_label_unlock();
-	put_online_cpus();
 }
 
 static void jump_label_update_timeout(struct work_struct *work)
 {
 	struct static_key_deferred *key =
 		container_of(work, struct static_key_deferred, work.work);
+	get_online_cpus();
 	__static_key_slow_dec(&key->key, 0, NULL);
+	put_online_cpus();
 }
 
 void static_key_slow_dec(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE();
+	get_online_cpus();
 	__static_key_slow_dec(key, 0, NULL);
+	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 