Provide static_key_slow_inc_nohp(), a variant of static_key_slow_inc()
that does not take the CPU hotplug lock (get_online_cpus()) itself;
the caller is expected to already hold it.

XXX: maybe add an assertion that the hotplug lock is indeed held.
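
A caller is then expected to bracket the call with the hotplug lock
itself, along these lines (a minimal sketch; my_key and my_init are
made-up names for illustration):

	#include <linux/cpu.h>
	#include <linux/jump_label.h>

	static struct static_key my_key = STATIC_KEY_INIT_FALSE;

	void my_init(void)
	{
		get_online_cpus();
		/* ... other setup that must exclude CPU hotplug ... */
		static_key_slow_inc_nohp(&my_key); /* hotplug lock held */
		put_online_cpus();
	}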

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 include/linux/jump_label.h |    3 +++
 kernel/jump_label.c        |   21 +++++++++++++++++----
 2 files changed, 20 insertions(+), 4 deletions(-)

--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -158,6 +158,7 @@ extern void arch_jump_label_transform_st
                                             enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
 extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_inc_nohp(struct static_key *key);
 extern void static_key_slow_dec(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 extern int static_key_count(struct static_key *key);
@@ -213,6 +214,8 @@ static inline void static_key_slow_inc(s
        atomic_inc(&key->enabled);
 }
 
+#define static_key_slow_inc_nohp static_key_slow_inc
+
 static inline void static_key_slow_dec(struct static_key *key)
 {
        STATIC_KEY_CHECK_USE();
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -100,7 +100,7 @@ void static_key_disable(struct static_ke
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-void static_key_slow_inc(struct static_key *key)
+void __static_key_slow_inc(struct static_key *key)
 {
        int v, v1;
 
@@ -129,7 +129,6 @@ void static_key_slow_inc(struct static_k
          * all CPUs, for that to be serialized against CPU hot-plug
         * we need to avoid CPUs coming online.
         */
-       get_online_cpus();
        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
@@ -139,10 +138,22 @@ void static_key_slow_inc(struct static_k
                atomic_inc(&key->enabled);
        }
        jump_label_unlock();
+}
+
+void static_key_slow_inc(struct static_key *key)
+{
+       get_online_cpus();
+       __static_key_slow_inc(key);
        put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
+void static_key_slow_inc_nohp(struct static_key *key)
+{
+       __static_key_slow_inc(key);
+}
+EXPORT_SYMBOL_GPL(static_key_slow_inc_nohp);
+
 static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
 {
@@ -153,7 +164,6 @@ static void __static_key_slow_dec(struct
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
-       get_online_cpus();
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
@@ -167,20 +177,23 @@ static void __static_key_slow_dec(struct
                jump_label_update(key);
        }
        jump_label_unlock();
-       put_online_cpus();
 }
 
 static void jump_label_update_timeout(struct work_struct *work)
 {
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
+       get_online_cpus();
        __static_key_slow_dec(&key->key, 0, NULL);
+       put_online_cpus();
 }
 
 void static_key_slow_dec(struct static_key *key)
 {
        STATIC_KEY_CHECK_USE();
+       get_online_cpus();
        __static_key_slow_dec(key, 0, NULL);
+       put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
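
The XXX above could be addressed along these lines (a sketch only;
lockdep_assert_cpus_held() is assumed to be available, which only
became true with the later hotplug locking rework):

	void static_key_slow_inc_nohp(struct static_key *key)
	{
		/* assert the caller already holds the hotplug lock */
		lockdep_assert_cpus_held();
		__static_key_slow_inc(key);
	}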