The static_key->next field goes mostly unused. The field is used for
associating module uses with a static key. Most uses of struct static_key
define a static key in the core kernel and make use of it entirely within
the core kernel, or define the static key in a module and make use of it
only from within that module. In fact, of the ~3,000 static keys defined,
I found only about 5 or so that did not fit this pattern.

Thus, we can remove the static_key->next field entirely and overload
the static_key->entries field. That is, when all the static_key uses
are contained within the same module, static_key->entries continues
to point to those uses. However, if the static_key uses are not contained
within the module where the static_key is defined, then we allocate a
struct static_key_mod, store a pointer to the uses within that
struct static_key_mod, and have the static key point at the static_key_mod.
This does incur some extra memory usage when a static_key is used in a
module that does not define it, but since there are only a handful of such
cases there is a net savings.

In order to identify if the static_key->entries pointer contains a
struct static_key_mod or a struct jump_entry pointer, bit 1 of
static_key->entries is set to 1 if it points to a struct static_key_mod and
is 0 if it points to a struct jump_entry. We were already using bit 0 in a
similar way to store the initial value of the static_key. This does mean
that allocations of struct static_key_mod and that the struct jump_entry
tables need to be at least 4-byte aligned in memory. As far as I can tell
all arches meet this criteria.

For my .config, the patch increased the text by 267 bytes, but reduced
the data size by 14,976 bytes (bss is unchanged), for a net savings of
14,709 bytes.

   text    data     bss     dec     hex filename
8092427 5016512  790528 13899467         d416cb vmlinux.pre
8092694 5001536  790528 13884758         d3dd56 vmlinux.post

Cc: Peter Zijlstra <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Joe Perches <[email protected]>
Signed-off-by: Jason Baron <[email protected]>
---
Changed in v3:
-added static_key_set_[entries|mod]() to clean up type casting (Ingo Molnar)
Changed in v2:
-Replace static_key->entries with union (Steven Rostedt)
---
 Documentation/static-keys.txt |   4 +-
 include/linux/jump_label.h    |  23 +++++---
 kernel/jump_label.c           | 126 ++++++++++++++++++++++++++++++++++--------
 3 files changed, 120 insertions(+), 33 deletions(-)

diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index ea8d7b4e53f0..32a25fad0c1b 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -155,7 +155,9 @@ or:
 
 There are a few functions and macros that architectures must implement in order
 to take advantage of this optimization. If there is no architecture support, we
-simply fall back to a traditional, load, test, and jump sequence.
+simply fall back to a traditional, load, test, and jump sequence. Also, the
+struct jump_entry table must be at least 4-byte aligned because the
+static_key->entries field makes use of the two least significant bits.
 
 * select HAVE_ARCH_JUMP_LABEL, see: arch/x86/Kconfig
 
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index a0547c571800..680c98b2f41c 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -89,11 +89,17 @@ extern bool static_key_initialized;
 
 struct static_key {
        atomic_t enabled;
-/* Set lsb bit to 1 if branch is default true, 0 ot */
-       struct jump_entry *entries;
-#ifdef CONFIG_MODULES
-       struct static_key_mod *next;
-#endif
+/*
+ * bit 0 => 1 if key is initially true
+ *         0 if initially false
+ * bit 1 => 1 if points to struct static_key_mod
+ *         0 if points to struct jump_entry
+ */
+       union {
+               unsigned long type;
+               struct jump_entry *entries;
+               struct static_key_mod *next;
+       };
 };
 
 #else
@@ -118,9 +124,10 @@ struct module;
 
 #ifdef HAVE_JUMP_LABEL
 
-#define JUMP_TYPE_FALSE        0UL
-#define JUMP_TYPE_TRUE 1UL
-#define JUMP_TYPE_MASK 1UL
+#define JUMP_TYPE_FALSE                0UL
+#define JUMP_TYPE_TRUE         1UL
+#define JUMP_TYPE_LINKED       2UL
+#define JUMP_TYPE_MASK         3UL
 
 static __always_inline bool static_key_false(struct static_key *key)
 {
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index a9b8cf500591..2eb9e80dc691 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -236,12 +236,27 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry
 
 static inline struct jump_entry *static_key_entries(struct static_key *key)
 {
-       return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
+       return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
 }
 
 static inline bool static_key_type(struct static_key *key)
 {
-       return (unsigned long)key->entries & JUMP_TYPE_MASK;
+       return key->type & JUMP_TYPE_TRUE;
+}
+
+static inline bool static_key_linked(struct static_key *key)
+{
+       return key->type & JUMP_TYPE_LINKED;
+}
+
+static inline void static_key_clear_linked(struct static_key *key)
+{
+       key->type &= ~JUMP_TYPE_LINKED;
+}
+
+static inline void static_key_set_linked(struct static_key *key)
+{
+       key->type |= JUMP_TYPE_LINKED;
 }
 
 static inline struct static_key *jump_entry_key(struct jump_entry *entry)
@@ -254,6 +269,25 @@ static bool jump_entry_branch(struct jump_entry *entry)
        return (unsigned long)entry->key & 1UL;
 }
 
+/***
+ * A 'struct static_key' uses a union such that it either points directly
+ * to a table of 'struct jump_entry' or to a linked list of modules, which in
+ * turn point to 'struct jump_entry' tables. Since most static_key uses occur
+ * within the module in which they are defined, this saves space. Since the
+ * two lower bits of the pointer are used to keep track of which pointer type
+ * is in use and to store the initial branch direction, we need a special
+ * access function which preserves these bits.
+ */
+static void static_key_set_entries(struct static_key *key,
+                                  struct jump_entry *entries)
+{
+       unsigned long type;
+
+       type = key->type & JUMP_TYPE_MASK;
+       key->entries = entries;
+       key->type |= type;
+}
+
 static enum jump_label_type jump_label_type(struct jump_entry *entry)
 {
        struct static_key *key = jump_entry_key(entry);
@@ -313,13 +347,7 @@ void __init jump_label_init(void)
                        continue;
 
                key = iterk;
-               /*
-                * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
-                */
-               *((unsigned long *)&key->entries) += (unsigned long)iter;
-#ifdef CONFIG_MODULES
-               key->next = NULL;
-#endif
+               static_key_set_entries(key, iter);
        }
        static_key_initialized = true;
        jump_label_unlock();
@@ -343,6 +371,22 @@ struct static_key_mod {
        struct module *mod;
 };
 
+static inline struct static_key_mod *static_key_mod(struct static_key *key)
+{
+       return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
+}
+
+/* See comments above static_key_set_entries() */
+static void static_key_set_mod(struct static_key *key,
+                              struct static_key_mod *mod)
+{
+       unsigned long type;
+
+       type = static_key_type(key);
+       key->next = mod;
+       key->type |= type;
+}
+
 static int __jump_label_mod_text_reserved(void *start, void *end)
 {
        struct module *mod;
@@ -363,13 +407,19 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
 
 static void __jump_label_mod_update(struct static_key *key)
 {
-       struct static_key_mod *mod;
+       struct static_key_mod *mod = static_key_mod(key);
 
-       for (mod = key->next; mod; mod = mod->next) {
+       while (mod) {
+               struct jump_entry *stop;
                struct module *m = mod->mod;
 
-               __jump_label_update(key, mod->entries,
-                                   m->jump_entries + m->num_jump_entries);
+               if (!m)
+                       stop = __stop___jump_table;
+               else
+                       stop = m->jump_entries + m->num_jump_entries;
+               if (mod->entries)
+                       __jump_label_update(key, mod->entries, stop);
+               mod = mod->next;
        }
 }
 
@@ -404,7 +454,7 @@ static int jump_label_add_module(struct module *mod)
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
-       struct static_key_mod *jlm;
+       struct static_key_mod *jlm, *jlm2;
 
        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
@@ -421,20 +471,32 @@ static int jump_label_add_module(struct module *mod)
 
                key = iterk;
                if (within_module(iter->key, mod)) {
-                       /*
-                        * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
-                        */
-                       *((unsigned long *)&key->entries) += (unsigned long)iter;
-                       key->next = NULL;
+                       static_key_set_entries(key, iter);
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
+               if (!static_key_linked(key)) {
+                       jlm2 = kzalloc(sizeof(struct static_key_mod),
+                                      GFP_KERNEL);
+                       if (!jlm2) {
+                               kfree(jlm);
+                               return -ENOMEM;
+                       }
+                       preempt_disable();
+                       jlm2->mod = __module_address((unsigned long)key);
+                       preempt_enable();
+                       jlm2->entries = static_key_entries(key);
+                       jlm2->next = NULL;
+                       static_key_set_mod(key, jlm2);
+                       static_key_set_linked(key);
+               }
                jlm->mod = mod;
                jlm->entries = iter;
-               jlm->next = key->next;
-               key->next = jlm;
+               jlm->next = static_key_mod(key);
+               static_key_set_mod(key, jlm);
+               static_key_set_linked(key);
 
                /* Only update if we've changed from our initial state */
                if (jump_label_type(iter) != jump_label_init_type(iter))
@@ -462,15 +524,28 @@ static void jump_label_del_module(struct module *mod)
                        continue;
 
                prev = &key->next;
-               jlm = key->next;
+               jlm = static_key_mod(key);
 
                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }
 
-               if (jlm) {
+               if (!jlm)
+                       continue;
+
+               if (prev == &key->next)
+                       static_key_set_mod(key, jlm->next);
+               else
                        *prev = jlm->next;
+
+               kfree(jlm);
+
+               jlm = static_key_mod(key);
+               /* if only one entry is left, fold it back into the static_key */
+               if (jlm->next == NULL) {
+                       static_key_set_entries(key, jlm->entries);
+                       static_key_clear_linked(key);
                        kfree(jlm);
                }
        }
@@ -565,7 +640,10 @@ static void jump_label_update(struct static_key *key)
 #ifdef CONFIG_MODULES
        struct module *mod;
 
-       __jump_label_mod_update(key);
+       if (static_key_linked(key)) {
+               __jump_label_mod_update(key);
+               return;
+       }
 
        preempt_disable();
        mod = __module_address((unsigned long)key);
-- 
2.6.1

Reply via email to