This allows architectures to have variable-sized jumps. An architecture
either keeps defining JUMP_LABEL_NOP_SIZE for fixed-size jump sites, or
removes it and provides arch_jump_entry_size() to report the size of
each individual site; generic code goes through the new
jump_entry_size() helper, which picks between the two.
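
On x86 every jump site is still a 5-byte JMP32 at this point, so
arch_jump_entry_size() simply returns JMP32_INSN_SIZE. As a rough
sketch only (not part of this patch), an architecture with mixed
2- and 5-byte sites could instead decode the instruction at the site,
assuming an instruction decoder such as x86's insn_decode_kernel()
is available:

	int arch_jump_entry_size(struct jump_entry *entry)
	{
		struct insn insn = {};

		/* Decode whatever currently lives at the jump site. */
		insn_decode_kernel(&insn, (void *)jump_entry_code(entry));

		/* Only 2-byte and 5-byte NOP/JMP sites are expected. */
		BUG_ON(insn.length != 2 && insn.length != 5);

		return insn.length;
	}

Generic code stays oblivious either way: jump_entry_size() falls back
to JUMP_LABEL_NOP_SIZE on architectures that still define it.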

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 arch/x86/include/asm/jump_label.h |    4 ++--
 arch/x86/kernel/jump_label.c      |    5 +++++
 include/linux/jump_label.h        |    9 +++++++++
 kernel/jump_label.c               |    2 +-
 4 files changed, 17 insertions(+), 3 deletions(-)

--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -4,8 +4,6 @@
 
 #define HAVE_JUMP_LABEL_BATCH
 
-#define JUMP_LABEL_NOP_SIZE 5
-
 #ifdef CONFIG_X86_64
 # define STATIC_KEY_INIT_NOP P6_NOP5_ATOMIC
 #else
@@ -53,6 +51,8 @@ static __always_inline bool arch_static_
        return true;
 }
 
+extern int arch_jump_entry_size(struct jump_entry *entry);
+
 #else  /* __ASSEMBLY__ */
 
 .macro STATIC_BRANCH_FALSE_LIKELY target, key
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -16,6 +16,11 @@
 #include <asm/alternative.h>
 #include <asm/text-patching.h>
 
+int arch_jump_entry_size(struct jump_entry *entry)
+{
+       return JMP32_INSN_SIZE;
+}
+
 static const void *
 __jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type, int init)
 {
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -176,6 +176,15 @@ static inline void jump_entry_set_init(s
        entry->key |= 2;
 }
 
+static inline int jump_entry_size(struct jump_entry *entry)
+{
+#ifdef JUMP_LABEL_NOP_SIZE
+       return JUMP_LABEL_NOP_SIZE;
+#else
+       return arch_jump_entry_size(entry);
+#endif
+}
+
 #endif
 #endif
 
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -309,7 +309,7 @@ EXPORT_SYMBOL_GPL(jump_label_rate_limit)
 static int addr_conflict(struct jump_entry *entry, void *start, void *end)
 {
        if (jump_entry_code(entry) <= (unsigned long)end &&
-           jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
+           jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
                return 1;
 
        return 0;

