Verify that the text we're about to change is what we expect it to be: a tail-call site must currently hold a JMP32 or RET instruction, and a regular call site must hold a CALL or the 5-byte atomic NOP. Anything else means the kernel text is corrupt, so warn (once) rather than silently patch over it.

Requested-by: Steven Rostedt <rost...@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 arch/x86/kernel/static_call.c |   28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -41,6 +41,26 @@ static void __static_call_transform(void
        text_poke_bp(insn, code, size, NULL);
 }
 
+static void __static_call_validate(void *insn, bool tail)
+{
+       u8 opcode = *(u8 *)insn;
+
+       if (tail) {
+               if (opcode == JMP32_INSN_OPCODE ||
+                   opcode == RET_INSN_OPCODE)
+                       return;
+       } else {
+               if (opcode == CALL_INSN_OPCODE ||
+                   !memcmp(insn, ideal_nops[NOP_ATOMIC5], 5))
+                       return;
+       }
+
+       /*
+        * If we ever trigger this, our text is corrupt, we'll probably not live long.
+        */
+       WARN_ONCE(1, "unexpected static_call insn opcode 0x%x at %pS\n", opcode, insn);
+}
+
 static inline enum insn_type __sc_insn(bool null, bool tail)
 {
        /*
@@ -60,11 +80,15 @@ void arch_static_call_transform(void *si
 {
        mutex_lock(&text_mutex);
 
-       if (tramp)
+       if (tramp) {
+               __static_call_validate(tramp, true);
                __static_call_transform(tramp, __sc_insn(!func, true), func);
+       }
 
-       if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site)
+       if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) {
+               __static_call_validate(site, tail);
                __static_call_transform(site, __sc_insn(!func, tail), func);
+       }
 
        mutex_unlock(&text_mutex);
 }
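
For reference, here is a minimal user-space sketch of the same opcode check, handy for seeing what the validation accepts and rejects. The opcode values are the standard x86 encodings (0xE9 jmp rel32, 0xC3 near ret, 0xE8 call rel32); the 5-byte NOP bytes stand in for ideal_nops[NOP_ATOMIC5] and are an assumption here, since the kernel's actual choice is CPU-dependent. This is illustrative only, not the kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define JMP32_INSN_OPCODE 0xE9   /* jmp rel32 */
#define RET_INSN_OPCODE   0xC3   /* ret (near) */
#define CALL_INSN_OPCODE  0xE8   /* call rel32 */

/* Assumed 5-byte NOP; the kernel's ideal_nops[NOP_ATOMIC5] is CPU-dependent. */
static const unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

/* Same shape as __static_call_validate() above, but returns the verdict. */
static bool validate(const void *insn, bool tail)
{
	unsigned char opcode = *(const unsigned char *)insn;

	if (tail)
		return opcode == JMP32_INSN_OPCODE || opcode == RET_INSN_OPCODE;

	return opcode == CALL_INSN_OPCODE || !memcmp(insn, nop5, sizeof(nop5));
}

int main(void)
{
	unsigned char call_site[5] = { 0xe8, 0x00, 0x00, 0x00, 0x00 }; /* call +0 */
	unsigned char bogus[5]     = { 0x90, 0x90, 0x90, 0x90, 0x90 }; /* 1-byte NOPs */

	printf("call site:  %d\n", validate(call_site, false)); /* prints 1 */
	printf("bogus site: %d\n", validate(bogus, false));     /* prints 0 */
	return 0;
}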

