4.9-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Masami Hiramatsu <mhira...@kernel.org>

commit 804dec5bda9b4fcdab5f67fe61db4a0498af5221 upstream.

Do not modify the single-step execution buffer (kprobe.ainsn.insn)
while resuming from single-stepping; instead, add the jump-back
instruction to the buffer at the time the buffer is prepared.

Signed-off-by: Masami Hiramatsu <mhira...@kernel.org>
Cc: Ananth N Mavinakayanahalli <ana...@linux.vnet.ibm.com>
Cc: Andrey Ryabinin <aryabi...@virtuozzo.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamur...@intel.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Brian Gerst <brge...@gmail.com>
Cc: David S . Miller <da...@davemloft.net>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Josh Poimboeuf <jpoim...@redhat.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ye Xiaolong <xiaolong...@intel.com>
Link: http://lkml.kernel.org/r/149076361560.22469.1610155860343077495.stgit@devbox
Signed-off-by: Ingo Molnar <mi...@kernel.org>
Reviewed-by: "Steven Rostedt (VMware)" <rost...@goodmis.org>
Signed-off-by: Alexey Makhalov <amakha...@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
---
 arch/x86/kernel/kprobes/core.c |   42 +++++++++++++++++++----------------------
 1 file changed, 20 insertions(+), 22 deletions(-)

--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -414,25 +414,38 @@ void free_insn_page(void *page)
        module_memfree(page);
 }
 
+/* Prepare reljump right after instruction to boost */
+static void prepare_boost(struct kprobe *p, int length)
+{
+       if (can_boost(p->ainsn.insn, p->addr) &&
+           MAX_INSN_SIZE - length >= RELATIVEJUMP_SIZE) {
+               /*
+                * These instructions can be executed directly if it
+                * jumps back to correct address.
+                */
+               synthesize_reljump(p->ainsn.insn + length, p->addr + length);
+               p->ainsn.boostable = 1;
+       } else {
+               p->ainsn.boostable = -1;
+       }
+}
+
 static int arch_copy_kprobe(struct kprobe *p)
 {
-       int ret;
+       int len;
 
        set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
 
        /* Copy an instruction with recovering if other optprobe modifies it.*/
-       ret = __copy_instruction(p->ainsn.insn, p->addr);
-       if (!ret)
+       len = __copy_instruction(p->ainsn.insn, p->addr);
+       if (!len)
                return -EINVAL;
 
        /*
         * __copy_instruction can modify the displacement of the instruction,
         * but it doesn't affect boostable check.
         */
-       if (can_boost(p->ainsn.insn, p->addr))
-               p->ainsn.boostable = 0;
-       else
-               p->ainsn.boostable = -1;
+       prepare_boost(p, len);
 
        set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
 
@@ -897,21 +910,6 @@ static void resume_execution(struct kpro
                break;
        }
 
-       if (p->ainsn.boostable == 0) {
-               if ((regs->ip > copy_ip) &&
-                   (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
-                       /*
-                        * These instructions can be executed directly if it
-                        * jumps back to correct address.
-                        */
-                       synthesize_reljump((void *)regs->ip,
-                               (void *)orig_ip + (regs->ip - copy_ip));
-                       p->ainsn.boostable = 1;
-               } else {
-                       p->ainsn.boostable = -1;
-               }
-       }
-
        regs->ip += orig_ip - copy_ip;
 
 no_change:
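
[ Reviewer note: for anyone not familiar with the boost mechanism, the
  standalone sketch below illustrates what the patch relies on
  synthesize_reljump() producing right after the copied instruction in the
  single-step buffer: a 5-byte x86 near jump (opcode 0xE9 plus a 32-bit
  displacement) back to p->addr + length.  The helper name and user-space
  context here are illustrative assumptions, not the kernel code.  With the
  jump prepared at arch_copy_kprobe() time, resume_execution() no longer has
  to patch the (now read-only) buffer on the single-step exit path. ]

#include <stdint.h>
#include <string.h>

#define RELJUMP_OPCODE	0xe9	/* x86 "jmp rel32" */
#define RELJUMP_SIZE	5	/* 1 opcode byte + 4 displacement bytes */

/* Emit a near jump at 'from' that lands on 'to' (illustration only). */
static void sketch_reljump(void *from, void *to)
{
	uint8_t *buf = from;
	/* rel32 is measured from the end of the 5-byte jump instruction */
	int32_t rel = (int32_t)((uint8_t *)to - ((uint8_t *)from + RELJUMP_SIZE));

	buf[0] = RELJUMP_OPCODE;
	memcpy(&buf[1], &rel, sizeof(rel));
}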

