Use probe_kernel_read() to avoid unexpected faults while copying
kernel text in __recover_probed_insn(), __recover_optprobed_insn()
and __copy_instruction(). If the read faults, these functions now
return 0 so that callers treat the instruction as unrecoverable.
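
For reference, the conversion follows the pattern sketched below. This is
an illustration only, not part of the diff, and the helper name
copy_insn_safely() is made up for the example. probe_kernel_read() performs
the copy with page faults disabled and returns non-zero on failure, so the
caller can bail out cleanly when the text at @addr is not readable:

#include <linux/uaccess.h>	/* probe_kernel_read() */
#include <linux/kprobes.h>	/* kprobe_opcode_t, MAX_INSN_SIZE */

/*
 * Copy one instruction slot of kernel text at @addr into @buf.
 * Returns @addr on success and 0 on failure, following the
 * "zero means not recoverable" convention of the functions
 * touched by this patch.
 */
static unsigned long copy_insn_safely(kprobe_opcode_t *buf, unsigned long addr)
{
	/* A fault is reported as an error code here instead of an oops. */
	if (probe_kernel_read(buf, (void *)addr,
			      MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	return addr;
}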

Signed-off-by: Masami Hiramatsu <mhira...@kernel.org>
---
  Note that this is just an update of a patch which I sent to
  LKML last month ( https://lkml.org/lkml/2017/2/27/294 )
---
 arch/x86/kernel/kprobes/core.c |   12 +++++++++---
 arch/x86/kernel/kprobes/opt.c  |    5 ++++-
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 3238752..a9ae61a 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -259,7 +259,10 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
         * Fortunately, we know that the original code is the ideal 5-byte
         * long NOP.
         */
-       memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       if (probe_kernel_read(buf, (void *)addr,
+               MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+               return 0UL;
+
        if (faddr)
                memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
        else
@@ -271,7 +274,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
- * Returns zero if the instruction can not get recovered.
+ * Returns zero if the instruction can not get recovered (or access failed).
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
@@ -365,7 +368,10 @@ int __copy_instruction(u8 *dest, u8 *src)
        /* Another subsystem puts a breakpoint, failed to recover */
        if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
                return 0;
-       memcpy(dest, insn.kaddr, length);
+
+       /* This can access kernel text if the given address is not recovered */
+       if (probe_kernel_read(dest, insn.kaddr, length))
+               return 0;
 
 #ifdef CONFIG_X86_64
        /* Only x86_64 has RIP relative instructions */
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index b121037..5b52334 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -65,7 +65,10 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
         * overwritten by jump destination address. In this case, original
         * bytes must be recovered from op->optinsn.copied_insn buffer.
         */
-       memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       if (probe_kernel_read(buf, (void *)addr,
+               MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+               return 0UL;
+
        if (addr == (unsigned long)kp->addr) {
                buf[0] = kp->opcode;
                memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
