Since kernel kconfig forbids turning off KPROBES_ON_FTRACE for x86, we
don't need to consider the situation where a kprobe probes at a ftrace
location. The only exception should be an early kprobe with
KPROBES_ON_FTRACE enabled. However, it is still impossible for it to get
tainted by ftrace if it is registered before ftrace is ready.

Thus this patch removes unneeded logic to make the code simpler.

Signed-off-by: Wang Nan <wangn...@huawei.com>
---
 arch/x86/kernel/kprobes/core.c | 62 ++++++++----------------------------------
 1 file changed, 12 insertions(+), 50 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4e3d5a9..88a99c0 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -219,55 +219,6 @@ retry:
        }
 }
 
-static unsigned long
-__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
-{
-       struct kprobe *kp;
-       unsigned long faddr;
-
-       kp = get_kprobe((void *)addr);
-       faddr = ftrace_location(addr);
-       /*
-        * Addresses inside the ftrace location are refused by
-        * arch_check_ftrace_location(). Something went terribly wrong
-        * if such an address is checked here.
-        */
-       if (WARN_ON(faddr && faddr != addr))
-               return 0UL;
-       /*
-        * Use the current code if it is not modified by Kprobe
-        * and it cannot be modified by ftrace.
-        */
-       if (!kp && !faddr)
-               return addr;
-
-       /*
-        * Basically, kp->ainsn.insn has an original instruction.
-        * However, RIP-relative instruction can not do single-stepping
-        * at different place, __copy_instruction() tweaks the displacement of
-        * that instruction. In that case, we can't recover the instruction
-        * from the kp->ainsn.insn.
-        *
-        * On the other hand, in case on normal Kprobe, kp->opcode has a copy
-        * of the first byte of the probed instruction, which is overwritten
-        * by int3. And the instruction at kp->addr is not modified by kprobes
-        * except for the first byte, we can recover the original instruction
-        * from it and kp->opcode.
-        *
-        * In case of Kprobes using ftrace, we do not have a copy of
-        * the original instruction. In fact, the ftrace location might
-        * be modified at anytime and even could be in an inconsistent state.
-        * Fortunately, we know that the original code is the ideal 5-byte
-        * long NOP.
-        */
-       memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-       if (faddr)
-               memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
-       else
-               buf[0] = kp->opcode;
-       return (unsigned long)buf;
-}
-
 /*
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
@@ -282,7 +233,18 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
        if (__addr != addr)
                return __addr;
 
-       return __recover_probed_insn(buf, addr);
+       /*
+        * If KPROBES_ON_FTRACE is off, we are not allowed probing at
+        * ftrace location. If it is on, we should use
+        * arm_kprobe_ftrace() and never get here.  As a result, there
+        * is no need to care about conflicts between kprobe and
+        * ftrace. The only exception should be early kprobes. However,
+        * for such kprobes registered before ftrace is ready, it is
+        * impossible to get a tainted instruction; for such kprobes
+        * registered after ftrace is ready, it will use
+        * arm_kprobe_ftrace() and won't get here.
+        */
+       return addr;
 }
 
 /* Check if paddr is at an instruction boundary */
-- 
1.8.4

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to