Similar to the x86 kretprobe deadlock issue, arm64 also implements a
kretprobe-booster (the trampoline code calls the handler directly).
So it has the same deadlock issue when there are two kretprobes, one
on a normal function and one on a function called from FIQ (or from
any other context that can run while local IRQs are disabled).
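
To illustrate the scenario, a minimal pair of kretprobe registrations
that can hit this looks roughly like the sketch below. The probed
symbols are only examples; "fiq_callable_func" is hypothetical and
stands for any function that may be entered while the trampoline
handler holds the kretprobe hash lock.

/*
 * Sketch only: two kretprobes whose combination can trigger the
 * deadlock described above. The probed symbols are examples;
 * "fiq_callable_func" is hypothetical.
 */
#include <linux/kprobes.h>
#include <linux/module.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	return 0;
}

/* kretprobe on an ordinary, frequently called function */
static struct kretprobe krp_normal = {
	.handler = ret_handler,
	.kp.symbol_name = "vfs_read",
};

/*
 * kretprobe on a function that can be entered from FIQ (or any other
 * context running with IRQs disabled); its return goes through
 * kretprobe_trampoline while the first probe's trampoline handler
 * may already hold the kretprobe hash lock on this CPU.
 */
static struct kretprobe krp_fiq = {
	.handler = ret_handler,
	.kp.symbol_name = "fiq_callable_func",	/* hypothetical */
};

static int __init demo_init(void)
{
	int ret = register_kretprobe(&krp_normal);

	if (ret)
		return ret;
	ret = register_kretprobe(&krp_fiq);
	if (ret)
		unregister_kretprobe(&krp_normal);
	return ret;
}

static void __exit demo_exit(void)
{
	unregister_kretprobe(&krp_fiq);
	unregister_kretprobe(&krp_normal);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

If the second function is entered while the first probe's trampoline
handler already holds the per-task kretprobe hash lock on the same
CPU, the lock is taken again and that CPU deadlocks.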

This fixes the issue in the same way as we did on x86.

Signed-off-by: Masami Hiramatsu <[email protected]>
---
 arch/arm64/kernel/probes/kprobes.c |   12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2a07aae..401f7c9 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -561,6 +561,8 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
        return false;
 }
 
+static struct kprobe dummy_retprobe = {.addr = (void *)&kretprobe_trampoline};
+
 void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
 {
        struct kretprobe_instance *ri = NULL;
@@ -572,6 +574,11 @@ void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
        kprobe_opcode_t *correct_ret_addr = NULL;
 
        INIT_HLIST_HEAD(&empty_rp);
+
+       /* This prevents the kernel from changing the running CPU while processing */
+       preempt_disable();
+       get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+       __this_cpu_write(current_kprobe, &dummy_retprobe);
        kretprobe_hash_lock(current, &head, &flags);
 
        /*
@@ -614,10 +621,9 @@ void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
                orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
                        __this_cpu_write(current_kprobe, &ri->rp->kp);
-                       get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
-                       __this_cpu_write(current_kprobe, NULL);
+                       __this_cpu_write(current_kprobe, &dummy_retprobe);
                }
 
                recycle_rp_inst(ri, &empty_rp);
@@ -632,6 +638,8 @@ void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
        }
 
        kretprobe_hash_unlock(current, &flags);
+       __this_cpu_write(current_kprobe, NULL);
+       preempt_enable_no_resched();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
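
For reference on why a dummy current_kprobe is enough: while
current_kprobe is set, a nested probe hit takes the reentry path of
the breakpoint handler, which counts the hit as missed and skips the
pre-handler, so the kretprobe hash lock is never taken a second time
on that CPU. A simplified paraphrase of that guard is below; the
wrapper name is made up for the sketch and the flow is condensed,
only the helpers it calls are the real arch-internal ones.

/*
 * Simplified paraphrase of the reentrancy guard in the arm64 kprobe
 * breakpoint handler. The wrapper function is invented for this
 * sketch; it is not the exact upstream kprobe_handler().
 */
static void kprobe_hit_sketch(struct kprobe *p, struct pt_regs *regs,
			      struct kprobe_ctlblk *kcb)
{
	if (kprobe_running()) {
		/*
		 * current_kprobe is already set (with this patch, the
		 * dummy retprobe installed by trampoline_probe_handler),
		 * so treat this hit as a reentry: count it as missed and
		 * single-step the probed insn without running the
		 * pre-handler, i.e. without taking the hash lock again.
		 */
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		return;
	}

	/* Normal path: record the probe and run its pre-handler. */
	set_current_kprobe(p);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	if (!p->pre_handler || !p->pre_handler(p, regs))
		setup_singlestep(p, regs, kcb, 0);
}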
