On Fri, 30 Oct 2020 17:31:47 -0400
Steven Rostedt <[email protected]> wrote:

> From: "Steven Rostedt (VMware)" <[email protected]>
> 
> If a ftrace callback does not supply its own recursion protection and
> does not set the RECURSION_SAFE flag in its ftrace_ops, then ftrace will
> make a helper trampoline to do so before calling the callback instead of
> just calling the callback directly.
> 
> The default for ftrace_ops is going to change. It will expect that handlers
> provide their own recursion protection, unless its ftrace_ops states
> otherwise.
> 
> Link: https://lkml.kernel.org/r/[email protected]
> 
> Cc: Andrew Morton <[email protected]>
> Cc: Masami Hiramatsu <[email protected]>
> Cc: Guo Ren <[email protected]>
> Cc: "James E.J. Bottomley" <[email protected]>
> Cc: Helge Deller <[email protected]>
> Cc: Michael Ellerman <[email protected]>
> Cc: Benjamin Herrenschmidt <[email protected]>
> Cc: Paul Mackerras <[email protected]>
> Cc: Heiko Carstens <[email protected]>
> Cc: Vasily Gorbik <[email protected]>
> Cc: Christian Borntraeger <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
> Cc: Borislav Petkov <[email protected]>
> Cc: [email protected]
> Cc: "H. Peter Anvin" <[email protected]>
> Cc: "Naveen N. Rao" <[email protected]>
> Cc: Anil S Keshavamurthy <[email protected]>
> Cc: "David S. Miller" <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Cc: [email protected]
> Cc: [email protected]
> Signed-off-by: Steven Rostedt (VMware) <[email protected]>
> ---
>  arch/csky/kernel/probes/ftrace.c     | 12 ++++++++++--
>  arch/parisc/kernel/ftrace.c          | 13 +++++++++++--
>  arch/powerpc/kernel/kprobes-ftrace.c | 11 ++++++++++-
>  arch/s390/kernel/ftrace.c            | 13 +++++++++++--
>  arch/x86/kernel/kprobes/ftrace.c     | 12 ++++++++++--
>  5 files changed, 52 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/csky/kernel/probes/ftrace.c 
> b/arch/csky/kernel/probes/ftrace.c
> index 5264763d05be..5eb2604fdf71 100644
> --- a/arch/csky/kernel/probes/ftrace.c
> +++ b/arch/csky/kernel/probes/ftrace.c
> @@ -13,16 +13,21 @@ int arch_check_ftrace_location(struct kprobe *p)
>  void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
>                          struct ftrace_ops *ops, struct pt_regs *regs)
>  {
> +     int bit;
>       bool lr_saver = false;
>       struct kprobe *p;
>       struct kprobe_ctlblk *kcb;
>  
> -     /* Preempt is disabled by ftrace */
> +     bit = ftrace_test_recursion_trylock();
> +     if (bit < 0)
> +             return;
> +
> +     preempt_disable_notrace();
>       p = get_kprobe((kprobe_opcode_t *)ip);
>       if (!p) {
>               p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
>               if (unlikely(!p) || kprobe_disabled(p))
> -                     return;
> +                     goto out;
>               lr_saver = true;
>       }
>  
> @@ -56,6 +61,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long 
> parent_ip,
>                */
>               __this_cpu_write(current_kprobe, NULL);
>       }
> +out:
> +     preempt_enable_notrace();
> +     ftrace_test_recursion_unlock(bit);
>  }
>  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
>  
> diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
> index 63e3ecb9da81..4b1fdf15662c 100644
> --- a/arch/parisc/kernel/ftrace.c
> +++ b/arch/parisc/kernel/ftrace.c
> @@ -208,13 +208,19 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned 
> long parent_ip,
>  {
>       struct kprobe_ctlblk *kcb;
>       struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
> +     int bit;
>  
> -     if (unlikely(!p) || kprobe_disabled(p))
> +     bit = ftrace_test_recursion_trylock();
> +     if (bit < 0)
>               return;
>  
> +     preempt_disable_notrace();

If we disable preemption here, we should also move the get_kprobe() call down here, as below
(get_kprobe() accesses a percpu variable).

        p = get_kprobe((kprobe_opcode_t *)ip);


> +     if (unlikely(!p) || kprobe_disabled(p))
> +             goto out;
> +
>       if (kprobe_running()) {
>               kprobes_inc_nmissed_count(p);
> -             return;
> +             goto out;
>       }
>  
>       __this_cpu_write(current_kprobe, p);
> @@ -235,6 +241,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned 
> long parent_ip,
>               }
>       }
>       __this_cpu_write(current_kprobe, NULL);
> +out:
> +     preempt_enable_notrace();
> +     ftrace_test_recursion_unlock(bit);
>  }
>  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
>  
> diff --git a/arch/powerpc/kernel/kprobes-ftrace.c 
> b/arch/powerpc/kernel/kprobes-ftrace.c
> index 972cb28174b2..5df8d50c65ae 100644
> --- a/arch/powerpc/kernel/kprobes-ftrace.c
> +++ b/arch/powerpc/kernel/kprobes-ftrace.c
> @@ -18,10 +18,16 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned 
> long parent_nip,
>  {
>       struct kprobe *p;
>       struct kprobe_ctlblk *kcb;
> +     int bit;
>  
> +     bit = ftrace_test_recursion_trylock();
> +     if (bit < 0)
> +             return;
> +
> +     preempt_disable_notrace();
>       p = get_kprobe((kprobe_opcode_t *)nip);
>       if (unlikely(!p) || kprobe_disabled(p))
> -             return;
> +             goto out;
>  
>       kcb = get_kprobe_ctlblk();
>       if (kprobe_running()) {
> @@ -52,6 +58,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long 
> parent_nip,
>                */
>               __this_cpu_write(current_kprobe, NULL);
>       }
> +out:
> +     preempt_enable_notrace();
> +     ftrace_test_recursion_unlock(bit);
>  }
>  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
>  
> diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
> index b388e87a08bf..88466d7fb6b2 100644
> --- a/arch/s390/kernel/ftrace.c
> +++ b/arch/s390/kernel/ftrace.c
> @@ -202,13 +202,19 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned 
> long parent_ip,
>  {
>       struct kprobe_ctlblk *kcb;
>       struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
> +     int bit;
>  
> -     if (unlikely(!p) || kprobe_disabled(p))
> +     bit = ftrace_test_recursion_trylock();
> +     if (bit < 0)
>               return;
>  
> +     preempt_disable_notrace();

Ditto.

Others look good to me.

Thank you,

> +     if (unlikely(!p) || kprobe_disabled(p))
> +             goto out;
> +
>       if (kprobe_running()) {
>               kprobes_inc_nmissed_count(p);
> -             return;
> +             goto out;
>       }
>  
>       __this_cpu_write(current_kprobe, p);
> @@ -228,6 +234,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned 
> long parent_ip,
>               }
>       }
>       __this_cpu_write(current_kprobe, NULL);
> +out:
> +     preempt_enable_notrace();
> +     ftrace_test_recursion_unlock(bit);
>  }
>  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
>  
> diff --git a/arch/x86/kernel/kprobes/ftrace.c 
> b/arch/x86/kernel/kprobes/ftrace.c
> index 681a4b36e9bb..a40a6cdfcca3 100644
> --- a/arch/x86/kernel/kprobes/ftrace.c
> +++ b/arch/x86/kernel/kprobes/ftrace.c
> @@ -18,11 +18,16 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned 
> long parent_ip,
>  {
>       struct kprobe *p;
>       struct kprobe_ctlblk *kcb;
> +     int bit;
>  
> -     /* Preempt is disabled by ftrace */
> +     bit = ftrace_test_recursion_trylock();
> +     if (bit < 0)
> +             return;
> +
> +     preempt_disable_notrace();
>       p = get_kprobe((kprobe_opcode_t *)ip);
>       if (unlikely(!p) || kprobe_disabled(p))
> -             return;
> +             goto out;
>  
>       kcb = get_kprobe_ctlblk();
>       if (kprobe_running()) {
> @@ -52,6 +57,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long 
> parent_ip,
>                */
>               __this_cpu_write(current_kprobe, NULL);
>       }
> +out:
> +     preempt_enable_notrace();
> +     ftrace_test_recursion_unlock(bit);
>  }
>  NOKPROBE_SYMBOL(kprobe_ftrace_handler);
>  
> -- 
> 2.28.0
> 
> 


-- 
Masami Hiramatsu <[email protected]>

Reply via email to