On Wed,  4 Oct 2017 21:14:14 +0200
Jessica Yu <j...@kernel.org> wrote:

> Improve error handling when disarming ftrace-based kprobes. Like with
> arm_kprobe_ftrace(), propagate any errors from disarm_kprobe_ftrace() so
> that we do not disable/unregister kprobes that are still armed. In other
> words, unregister_kprobe() and disable_kprobe() should not report success
> if the kprobe could not be disarmed.
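
For illustration only, a minimal sketch of a caller that now has to check
disable_kprobe()'s return value (not part of this patch; the probed symbol
and the module boilerplate are just placeholders):

/* Hypothetical example module: the probed symbol is only illustrative. */
#include <linux/module.h>
#include <linux/kprobes.h>

static struct kprobe kp = {
        .symbol_name = "do_sys_open",   /* example target */
};

static int __init kp_example_init(void)
{
        int ret = register_kprobe(&kp);

        if (ret < 0)
                return ret;

        /*
         * With this patch, a failed disarm is no longer silent:
         * disable_kprobe() returns the error instead of 0.
         */
        ret = disable_kprobe(&kp);
        if (ret < 0)
                pr_err("disable_kprobe failed: %d\n", ret);

        return 0;
}

static void __exit kp_example_exit(void)
{
        unregister_kprobe(&kp);
}

module_init(kp_example_init);
module_exit(kp_example_exit);
MODULE_LICENSE("GPL");
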
> 
> disarm_all_kprobes() keeps its current behavior and attempts to
> disarm all kprobes. It returns the last encountered error and gives a
> warning if not all kprobes could be disarmed.
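
As a rough userspace check of that behaviour (a sketch only: it assumes
debugfs is mounted at /sys/kernel/debug and that the write path propagates
the error, as the captured return value in the last hunk suggests):

/* Hypothetical test: write '0' to the kprobes "enabled" file and report
 * whether the global disarm failed instead of silently succeeding. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/kernel/debug/kprobes/enabled", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "0", 1) != 1)
                fprintf(stderr, "disarm failed: %s\n", strerror(errno));
        close(fd);
        return 0;
}
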
> 
> This patch is based on Petr Mladek's original patchset (patches 2 and 3)
> back in 2015, which improved kprobes error handling, found here:
> 
>    https://lkml.org/lkml/2015/2/26/452
> 
> However, further work on this has been paused since then and the patches
> were never upstreamed.
> 

OK, this looks good to me :)

Acked-by: Masami Hiramatsu <mhira...@kernel.org>

Thanks!

> Based-on-patches-by: Petr Mladek <pmla...@suse.com>
> Signed-off-by: Jessica Yu <j...@kernel.org>
> ---
>  kernel/kprobes.c | 76 +++++++++++++++++++++++++++++++++++++-------------------
>  1 file changed, 51 insertions(+), 25 deletions(-)
> 
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index 6e889be0d93c..2e9edf646ae3 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -1003,23 +1003,27 @@ static int arm_kprobe_ftrace(struct kprobe *p)
>  }
>  
>  /* Caller must lock kprobe_mutex */
> -static void disarm_kprobe_ftrace(struct kprobe *p)
> +static int disarm_kprobe_ftrace(struct kprobe *p)
>  {
> -     int ret;
> +     int ret = 0;
>  
> -     kprobe_ftrace_enabled--;
> -     if (kprobe_ftrace_enabled == 0) {
> +     if (kprobe_ftrace_enabled == 1) {
>               ret = unregister_ftrace_function(&kprobe_ftrace_ops);
> -             WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
> +             if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
> +                     return ret;
>       }
> +
> +     kprobe_ftrace_enabled--;
> +
>       ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
>                          (unsigned long)p->addr, 1, 0);
>       WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
> +     return ret;
>  }
>  #else        /* !CONFIG_KPROBES_ON_FTRACE */
>  #define prepare_kprobe(p)    arch_prepare_kprobe(p)
>  #define arm_kprobe_ftrace(p) (0)
> -#define disarm_kprobe_ftrace(p)      do {} while (0)
> +#define disarm_kprobe_ftrace(p)      (0)
>  #endif
>  
>  /* Arm a kprobe with text_mutex */
> @@ -1038,18 +1042,18 @@ static int arm_kprobe(struct kprobe *kp)
>  }
>  
>  /* Disarm a kprobe with text_mutex */
> -static void disarm_kprobe(struct kprobe *kp, bool reopt)
> +static int disarm_kprobe(struct kprobe *kp, bool reopt)
>  {
> -     if (unlikely(kprobe_ftrace(kp))) {
> -             disarm_kprobe_ftrace(kp);
> -             return;
> -     }
> +     if (unlikely(kprobe_ftrace(kp)))
> +             return disarm_kprobe_ftrace(kp);
>  
>       cpus_read_lock();
>       mutex_lock(&text_mutex);
>       __disarm_kprobe(kp, reopt);
>       mutex_unlock(&text_mutex);
>       cpus_read_unlock();
> +
> +     return 0;
>  }
>  
>  /*
> @@ -1627,11 +1631,12 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
>  static struct kprobe *__disable_kprobe(struct kprobe *p)
>  {
>       struct kprobe *orig_p;
> +     int ret;
>  
>       /* Get an original kprobe for return */
>       orig_p = __get_valid_kprobe(p);
>       if (unlikely(orig_p == NULL))
> -             return NULL;
> +             return ERR_PTR(-EINVAL);
>  
>       if (!kprobe_disabled(p)) {
>               /* Disable probe if it is a child probe */
> @@ -1645,8 +1650,13 @@ static int __disable_kprobe(struct kprobe *p)
>                        * should have already been disarmed, so
>                        * skip unneed disarming process.
>                        */
> -                     if (!kprobes_all_disarmed)
> -                             disarm_kprobe(orig_p, true);
> +                     if (!kprobes_all_disarmed) {
> +                             ret = disarm_kprobe(orig_p, true);
> +                             if (ret) {
> +                                     p->flags &= ~KPROBE_FLAG_DISABLED;
> +                                     return ERR_PTR(ret);
> +                             }
> +                     }
>                       orig_p->flags |= KPROBE_FLAG_DISABLED;
>               }
>       }
> @@ -1663,8 +1673,8 @@ static int __unregister_kprobe_top(struct kprobe *p)
>  
>       /* Disable kprobe. This will disarm it if needed. */
>       ap = __disable_kprobe(p);
> -     if (ap == NULL)
> -             return -EINVAL;
> +     if (IS_ERR(ap))
> +             return PTR_ERR(ap);
>  
>       if (ap == p)
>               /*
> @@ -2095,12 +2105,14 @@ static void kill_kprobe(struct kprobe *p)
>  int disable_kprobe(struct kprobe *kp)
>  {
>       int ret = 0;
> +     struct kprobe *p;
>  
>       mutex_lock(&kprobe_mutex);
>  
>       /* Disable this kprobe */
> -     if (__disable_kprobe(kp) == NULL)
> -             ret = -EINVAL;
> +     p = __disable_kprobe(kp);
> +     if (IS_ERR(p))
> +             ret = PTR_ERR(p);
>  
>       mutex_unlock(&kprobe_mutex);
>       return ret;
> @@ -2470,34 +2482,48 @@ static int arm_all_kprobes(void)
>       return ret;
>  }
>  
> -static void disarm_all_kprobes(void)
> +static int disarm_all_kprobes(void)
>  {
>       struct hlist_head *head;
>       struct kprobe *p;
> -     unsigned int i;
> +     unsigned int i, errors = 0;
> +     int err, ret = 0;
>  
>       mutex_lock(&kprobe_mutex);
>  
>       /* If kprobes are already disarmed, just return */
>       if (kprobes_all_disarmed) {
>               mutex_unlock(&kprobe_mutex);
> -             return;
> +             return 0;
>       }
>  
>       kprobes_all_disarmed = true;
> -     printk(KERN_INFO "Kprobes globally disabled\n");
>  
>       for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
>               head = &kprobe_table[i];
> +             /* Disarm all kprobes on a best-effort basis */
>               hlist_for_each_entry_rcu(p, head, hlist) {
> -                     if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
> -                             disarm_kprobe(p, false);
> +                     if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
> +                             err = disarm_kprobe(p, false);
> +                             if (err) {
> +                                     errors++;
> +                                     ret = err;
> +                             }
> +                     }
>               }
>       }
> +
> +     if (errors)
> +             pr_warn("Kprobes globally disabled, but failed to disarm %d kprobes\n", errors);
> +     else
> +             pr_info("Kprobes globally disabled\n");
> +
>       mutex_unlock(&kprobe_mutex);
>  
>       /* Wait for disarming all kprobes by optimizer */
>       wait_for_kprobe_optimizer();
> +
> +     return ret;
>  }
>  
>  /*
> @@ -2540,7 +2566,7 @@ static ssize_t write_enabled_file_bool(struct file *file,
>       case 'n':
>       case 'N':
>       case '0':
> -             disarm_all_kprobes();
> +             ret = disarm_all_kprobes();
>               break;
>       default:
>               return -EINVAL;
> -- 
> 2.13.6
> 


-- 
Masami Hiramatsu <mhira...@kernel.org>
