Commit-ID:  819319fc93461c07b9cdb3064f154bd8cfd48172
Gitweb:     https://git.kernel.org/tip/819319fc93461c07b9cdb3064f154bd8cfd48172
Author:     Masami Hiramatsu <mhira...@kernel.org>
AuthorDate: Tue, 11 Sep 2018 19:20:40 +0900
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Wed, 12 Sep 2018 08:01:16 +0200

kprobes: Return error if we fail to reuse kprobe instead of BUG_ON()

Make reuse_unused_kprobe() return an error code if it fails to
reuse an unused kprobe for an optprobe, instead of calling BUG_ON().

Signed-off-by: Masami Hiramatsu <mhira...@kernel.org>
Cc: Anil S Keshavamurthy <anil.s.keshavamur...@intel.com>
Cc: David S . Miller <da...@davemloft.net>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Naveen N . Rao <naveen.n....@linux.vnet.ibm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Link: http://lkml.kernel.org/r/153666124040.21306.14150398706331307654.stgit@devbox
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 kernel/kprobes.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 277a6cbe83db..63c342e5e6c3 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -700,9 +700,10 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
 }
 
 /* Cancel unoptimizing for reusing */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
        struct optimized_kprobe *op;
+       int ret;
 
        /*
         * Unused kprobe MUST be on the way of delayed unoptimizing (means
@@ -713,8 +714,12 @@ static void reuse_unused_kprobe(struct kprobe *ap)
        /* Enable the probe again */
        ap->flags &= ~KPROBE_FLAG_DISABLED;
        /* Optimize it again (remove from op->list) */
-       BUG_ON(!kprobe_optready(ap));
+       ret = kprobe_optready(ap);
+       if (ret)
+               return ret;
+
        optimize_kprobe(ap);
+       return 0;
 }
 
 /* Remove optimized instructions */
@@ -939,11 +944,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 #define kprobe_disarmed(p)                     kprobe_disabled(p)
 #define wait_for_kprobe_optimizer()            do {} while (0)
 
-/* There should be no unused kprobes can be reused without optimization */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
 {
+       /*
+        * If the optimized kprobe is NOT supported, the aggr kprobe is
+        * released at the same time that the last aggregated kprobe is
+        * unregistered.
+        * Thus there should be no chance to reuse unused kprobe.
+        */
        printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
-       BUG_ON(kprobe_unused(ap));
+       return -EINVAL;
 }
 
 static void free_aggr_kprobe(struct kprobe *p)
@@ -1315,9 +1325,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
                        goto out;
                }
                init_aggr_kprobe(ap, orig_p);
-       } else if (kprobe_unused(ap))
+       } else if (kprobe_unused(ap)) {
                /* This probe is going to die. Rescue it */
-               reuse_unused_kprobe(ap);
+               ret = reuse_unused_kprobe(ap);
+               if (ret)
+                       goto out;
+       }
 
        if (kprobe_gone(ap)) {
                /*

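For reference, kprobe_optready() in kernel/kprobes.c returns true (non-zero)
when the probe is ready for optimization, so an error-return conversion that
preserves the meaning of BUG_ON(!kprobe_optready(ap)) takes the failure path
only when the probe is *not* ready. The standalone sketch below illustrates
that assertion-to-error-return pattern; every name in it (struct probe,
fake_optready(), fake_optimize(), reuse_probe()) is a hypothetical stand-in
for illustration, not a kernel API.

/*
 * Standalone sketch of converting a BUG_ON()-style assertion into an
 * error return that the caller handles.  All names are hypothetical
 * stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

struct probe {
	bool disabled;
	bool optready;	/* mirrors "ready for optimization" state */
};

/* Stand-in for kprobe_optready(): true when the probe is ready. */
static bool fake_optready(struct probe *p)
{
	return p->optready;
}

static void fake_optimize(struct probe *p)
{
	printf("optimizing probe %p\n", (void *)p);
}

/*
 * Instead of BUG_ON(!fake_optready(p)), report the failure to the caller.
 * Note the negated check: the error path is taken only when the probe is
 * NOT ready, which preserves the semantics of the original assertion.
 */
static int reuse_probe(struct probe *p)
{
	p->disabled = false;		/* enable the probe again */

	if (!fake_optready(p))
		return -EINVAL;		/* caller decides how to recover */

	fake_optimize(p);
	return 0;
}

int main(void)
{
	struct probe ok  = { .disabled = true, .optready = true  };
	struct probe bad = { .disabled = true, .optready = false };

	printf("ready probe:   %d\n", reuse_probe(&ok));	/* 0 */
	printf("unready probe: %d\n", reuse_probe(&bad));	/* -EINVAL */
	return 0;
}

As in the register_aggr_kprobe() hunk above, the caller is expected to check
the returned value and bail out (goto out) instead of crashing the kernel.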