From: Masami Hiramatsu <mhira...@kernel.org>

commit c85c9a2c6e368dc94907e63babb18a9788e5c9b6 upstream.

Commit 36dadef23fcc ("kprobes: Init kprobes in early_initcall")
moved the kprobe setup to early_initcall(), which includes the
kprobe jump optimization.
The kprobes jump optimizer involves synchronize_rcu_tasks(), which
depends on ksoftirqd and rcu_spawn_tasks_*(). However, since those
are set up in core_initcall(), the kprobes jump optimizer cannot
run at the early_initcall() stage.
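For reference, the relevant initcall ordering, as a minimal sketch
(the level ordering follows include/linux/init.h; the comments only
restate the dependency described above, "fn" is a placeholder):

	early_initcall(fn);	/* runs before all leveled initcalls */
	core_initcall(fn);	/* level 1: where the ksoftirqd/RCU-tasks setup above happens */
	subsys_initcall(fn);	/* level 4: late enough for synchronize_rcu_tasks() */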

To avoid this issue, disable kprobe optimization in early_initcall()
and enable it in subsys_initcall().

Note that non-optimized kprobes are still available after
early_initcall(); only jump optimization is delayed.
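
A minimal sketch of the resulting split, with the kprobes specifics
replaced by two hypothetical helpers (setup_feature() and
enable_optimization()) so only the two-stage initcall pattern is shown:

	/* early_initcall(): register the core infrastructure, optimization stays off. */
	static int __init feature_init(void)
	{
		setup_feature();		/* hypothetical: must not depend on kthreads */
		return 0;
	}
	early_initcall(feature_init);

	/*
	 * subsys_initcall(): ksoftirqd and the RCU-tasks kthreads exist by now,
	 * so work that relies on synchronize_rcu_tasks() can be kicked off.
	 */
	static int __init feature_enable_opt(void)
	{
		enable_optimization();		/* hypothetical: may call synchronize_rcu_tasks() */
		return 0;
	}
	subsys_initcall(feature_enable_opt);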

Link: https://lkml.kernel.org/r/161365856280.719838.12423085451287256713.stgit@devnote2

Fixes: 36dadef23fcc ("kprobes: Init kprobes in early_initcall")
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: RCU <r...@vger.kernel.org>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Daniel Axtens <d...@axtens.net>
Cc: Frederic Weisbecker <frede...@kernel.org>
Cc: Neeraj Upadhyay <neer...@codeaurora.org>
Cc: Joel Fernandes <j...@joelfernandes.org>
Cc: Michal Hocko <mho...@suse.com>
Cc: "Theodore Y . Ts'o" <ty...@mit.edu>
Cc: Oleksiy Avramchenko <oleksiy.avramche...@sonymobile.com>
Cc: sta...@vger.kernel.org
Reported-by: Paul E. McKenney <paul...@kernel.org>
Reported-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
Reported-by: Uladzislau Rezki <ure...@gmail.com>
Acked-by: Paul E. McKenney <paul...@kernel.org>
Signed-off-by: Masami Hiramatsu <mhira...@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rost...@goodmis.org>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
---
 kernel/kprobes.c |   31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -861,7 +861,6 @@ out:
        cpus_read_unlock();
 }
 
-#ifdef CONFIG_SYSCTL
 static void optimize_all_kprobes(void)
 {
        struct hlist_head *head;
@@ -887,6 +886,7 @@ out:
        mutex_unlock(&kprobe_mutex);
 }
 
+#ifdef CONFIG_SYSCTL
 static void unoptimize_all_kprobes(void)
 {
        struct hlist_head *head;
@@ -2497,18 +2497,14 @@ static int __init init_kprobes(void)
                }
        }
 
-#if defined(CONFIG_OPTPROBES)
-#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
-       /* Init kprobe_optinsn_slots */
-       kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
-#endif
-       /* By default, kprobes can be optimized */
-       kprobes_allow_optimization = true;
-#endif
-
        /* By default, kprobes are armed */
        kprobes_all_disarmed = false;
 
+#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+       /* Init kprobe_optinsn_slots for allocation */
+       kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+#endif
+
        err = arch_init_kprobes();
        if (!err)
                err = register_die_notifier(&kprobe_exceptions_nb);
@@ -2523,6 +2519,21 @@ static int __init init_kprobes(void)
 }
 early_initcall(init_kprobes);
 
+#if defined(CONFIG_OPTPROBES)
+static int __init init_optprobes(void)
+{
+       /*
+        * Enable kprobe optimization - this kicks the optimizer which
+        * depends on synchronize_rcu_tasks() and ksoftirqd, that is
+        * not spawned in early initcall. So delay the optimization.
+        */
+       optimize_all_kprobes();
+
+       return 0;
+}
+subsys_initcall(init_optprobes);
+#endif
+
 #ifdef CONFIG_DEBUG_FS
 static void report_probe(struct seq_file *pi, struct kprobe *p,
                const char *sym, int offset, char *modname, struct kprobe *pp)

