Introduce the ability for kprobes to override the return values of
functions that have been livepatched. This functionality is guarded by the
CONFIG_KPROBE_OVERRIDE_KLP_FUNC configuration option.

Signed-off-by: Yafang Shao <[email protected]>
---
 kernel/trace/Kconfig        | 14 ++++++++++++++
 kernel/trace/bpf_trace.c    |  3 ++-
 kernel/trace/trace_kprobe.c | 17 +++++++++++++++++
 kernel/trace/trace_probe.h  |  5 +++++
 4 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 49de13cae428..db712c8cb745 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1279,6 +1279,20 @@ config HIST_TRIGGERS_DEBUG
 
           If unsure, say N.
 
+config KPROBE_OVERRIDE_KLP_FUNC
+       bool "Allow kprobes to override livepatched functions"
+       depends on KPROBES && LIVEPATCH
+       help
+         This option allows BPF programs to use kprobes to override functions
+         that have already been patched by Livepatch (KLP).
+
+         Enabling this provides a mechanism to dynamically control execution
+         flow without requiring a reboot or a new livepatch module. It
+         effectively combines the persistence of livepatching with the
+         programmability of BPF.
+
+         If unsure, say N.
+
 source "kernel/trace/rv/Kconfig"
 
 endif # FTRACE
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index c901ace836cb..08ae2b1a912c 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1935,7 +1935,8 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
                if (!tp)
                        return -EINVAL;
                if (!trace_kprobe_on_func_entry(tp) ||
-                   !trace_kprobe_error_injectable(tp))
+                   (!trace_kprobe_error_injectable(tp) &&
+                    !trace_kprobe_klp_func_overridable(tp)))
                        return -EINVAL;
        }
 
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 768702674a5c..6f05451fbc76 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -213,6 +213,23 @@ bool trace_kprobe_error_injectable(struct trace_kprobe *tp)
        return within_error_injection_list(trace_kprobe_address(tp));
 }
 
+bool trace_kprobe_klp_func_overridable(struct trace_kprobe *tp)
+{
+       bool overridable = false;
+#ifdef CONFIG_KPROBE_OVERRIDE_KLP_FUNC
+       struct module *mod;
+       unsigned long addr;
+
+       addr = trace_kprobe_address(tp);
+       rcu_read_lock();
+       mod = __module_address(addr);
+       if (mod && mod->klp)
+               overridable = true;
+       rcu_read_unlock();
+#endif
+       return overridable;
+}
+
 static int register_kprobe_event(struct trace_kprobe *tk);
 static int unregister_kprobe_event(struct trace_kprobe *tk);
 
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 958eb78a9068..84bd2617db7c 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -271,6 +271,7 @@ struct trace_kprobe {
 #ifdef CONFIG_KPROBE_EVENTS
 bool trace_kprobe_on_func_entry(struct trace_kprobe *tp);
 bool trace_kprobe_error_injectable(struct trace_kprobe *tp);
+bool trace_kprobe_klp_func_overridable(struct trace_kprobe *tp);
 #else
 static inline bool trace_kprobe_on_func_entry(struct trace_kprobe *tp)
 {
@@ -281,6 +282,10 @@ static inline bool trace_kprobe_error_injectable(struct trace_kprobe *tp)
 {
        return false;
 }
+static inline bool trace_kprobe_klp_func_overridable(struct trace_kprobe *tp)
+{
+       return false;
+}
 #endif /* CONFIG_KPROBE_EVENTS */
 
 static inline unsigned int trace_probe_load_flag(struct trace_probe *tp)
-- 
2.47.3


Reply via email to