ftrace_hash_ipmodify_enable() checks IPMODIFY and DIRECT ftrace_ops on
the same kernel function. When needed, ftrace_hash_ipmodify_enable()
calls ops->ops_func() to prepare the direct ftrace (BPF trampoline) to
share the same function as the IPMODIFY ftrace (livepatch).

ftrace_hash_ipmodify_enable() is called in register_ftrace_direct() path,
but not called in modify_ftrace_direct() path. As a result, the following
operations will break livepatch:

1. Load livepatch to a kernel function;
2. Attach fentry program to the kernel function;
3. Attach fexit program to the kernel function.

After 3, the kernel function being used will not be the livepatched
version, but the original version.

Fix this by adding ftrace_hash_ipmodify_enable() to modify_ftrace_direct()
and adjusting some logic around the call.

Signed-off-by: Song Liu <[email protected]>
---
 kernel/bpf/trampoline.c | 12 +++++++-----
 kernel/trace/ftrace.c   | 12 ++++++++++--
 2 files changed, 17 insertions(+), 7 deletions(-)

diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 5949095e51c3..8015f5dc3169 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -221,6 +221,13 @@ static int register_fentry(struct bpf_trampoline *tr, void 
*new_addr)
 
        if (tr->func.ftrace_managed) {
                ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
+               /*
+                * Clearing fops->trampoline and fops->func is
+                * needed by the "goto again" case in
+                * bpf_trampoline_update().
+                */
+               tr->fops->trampoline = 0;
+               tr->fops->func = NULL;
                ret = register_ftrace_direct(tr->fops, (long)new_addr);
        } else {
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
@@ -479,11 +486,6 @@ static int bpf_trampoline_update(struct bpf_trampoline 
*tr, bool lock_direct_mut
                 * BPF_TRAMP_F_SHARE_IPMODIFY is set, we can generate the
                 * trampoline again, and retry register.
                 */
-               /* reset fops->func and fops->trampoline for re-register */
-               tr->fops->func = NULL;
-               tr->fops->trampoline = 0;
-
-               /* free im memory and reallocate later */
                bpf_tramp_image_free(im);
                goto again;
        }
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7f432775a6b5..370f620734cf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2020,8 +2020,6 @@ static int __ftrace_hash_update_ipmodify(struct 
ftrace_ops *ops,
                                if (is_ipmodify)
                                        goto rollback;
 
-                               FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
-
                                /*
                                 * Another ops with IPMODIFY is already
                                 * attached. We are now attaching a direct
@@ -6128,6 +6126,15 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned 
long addr)
        if (err)
                return err;
 
+       /*
+        * Call ftrace_hash_ipmodify_enable() here, so that we can call
+        * ops->ops_func for the ops. This is needed because the above
+        * register_ftrace_function_nolock() worked on tmp_ops.
+        */
+       err = ftrace_hash_ipmodify_enable(ops);
+       if (err)
+               goto out;
+
        /*
         * Now the ftrace_ops_list_func() is called to do the direct callers.
         * We can safely change the direct functions attached to each entry.
@@ -6149,6 +6156,7 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned 
long addr)
 
        mutex_unlock(&ftrace_lock);
 
+out:
        /* Removing the tmp_ops will add the updated direct callers to the 
functions */
        unregister_ftrace_function(&tmp_ops);
 
-- 
2.47.3


Reply via email to