Add bpf_trampoline_multi_attach/detach functions that allow attaching
and detaching multiple tracing trampolines at once.

The attachment is defined by a bpf program and an array of BTF ids of
the functions to attach the bpf program to.

For each function the attach either allocates a new trampoline or reuses
an existing one, and links it with the bpf program.
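
For illustration, here is a minimal sketch of how a caller might use the
new API. It is not part of this patch and the helper below is hypothetical:
it resolves function names to BTF ids with btf_find_by_name_kind(), allocates
the variable sized bpf_tracing_multi_link and hands both to
bpf_trampoline_multi_attach() (bpf_link initialization and the uapi plumbing
are omitted, and prog->aux->attach_btf is assumed to be set up):

  /*
   * Hypothetical caller sketch (not part of this patch): resolve the
   * target functions to BTF ids and attach the program to all of them
   * through a single bpf_tracing_multi_link.
   */
  static int attach_prog_to_funcs(struct bpf_prog *prog, const char **names,
                                  u32 cnt)
  {
          struct bpf_tracing_multi_link *link;
          u32 *ids;
          int i, err = 0;
          s32 id;

          ids = kcalloc(cnt, sizeof(*ids), GFP_KERNEL);
          if (!ids)
                  return -ENOMEM;

          /* resolve each function name to its BTF id */
          for (i = 0; i < cnt; i++) {
                  id = btf_find_by_name_kind(prog->aux->attach_btf, names[i],
                                             BTF_KIND_FUNC);
                  if (id < 0) {
                          err = id;
                          goto out;
                  }
                  ids[i] = id;
          }

          /* one link carries one bpf_tracing_multi_node per target function */
          link = kzalloc(struct_size(link, nodes, cnt), GFP_KERNEL);
          if (!link) {
                  err = -ENOMEM;
                  goto out;
          }
          link->nodes_cnt = cnt;

          err = bpf_trampoline_multi_attach(prog, ids, link);
          if (err)
                  kfree(link);
  out:
          kfree(ids);
          return err;
  }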

The attach works as follows:
- we get all the needed trampolines
- we lock them and add the bpf program to each one (__bpf_trampoline_link_prog)
- the trampoline_multi_ops passed to __bpf_trampoline_link_prog gathers the
  needed ip -> trampoline entries into ftrace_hash objects
- we call update_ftrace_direct_add/mod to update all the needed locations
  in one batch
- we unlock all the trampolines

The detach works as follows:
- we lock all the needed trampolines
- we remove the program from each one (__bpf_trampoline_unlink_prog)
- the trampoline_multi_ops passed to __bpf_trampoline_unlink_prog gathers the
  needed ip -> trampoline entries into ftrace_hash objects
- we call update_ftrace_direct_del/mod to update the needed locations
- we unlock and put all the trampolines
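
On the detach side, a hypothetical bpf_link release callback (again not
part of this patch, the callback name is made up) would only need to call
bpf_trampoline_multi_detach(), which tears all the attachments down through
the same batched ftrace update path:

  /*
   * Hypothetical bpf_link release callback (not part of this patch):
   * one call detaches the program from all the functions in the link.
   */
  static void bpf_tracing_multi_link_release(struct bpf_link *l)
  {
          struct bpf_tracing_multi_link *link;

          link = container_of(l, struct bpf_tracing_multi_link, link);
          WARN_ON_ONCE(bpf_trampoline_multi_detach(l->prog, link));
  }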

Signed-off-by: Jiri Olsa <[email protected]>
---
 include/linux/bpf.h     |  18 ++++
 kernel/bpf/trampoline.c | 186 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 204 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f06f0a11ccb7..5591660da6e1 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1466,6 +1466,12 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
 void bpf_trampoline_put(struct bpf_trampoline *tr);
 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
 
+struct bpf_tracing_multi_link;
+int bpf_trampoline_multi_attach(struct bpf_prog *prog, u32 *ids,
+                               struct bpf_tracing_multi_link *link);
+int bpf_trampoline_multi_detach(struct bpf_prog *prog,
+                               struct bpf_tracing_multi_link *link);
+
 /*
  * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
  * indirection with a direct call to the bpf program. If the architecture does
@@ -1898,6 +1904,18 @@ struct bpf_fsession_link {
        struct bpf_tramp_link fexit;
 };
 
+struct bpf_tracing_multi_node {
+       struct bpf_tramp_node node;
+       struct bpf_trampoline *trampoline;
+};
+
+struct bpf_tracing_multi_link {
+       struct bpf_link link;
+       enum bpf_attach_type attach_type;
+       int nodes_cnt;
+       struct bpf_tracing_multi_node nodes[] __counted_by(nodes_cnt);
+};
+
 struct bpf_raw_tp_link {
        struct bpf_link link;
        struct bpf_raw_event_map *btp;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 2be2f1d0b7d7..b76bb545077b 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -367,7 +367,11 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key, unsigned long ip)
        head = &trampoline_ip_table[hash_64(tr->ip, TRAMPOLINE_HASH_BITS)];
        hlist_add_head(&tr->hlist_ip, head);
        refcount_set(&tr->refcnt, 1);
+#ifdef CONFIG_LOCKDEP
+       mutex_init_with_key(&tr->mutex, &__lockdep_no_track__);
+#else
        mutex_init(&tr->mutex);
+#endif
        for (i = 0; i < BPF_TRAMP_MAX; i++)
                INIT_HLIST_HEAD(&tr->progs_hlist[i]);
 out:
@@ -1400,6 +1404,188 @@ int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
        return -ENOTSUPP;
 }
 
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS) && defined(CONFIG_HAVE_SINGLE_FTRACE_DIRECT_OPS)
+
+struct fentry_multi_data {
+       struct ftrace_hash *unreg;
+       struct ftrace_hash *modify;
+       struct ftrace_hash *reg;
+};
+
+static void free_fentry_multi_data(struct fentry_multi_data *data)
+{
+       free_ftrace_hash(data->reg);
+       free_ftrace_hash(data->unreg);
+       free_ftrace_hash(data->modify);
+}
+
+static int register_fentry_multi(struct bpf_trampoline *tr, void *new_addr, void *ptr)
+{
+       struct fentry_multi_data *data = ptr;
+       unsigned long ip = ftrace_location(tr->ip);
+
+       return add_ftrace_hash_entry_direct(data->reg, ip,
+                                           (unsigned long) new_addr) ? 0 : -ENOMEM;
+}
+
+static int unregister_fentry_multi(struct bpf_trampoline *tr, u32 orig_flags, void *old_addr, void *ptr)
+{
+       struct fentry_multi_data *data = ptr;
+       unsigned long ip = ftrace_location(tr->ip);
+
+       return add_ftrace_hash_entry_direct(data->unreg, ip,
+                                           (unsigned long) old_addr) ? 0 : -ENOMEM;
+}
+
+static int modify_fentry_multi(struct bpf_trampoline *tr, u32 orig_flags, void *old_addr, void *new_addr,
+                              bool lock_direct_mutex, void *ptr)
+{
+       struct fentry_multi_data *data = ptr;
+       unsigned long ip = ftrace_location(tr->ip);
+
+       return add_ftrace_hash_entry_direct(data->modify, ip,
+                                           (unsigned long) new_addr) ? 0 : -ENOMEM;
+}
+
+static struct bpf_trampoline_ops trampoline_multi_ops = {
+       .register_fentry   = register_fentry_multi,
+       .unregister_fentry = unregister_fentry_multi,
+       .modify_fentry     = modify_fentry_multi,
+};
+
+int bpf_trampoline_multi_attach(struct bpf_prog *prog, u32 *ids,
+                               struct bpf_tracing_multi_link *link)
+{
+       struct bpf_attach_target_info tgt_info = {};
+       struct bpf_tracing_multi_node *mnode;
+       int j, i, err, cnt = link->nodes_cnt;
+       struct fentry_multi_data data = {};
+       struct bpf_trampoline *tr;
+       u64 key;
+
+       data.reg = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+       if (!data.reg)
+               return -ENOMEM;
+
+       data.modify = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+       if (!data.modify) {
+               free_ftrace_hash(data.reg);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < cnt; i++) {
+               mnode = &link->nodes[i];
+               err = bpf_check_attach_target(NULL, prog, NULL, ids[i], &tgt_info);
+               if (err)
+                       goto rollback_put;
+
+               key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, ids[i]);
+
+               tr = bpf_trampoline_get(key, &tgt_info);
+               if (!tr)
+                       goto rollback_put;
+
+               mnode->trampoline = tr;
+               mnode->node.prog = prog;
+       }
+
+       for (i = 0; i < cnt; i++) {
+               mnode = &link->nodes[i];
+               tr = mnode->trampoline;
+
+               mutex_lock(&tr->mutex);
+
+               err = __bpf_trampoline_link_prog(&mnode->node, tr, NULL, &trampoline_multi_ops, &data);
+               if (err) {
+                       mutex_unlock(&tr->mutex);
+                       goto rollback_unlink;
+               }
+       }
+
+       if (ftrace_hash_count(data.reg)) {
+               err = update_ftrace_direct_add(&direct_ops, data.reg);
+               if (err)
+                       goto rollback_unlink;
+       }
+
+       if (ftrace_hash_count(data.modify)) {
+               err = update_ftrace_direct_mod(&direct_ops, data.modify, true);
+               if (err) {
+                       WARN_ON_ONCE(update_ftrace_direct_del(&direct_ops, data.reg));
+                       goto rollback_unlink;
+               }
+       }
+
+       for (i = 0; i < cnt; i++) {
+               tr = link->nodes[i].trampoline;
+               mutex_unlock(&tr->mutex);
+       }
+
+       free_fentry_multi_data(&data);
+       return 0;
+
+rollback_unlink:
+       for (j = 0; j < i; j++) {
+               mnode = &link->nodes[j];
+               tr = mnode->trampoline;
+               WARN_ON_ONCE(__bpf_trampoline_unlink_prog(&mnode->node, tr, NULL,
+                            &trampoline_multi_ops, &data));
+               mutex_unlock(&tr->mutex);
+       }
+
+rollback_put:
+       for (j = 0; j < i; j++) {
+               mnode = &link->nodes[j];
+               bpf_trampoline_put(mnode->trampoline);
+       }
+
+       free_fentry_multi_data(&data);
+       return err;
+}
+
+int bpf_trampoline_multi_detach(struct bpf_prog *prog, struct bpf_tracing_multi_link *link)
+{
+       struct bpf_tracing_multi_node *mnode;
+       struct fentry_multi_data data = {};
+       int i, cnt = link->nodes_cnt;
+       struct bpf_trampoline *tr;
+
+       data.unreg = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+       if (!data.unreg)
+               return -ENOMEM;
+
+       data.modify = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+       if (!data.modify) {
+               free_ftrace_hash(data.unreg);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < cnt; i++) {
+               mnode = &link->nodes[i];
+               tr = link->nodes[i].trampoline;
+
+               mutex_lock(&tr->mutex);
+               WARN_ON_ONCE(__bpf_trampoline_unlink_prog(&mnode->node, tr, NULL,
+                                                         &trampoline_multi_ops, &data));
+       }
+
+       if (ftrace_hash_count(data.unreg))
+               WARN_ON_ONCE(update_ftrace_direct_del(&direct_ops, data.unreg));
+       if (ftrace_hash_count(data.modify))
+               WARN_ON_ONCE(update_ftrace_direct_mod(&direct_ops, data.modify, true));
+
+       for (i = 0; i < cnt; i++) {
+               tr = link->nodes[i].trampoline;
+               mutex_unlock(&tr->mutex);
+               bpf_trampoline_put(tr);
+       }
+
+       free_fentry_multi_data(&data);
+       return 0;
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS && CONFIG_HAVE_SINGLE_FTRACE_DIRECT_OPS */
+
 static int __init init_trampolines(void)
 {
        int i;
-- 
2.52.0

