On Fri, Feb 20, 2026 at 2:07 AM Jiri Olsa <[email protected]> wrote:
>
> Adding mutex lock pool that replaces bpf trampolines mutex.
>
> For tracing_multi link coming in following changes we need to lock all
> the involved trampolines during the attachment. This could mean thousands
> of mutex locks, which is not convenient.
>
> As suggested by Andrii we can replace bpf trampolines mutex with mutex
> pool, where each trampoline is hashed to one of the locks from the pool.
>
> It's better to lock all the pool mutexes (64 at the moment) than
> thousands of them.
>
> Removing the mutex_is_locked in bpf_trampoline_put, because we removed
> the mutex from bpf_trampoline.
>
> Suggested-by: Andrii Nakryiko <[email protected]>
> Signed-off-by: Jiri Olsa <[email protected]>
> ---
> include/linux/bpf.h | 2 --
> kernel/bpf/trampoline.c | 74 +++++++++++++++++++++++++++++++----------
> 2 files changed, 56 insertions(+), 20 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index cd9b96434904..46bf3d86bdb2 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -1335,8 +1335,6 @@ struct bpf_trampoline {
> /* hlist for trampoline_ip_table */
> struct hlist_node hlist_ip;
> struct ftrace_ops *fops;
> - /* serializes access to fields of this trampoline */
> - struct mutex mutex;
> refcount_t refcnt;
> u32 flags;
> u64 key;
> diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
> index 952cd7932461..05dc0358654d 100644
> --- a/kernel/bpf/trampoline.c
> +++ b/kernel/bpf/trampoline.c
> @@ -30,6 +30,45 @@ static struct hlist_head
> trampoline_ip_table[TRAMPOLINE_TABLE_SIZE];
> /* serializes access to trampoline tables */
> static DEFINE_MUTEX(trampoline_mutex);
>
> +#define TRAMPOLINE_LOCKS_BITS 6
> +#define TRAMPOLINE_LOCKS_TABLE_SIZE (1 << TRAMPOLINE_LOCKS_BITS)
> +
> +static struct {
> + struct mutex mutex;
> + struct lock_class_key key;
> +} *trampoline_locks;
> +
> +static struct mutex *trampoline_locks_lookup(struct bpf_trampoline *tr)
nit: would select_trampoline_lock() be a clearer name? "lookup" suggests the
lock might not exist, while this always maps a trampoline to one of the pool
mutexes.
> +{
> + return &trampoline_locks[hash_64((u64) tr,
> TRAMPOLINE_LOCKS_BITS)].mutex;
> +}
> +
> +static void trampoline_lock(struct bpf_trampoline *tr)
> +{
> + mutex_lock(trampoline_locks_lookup(tr));
> +}
> +
> +static void trampoline_unlock(struct bpf_trampoline *tr)
> +{
> + mutex_unlock(trampoline_locks_lookup(tr));
> +}
> +
> +static int __init trampoline_locks_init(void)
> +{
> + int i;
> +
> + trampoline_locks = kmalloc_array(TRAMPOLINE_LOCKS_TABLE_SIZE,
> + sizeof(trampoline_locks[0]),
> GFP_KERNEL);
why bother with memory allocation? This is just 64 mutexes.
> + if (!trampoline_locks)
> + return -ENOMEM;
> +
> + for (i = 0; i < TRAMPOLINE_LOCKS_TABLE_SIZE; i++) {
> + lockdep_register_key(&trampoline_locks[i].key);
why special key?