On Sun, 8 Mar 2026 at 14:47, Chengkaitao <[email protected]> wrote:
>
> From: Kaitao Cheng <[email protected]>
>
> Add a new kfunc bpf_list_add_impl(head, new, prev, meta, off) that
> inserts 'new' after 'prev' in the BPF linked list. Both must be in
> the same list; 'prev' must already be in the list. The new node must
> be an owning reference (e.g. from bpf_obj_new); the kfunc consumes
> that reference and the node becomes non-owning once inserted.
>
> We have added an additional parameter bpf_list_head *head to
> bpf_list_add_impl, as the verifier requires the head parameter to
> check whether the lock is being held.
>
> Returns 0 on success, -EINVAL if 'prev' is not in a list or 'new'
> is already in a list (or duplicate insertion). On failure, the
> kernel drops the passed-in node.
>
> Signed-off-by: Kaitao Cheng <[email protected]>
> ---
> kernel/bpf/helpers.c | 56 ++++++++++++++++++++++++++++++-------------
> kernel/bpf/verifier.c | 13 ++++++++--
> 2 files changed, 50 insertions(+), 19 deletions(-)
>
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 01b74c4ac00d..407520fde668 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -2379,11 +2379,12 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void
> *p__refcounted_kptr, void *meta
> return (void *)p__refcounted_kptr;
> }
>
> -static int __bpf_list_add(struct bpf_list_node_kern *node,
> - struct bpf_list_head *head,
> - bool tail, struct btf_record *rec, u64 off)
> +static int __bpf_list_add(struct bpf_list_head *head,
> + struct bpf_list_node_kern *new,
> + struct list_head *prev,
> + struct btf_record *rec, u64 off)
> {
> - struct list_head *n = &node->list_head, *h = (void *)head;
> + struct list_head *n = &new->list_head, *h = (void *)head;
>
> /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
> * called on its fields, so init here
> @@ -2391,39 +2392,59 @@ static int __bpf_list_add(struct bpf_list_node_kern
> *node,
> if (unlikely(!h->next))
> INIT_LIST_HEAD(h);
>
> - /* node->owner != NULL implies !list_empty(n), no need to separately
> + /* When prev is not the list head, it must be a node in this list. */
> + if (prev != h && WARN_ON_ONCE(READ_ONCE(container_of(
> + prev, struct bpf_list_node_kern, list_head)->owner) != head))
> + goto fail;
There is a slight issue here: if head is not initialized, prev will be
NULL at this point, since the caller passes h->prev.
So we'll do a bad (NULL pointer) dereference. I think we should pass a
pointer to prev (struct list_head **) instead, and only load it after
INIT_LIST_HEAD(h) has run.
Otherwise, the prev != h check looks ok (since we want to establish
that prev is not a node, i.e. that it is the list head itself).
Please also add a selftest covering this case to catch this sort of
bug: verify that it crashes with your patch as-is, and no longer
crashes once the fix is applied.
> +
> + /* new->owner != NULL implies !list_empty(n), no need to separately
> * check the latter
> */
> - if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
> - /* Only called from BPF prog, no need to migrate_disable */
> - __bpf_obj_drop_impl((void *)n - off, rec, false);
> - return -EINVAL;
> - }
> -
> - tail ? list_add_tail(n, h) : list_add(n, h);
> - WRITE_ONCE(node->owner, head);
> + if (cmpxchg(&new->owner, NULL, BPF_PTR_POISON))
> + goto fail;
>
> + list_add(n, prev);
> + WRITE_ONCE(new->owner, head);
> return 0;
> +
> +fail:
> + /* Only called from BPF prog, no need to migrate_disable */
> + __bpf_obj_drop_impl((void *)n - off, rec, false);
> + return -EINVAL;
> }
>
> [...]
> @@ -12996,6 +12998,7 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
> {
> return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
> btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> + btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
> btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
> btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
> btf_id == special_kfunc_list[KF_bpf_list_del] ||
> @@ -13122,6 +13125,7 @@ static bool check_kfunc_is_graph_node_api(struct
> bpf_verifier_env *env,
> case BPF_LIST_NODE:
> ret = (kfunc_btf_id ==
> special_kfunc_list[KF_bpf_list_push_front_impl] ||
> kfunc_btf_id ==
> special_kfunc_list[KF_bpf_list_push_back_impl] ||
> + kfunc_btf_id ==
> special_kfunc_list[KF_bpf_list_add_impl] ||
> kfunc_btf_id == special_kfunc_list[KF_bpf_list_del]);
> break;
> case BPF_RB_NODE:
> @@ -14264,6 +14268,7 @@ static int check_kfunc_call(struct bpf_verifier_env
> *env, struct bpf_insn *insn,
>
> if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
> meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
> + meta.func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
> meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
> release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
> insn_aux->insert_off = regs[BPF_REG_2].off;
Please always rebase patches on bpf-next/master before sending; this
one didn't apply cleanly.
> @@ -23230,13 +23235,17 @@ static int fixup_kfunc_call(struct bpf_verifier_env
> *env, struct bpf_insn *insn,
> *cnt = 3;
> } else if (desc->func_id ==
> special_kfunc_list[KF_bpf_list_push_back_impl] ||
> desc->func_id ==
> special_kfunc_list[KF_bpf_list_push_front_impl] ||
> + desc->func_id == special_kfunc_list[KF_bpf_list_add_impl]
> ||
> desc->func_id ==
> special_kfunc_list[KF_bpf_rbtree_add_impl]) {
> struct btf_struct_meta *kptr_struct_meta =
> env->insn_aux_data[insn_idx].kptr_struct_meta;
> int struct_meta_reg = BPF_REG_3;
> int node_offset_reg = BPF_REG_4;
>
> - /* rbtree_add has extra 'less' arg, so args-to-fixup are in
> diff regs */
> - if (desc->func_id ==
> special_kfunc_list[KF_bpf_rbtree_add_impl]) {
> + /* list/rbtree_add_impl have an extra arg (prev/less),
> + * so args-to-fixup are in different regs.
> + */
> + if (desc->func_id == special_kfunc_list[KF_bpf_list_add_impl]
> ||
> + desc->func_id ==
> special_kfunc_list[KF_bpf_rbtree_add_impl]) {
> struct_meta_reg = BPF_REG_4;
> node_offset_reg = BPF_REG_5;
> }
> --
> 2.50.1 (Apple Git-155)
>
>