> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index f73e08c223b5..64976009ba3c 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -1734,11 +1734,11 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> case BPF_FUNC_d_path:
> return &bpf_d_path_proto;
> case BPF_FUNC_get_func_arg:
> - return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
> + return &bpf_get_func_arg_proto;
Removing the bpf_prog_has_trampoline() check here allows every TRACING
program type to use bpf_get_func_arg(), not only the trampoline-based types
that could already use it and the RAW_TP case this patch is targeting. In
particular, BPF_TRACE_ITER programs can now call this helper as well.
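For reference, the newly permitted usage presumably looks something like the
minimal tp_btf sketch below (attach point, program name and argument index
are illustrative, not taken from the patch):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Before this patch, tracing_prog_func_proto() returned NULL here because
 * bpf_prog_has_trampoline() is false for BPF_TRACE_RAW_TP, so the verifier
 * rejected the helper call; with the check removed it is accepted.
 */
SEC("tp_btf/sched_switch")
int on_sched_switch(void *ctx)
{
	__u64 arg = 0;

	/* Index 1 is arbitrary for the example. Per the new RAW_TP branch
	 * quoted below, nr_args now comes from the btf_trace_##name typedef
	 * (minus the leading 'void *__data') instead of ctx - 8.
	 */
	if (!bpf_get_func_arg(ctx, 1, &arg))
		bpf_printk("sched_switch arg1 = 0x%llx", arg);
	return 0;
}

char _license[] SEC("license") = "GPL";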
However, looking at do_misc_fixups() in verifier.c, the inline code
generation only handles BPF_TRACE_RAW_TP specially. For other non-trampoline
types like BPF_TRACE_ITER, the else branch generates:
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8)
This loads nr_args from ctx - 8, but ITER programs use a different context
layout (e.g. struct bpf_iter__bpf_map_elem with meta, map, key and value
pointers); there is no nr_args slot at ctx - 8 for them.
Would this cause ITER programs using bpf_get_func_arg() to read whatever
memory happens to sit at ctx - 8, potentially leaking kernel data or feeding
a bogus nr_args into the bounds check for subsequent argument accesses?
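To make the concern concrete: a hypothetical iterator program along these
lines (context struct and section name from the standard bpf_iter
infrastructure; the helper calls are illustrative, not from the patch) would
now be accepted and hit the else branch in the do_misc_fixups() hunk quoted
below:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("iter/bpf_map_elem")
int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	__u64 arg = 0;
	long cnt;

	/* Inlined as BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8): reads
	 * the 8 bytes just before the iter context, which hold no argument
	 * count for ITER programs.
	 */
	cnt = bpf_get_func_arg_cnt(ctx);

	/* The inlined bounds check for this helper compares the requested
	 * index against that same bogus value read from ctx - 8.
	 */
	bpf_get_func_arg(ctx, 0, &arg);

	bpf_printk("cnt=%ld arg=0x%llx", cnt, arg);
	return 0;
}

char _license[] SEC("license") = "GPL";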
> case BPF_FUNC_get_func_ret:
> return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
> case BPF_FUNC_get_func_arg_cnt:
> - return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
> + return &bpf_get_func_arg_cnt_proto;
Same question applies here for bpf_get_func_arg_cnt() with ITER programs.
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 9de0ec0c3ed9..0b281b7c41eb 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -23323,8 +23323,20 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
> /* Implement bpf_get_func_arg inline. */
> if (prog_type == BPF_PROG_TYPE_TRACING &&
> insn->imm == BPF_FUNC_get_func_arg) {
> + if (eatype == BPF_TRACE_RAW_TP) {
> + int nr_args = btf_type_vlen(prog->aux->attach_func_proto);
> +
> + /*
> + * skip first 'void *__data' argument in btf_trace_##name
> + * typedef
> + */
> + nr_args--;
> + /* Save nr_args to reg0 */
> + insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, nr_args);
> + } else {
> + /* Load nr_args from ctx - 8 */
> + insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
> + }
This else branch now applies to BPF_TRACE_ITER as well, since the
trampoline check was removed in tracing_prog_func_proto(). Should the
condition be extended to also handle ITER, or should ITER be explicitly
blocked from using these helpers?
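If the answer is that ITER should stay blocked, one possible shape for the
gate in tracing_prog_func_proto() might be something like the following
sketch (untested, and the same pattern would be needed for
bpf_get_func_arg_cnt):

	case BPF_FUNC_get_func_arg:
		/* Allow tp_btf (now handled specially in do_misc_fixups())
		 * and trampoline-attached programs; keep other attach types
		 * such as BPF_TRACE_ITER blocked as before.
		 */
		if (prog->expected_attach_type == BPF_TRACE_RAW_TP ||
		    bpf_prog_has_trampoline(prog))
			return &bpf_get_func_arg_proto;
		return NULL;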
[ ... ]
---
AI reviewed your patch. Please fix the bug or reply by email explaining why it is not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
CI run summary: https://github.com/kernel-patches/bpf/actions/runs/21147849826