On Wed, Apr 01, 2026 at 04:05:04PM +0900, Hoyeon Lee wrote:
> Currently, attach_probe covers manual single-kprobe attaches by
> func_name, but not the raw-address form that the PMU-based
> single-kprobe path can accept.
> 
> This commit adds PERF and LINK raw-address coverage. It resolves
> SYS_NANOSLEEP_KPROBE_NAME through kallsyms, passes the absolute address
> in bpf_kprobe_opts.offset with func_name = NULL, and verifies that
> kprobe and kretprobe are still triggered. It also verifies that LEGACY
> rejects the same form.

I left 2 nits below.

Acked-by: Jiri Olsa <[email protected]>

jirka

> 
> Signed-off-by: Hoyeon Lee <[email protected]>
> ---
>  .../selftests/bpf/prog_tests/attach_probe.c   | 82 +++++++++++++++++++
>  1 file changed, 82 insertions(+)
> 
> diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c 
> b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> index 9e77e5da7097..a41542f4b35d 100644
> --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c
> @@ -123,6 +123,82 @@ static void test_attach_probe_manual(enum 
> probe_attach_mode attach_mode)
>       test_attach_probe_manual__destroy(skel);
>  }
>  
> +/* manual attach address-based kprobe/kretprobe testings */
> +static void test_attach_kprobe_by_addr(enum probe_attach_mode attach_mode)
> +{
> +     LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
> +     struct test_attach_probe_manual *skel;
> +     unsigned long func_addr;
> +
> +     if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
> +             return;
> +
> +     func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
> +     if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
> +             return;
> +
> +     skel = test_attach_probe_manual__open_and_load();
> +     if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
> +             return;
> +
> +     kprobe_opts.attach_mode = attach_mode;
> +     kprobe_opts.retprobe = false;
> +     kprobe_opts.offset = func_addr;
> +     skel->links.handle_kprobe =
> +             bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
> +                                             NULL, &kprobe_opts);
> +     if (!ASSERT_OK_PTR(skel->links.handle_kprobe, "attach_kprobe_by_addr"))
> +             goto cleanup;
> +
> +     kprobe_opts.retprobe = true;
> +     skel->links.handle_kretprobe =
> +             bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
> +                                             NULL, &kprobe_opts);
> +     if (!ASSERT_OK_PTR(skel->links.handle_kretprobe,
> +                        "attach_kretprobe_by_addr"))

nit, no need to split the line

> +             goto cleanup;
> +
> +     /* trigger & validate kprobe && kretprobe */
> +     usleep(1);
> +
> +     ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res");
> +     ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res");
> +
> +cleanup:
> +     test_attach_probe_manual__destroy(skel);
> +}
> +
> +/* reject legacy address-based kprobe attach */
> +static void test_attach_kprobe_legacy_by_addr_reject(void)
> +{
> +     LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
> +     struct test_attach_probe_manual *skel;
> +     unsigned long func_addr;
> +
> +     if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
> +             return;
> +
> +     func_addr = ksym_get_addr(SYS_NANOSLEEP_KPROBE_NAME);
> +     if (!ASSERT_NEQ(func_addr, 0UL, "func_addr"))
> +             return;
> +
> +     skel = test_attach_probe_manual__open_and_load();
> +     if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load"))
> +             return;
> +
> +     kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY;
> +     kprobe_opts.offset = func_addr;
> +     skel->links.handle_kprobe =
> +             bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
> +                                             NULL, &kprobe_opts);
> +     if (ASSERT_ERR_PTR(skel->links.handle_kprobe,
> +                        "attach_kprobe_legacy_by_addr"))
> +             ASSERT_EQ(libbpf_get_error(skel->links.handle_kprobe),
> +                       -EOPNOTSUPP, "attach_kprobe_legacy_by_addr_err");

nit, maybe we could do just:

       ASSERT_ERR_PTR(skel->links.handle_kprobe, 
"attach_kprobe_legacy_by_addr");
       ASSERT_EQ(libbpf_get_error(skel->links.handle_kprobe),
                 -EOPNOTSUPP, "attach_kprobe_legacy_by_addr_err");

> +
> +     test_attach_probe_manual__destroy(skel);
> +}
> +
>  /* attach uprobe/uretprobe long event name testings */
>  static void test_attach_uprobe_long_event_name(void)
>  {
> @@ -416,6 +492,12 @@ void test_attach_probe(void)
>               test_attach_probe_manual(PROBE_ATTACH_MODE_PERF);
>       if (test__start_subtest("manual-link"))
>               test_attach_probe_manual(PROBE_ATTACH_MODE_LINK);
> +     if (test__start_subtest("kprobe-perf-by-addr"))
> +             test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_PERF);
> +     if (test__start_subtest("kprobe-link-by-addr"))
> +             test_attach_kprobe_by_addr(PROBE_ATTACH_MODE_LINK);
> +     if (test__start_subtest("kprobe-legacy-by-addr-reject"))
> +             test_attach_kprobe_legacy_by_addr_reject();
>  
>       if (test__start_subtest("auto"))
>               test_attach_probe_auto(skel);
> -- 
> 2.52.0
> 

Reply via email to the author.