Trampoline-based tracing programs that call bpf_get_func_ip() rely on
the func IP stored on the stack. Mixing !call_get_func_ip progs with
call_get_func_ip progs via tail calls could break this assumption.

To address this, reject the combination of !call_get_func_ip progs with
call_get_func_ip progs in bpf_map_owner_matches(), which prevents the
tail callee from getting a bogus func IP.

Also reject call_get_func_ip mismatches during initialization to
prevent bypassing the above restriction.

Without this check, the above restriction can be bypassed as follows.

/* Single-slot prog array shared by prog_a and prog_b; the tail-call
 * target installed here is what triggers the bogus-func-IP scenario.
 */
struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("?fentry")
int BPF_PROG(prog_a)
{
        /* bpf_get_func_ip() takes the program context as its argument
         * (ctx is provided implicitly by the BPF_PROG macro); without
         * it the snippet does not build/load.
         */
        bpf_printk("FUNC IP: 0x%llx\n", bpf_get_func_ip(ctx));
        /* Tail-call through the shared jmp_table slot 0. */
        bpf_tail_call_static(ctx, &jmp_table, 0);
        return 0;
}

SEC("?fentry")
int BPF_PROG(prog_b)
{
        /* Never calls bpf_get_func_ip(), so this prog is loaded with
         * call_get_func_ip unset; it only tail-calls into jmp_table,
         * where prog_a (which does read the func IP) may be installed.
         */
        bpf_tail_call_static(ctx, &jmp_table, 0);
        return 0;
}

The jmp_table is shared between prog_a and prog_b.

* Load prog_a first.
  At this point, owner->call_get_func_ip=true.
* Load prog_b next.
  At this point, prog_b passes the compatibility check.
* Add prog_a to jmp_table.
* Attach prog_b to a kernel function.

When the kernel function runs, prog_a will get a bogus func IP because
no func IP is prepared on the trampoline stack.

Fixes: 1e37392cccde ("bpf: Enable BPF_TRAMP_F_IP_ARG for trampolines with call_get_func_ip")
Signed-off-by: Leon Hwang <[email protected]>
---
 include/linux/bpf.h |  1 +
 kernel/bpf/core.c   | 10 ++++++++++
 2 files changed, 11 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index dbafed52b2ba..fb978650b169 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -289,6 +289,7 @@ struct bpf_map_owner {
        u32 xdp_has_frags:1;
        u32 sleepable:1;
        u32 kprobe_write_ctx:1;
+       u32 call_get_func_ip:1;
        u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
        const struct btf_type *attach_func_proto;
        enum bpf_attach_type expected_attach_type;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 121a697d4da5..d20a90bd1d2b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2391,6 +2391,7 @@ static void bpf_map_owner_init(struct bpf_map_owner *owner, const struct bpf_pro
        owner->xdp_has_frags = aux->xdp_has_frags;
        owner->sleepable = fp->sleepable;
        owner->kprobe_write_ctx = aux->kprobe_write_ctx;
+       owner->call_get_func_ip = fp->call_get_func_ip;
        owner->expected_attach_type = fp->expected_attach_type;
        owner->attach_func_proto = aux->attach_func_proto;
        for_each_cgroup_storage_type(i)
@@ -2407,6 +2408,7 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_pr
                                  enum bpf_prog_type prog_type,
                                  enum bpf_map_owner_match_type match)
 {
+       bool has_trampoline = bpf_prog_has_trampoline(fp);
        struct bpf_map_owner *owner = map->owner;
        struct bpf_prog_aux *aux = fp->aux;
        enum bpf_cgroup_storage_type i;
@@ -2422,11 +2424,19 @@ static bool bpf_map_owner_matches(const struct bpf_map *map, const struct bpf_pr
        case BPF_MAP_OWNER_MATCH_FOR_INIT:
                if (owner->kprobe_write_ctx != aux->kprobe_write_ctx)
                        return false;
+               if (has_trampoline) {
+                       if (owner->call_get_func_ip != fp->call_get_func_ip)
+                               return false;
+               }
                break;
 
        case BPF_MAP_OWNER_MATCH_FOR_UPDATE:
                if (!owner->kprobe_write_ctx && aux->kprobe_write_ctx)
                        return false;
+               if (has_trampoline) {
+                       if (!owner->call_get_func_ip && fp->call_get_func_ip)
+                               return false;
+               }
                break;
        }
 
-- 
2.52.0


Reply via email to