Introduce the BPF_BRANCH_SNAPSHOT_F_COPY flag, which lets tracing programs
copy branch entries from the per-CPU *bpf_branch_snapshot* buffer.

Instead of introducing a new kfunc, extend the bpf_get_branch_snapshot()
helper to support the BPF_BRANCH_SNAPSHOT_F_COPY flag.

When BPF_BRANCH_SNAPSHOT_F_COPY is specified:

* Check the *flags* value in the verifier's 'check_helper_call()'.
* Skip inlining the 'bpf_get_branch_snapshot()' helper in the verifier's
  'do_misc_fixups()'.
* 'memcpy()' the saved branch entries into the destination buffer in the
  'bpf_get_branch_snapshot()' helper (see the usage sketch below).
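
For reference, a minimal usage sketch from the BPF program side (the attach
target, entry count, and the local flag define are illustrative only; the
flag value mirrors the enum added to include/linux/bpf.h below, which this
patch does not export to uapi):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  /* Illustrative define; this patch adds the flag to include/linux/bpf.h
   * only, so the program supplies the value itself.
   */
  #define BPF_BRANCH_SNAPSHOT_F_COPY 1

  #define ENTRY_CNT 32

  /* Each struct perf_branch_entry is 24 bytes, i.e. three __u64 words. */
  __u64 entries[ENTRY_CNT * 3] = {};

  SEC("fexit/do_nanosleep")
  int BPF_PROG(copy_branches)
  {
          long bytes;

          /* Copy the entries saved in the per-CPU bpf_branch_snapshot
           * buffer instead of taking a fresh hardware snapshot.
           */
          bytes = bpf_get_branch_snapshot(entries, sizeof(entries),
                                          BPF_BRANCH_SNAPSHOT_F_COPY);
          if (bytes < 0)
                  return 0;

          /* On success, 'bytes' bytes of branch records are in entries[]. */
          return 0;
  }

  char _license[] SEC("license") = "GPL";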

Signed-off-by: Leon Hwang <[email protected]>
---
 include/linux/bpf.h          |  4 ++++
 include/linux/bpf_verifier.h |  1 +
 kernel/bpf/verifier.c        | 30 ++++++++++++++++++++++++++++++
 kernel/trace/bpf_trace.c     | 17 ++++++++++++++---
 4 files changed, 49 insertions(+), 3 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 16dc21836a06..71ce225e5160 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1249,6 +1249,10 @@ struct bpf_tramp_branch_entries {
 DECLARE_PER_CPU(struct bpf_tramp_branch_entries, bpf_branch_snapshot);
 #endif

+enum {
+       BPF_BRANCH_SNAPSHOT_F_COPY      = 1,    /* Copy branch snapshot from bpf_branch_snapshot. */
+};
+
 /* Different use cases for BPF trampoline:
  * 1. replace nop at the function entry (kprobe equivalent)
  *    flags = BPF_TRAMP_F_RESTORE_REGS
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 130bcbd66f60..c60a145e0466 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -561,6 +561,7 @@ struct bpf_insn_aux_data {
        bool non_sleepable; /* helper/kfunc may be called from non-sleepable context */
        bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
        bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
+       bool copy_branch_snapshot; /* BPF_BRANCH_SNAPSHOT_F_COPY for bpf_get_branch_snapshot helper */
        u8 alu_state; /* used in combination with alu_limit */
        /* true if STX or LDX instruction is a part of a spill/fill
         * pattern for a bpf_fastcall call.
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 53635ea2e41b..0a537f9c2f8c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11772,6 +11772,33 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
                err = push_callback_call(env, insn, insn_idx, meta.subprogno,
                                         set_user_ringbuf_callback_state);
                break;
+       case BPF_FUNC_get_branch_snapshot:
+       {
+               u64 flags;
+
+               if (!is_reg_const(&regs[BPF_REG_3], false)) {
+                       verbose(env, "Flags in bpf_get_branch_snapshot helper must be const.\n");
+                       return -EINVAL;
+               }
+               flags = reg_const_value(&regs[BPF_REG_3], false);
+               if (flags & ~BPF_BRANCH_SNAPSHOT_F_COPY) {
+                       verbose(env, "Invalid flags in bpf_get_branch_snapshot helper.\n");
+                       return -EINVAL;
+               }
+
+               if (flags & BPF_BRANCH_SNAPSHOT_F_COPY) {
+                       if (env->prog->type != BPF_PROG_TYPE_TRACING ||
+                           (env->prog->expected_attach_type != BPF_TRACE_FENTRY &&
+                            env->prog->expected_attach_type != BPF_TRACE_FEXIT)) {
+                               verbose(env, "Only fentry and fexit programs support BPF_BRANCH_SNAPSHOT_F_COPY.\n");
+                               return -EINVAL;
+                       }
+
+                       env->insn_aux_data[insn_idx].copy_branch_snapshot = true;
+                       env->prog->copy_branch_snapshot = true;
+               }
+               break;
+       }
        }

        if (err)
@@ -23370,6 +23397,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                         */
                        BUILD_BUG_ON(br_entry_size != 24);

+                       if (env->insn_aux_data[i + delta].copy_branch_snapshot)
+                               goto patch_call_imm;
+
                        /* if (unlikely(flags)) return -EINVAL */
                        insn_buf[0] = BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 0, 7);

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 6e076485bf70..e9e1698cf608 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1172,10 +1172,20 @@ BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags)
        static const u32 br_entry_size = sizeof(struct perf_branch_entry);
        u32 entry_cnt = size / br_entry_size;

-       entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
-
-       if (unlikely(flags))
+       if (likely(!flags)) {
+               entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt);
+#ifdef CONFIG_X86_64
+       } else if (flags & BPF_BRANCH_SNAPSHOT_F_COPY) {
+               struct bpf_tramp_branch_entries *br;
+
+               br = this_cpu_ptr(&bpf_branch_snapshot);
+               entry_cnt = min_t(u32, entry_cnt, br->cnt);
+               if (entry_cnt)
+                       memcpy(buf, (void *) br->entries, entry_cnt * br_entry_size);
+#endif
+       } else {
                return -EINVAL;
+       }

        if (!entry_cnt)
                return -ENOENT;
@@ -1189,6 +1199,7 @@ const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
+       .arg3_type      = ARG_ANYTHING,
 };

 BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
--
2.52.0

