From: Xichen Lin <linxichen...@bytedance.com>

Check the signature of a BPF program at load time against the same set
of keys used for module signature checking. Verification is gated by
the new bpf_signature_enable sysctl, which defaults to off and can only
be switched on.

Currently the format of a signed BPF program mirrors that of a signed
kernel module: the BPF bytecode, the PKCS#7 signature, a struct
module_signature and a magic string, in that order, with each region
padded to a multiple of sizeof(struct sock_filter).
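
For illustration, a user-space signer could produce that layout roughly
as sketched below. This is a minimal sketch and not part of the patch:
the helper names (bpf_sign_layout, round_up_sf, SOCK_FILTER_SIZE) are
made up here, and producing the detached PKCS#7 blob over the raw
bytecode (e.g. with OpenSSL, as scripts/sign-file does for modules) is
assumed to happen elsewhere.

/*
 * Hypothetical user-space helper, for illustration only: lay out
 * "bytecode | signature | module_signature | magic string", each
 * region zero-padded to a multiple of sizeof(struct sock_filter),
 * which is what bpf_strip_prog_sig()/bpf_strip_prog_ms() walk back
 * through on the kernel side.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arpa/inet.h>          /* htonl() for the big-endian sig_len */

#define BPF_PROG_SIG_STRING     "~BPF signature appended~\n"
#define SOCK_FILTER_SIZE        8       /* sizeof(struct sock_filter) */

/* Mirrors include/linux/module_signature.h (12 bytes). */
struct module_signature {
        uint8_t  algo, hash, id_type, signer_len, key_id_len;
        uint8_t  pad[3];
        uint32_t sig_len;       /* length of the PKCS#7 blob, big-endian */
};

static size_t round_up_sf(size_t n)
{
        return (n + SOCK_FILTER_SIZE - 1) & ~(size_t)(SOCK_FILTER_SIZE - 1);
}

static unsigned char *bpf_sign_layout(const void *prog, size_t prog_len,
                                      const void *sig, size_t sig_len,
                                      size_t *image_len)
{
        const size_t marker_len = sizeof(BPF_PROG_SIG_STRING) - 1;
        size_t total = round_up_sf(prog_len) + round_up_sf(sig_len) +
                       round_up_sf(sizeof(struct module_signature)) +
                       round_up_sf(marker_len);
        struct module_signature ms = {
                .id_type = 2,           /* PKEY_ID_PKCS7, required by mod_check_sig() */
                .sig_len = htonl(sig_len),
        };
        unsigned char *out, *p;

        p = out = calloc(1, total);
        if (!out)
                return NULL;
        memcpy(p, prog, prog_len);
        p += round_up_sf(prog_len);
        memcpy(p, sig, sig_len);
        p += round_up_sf(sig_len);
        memcpy(p, &ms, sizeof(ms));
        p += round_up_sf(sizeof(ms));
        memcpy(p, BPF_PROG_SIG_STRING, marker_len);
        *image_len = total;
        return out;
}

On load the kernel strips these trailing regions in reverse order and,
when the sysctl is enabled, hands the remaining bytecode plus the
signature to verify_pkcs7_signature() against the builtin and secondary
trusted keyrings.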

Signed-off-by: Xichen Lin <linxichen...@bytedance.com>
---
 include/linux/bpf_sig.h | 26 ++++++++++++++++++
 init/Kconfig            | 10 +++++++
 kernel/bpf/Makefile     |  3 +++
 kernel/bpf/bpf_sig.c    | 70 +++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/bpf/syscall.c    |  5 ++++
 kernel/sysctl.c         | 14 ++++++++++
 6 files changed, 128 insertions(+)
 create mode 100644 include/linux/bpf_sig.h
 create mode 100644 kernel/bpf/bpf_sig.c

diff --git a/include/linux/bpf_sig.h b/include/linux/bpf_sig.h
new file mode 100644
index 000000000000..da87ba50f340
--- /dev/null
+++ b/include/linux/bpf_sig.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2020 Bytedance
+ */
+
+#ifndef _LINUX_BPF_SIG_H
+#define _LINUX_BPF_SIG_H
+
+#include <linux/bpf.h>
+
+#define BPF_PROG_SIG_STRING "~BPF signature appended~\n"
+
+#ifdef CONFIG_BPF_SIGNATURE
+extern int sysctl_bpf_signature_enable;
+#endif /* CONFIG_BPF_SIGNATURE */
+
+#ifdef CONFIG_BPF_SIGNATURE
+int bpf_check_prog_sig(struct bpf_prog *prog);
+#else
+static inline int bpf_check_prog_sig(struct bpf_prog *prog)
+{
+       return 0;
+}
+#endif /* CONFIG_BPF_SIGNATURE */
+#endif /* _LINUX_BPF_SIG_H */
diff --git a/init/Kconfig b/init/Kconfig
index b77c60f8b963..24225c966803 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -2212,6 +2212,16 @@ config MODULE_SIG_HASH
        default "sha384" if MODULE_SIG_SHA384
        default "sha512" if MODULE_SIG_SHA512
 
+config BPF_SIGNATURE
+       bool "BPF program signature verification"
+       depends on MODULE_SIG
+       help
+         Check BPF programs for a valid signature at load time. The signature
+         is appended to the end of the program image, as with module signing.
+
+         BPF signature checking uses the same kernel facilities, keys and
+         hash algorithms as module signature checking.
+
 config MODULE_COMPRESS
        bool "Compress modules on installation"
        help
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index d1249340fd6b..c6d2b200e795 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -37,3 +37,6 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
 obj-${CONFIG_BPF_LSM} += bpf_lsm.o
 endif
 obj-$(CONFIG_BPF_PRELOAD) += preload/
+ifeq ($(CONFIG_BPF_SIGNATURE),y)
+obj-$(CONFIG_BPF_SIGNATURE) += bpf_sig.o
+endif
diff --git a/kernel/bpf/bpf_sig.c b/kernel/bpf/bpf_sig.c
new file mode 100644
index 000000000000..7fcfc1b5d5d8
--- /dev/null
+++ b/kernel/bpf/bpf_sig.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2021 Bytedance */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/bpf_sig.h>
+#include <linux/module_signature.h>
+#include <linux/verification.h>
+
+int sysctl_bpf_signature_enable;
+
+static bool bpf_strip_prog_ms(struct bpf_prog *prog, unsigned int *sig_len_ptr)
+{
+       void *data = (void *) prog->insns;
+       unsigned int prog_len = bpf_prog_size(prog->len) - sizeof(struct bpf_prog);
+       unsigned int rounded_ms_len = round_up(sizeof(struct module_signature),
+                                              sizeof(struct sock_filter));
+       struct module_signature *ms = (struct module_signature *)
+               (data + prog_len - rounded_ms_len);
+       unsigned int rounded_sig_len;
+
+       *sig_len_ptr = be32_to_cpu(ms->sig_len);
+       rounded_sig_len = round_up(*sig_len_ptr, sizeof(struct sock_filter));
+
+       if (mod_check_sig(ms, prog_len, "bpf"))
+               return false;
+
+       if (prog_len > rounded_ms_len + rounded_sig_len) {
+               prog->len -= rounded_ms_len / sizeof(struct sock_filter);
+               prog->len -= rounded_sig_len / sizeof(struct sock_filter);
+               return true;
+       }
+
+       return false;
+}
+
+static bool bpf_strip_prog_sig(struct bpf_prog *prog, unsigned int *sig_len_ptr)
+{
+       void *data = (void *) prog->insns;
+       const unsigned int marker_len = sizeof(BPF_PROG_SIG_STRING) - 1;
+       const unsigned int rounded_marker_len = round_up(marker_len,
+                                                        sizeof(struct sock_filter));
+       unsigned int prog_len = bpf_prog_size(prog->len) - sizeof(struct bpf_prog);
+
+       if (prog_len > rounded_marker_len &&
+           memcmp(data + prog_len - rounded_marker_len,
+                  BPF_PROG_SIG_STRING, marker_len) == 0) {
+               prog->len -= rounded_marker_len / sizeof(struct sock_filter);
+               return bpf_strip_prog_ms(prog, sig_len_ptr);
+       }
+
+       return false;
+}
+
+int bpf_check_prog_sig(struct bpf_prog *prog)
+{
+       bool stripped;
+       unsigned int sig_len;
+
+       stripped = bpf_strip_prog_sig(prog, &sig_len);
+       if (!sysctl_bpf_signature_enable)
+               return 0;
+       if (!stripped)
+               return -ENODATA;
+       return verify_pkcs7_signature(prog->insns, prog->len * sizeof(struct sock_filter),
+                                     prog->insns + prog->len,
+                                     sig_len, VERIFY_USE_SECONDARY_KEYRING,
+                                     VERIFYING_MODULE_SIGNATURE,
+                                     NULL, NULL);
+}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4caf06fe4152..2ce0afb12248 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -32,6 +32,7 @@
 #include <linux/bpf-netns.h>
 #include <linux/rcupdate_trace.h>
 #include <linux/memcontrol.h>
+#include <linux/bpf_sig.h>
 
 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
                          (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
@@ -2201,6 +2202,10 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
        if (err < 0)
                goto free_prog_sec;
 
+       err = bpf_check_prog_sig(prog);
+       if (err < 0)
+               goto free_prog;
+
        /* run eBPF verifier */
        err = bpf_check(&prog, attr, uattr);
        if (err < 0)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c9fbdd848138..d447d26dd0eb 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -103,6 +103,9 @@
 #ifdef CONFIG_LOCKUP_DETECTOR
 #include <linux/nmi.h>
 #endif
+#ifdef CONFIG_BPF_SIGNATURE
+#include <linux/bpf_sig.h>
+#endif /* CONFIG_BPF_SIGNATURE */
 
 #if defined(CONFIG_SYSCTL)
 
@@ -2621,6 +2624,17 @@ static struct ctl_table kern_table[] = {
        },
 #endif
 #ifdef CONFIG_BPF_SYSCALL
+#ifdef CONFIG_BPF_SIGNATURE
+       {
+               .procname       = "bpf_signature_enable",
+               .data           = &sysctl_bpf_signature_enable,
+               .maxlen         = sizeof(sysctl_bpf_signature_enable),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = SYSCTL_ONE,
+               .extra2         = SYSCTL_ONE,
+       },
+#endif /* CONFIG_BPF_SIGNATURE */
        {
                .procname       = "unprivileged_bpf_disabled",
                .data           = &sysctl_unprivileged_bpf_disabled,
-- 
2.11.0