This emulates only the most basic seccomp filters, to determine whether
they will always produce the same result for a given arch/nr combination.

Nearly all seccomp filters are built from the following ops:

BPF_LD  | BPF_W    | BPF_ABS
BPF_JMP | BPF_JEQ  | BPF_K
BPF_JMP | BPF_JGE  | BPF_K
BPF_JMP | BPF_JGT  | BPF_K
BPF_JMP | BPF_JSET | BPF_K
BPF_JMP | BPF_JA
BPF_RET | BPF_K

These are now emulated, bailing out on any access beyond
seccomp_data::arch or on any unrecognized instruction.

Not yet implemented are:

BPF_ALU | BPF_AND (generated by libseccomp and Chrome)

Suggested-by: Jann Horn <ja...@google.com>
Link: https://lore.kernel.org/lkml/CAG48ez1p=dR_2ikKq=xVxkoGg0fYpTBpkhJSv1w-6BG=76p...@mail.gmail.com/
Signed-off-by: Kees Cook <keesc...@chromium.org>
---
 kernel/seccomp.c  | 82 ++++++++++++++++++++++++++++++++++++++++++++---
 net/core/filter.c |  3 +-
 2 files changed, 79 insertions(+), 6 deletions(-)

diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 111a238bc532..9921f6f39d12 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -610,7 +610,12 @@ static struct seccomp_filter 
*seccomp_prepare_filter(struct sock_fprog *fprog)
 {
        struct seccomp_filter *sfilter;
        int ret;
-       const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);
+       const bool save_orig =
+#if defined(CONFIG_CHECKPOINT_RESTORE) || defined(SECCOMP_ARCH)
+               true;
+#else
+               false;
+#endif
 
        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
                return ERR_PTR(-EINVAL);
@@ -690,11 +695,78 @@ static inline bool sd_touched(pte_t *ptep)
  * This approach could also be used to test for access to sd->arch too,
  * if we wanted to warn about compat-unsafe filters.
  */
-static inline bool seccomp_filter_action_is_constant(struct bpf_prog *prog,
-                                                    struct seccomp_data *sd,
-                                                    u32 *action)
+static bool seccomp_filter_action_is_constant(struct bpf_prog *prog,
+                                             struct seccomp_data *sd,
+                                             u32 *action)
 {
-       /* No evaluation implementation yet. */
+       struct sock_fprog_kern *fprog = prog->orig_prog;
+       unsigned int insns;
+       unsigned int reg_value = 0;
+       unsigned int pc;
+       bool op_res;
+
+       if (WARN_ON_ONCE(!fprog))
+               return false;
+
+       insns = fprog->len; /* insn count; bpf_classic_proglen() is bytes! */
+       for (pc = 0; pc < insns; pc++) {
+               struct sock_filter *insn = &fprog->filter[pc];
+               u16 code = insn->code;
+               u32 k = insn->k;
+
+               switch (code) {
+               case BPF_LD | BPF_W | BPF_ABS:
+                       switch (k) {
+                       case offsetof(struct seccomp_data, nr):
+                               reg_value = sd->nr;
+                               break;
+                       case offsetof(struct seccomp_data, arch):
+                               reg_value = sd->arch;
+                               break;
+                       default:
+                               /* can't optimize (non-constant value load) */
+                               return false;
+                       }
+                       break;
+               case BPF_RET | BPF_K:
+                       *action = k;
+                       /* success: reached return with constant values only */
+                       return true;
+               case BPF_JMP | BPF_JA:
+                       pc += k; /* target pre-checked by bpf_check_classic() */
+                       break;
+               case BPF_JMP | BPF_JEQ | BPF_K:
+               case BPF_JMP | BPF_JGE | BPF_K:
+               case BPF_JMP | BPF_JGT | BPF_K:
+               case BPF_JMP | BPF_JSET | BPF_K:
+                       switch (BPF_OP(code)) {
+                       case BPF_JEQ:
+                               op_res = reg_value == k;
+                               break;
+                       case BPF_JGE:
+                               op_res = reg_value >= k;
+                               break;
+                       case BPF_JGT:
+                               op_res = reg_value > k;
+                               break;
+                       case BPF_JSET:
+                               op_res = !!(reg_value & k);
+                               break;
+                       default:
+                               /* can't optimize (unknown jump) */
+                               return false;
+                       }
+
+                       pc += op_res ? insn->jt : insn->jf;
+                       break;
+               default:
+                       /* can't optimize (unknown insn) */
+                       return false;
+               }
+       }
+
+       /* ran off the end of the filter?! */
+       WARN_ON(1);
        return false;
 }
 
diff --git a/net/core/filter.c b/net/core/filter.c
index b2df52086445..cb1bdb0bfe87 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1145,7 +1145,7 @@ static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
        return 0;
 }
 
-static void bpf_release_orig_filter(struct bpf_prog *fp)
+void bpf_release_orig_filter(struct bpf_prog *fp) /* non-static for seccomp */
 {
        struct sock_fprog_kern *fprog = fp->orig_prog;
 
@@ -1154,6 +1154,7 @@ static void bpf_release_orig_filter(struct bpf_prog *fp)
                kfree(fprog);
        }
 }
+EXPORT_SYMBOL_GPL(bpf_release_orig_filter); /* NOTE(review): an extern prototype must exist in a shared header (include/linux/filter.h?) -- TODO confirm */
 
 static void __bpf_prog_release(struct bpf_prog *prog)
 {
-- 
2.25.1

Reply via email to