Insert KASAN shadow memory checks before memory load and store
operations in JIT-compiled BPF programs. This helps detect memory safety
bugs such as use-after-free and out-of-bounds accesses at runtime.

The main instructions being targeted are BPF_LDX and BPF_STX, but not
all of them are being instrumented:
- if the load/store instruction is in fact accessing the program stack,
  emit_kasan_check silently skips the instrumentation, as we already
  have page guards to monitor stack accesses. Stack accesses _could_ be
  monitored more finely by adding kasan checks, but it would require the
  JIT compiler to insert red zones around every variable on the stack,
  and we likely do not have enough info in the JIT compiler to do so.
- if the load/store instruction is a BPF_PROBE_MEM or a BPF_PROBE_ATOMIC
  instruction, we do not instrument it, as the passed address can fault
  (hence the custom fault management with BPF_PROBE_XXX instructions),
  and so the corresponding kasan check could fault as well.

Signed-off-by: Alexis Lothoré (eBPF Foundation) <[email protected]>
---
This RFC also ignores atomic operations for now, because I am not yet
entirely clear about how they are JITed, and so about how much kasan
instrumentation is legitimate there.
---
 arch/x86/net/bpf_jit_comp.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index b90103bd0080..111fe1d55121 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1811,6 +1811,7 @@ static int do_jit(struct bpf_verifier_env *env, struct 
bpf_prog *bpf_prog, int *
                const s32 imm32 = insn->imm;
                u32 dst_reg = insn->dst_reg;
                u32 src_reg = insn->src_reg;
+               bool accesses_stack;
                u8 b2 = 0, b3 = 0;
                u8 *start_of_ldx;
                s64 jmp_offset;
@@ -1831,6 +1832,7 @@ static int do_jit(struct bpf_verifier_env *env, struct 
bpf_prog *bpf_prog, int *
                        EMIT_ENDBR();
 
                ip = image + addrs[i - 1] + (prog - temp);
+               accesses_stack = bpf_insn_accesses_stack(env, bpf_prog, i - 1);
 
                switch (insn->code) {
                        /* ALU */
@@ -2242,6 +2244,11 @@ st:                      if (is_imm8(insn->off))
                case BPF_STX | BPF_MEM | BPF_H:
                case BPF_STX | BPF_MEM | BPF_W:
                case BPF_STX | BPF_MEM | BPF_DW:
+                       err = emit_kasan_check(&prog, dst_reg, insn,
+                                              image + addrs[i - 1],
+                                              accesses_stack);
+                       if (err)
+                               return err;
                        emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, 
insn->off);
                        break;
 
@@ -2390,6 +2397,12 @@ st:                      if (is_imm8(insn->off))
                                /* populate jmp_offset for JAE above to jump to 
start_of_ldx */
                                start_of_ldx = prog;
                                end_of_jmp[-1] = start_of_ldx - end_of_jmp;
+                       } else {
+                               err = emit_kasan_check(&prog, src_reg, insn,
+                                                      image + addrs[i - 1],
+                                                      accesses_stack);
+                               if (err)
+                                       return err;
                        }
                        if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX ||
                            BPF_MODE(insn->code) == BPF_MEMSX)

-- 
2.53.0


Reply via email to