With KUAP enabled, any kernel code that wants to access userspace
needs to be surrounded by KUAP disable-enable calls. But that is not
happening for the BPF_PROBE_MEM load instruction. Though PPC32 does
not support read protection, PTR_TO_BTF_ID (which uses BPF_PROBE_MEM
mode) could either be a valid kernel pointer or NULL, but it should
never be a pointer to a userspace address. So, execute the
BPF_PROBE_MEM load only if addr > TASK_SIZE_MAX; otherwise set
dst_reg=0 and move on.

This will catch NULL as well as valid or invalid userspace pointers.
Only bad kernel pointers will be handled by the BPF exception table.
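
For a 32-bit load, the guard emitted here behaves roughly like the C
sketch below. This is a minimal sketch for illustration only:
probe_mem_load_w is a made-up helper name, not something added by
this patch; TASK_SIZE_MAX is the kernel constant used in the code.

	/* Illustrative sketch only, not part of the patch. */
	static inline u32 probe_mem_load_w(unsigned long addr)
	{
		/* NULL or a userspace pointer: zero dst_reg, skip the load */
		if (addr <= TASK_SIZE_MAX)
			return 0;
		/*
		 * Kernel address: do the real load. A bad kernel pointer
		 * faults here and is fixed up via the BPF exception table.
		 */
		return *(u32 *)addr;
	}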

[Alexei suggested for x86]
Suggested-by: Alexei Starovoitov <a...@kernel.org>
Signed-off-by: Hari Bathini <hbath...@linux.ibm.com>
---

Changes in v2:
* New patch to handle bad userspace pointers on PPC32. 


 arch/powerpc/net/bpf_jit_comp32.c | 39 +++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index c6262289dcc4..faa8047fcf4a 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -821,6 +821,45 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
                /* dst = *(u64 *)(ul) (src + off) */
                case BPF_LDX | BPF_MEM | BPF_DW:
                case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+                       /*
+                        * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
+                        * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
+                        * load only if addr > TASK_SIZE_MAX, otherwise set dst_reg=0 and move on.
+                        */
+                       if (BPF_MODE(code) == BPF_PROBE_MEM) {
+                               bool extra_insn_needed = false;
+                               unsigned int adjusted_idx;
+
+                               /*
+                                * For BPF_DW case, "li reg_h,0" would be needed when
+                                * !fp->aux->verifier_zext. Adjust conditional branching
+                                * address accordingly.
+                                */
+                               if ((size == BPF_DW) && !fp->aux->verifier_zext)
+                                       extra_insn_needed = true;
+
+                               /*
+                                * Need to jump two instructions instead of one for BPF_DW case
+                                * as there are two load instructions for dst_reg_h & dst_reg
+                                * respectively.
+                                */
+                               adjusted_idx = (size == BPF_DW) ? 1 : 0;
+
+                               EMIT(PPC_RAW_ADDI(b2p[TMP_REG], src_reg, off));
+                               PPC_LI32(_R0, TASK_SIZE_MAX);
+                               EMIT(PPC_RAW_CMPLW(b2p[TMP_REG], _R0));
+                               PPC_BCC(COND_GT, (ctx->idx + 4 + (extra_insn_needed ? 1 : 0)) * 4);
+                               EMIT(PPC_RAW_LI(dst_reg, 0));
+                               /*
+                                * Note that "li reg_h,0" is emitted for BPF_B/H/W case,
+                                * if necessary. So, jump there instead of emitting an
+                                * additional "li reg_h,0" instruction.
+                                */
+                               if (extra_insn_needed)
+                                       EMIT(PPC_RAW_LI(dst_reg_h, 0));
+                               PPC_JMP((ctx->idx + 2 + adjusted_idx) * 4);
+                       }
+
                        switch (size) {
                        case BPF_B:
                                EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
-- 
2.31.1
