On 01/18/2017 06:29 PM, William Tu wrote:
> Hi Daniel,
> Thanks for the patch. Yes, it solves my issue. There is now another
> related issue due to BPF_AND:
>
> LBB0_12:
> ; if (ebpf_packetEnd < ebpf_packetStart + BYTES(ebpf_packetOffsetInBits + 64)) {
>
> from 52 to 80: R0=imm1,min_value=1,max_value=1 R1=pkt(id=0,off=0,r=34)
> R2=pkt_end R3=inv R4=imm272 R5=inv56,min_value=17,max_value=17
> R6=pkt(id=0,off=26,r=34) R10=fp
> 80: (07) r4 += 71
> 81: (18) r5 = 0xfffffff8
> 83: (5f) r4 &= r5
> 84: (77) r4 >>= 3
> 85: (0f) r1 += r4
> cannot add integer value with 3 upper zero bits to ptr_to_packet
>
> I have no idea why this time the compiler wants to do an "and" and then
> a "right shift", instead of directly shifting right by 3 bits. R5's type
> is UNKNOWN_VALUE. So in addition to your patch, I add

But what the compiler is doing here should still be correct in the end,
I assume? We could also add BPF_AND, but the fact that r5's type is
UNKNOWN_VALUE doesn't seem right if I'm not mistaken. That r5 = 0xfffffff8
should be a dw imm load; print_bpf_insn() seems to only print the first
part. So the next r4 &= r5 should be a purely imm operation as well and
thus trackable. We only need to teach the verifier to keep the dw imm
when it is not used with analyzer_ops (nfp related).

Could you try the patch below (only compile-tested here) to see whether
the verifier can now analyze that part?

Thanks,
Daniel

 kernel/bpf/verifier.c                       | 35 ++++++++++++++++++++++-------
 tools/testing/selftests/bpf/test_verifier.c | 24 ++++++++++++++++++++
 2 files changed, 51 insertions(+), 8 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d60e12c..777ef0a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1567,19 +1567,44 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
        struct bpf_reg_state *src_reg = &regs[insn->src_reg];
        u8 opcode = BPF_OP(insn->code);

-       /* dst_reg->type == CONST_IMM here, simulate execution of 'add'/'or'
-        * insn. Don't care about overflow or negative values, just add them
+       /* dst_reg->type == CONST_IMM here, simulate execution of 'add'/'or'/...
+        * insn. Don't care about overflow or negative values, just add them.
         */
        if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
                dst_reg->imm += insn->imm;
        else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
                 src_reg->type == CONST_IMM)
                dst_reg->imm += src_reg->imm;
+       else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_K)
+               dst_reg->imm -= insn->imm;
+       else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_X &&
+                src_reg->type == CONST_IMM)
+               dst_reg->imm -= src_reg->imm;
+       else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_K)
+               dst_reg->imm *= insn->imm;
+       else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_X &&
+                src_reg->type == CONST_IMM)
+               dst_reg->imm *= src_reg->imm;
        else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K)
                dst_reg->imm |= insn->imm;
        else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
                 src_reg->type == CONST_IMM)
                dst_reg->imm |= src_reg->imm;
+       else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_K)
+               dst_reg->imm &= insn->imm;
+       else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_X &&
+                src_reg->type == CONST_IMM)
+               dst_reg->imm &= src_reg->imm;
+       else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_K)
+               dst_reg->imm >>= insn->imm;
+       else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_X &&
+                src_reg->type == CONST_IMM)
+               dst_reg->imm >>= src_reg->imm;
+       else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_K)
+               dst_reg->imm <<= insn->imm;
+       else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_X &&
+                src_reg->type == CONST_IMM)
+               dst_reg->imm <<= src_reg->imm;
        else
                mark_reg_unknown_value(regs, insn->dst_reg);
        return 0;
@@ -2225,14 +2250,8 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
                return err;

        if (insn->src_reg == 0) {
-               /* generic move 64-bit immediate into a register,
-                * only analyzer needs to collect the ld_imm value.
-                */
                u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;

-               if (!env->analyzer_ops)
-                       return 0;
-
                regs[insn->dst_reg].type = CONST_IMM;
                regs[insn->dst_reg].imm = imm;
                return 0;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 1aa7324..d60bd07 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2326,6 +2326,30 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
+               "direct packet access: test11 (shift, good access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 144),
+                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
                "helper access to packet: test1, valid packet_ptr range",
                .insns = {
                        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
--
2.5.5

