This patch adds two optimizations for BPF_ALU BPF_END BPF_FROM_LE in
the RV64 BPF JIT.

First, it enables the verifier's zero-extension optimization for this
instruction: imm == 32 only requires zero-extending the low 32 bits,
so the JIT skips emitting that when the verifier has already inserted
a zero-extension (aux->verifier_zext). Second, it generates no code
for imm == 64, which is a no-op on a little-endian target such as
RV64.

Co-developed-by: Xi Wang <xi.w...@gmail.com>
Signed-off-by: Xi Wang <xi.w...@gmail.com>
Signed-off-by: Luke Nelson <luke.r.n...@gmail.com>
---
 arch/riscv/net/bpf_jit_comp64.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
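
For reference (not part of the patch): below is a small user-space
model of the BPF_FROM_LE semantics on a little-endian target such as
RV64, showing why imm == 32 reduces to a plain zero-extension and
imm == 64 needs no code at all. This is only a sketch; the function
name bswap_from_le_model() is made up here and does not exist in the
kernel.

#include <stdint.h>
#include <stdio.h>

/* Model of BPF_ALU | BPF_END | BPF_FROM_LE on a little-endian CPU. */
static uint64_t bswap_from_le_model(uint64_t dst, int imm)
{
	/*
	 * cpu_to_le{16,32,64}() is the identity on little-endian, so
	 * the instruction only truncates dst to the low imm bits and
	 * zero-extends the result back to 64 bits.
	 */
	switch (imm) {
	case 16:
		return (uint16_t)dst;	/* JIT: slli/srli by 48 */
	case 32:
		return (uint32_t)dst;	/* JIT: zero-extension, skipped
					 * when the verifier already
					 * inserted one */
	case 64:
		return dst;		/* no instructions needed */
	}
	return dst;
}

int main(void)
{
	uint64_t v = 0x1122334455667788ULL;

	printf("16: %#llx\n", (unsigned long long)bswap_from_le_model(v, 16));
	printf("32: %#llx\n", (unsigned long long)bswap_from_le_model(v, 32));
	printf("64: %#llx\n", (unsigned long long)bswap_from_le_model(v, 64));
	return 0;
}

With v = 0x1122334455667788, this prints 0x7788, 0x55667788 and the
unchanged value, matching the three cases handled by the new switch
below.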

diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index e2636902a74e..c3ce9a911b66 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -542,13 +542,21 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 
        /* dst = BSWAP##imm(dst) */
        case BPF_ALU | BPF_END | BPF_FROM_LE:
-       {
-               int shift = 64 - imm;
-
-               emit(rv_slli(rd, rd, shift), ctx);
-               emit(rv_srli(rd, rd, shift), ctx);
+               switch (imm) {
+               case 16:
+                       emit(rv_slli(rd, rd, 48), ctx);
+                       emit(rv_srli(rd, rd, 48), ctx);
+                       break;
+               case 32:
+                       if (!aux->verifier_zext)
+                               emit_zext_32(rd, ctx);
+                       break;
+               case 64:
+                       /* Do nothing */
+                       break;
+               }
                break;
-       }
+
        case BPF_ALU | BPF_END | BPF_FROM_BE:
                emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx);
 
-- 
2.17.1
