On 11/27/20 9:57 AM, Brendan Jackman wrote:
This adds instructions for

atomic[64]_[fetch_]and
atomic[64]_[fetch_]or
atomic[64]_[fetch_]xor

All these operations are isomorphic enough to be implemented with the same
verifier, interpreter, and x86 JIT code, hence they share a single commit.

The main interesting thing here is that x86 doesn't directly support
the fetch_ versions of these operations, so we need to generate a CMPXCHG
loop in the JIT. This requires the use of two temporary registers;
IIUC it's safe to use BPF_REG_AX and x86's AUX_REG for this purpose.
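
For reference, the JITed sequence computes the same thing as the C
sketch below. This is illustrative only: __atomic_compare_exchange_n
stands in for the emitted x86 CMPXCHG (which likewise returns the
value it found, setting ZF on success), and fetch_and_sketch() is a
made-up name, not something in the patch:

	/* u64 as in kernel <linux/types.h> */
	static u64 fetch_and_sketch(u64 *p, u64 v)
	{
		u64 old = *p;	/* load the old value (emit_ldx) */

		/*
		 * On failure, 'old' is updated to the value actually
		 * found, so we retry with that; on success we won the
		 * race and 'old' is the pre-modification value.
		 */
		while (!__atomic_compare_exchange_n(p, &old, old & v, false,
						    __ATOMIC_SEQ_CST,
						    __ATOMIC_SEQ_CST))
			;
		return old;
	}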

Similar to the previous xsub (atomic[64]_sub) case, should we implement
xand, xor and xxor in llvm?
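
If so, presumably clang would lower the existing
__sync_fetch_and_{and,or,xor} builtins to the new instructions, much
like __sync_fetch_and_add maps to XADD today. An illustrative fragment
of BPF program C (not from this patch):

	__u64 flags = 0xff;

	__u64 old_and = __sync_fetch_and_and(&flags, 0x0f); /* atomic64_fetch_and */
	__u64 old_or  = __sync_fetch_and_or(&flags, 0x10);  /* atomic64_fetch_or  */
	__u64 old_xor = __sync_fetch_and_xor(&flags, 0x01); /* atomic64_fetch_xor */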


Signed-off-by: Brendan Jackman <jackm...@google.com>
---
  arch/x86/net/bpf_jit_comp.c  | 49 ++++++++++++++++++++++++++++-
  include/linux/filter.h       | 60 ++++++++++++++++++++++++++++++++++++
  kernel/bpf/core.c            |  5 ++-
  kernel/bpf/disasm.c          |  7 +++--
  kernel/bpf/verifier.c        |  6 ++++
  tools/include/linux/filter.h | 60 ++++++++++++++++++++++++++++++++++++
  6 files changed, 183 insertions(+), 4 deletions(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index a8a9fab13fcf..46b977ee21c4 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -823,8 +823,11 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,

        /* emit opcode */
        switch (atomic_op) {
-       case BPF_SUB:
        case BPF_ADD:
+       case BPF_SUB:
+       case BPF_AND:
+       case BPF_OR:
+       case BPF_XOR:
                /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
                EMIT1(simple_alu_opcodes[atomic_op]);
                break;
@@ -1307,6 +1310,50 @@ st:                      if (is_imm8(insn->off))
                case BPF_STX | BPF_ATOMIC | BPF_W:
                case BPF_STX | BPF_ATOMIC | BPF_DW:
+                       if (insn->imm == (BPF_AND | BPF_FETCH) ||
+                           insn->imm == (BPF_OR | BPF_FETCH) ||
+                           insn->imm == (BPF_XOR | BPF_FETCH)) {
+                               u8 *branch_target;
+                               bool is64 = BPF_SIZE(insn->code) == BPF_DW;
+
+                               /*
+                                * Can't be implemented with a single x86 insn.
+                                * Need to do a CMPXCHG loop.
+                                */
+
+                               /* Will need RAX as a CMPXCHG operand so save R0 */
+                               emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
+                               branch_target = prog;
+                               /* Load old value */
+                               emit_ldx(&prog, BPF_SIZE(insn->code),
+                                        BPF_REG_0, dst_reg, insn->off);
+                               /*
+                                * Perform the (commutative) operation locally,
+                                * put the result in the AUX_REG.
+                                */
+                               emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
+                               maybe_emit_rex(&prog, AUX_REG, src_reg, is64);
+                               EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
+                                     add_2reg(0xC0, AUX_REG, src_reg));
+                               /* Attempt to swap in new value */
+                               err = emit_atomic(&prog, BPF_CMPXCHG,
+                                                 dst_reg, AUX_REG, insn->off,
+                                                 BPF_SIZE(insn->code));
+                               if (WARN_ON(err))
+                                       return err;
+                               /*
+                                * ZF tells us whether we won the race. If it's
+                                * cleared we need to try again.
+                                */
+                               EMIT2(X86_JNE, -(prog - branch_target) - 2);
+                               /* Return the pre-modification value */
+                               emit_mov_reg(&prog, is64, src_reg, BPF_REG_0);
+                               /* Restore R0 after clobbering RAX */
+                               emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
+                               break;
+                       }
+
                        if (insn->imm == (BPF_SUB | BPF_FETCH)) {
                                /*
                                 * x86 doesn't have an XSUB insn, so we negate
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a20a3a536bf5..cb5d865cce3c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -300,6 +300,66 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
                .off   = OFF,                                   \
                .imm   = BPF_SUB | BPF_FETCH })
+/* Atomic memory and, *(uint *)(dst_reg + off16) &= src_reg */
+
+#define BPF_ATOMIC_AND(SIZE, DST, SRC, OFF)                    \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = BPF_AND })
+
+/* Atomic memory and with fetch, src_reg = atomic_fetch_and(*(dst_reg + off), src_reg); */
+
+#define BPF_ATOMIC_FETCH_AND(SIZE, DST, SRC, OFF)              \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = BPF_AND | BPF_FETCH })
+
[...]
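
For context, the quoted macros could be used from a hand-assembled
test program along these lines. This is a sketch only; the register
choices and surrounding instructions are illustrative, not from the
patch:

	struct bpf_insn prog[] = {
		/* assumes *(u64 *)(r10 - 8) was initialized earlier */
		BPF_MOV64_IMM(BPF_REG_1, 0x0f),
		/* r1 = atomic_fetch_and((u64 *)(r10 - 8), r1) */
		BPF_ATOMIC_FETCH_AND(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
		BPF_EXIT_INSN(),
	};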
