From: Abhishek Dubey <[email protected]>

The modified prologue/epilogue generation code now enables the exception
callback to use the stack frame of the program marked as the exception
boundary, where the callee-saved registers are stored.
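For context, the snippet below sketches the kind of BPF program that
exercises this path: a main program marked as the exception boundary with
a custom exception callback, throwing via bpf_throw(). It follows the
conventions of the in-tree BPF selftests (the __exception_cb() tag from
bpf_misc.h and the bpf_throw() declaration from bpf_experimental.h); the
program and callback names are made up for illustration and are not part
of this patch:

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_misc.h"
  #include "bpf_experimental.h"

  /*
   * Custom exception callback: runs on the exception-boundary frame;
   * its return value becomes the return value of the throwing program.
   */
  __noinline int ex_cb(u64 cookie)
  {
          return cookie == 42 ? 1 : 0;
  }

  /* Main program marked as exception boundary, with ex_cb() attached. */
  SEC("tc")
  __exception_cb(ex_cb)
  int throw_prog(struct __sk_buff *skb)
  {
          if (skb->len > 0)
                  bpf_throw(42);  /* unwinds back to this boundary frame */
          return 0;
  }

  char _license[] SEC("license") = "GPL";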
As per the ppc64 ABIv2 documentation[1], r14-r31 are callee-saved
registers. BPF programs on ppc64 already save r26-r31. Saving the
remaining set of callee-saved registers (r14-r25) is handled in the
next patch.

[1] https://ftp.rtems.org/pub/rtems/people/sebh/ABI64BitOpenPOWERv1.1_16July2015_pub.pdf

Signed-off-by: Abhishek Dubey <[email protected]>
---
 arch/powerpc/net/bpf_jit.h        |  2 ++
 arch/powerpc/net/bpf_jit_comp.c   |  7 ++++
 arch/powerpc/net/bpf_jit_comp64.c | 53 +++++++++++++++++++++----------
 3 files changed, 45 insertions(+), 17 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 5d735bc5e6bd..fb548ae5d143 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -179,6 +179,8 @@ struct codegen_context {
 	u64 arena_vm_start;
 	u64 user_vm_start;
 	bool is_subprog;
+	bool exception_boundary;
+	bool exception_cb;
 };
 
 #define bpf_to_ppc(r)	(ctx->b2p[r])
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index e3088cf089d1..26991940d36e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -207,6 +207,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena);
 	cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena);
 	cgctx.is_subprog = bpf_is_subprog(fp);
+	cgctx.exception_boundary = fp->aux->exception_boundary;
+	cgctx.exception_cb = fp->aux->exception_cb;
 
 	/* Scouting faux-generate pass 0 */
 	if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
@@ -436,6 +438,11 @@ void bpf_jit_free(struct bpf_prog *fp)
 	bpf_prog_unlock_free(fp);
 }
 
+bool bpf_jit_supports_exceptions(void)
+{
+	return IS_ENABLED(CONFIG_PPC64);
+}
+
 bool bpf_jit_supports_subprog_tailcalls(void)
 {
 	return IS_ENABLED(CONFIG_PPC64);
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index ec58395f74f7..a6083dd9786c 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -89,7 +89,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 	 *	- the bpf program uses its stack area
 	 * The latter condition is deduced from the usage of BPF_REG_FP
 	 */
-	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
+	return ctx->seen & SEEN_FUNC ||
+	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)) ||
+	       ctx->exception_cb;
 }
 
 /*
@@ -190,23 +192,32 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 		EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
 	}
 
-	/*
-	 * Back up non-volatile regs -- BPF registers 6-10
-	 * If we haven't created our own stack frame, we save these
-	 * in the protected zone below the previous stack frame
-	 */
-	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
-		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
-			EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
+	if (!ctx->exception_cb) {
+		/*
+		 * Back up non-volatile regs -- BPF registers 6-10
+		 * If we haven't created our own stack frame, we save these
+		 * in the protected zone below the previous stack frame
+		 */
+		for (i = BPF_REG_6; i <= BPF_REG_10; i++)
+			if (ctx->exception_boundary || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+				EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1,
+						 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 
-	if (ctx->arena_vm_start)
-		EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
+		if (ctx->exception_boundary || ctx->arena_vm_start)
+			EMIT(PPC_RAW_STD(bpf_to_ppc(ARENA_VM_START), _R1,
 				 bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
 
-	/* Setup frame pointer to point to the bpf stack area */
-	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
-		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
+		/* Setup frame pointer to point to the bpf stack area */
+		if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
+			EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
 				STACK_FRAME_MIN_SIZE + ctx->stack_size));
+	} else {
+		/*
+		 * Exception callback receives Frame Pointer of main
+		 * program as third arg
+		 */
+		EMIT(PPC_RAW_MR(_R1, _R5));
+	}
 
 	if (ctx->arena_vm_start)
 		PPC_LI64(bpf_to_ppc(ARENA_VM_START), ctx->arena_vm_start);
@@ -218,17 +229,25 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
 
 	/* Restore NVRs */
 	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
-		if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+		if (ctx->exception_cb || bpf_is_seen_register(ctx, bpf_to_ppc(i)))
 			EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
 
-	if (ctx->arena_vm_start)
+	if (ctx->exception_cb || ctx->arena_vm_start)
 		EMIT(PPC_RAW_LD(bpf_to_ppc(ARENA_VM_START), _R1,
 				bpf_jit_stack_offsetof(ctx, bpf_to_ppc(ARENA_VM_START))));
 
+	if (ctx->exception_cb) {
+		/*
+		 * LR value from boundary-frame is received as second parameter
+		 * in exception callback.
+		 */
+		EMIT(PPC_RAW_MTLR(_R4));
+	}
+
 	/* Tear down our stack frame */
 	if (bpf_has_stack_frame(ctx)) {
 		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
-		if (ctx->seen & SEEN_FUNC) {
+		if (ctx->seen & SEEN_FUNC || ctx->exception_cb) {
 			EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
 			EMIT(PPC_RAW_MTLR(_R0));
 		}
-- 
2.48.1
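Not part of the patch, but as a usage sketch: assuming the BPF object
shown earlier is built as exceptions_demo.bpf.o (a made-up name), a
minimal libbpf test-run driver along these lines can confirm that the
exception callback's return value is propagated as the program's return
value once the JIT reports exception support:

  /* Minimal sketch, not a kselftest; object/program names are assumptions. */
  #include <stdio.h>
  #include <bpf/libbpf.h>
  #include <bpf/bpf.h>

  int main(void)
  {
          struct bpf_object *obj;
          struct bpf_program *prog;
          char pkt[64] = {};      /* dummy packet for BPF_PROG_TEST_RUN */
          int err;

          obj = bpf_object__open_file("exceptions_demo.bpf.o", NULL);
          if (!obj || bpf_object__load(obj))
                  return 1;

          prog = bpf_object__find_program_by_name(obj, "throw_prog");
          if (!prog)
                  return 1;

          LIBBPF_OPTS(bpf_test_run_opts, opts,
                  .data_in = pkt,
                  .data_size_in = sizeof(pkt),
          );

          err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
          /* Expect retval == 1: bpf_throw(42) lands in ex_cb(), which returns 1. */
          printf("err=%d retval=%u\n", err, opts.retval);

          bpf_object__close(obj);
          return err ? 1 : 0;
  }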

