We use an AUIPC+JALR pair to jump into the ftrace trampoline. Since
instruction fetch can break down to 4-byte granularity, it is impossible
to update the two instructions without a race. To mitigate this, we
initialize each patchable entry to AUIPC + NOP4. Run-time code patching
then only has to swap the NOP4 with a JALR to enable/disable ftrace for
a function. This limits the reach of each ftrace entry to a +-2KB
displacement from ftrace_caller.
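For illustration only (not part of the patch), the two states of a
patchable entry can be sketched as below, reusing the AUIPC_T0/JALR_T0/
NOP4 encodings from arch/riscv/include/asm/ftrace.h; the hi20/lo12
immediates are omitted for brevity:

    #include <stdint.h>

    #define AUIPC_T0 0x00000297u  /* auipc t0, <hi20>     */
    #define JALR_T0  0x000282e7u  /* jalr  t0, <lo12>(t0) */
    #define NOP4     0x00000013u  /* addi  x0, x0, 0      */

    /* Disabled entry, written once when the site is initialized: */
    static const uint32_t entry_nop[2]  = { AUIPC_T0, NOP4 };

    /* Enabled entry: only the second 4-byte word is ever repatched,  */
    /* so a concurrent instruction fetch never sees a torn 8-byte pair. */
    static const uint32_t entry_call[2] = { AUIPC_T0, JALR_T0 };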
Starting from the trampoline, we add a level of indirection for it to
reach the ftrace call target. It now loads the target address from a
memory location and then performs the jump. This enables the kernel to
update the target atomically. The ordering of reading/updating the
target address is guarded by the generic ftrace code, which issues
smp_rmb() IPIs.

Signed-off-by: Andy Chiu <andy.c...@sifive.com>
---
 arch/riscv/include/asm/ftrace.h |  4 +++
 arch/riscv/kernel/ftrace.c      | 80 ++++++++++++++++++++++++++---------------
 arch/riscv/kernel/mcount-dyn.S  |  9 +++--
 3 files changed, 62 insertions(+), 31 deletions(-)

diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 5f81c53dbfd9..7199383f8c02 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -81,6 +81,7 @@ struct dyn_arch_ftrace {
 #define JALR_T0			(0x000282e7)
 #define AUIPC_T0		(0x00000297)
 #define NOP4			(0x00000013)
+#define JALR_RANGE		(JALR_SIGN_MASK - 1)
 
 #define to_jalr_t0(offset)						\
 	(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
@@ -118,6 +119,9 @@ do {									\
  * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
  */
 #define MCOUNT_INSN_SIZE 8
+#define MCOUNT_AUIPC_SIZE	4
+#define MCOUNT_JALR_SIZE	4
+#define MCOUNT_NOP4_SIZE	4
 
 #ifndef __ASSEMBLY__
 struct dyn_ftrace;
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 87cbd86576b2..f3b09f2d3ecc 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -64,42 +64,64 @@ static int ftrace_check_current_call(unsigned long hook_pos,
 	return 0;
 }
 
-static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
-				bool enable, bool ra)
+static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target, bool validate)
 {
 	unsigned int call[2];
-	unsigned int nops[2] = {NOP4, NOP4};
+	unsigned int replaced[2];
+
+	make_call_t0(hook_pos, target, call);
 
-	if (ra)
-		make_call_ra(hook_pos, target, call);
-	else
-		make_call_t0(hook_pos, target, call);
+	if (validate) {
+		/*
+		 * Read the text we want to modify;
+		 * return must be -EFAULT on read error
+		 */
+		if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
+					     MCOUNT_INSN_SIZE))
+			return -EFAULT;
+
+		if (replaced[0] != call[0]) {
+			pr_err("%p: expected (%08x) but got (%08x)\n",
+			       (void *)hook_pos, call[0], replaced[0]);
+			return -EINVAL;
+		}
+	}
 
-	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
-	if (patch_insn_write((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
+	/* Replace the jalr at once. Return -EPERM on write error. */
+	if (patch_insn_write((void *)(hook_pos + MCOUNT_AUIPC_SIZE), call + 1, MCOUNT_JALR_SIZE))
 		return -EPERM;
 
 	return 0;
 }
 
-int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+static int __ftrace_modify_call_site(ftrace_func_t *hook_pos, ftrace_func_t target, bool enable)
 {
-	unsigned int call[2];
+	ftrace_func_t call = target;
+	ftrace_func_t nops = &ftrace_stub;
 
-	make_call_t0(rec->ip, addr, call);
-
-	if (patch_insn_write((void *)rec->ip, call, MCOUNT_INSN_SIZE))
-		return -EPERM;
+	WRITE_ONCE(*hook_pos, enable ? call : nops);
 
 	return 0;
 }
 
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long distance, orig_addr;
+
+	orig_addr = (unsigned long)&ftrace_caller;
+	distance = addr > orig_addr ? addr - orig_addr : orig_addr - addr;
+	if (distance > JALR_RANGE)
+		return -EINVAL;
+
+	return __ftrace_modify_call(rec->ip, addr, false);
+}
+
 int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		    unsigned long addr)
 {
-	unsigned int nops[2] = {NOP4, NOP4};
+	unsigned int nops[1] = {NOP4};
 
-	if (patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE))
+	if (patch_insn_write((void *)(rec->ip + MCOUNT_AUIPC_SIZE), nops, MCOUNT_NOP4_SIZE))
 		return -EPERM;
 
 	return 0;
@@ -114,10 +136,14 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
  */
 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
 {
+	unsigned int nops[2];
 	int out;
 
+	make_call_t0(rec->ip, &ftrace_caller, nops);
+	nops[1] = NOP4;
+
 	mutex_lock(&text_mutex);
-	out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+	out = patch_insn_write((void *)rec->ip, nops, MCOUNT_INSN_SIZE);
 	mutex_unlock(&text_mutex);
 
 	if (!mod)
@@ -126,12 +152,10 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
 	return out;
 }
 
+ftrace_func_t ftrace_call_dest = ftrace_stub;
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
-	int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
-				       (unsigned long)func, true, true);
-
-	return ret;
+	return __ftrace_modify_call_site(&ftrace_call_dest, func, true);
 }
 
 struct ftrace_modify_param {
@@ -185,7 +209,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 	if (ret)
 		return ret;
 
-	return __ftrace_modify_call(caller, addr, true, false);
+	return __ftrace_modify_call(caller, addr, true);
 }
 #endif
 
@@ -220,17 +244,17 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 		prepare_ftrace_return(&fregs->ra, ip, fregs->s0);
 }
 #else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
-extern void ftrace_graph_call(void);
+ftrace_func_t ftrace_graph_call_dest = ftrace_stub;
 int ftrace_enable_ftrace_graph_caller(void)
 {
-	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
-				    (unsigned long)&prepare_ftrace_return, true, true);
+	return __ftrace_modify_call_site(&ftrace_graph_call_dest,
+					 &prepare_ftrace_return, true);
 }
 
 int ftrace_disable_ftrace_graph_caller(void)
 {
-	return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
-				    (unsigned long)&prepare_ftrace_return, false, true);
+	return __ftrace_modify_call_site(&ftrace_graph_call_dest,
+					 &prepare_ftrace_return, false);
 }
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
 #endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index e988bd26b28b..bc06e8ab81cf 100644
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -162,7 +162,8 @@ SYM_FUNC_START(ftrace_caller)
 	mv	a3, sp
 
 SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
-	call	ftrace_stub
+	REG_L	ra, ftrace_call_dest
+	jalr	0(ra)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	addi	a0, sp, ABI_RA
@@ -172,7 +173,8 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 	mv	a2, s0
 #endif
 SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
-	call	ftrace_stub
+	REG_L	ra, ftrace_graph_call_dest
+	jalr	0(ra)
 #endif
 	RESTORE_ABI
 	jr	t0
@@ -185,7 +187,8 @@ SYM_FUNC_START(ftrace_caller)
 	PREPARE_ARGS
 
 SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
-	call	ftrace_stub
+	REG_L	ra, ftrace_call_dest
+	jalr	0(ra)
 
 	RESTORE_ABI_REGS
 	bnez	t1, .Ldirect
-- 
2.43.0
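
P.S. As a reviewer aid (not part of the patch), here is a minimal
user-space sketch of the indirection described above. The names mirror
ftrace_call_dest and ftrace_stub from the patch; my_tracer,
modify_call_site, trampoline and main are hypothetical, and a volatile
store stands in for the kernel's WRITE_ONCE():

    #include <stdio.h>

    typedef void (*ftrace_func_t)(void);

    static void ftrace_stub(void) { }                 /* default "do nothing" target */
    static void my_tracer(void)   { puts("traced"); } /* hypothetical tracer */

    /* The slot the trampoline reads; plays the role of ftrace_call_dest. */
    static ftrace_func_t ftrace_call_dest = ftrace_stub;

    /* Writer side: one pointer-sized store flips the destination. */
    static void modify_call_site(ftrace_func_t target, int enable)
    {
            /* WRITE_ONCE() in the kernel; a volatile store stands in here. */
            *(volatile ftrace_func_t *)&ftrace_call_dest = enable ? target : ftrace_stub;
    }

    /* Reader side: what "REG_L ra, ftrace_call_dest; jalr 0(ra)" does, in C terms. */
    static void trampoline(void)
    {
            ftrace_func_t dest = *(volatile ftrace_func_t *)&ftrace_call_dest;

            dest();
    }

    int main(void)
    {
            trampoline();                    /* hits ftrace_stub */
            modify_call_site(my_tracer, 1);  /* enable tracing   */
            trampoline();                    /* prints "traced"  */
            return 0;
    }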