On 1/19/19 5:39 AM, Torsten Duwe wrote:
> Once gcc8 adds 2 NOPs at the beginning of each function, replace the
> first NOP thus generated with a quick LR saver (move it to scratch reg
> x9), so the 2nd replacement insn, the call to ftrace, does not clobber
> the value. Ftrace will then generate the standard stack frames.
> 
> Note that patchable-function-entry in GCC disables IPA-RA, which means
> ABI register calling conventions are obeyed *and* scratch registers
> such as x9 are available.
> 
> Introduce and handle an ftrace_regs_trampoline for module PLTs, right
> after ftrace_trampoline, and double the size of this special section.
> 
> Signed-off-by: Torsten Duwe <[email protected]>
> 
> ---
> 
> Mark, if you see your ftrace entry macro code represented correctly
> here, please add your Signed-off-by, as I initially copied it from your mail.
> 
> ---
>  arch/arm64/include/asm/ftrace.h  |   17 ++++-
>  arch/arm64/include/asm/module.h  |    3 
>  arch/arm64/kernel/entry-ftrace.S |  125 +++++++++++++++++++++++++++++++++++++--
>  arch/arm64/kernel/ftrace.c       |  114 ++++++++++++++++++++++++++---------
>  arch/arm64/kernel/module-plts.c  |    3 
>  arch/arm64/kernel/module.c       |    2 
>  6 files changed, 227 insertions(+), 37 deletions(-)
> --- a/arch/arm64/include/asm/ftrace.h
> +++ b/arch/arm64/include/asm/ftrace.h
> @@ -14,9 +14,24 @@
>  #include <asm/insn.h>
>  
>  #define HAVE_FUNCTION_GRAPH_FP_TEST
> -#define MCOUNT_ADDR          ((unsigned long)_mcount)
>  #define MCOUNT_INSN_SIZE     AARCH64_INSN_SIZE
>  
> +/*
> + * DYNAMIC_FTRACE_WITH_REGS is implemented by adding 2 NOPs at the beginning
> + * of each function, with the second NOP actually calling ftrace. In contrast
> + * to a classic _mcount call, the call instruction to be modified is thus
> + * the second one, not the only one.
> + */
> +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
> +#define ARCH_SUPPORTS_FTRACE_OPS 1
> +#define REC_IP_BRANCH_OFFSET AARCH64_INSN_SIZE
> +/* All we need is some magic value. Simply use "_mCount:" */
> +#define MCOUNT_ADDR          (0x5f6d436f756e743a)
> +#else
> +#define REC_IP_BRANCH_OFFSET 0
> +#define MCOUNT_ADDR          ((unsigned long)_mcount)
> +#endif
> +
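
For anyone following along, the resulting function entry should look
roughly like this (my sketch, not part of the patch):

	/*
	 * func:
	 *	nop		// patched to: mov x9, x30   (LR saver)
	 *	nop		// patched to: bl  ftrace_caller
	 *	...
	 *
	 * rec->ip records the address of the first NOP, so the instruction
	 * to patch for the call sits at rec->ip + REC_IP_BRANCH_OFFSET,
	 * i.e. one AARCH64_INSN_SIZE further.
	 */
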
>  #ifndef __ASSEMBLY__
>  #include <linux/compat.h>
>  
> --- a/arch/arm64/kernel/entry-ftrace.S
> +++ b/arch/arm64/kernel/entry-ftrace.S
> @@ -10,6 +10,7 @@
>   */
>  
>  #include <linux/linkage.h>
> +#include <asm/asm-offsets.h>
>  #include <asm/assembler.h>
>  #include <asm/ftrace.h>
>  #include <asm/insn.h>
> @@ -124,6 +125,7 @@ EXPORT_SYMBOL(_mcount)
>  NOKPROBE(_mcount)
>  
>  #else /* CONFIG_DYNAMIC_FTRACE */
> +#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
>  /*
>   * _mcount() is used to build the kernel with -pg option, but all the branch
>   * instructions to _mcount() are replaced to NOP initially at kernel start up,
> @@ -163,11 +165,6 @@ GLOBAL(ftrace_graph_call)                // ftrace_gra
>  
>       mcount_exit
>  ENDPROC(ftrace_caller)
> -#endif /* CONFIG_DYNAMIC_FTRACE */
> -
> -ENTRY(ftrace_stub)
> -     ret
> -ENDPROC(ftrace_stub)
>  
>  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
>  /*
> @@ -187,7 +184,125 @@ ENTRY(ftrace_graph_caller)
>  
>       mcount_exit
>  ENDPROC(ftrace_graph_caller)
> +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
> +
> +#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
> +
> +     .macro  ftrace_regs_entry, allregs=0
> +     /* make room for pt_regs, plus a callee frame */
> +     sub     sp, sp, #(S_FRAME_SIZE + 16)
> +
> +     /* save function arguments */
> +     stp     x0, x1, [sp, #S_X0]
> +     stp     x2, x3, [sp, #S_X2]
> +     stp     x4, x5, [sp, #S_X4]
> +     stp     x6, x7, [sp, #S_X6]
> +     stp     x8, x9, [sp, #S_X8]
>  
> +     .if \allregs == 1
> +     stp     x10, x11, [sp, #S_X10]
> +     stp     x12, x13, [sp, #S_X12]
> +     stp     x14, x15, [sp, #S_X14]
> +     stp     x16, x17, [sp, #S_X16]
> +     stp     x18, x19, [sp, #S_X18]
> +     stp     x20, x21, [sp, #S_X20]
> +     stp     x22, x23, [sp, #S_X22]
> +     stp     x24, x25, [sp, #S_X24]
> +     stp     x26, x27, [sp, #S_X26]
> +     .endif
> +
> +     /* Save fp and x28, which is used in this function. */
> +     stp     x28, x29, [sp, #S_X28]
> +
> +     /* The stack pointer as it was on ftrace_caller entry... */
> +     add     x28, sp, #(S_FRAME_SIZE + 16)
> +     /* ...and the Link Register at callee entry */
> +     stp     x9, x28, [sp, #S_LR]    /* to pt_regs.r[30] and .sp */
> +
> +     /* The program counter just after the ftrace call site */
> +     str     lr, [sp, #S_PC]
> +
> +     /* Now fill in callee's preliminary stackframe. */
> +     stp     x29, x9, [sp, #S_FRAME_SIZE]
> +     /* Let FP point to it. */
> +     add     x29, sp, #S_FRAME_SIZE
> +
> +     /* Our stackframe, stored inside pt_regs. */
> +     stp     x29, x30, [sp, #S_STACKFRAME]
> +     add     x29, sp, #S_STACKFRAME
> +     .endm
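
If I read the offsets right, this macro produces the following layout
(a sketch, assuming S_FRAME_SIZE == sizeof(struct pt_regs) and that
S_STACKFRAME names a slot inside pt_regs):

	/*
	 * sp + S_FRAME_SIZE + 16: original sp at ftrace_caller entry
	 * sp + S_FRAME_SIZE:      callee's preliminary frame {x29, x9}
	 * sp + S_STACKFRAME:      ftrace's own frame record {x29, x30}
	 * sp + 0:                 struct pt_regs
	 */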
> +
> +ENTRY(ftrace_regs_caller)
> +     ftrace_regs_entry       1
> +     b       ftrace_common
> +ENDPROC(ftrace_regs_caller)
> +
> +ENTRY(ftrace_caller)
> +     ftrace_regs_entry       0
> +     b       ftrace_common
> +ENDPROC(ftrace_caller)
> +
> +ENTRY(ftrace_common)
> +
> +     mov     x3, sp          /* pt_regs are @sp */
> +     ldr_l   x2, function_trace_op, x0
> +     mov     x1, x9          /* parent IP */
> +     sub     x0, lr, #8      /* function entry == IP */
> +
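
The #8 works out because, at this point, lr holds the return address of
the "bl" in the second patched slot. In C-like pseudocode (register
names kept from the asm, purely illustrative):

	/* lr == function entry + 2 insns, hence the "sub x0, lr, #8" */
	unsigned long ip = lr - 2 * AARCH64_INSN_SIZE;
	/* x9 still holds the original LR, saved by the first slot */
	unsigned long parent_ip = x9;
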
> +GLOBAL(ftrace_call)
> +     bl      ftrace_stub
> +
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +GLOBAL(ftrace_graph_call)            // ftrace_graph_caller();
> +     nop                             // If enabled, this will be replaced
> +                                     // with "b ftrace_graph_caller"
> +#endif
> +
> +/*
> + * GCC's patchable-function-entry implicitly disables IPA-RA,
> + * so all non-argument registers are either scratch / dead
> + * or callee-saved (within the ftrace framework). Function
> + * arguments of the call we are intercepting right now however
> + * need to be preserved in any case.
> + */
> +ftrace_common_return:
> +     /* restore function args */
> +     ldp     x0, x1, [sp]
> +     ldp     x2, x3, [sp, #S_X2]
> +     ldp     x4, x5, [sp, #S_X4]
> +     ldp     x6, x7, [sp, #S_X6]
> +     ldr     x8, [sp, #S_X8]
> +
> +     /* restore fp and x28 */
> +     ldp     x28, x29, [sp, #S_X28]
> +
> +     ldr     lr, [sp, #S_LR]
> +     ldr     x9, [sp, #S_PC]

Is it fair to assume that we never modify registers beyond LR and PC as a 
result of ftrace/livepatching? I presume it is, but just checking.
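
As I read the exit path above, x9 is reloaded from pt_regs.pc (where
execution resumes) and lr from pt_regs.regs[30] (the traced function's
return address), so a tracer that rewrites either field redirects
control flow; presumably that is exactly the hook livepatching relies on.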

> +     /* clean up both frames, ours and callee preliminary */
> +     add     sp, sp, #S_FRAME_SIZE + 16
> +
> +     ret     x9
> +ENDPROC(ftrace_common)
> +
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +ENTRY(ftrace_graph_caller)
> +     ldr     x0, [sp, #S_PC]            /* pc */
> +     sub     x0, x0, #8      /* start of the ftrace call site */
> +     add     x1, sp, #S_LR              /* &lr */
> +     ldr     x2, [sp, #S_FRAME_SIZE]    /* fp */
> +     bl      prepare_ftrace_return
> +     b       ftrace_common_return
> +ENDPROC(ftrace_graph_caller)
> +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
> +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
> +#endif /* CONFIG_DYNAMIC_FTRACE */
> +
> +ENTRY(ftrace_stub)
> +     ret
> +ENDPROC(ftrace_stub)
> +
> +
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
>  /*
>   * void return_to_handler(void)
>   *
> --- a/arch/arm64/kernel/ftrace.c
> +++ b/arch/arm64/kernel/ftrace.c
> @@ -65,19 +65,67 @@ int ftrace_update_ftrace_func(ftrace_fun
>       return ftrace_modify_code(pc, 0, new, false);
>  }
>  
> +#ifdef CONFIG_ARM64_MODULE_PLTS
> +static int install_ftrace_trampoline(struct module *mod, unsigned long *addr)
> +{
> +     struct plt_entry trampoline, *mod_trampoline;
> +
> +     /*
> +      * Select the matching entry in
> +      * mod->arch.ftrace_trampolines[MOD_ARCH_NR_FTRACE_TRAMPOLINES]
> +      * for the ftrace entry point that *addr refers to.
> +      */
> +     if (*addr == FTRACE_ADDR)
> +             mod_trampoline = &mod->arch.ftrace_trampolines[0];
> +     else if (*addr == FTRACE_REGS_ADDR)
> +             mod_trampoline = &mod->arch.ftrace_trampolines[1];
> +     else
> +             return -EINVAL;
> +
> +     trampoline = get_plt_entry(*addr, mod_trampoline);
> +
> +     if (!plt_entries_equal(mod_trampoline, &trampoline)) {
> +             /* point the trampoline at our ftrace entry point */
> +             module_disable_ro(mod);
> +             *mod_trampoline = trampoline;
> +             module_enable_ro(mod, true);
> +
> +             /* update trampoline before patching in the branch */
> +             smp_wmb();
> +     }
> +     *addr = (unsigned long)(void *)mod_trampoline;
> +
> +     return 0;
> +}
> +#endif
> +
> +/*
> + * Ftrace with regs generates the tracer calls as close as possible to
> + * the function entry; no stack frame has been set up at that point.
> + * In order to make another call, e.g. to ftrace_caller, the LR must be
> + * saved before it gets overwritten.
> + * Between two functions, and with IPA-RA turned off, the scratch registers
> + * are available, so move the LR to x9 before calling into ftrace.
> + * "mov x9, lr" is officially aliased from "orr x9, xzr, lr".
> + */
> +#define MOV_X9_X30 aarch64_insn_gen_logical_shifted_reg( \
> +                     AARCH64_INSN_REG_9, AARCH64_INSN_REG_ZR, \
> +                     AARCH64_INSN_REG_LR, 0, AARCH64_INSN_VARIANT_64BIT, \
> +                     AARCH64_INSN_LOGIC_ORR)
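
If I decode ORR (shifted register) correctly, this expands to
0xaa1e03e9, the canonical encoding of "mov x9, x30". A throwaway
self-check could look like this (my sketch, not part of the patch):

	u32 insn = MOV_X9_X30;

	/* orr x9, xzr, x30 is the alias form of mov x9, x30 */
	WARN_ON(insn != 0xaa1e03e9);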
> +
>  /*
>   * Turn on the call to ftrace_caller() in instrumented function
>   */
>  int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
>  {
> -     unsigned long pc = rec->ip;
> +     unsigned long pc = rec->ip + REC_IP_BRANCH_OFFSET;
>       u32 old, new;
>       long offset = (long)pc - (long)addr;
>  
>       if (offset < -SZ_128M || offset >= SZ_128M) {
>  #ifdef CONFIG_ARM64_MODULE_PLTS
> -             struct plt_entry trampoline;
>               struct module *mod;
> +             int ret;
>  
>               /*
>                * On kernels that support module PLTs, the offset between the
> @@ -96,32 +144,14 @@ int ftrace_make_call(struct dyn_ftrace *
>               if (WARN_ON(!mod))
>                       return -EINVAL;
>  
> -             /*
> -              * There is only one ftrace trampoline per module. For now,
> -              * this is not a problem since on arm64, all dynamic ftrace
> -              * invocations are routed via ftrace_caller(). This will need
> -              * to be revisited if support for multiple ftrace entry points
> -              * is added in the future, but for now, the pr_err() below
> -              * deals with a theoretical issue only.
> -              */
> -             trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
> -             if (!plt_entries_equal(mod->arch.ftrace_trampoline,
> -                                    &trampoline)) {
> -                     if (!plt_entries_equal(mod->arch.ftrace_trampoline,
> -                                            &(struct plt_entry){})) {
> -                             pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
> -                             return -EINVAL;
> -                     }
> -
> -                     /* point the trampoline to our ftrace entry point */
> -                     module_disable_ro(mod);
> -                     *mod->arch.ftrace_trampoline = trampoline;
> -                     module_enable_ro(mod, true);
> +             /* Check against our well-known list of ftrace entry points */
> +             if (addr == FTRACE_ADDR || addr == FTRACE_REGS_ADDR) {
> +                     ret = install_ftrace_trampoline(mod, &addr);
> +                     if (ret < 0)
> +                             return ret;
> +             } else
> +                     return -EINVAL;
>  
> -                     /* update trampoline before patching in the branch */
> -                     smp_wmb();
> -             }
> -             addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
>  #else /* CONFIG_ARM64_MODULE_PLTS */
>               return -EINVAL;
>  #endif /* CONFIG_ARM64_MODULE_PLTS */
> @@ -133,17 +163,45 @@ int ftrace_make_call(struct dyn_ftrace *
>       return ftrace_modify_code(pc, old, new, true);
>  }
>  
> +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
> +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
> +                     unsigned long addr)
> +{
> +     unsigned long pc = rec->ip + REC_IP_BRANCH_OFFSET;
> +     u32 old, new;
> +
> +     old = aarch64_insn_gen_branch_imm(pc, old_addr, true);
> +     new = aarch64_insn_gen_branch_imm(pc, addr, true);
> +

Is this a branch or a call? Does addr always fit in the immediate limits?
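
For reference: b/bl encode a 26-bit signed word offset, i.e. a reach of
+/-2^27 bytes = +/-128 MiB, matching the SZ_128M checks elsewhere in
this file. So I suppose the question reduces to whether old_addr/addr
are always either kernel-text entry points or a module-local PLT slot.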

> +     return ftrace_modify_code(pc, old, new, true);

Can you speak to whether this operation is atomic with respect to the
system? Will concurrent observers always see a consistent old or new
instruction? Given the nature of ftrace, I presume it's well isolated.

> +}
> +#endif
> +
>  /*
>   * Turn off the call to ftrace_caller() in instrumented function
>   */
>  int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
>                   unsigned long addr)
>  {
> -     unsigned long pc = rec->ip;
> +     unsigned long pc = rec->ip + REC_IP_BRANCH_OFFSET;
>       bool validate = true;
>       u32 old = 0, new;
>       long offset = (long)pc - (long)addr;
>  
> +     /*
> +      * -fpatchable-function-entry= does not generate a profiling call
> +      *  initially; the NOPs are already there. So instead,
> +      *  put the LR saver there ahead of time, in order to avoid
> +      *  any race condition over patching 2 instructions.
> +      */
> +     if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
> +             addr == MCOUNT_ADDR) {
> +             old = aarch64_insn_gen_nop();
> +             new = MOV_X9_X30;
> +             pc -= REC_IP_BRANCH_OFFSET;
> +             return ftrace_modify_code(pc, old, new, validate);

I presume all the icache flush and barrier handling is in ftrace_modify_code()?

> +     }
> +
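
So, if I follow, every call site is toggled through two independent
single-instruction patches, which is what sidesteps the race over
patching 2 instructions (sketching the states):

	/*
	 * init    (ftrace_make_nop, addr == MCOUNT_ADDR):
	 *	nop; nop          ->  mov x9, x30; nop
	 * enable  (ftrace_make_call):
	 *	mov x9, x30; nop  ->  mov x9, x30; bl ftrace_caller
	 * disable (ftrace_make_nop):
	 *	mov x9, x30; bl ftrace_caller  ->  mov x9, x30; nop
	 *
	 * Each step rewrites exactly one instruction, so every
	 * intermediate state should be safe to execute.
	 */
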
>       if (offset < -SZ_128M || offset >= SZ_128M) {
>  #ifdef CONFIG_ARM64_MODULE_PLTS
>               u32 replaced;
> --- a/arch/arm64/include/asm/module.h
> +++ b/arch/arm64/include/asm/module.h
> @@ -32,7 +32,8 @@ struct mod_arch_specific {
>       struct mod_plt_sec      init;
>  
>       /* for CONFIG_DYNAMIC_FTRACE */
> -     struct plt_entry        *ftrace_trampoline;
> +     struct plt_entry        *ftrace_trampolines;
> +#define MOD_ARCH_NR_FTRACE_TRAMPOLINES       2

I don't see where ftrace_trampolines[1] gets generated.

>  };
>  #endif
>  
> --- a/arch/arm64/kernel/module.c
> +++ b/arch/arm64/kernel/module.c
> @@ -452,7 +452,7 @@ int module_finalize(const Elf_Ehdr *hdr,
>  #ifdef CONFIG_ARM64_MODULE_PLTS
>               if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
>                   !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
> -                     me->arch.ftrace_trampoline = (void *)s->sh_addr;
> +                     me->arch.ftrace_trampolines = (void *)s->sh_addr;
>  #endif
>       }
>  
> --- a/arch/arm64/kernel/module-plts.c
> +++ b/arch/arm64/kernel/module-plts.c
> @@ -333,7 +333,8 @@ int module_frob_arch_sections(Elf_Ehdr *
>               tramp->sh_type = SHT_NOBITS;
>               tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
>               tramp->sh_addralign = __alignof__(struct plt_entry);
> -             tramp->sh_size = sizeof(struct plt_entry);
> +             tramp->sh_size = MOD_ARCH_NR_FTRACE_TRAMPOLINES
> +                              * sizeof(struct plt_entry);
>       }
>  
>       return 0;
> 
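
For the record, doubling sh_size here is what reserves the backing
storage for ftrace_trampolines[1], so the second slot written by
install_ftrace_trampoline() stays within the ".text.ftrace_trampoline"
section: two struct plt_entry slots instead of one.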

Balbir Singh.
