On Wed, 19 Apr 2017 18:21:04 +0530
"Naveen N. Rao" <naveen.n....@linux.vnet.ibm.com> wrote:

> Factor out code to emulate instruction into a try_to_emulate()
> helper function. This makes no functional changes.

Thanks,

> 
> Acked-by: Ananth N Mavinakayanahalli <ana...@linux.vnet.ibm.com>
> Signed-off-by: Naveen N. Rao <naveen.n....@linux.vnet.ibm.com>
> ---
>  arch/powerpc/kernel/kprobes.c | 52 ++++++++++++++++++++++++++-----------------
>  1 file changed, 31 insertions(+), 21 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
> index d743bacefa8c..46e8c1e03ce4 100644
> --- a/arch/powerpc/kernel/kprobes.c
> +++ b/arch/powerpc/kernel/kprobes.c
> @@ -206,6 +206,35 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
>       regs->link = (unsigned long)kretprobe_trampoline;
>  }
>  
> +int __kprobes try_to_emulate(struct kprobe *p, struct pt_regs *regs)
> +{
> +     int ret;
> +     unsigned int insn = *p->ainsn.insn;
> +
> +     /* regs->nip is also adjusted if emulate_step returns 1 */
> +     ret = emulate_step(regs, insn);
> +     if (ret > 0) {
> +             /*
> +              * Once this instruction has been boosted
> +              * successfully, set the boostable flag
> +              */
> +             if (unlikely(p->ainsn.boostable == 0))
> +                     p->ainsn.boostable = 1;
> +     } else if (ret < 0) {
> +             /*
> +              * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
> +              * So, we should never get here... but, its still
> +              * good to catch them, just in case...
> +              */
> +             printk("Can't step on instruction %x\n", insn);
> +             BUG();
> +     } else if (ret == 0)
> +             /* This instruction can't be boosted */
> +             p->ainsn.boostable = -1;
> +
> +     return ret;
> +}
> +
>  int __kprobes kprobe_handler(struct pt_regs *regs)
>  {
>       struct kprobe *p;
> @@ -301,18 +330,9 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
>  
>  ss_probe:
>       if (p->ainsn.boostable >= 0) {
> -             unsigned int insn = *p->ainsn.insn;
> +             ret = try_to_emulate(p, regs);
>  
> -             /* regs->nip is also adjusted if emulate_step returns 1 */
> -             ret = emulate_step(regs, insn);
>               if (ret > 0) {
> -                     /*
> -                      * Once this instruction has been boosted
> -                      * successfully, set the boostable flag
> -                      */
> -                     if (unlikely(p->ainsn.boostable == 0))
> -                             p->ainsn.boostable = 1;
> -
>                       if (p->post_handler)
>                               p->post_handler(p, regs, 0);
>  
> @@ -320,17 +340,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
>                       reset_current_kprobe();
>                       preempt_enable_no_resched();
>                       return 1;
> -             } else if (ret < 0) {
> -                     /*
> -                      * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
> -                      * So, we should never get here... but, its still
> -                      * good to catch them, just in case...
> -                      */
> -                     printk("Can't step on instruction %x\n", insn);
> -                     BUG();
> -             } else if (ret == 0)
> -                     /* This instruction can't be boosted */
> -                     p->ainsn.boostable = -1;
> +             }
>       }
>       prepare_singlestep(p, regs);
>       kcb->kprobe_status = KPROBE_HIT_SS;
> -- 
> 2.12.1
> 
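
As a side note for anyone reading along outside the powerpc tree: the helper
keeps the existing three-way contract of emulate_step() intact. Below is a
minimal userspace sketch of that contract, not kernel code; mock_emulate_step(),
mock_regs, mock_probe and the boostable field are hypothetical stand-ins, and
only the return-value handling mirrors what the patch factors out.

/*
 * Minimal userspace sketch of the try_to_emulate() return contract --
 * hypothetical stand-ins, not kernel code.
 */
#include <assert.h>
#include <stdio.h>

struct mock_regs { unsigned long nip; };
struct mock_probe { unsigned int insn; int boostable; };

/* Toy emulator: > 0 emulated (nip advanced), 0 can't emulate, < 0 unexpected */
static int mock_emulate_step(struct mock_regs *regs, unsigned int insn)
{
	if (insn == 0x60000000) {	/* nop is trivially emulated */
		regs->nip += 4;
		return 1;
	}
	return 0;			/* everything else: fall back to single-stepping */
}

static int mock_try_to_emulate(struct mock_probe *p, struct mock_regs *regs)
{
	int ret = mock_emulate_step(regs, p->insn);

	if (ret > 0)
		p->boostable = 1;	/* emulated fine: mark boostable */
	else if (ret == 0)
		p->boostable = -1;	/* can't emulate: don't try again */
	else
		assert(0);		/* "can't happen" path, like BUG() */

	return ret;
}

int main(void)
{
	struct mock_regs regs = { .nip = 0x1000 };
	struct mock_probe p = { .insn = 0x60000000, .boostable = 0 };

	if (mock_try_to_emulate(&p, &regs) > 0)
		printf("emulated: nip=0x%lx, boostable=%d\n", regs.nip, p.boostable);
	else
		printf("would fall back to single-stepping\n");

	return 0;
}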


-- 
Masami Hiramatsu <mhira...@kernel.org>
