Re: [PATCH v4 3/9] powerpc/kprobes/optprobes: Move over to patch_instruction

2017-06-27 Thread Balbir Singh
On Tue, 2017-06-27 at 10:34 +0200, Christophe LEROY wrote:
> 
> Le 27/06/2017 à 09:48, Balbir Singh a écrit :
> > With text moving to read-only migrate optprobes to using
> > the patch_instruction infrastructure. Without this optprobes
> > will fail and complain.
> > 
> > Signed-off-by: Balbir Singh 
> 
> Didn't Michael pick it up already?
>

Yes, he did. I posted the entire series, and I'll let him keep the
better versions he has edited. I spoke to him, but I was not 100%
sure what was picked up — the email responses mentioned 3 patches,
but I thought 4 had been picked up.

Balbir Singh.



Re: [PATCH v4 3/9] powerpc/kprobes/optprobes: Move over to patch_instruction

2017-06-27 Thread Christophe LEROY



Le 27/06/2017 à 09:48, Balbir Singh a écrit :

With text moving to read-only migrate optprobes to using
the patch_instruction infrastructure. Without this optprobes
will fail and complain.

Signed-off-by: Balbir Singh 


Didn't Michael pick it up already?

Christophe



---
  arch/powerpc/kernel/optprobes.c | 58 ++---
  1 file changed, 37 insertions(+), 21 deletions(-)

diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index ec60ed0..1c7326c 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -158,12 +158,13 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe 
*op)
  void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
  {
/* addis r4,0,(insn)@h */
-   *addr++ = PPC_INST_ADDIS | ___PPC_RT(4) |
- ((val >> 16) & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ADDIS | ___PPC_RT(4) |
+   ((val >> 16) & 0xffff));
+   addr++;
  
  	/* ori r4,r4,(insn)@l */

-   *addr = PPC_INST_ORI | ___PPC_RA(4) | ___PPC_RS(4) |
-   (val & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ORI | ___PPC_RA(4) |
+   ___PPC_RS(4) | (val & 0xffff));
  }
  
  /*

@@ -173,24 +174,28 @@ void patch_imm32_load_insns(unsigned int val, 
kprobe_opcode_t *addr)
  void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
  {
/* lis r3,(op)@highest */
-   *addr++ = PPC_INST_ADDIS | ___PPC_RT(3) |
- ((val >> 48) & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ADDIS | ___PPC_RT(3) |
+   ((val >> 48) & 0xffff));
+   addr++;
  
  	/* ori r3,r3,(op)@higher */

-   *addr++ = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
- ((val >> 32) & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ORI | ___PPC_RA(3) |
+   ___PPC_RS(3) | ((val >> 32) & 0xffff));
+   addr++;
  
  	/* rldicr r3,r3,32,31 */

-   *addr++ = PPC_INST_RLDICR | ___PPC_RA(3) | ___PPC_RS(3) |
- __PPC_SH64(32) | __PPC_ME64(31);
+   patch_instruction((unsigned int *)addr, PPC_INST_RLDICR | ___PPC_RA(3) |
+   ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
+   addr++;
  
  	/* oris r3,r3,(op)@h */

-   *addr++ = PPC_INST_ORIS | ___PPC_RA(3) | ___PPC_RS(3) |
- ((val >> 16) & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ORIS | ___PPC_RA(3) |
+   ___PPC_RS(3) | ((val >> 16) & 0xffff));
+   addr++;
  
  	/* ori r3,r3,(op)@l */

-   *addr = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
-   (val & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ORI | ___PPC_RA(3) |
+   ___PPC_RS(3) | (val & 0xffff));
  }
  
  int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)

@@ -198,7 +203,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe 
*op, struct kprobe *p)
kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
long b_offset;
-   unsigned long nip;
+   unsigned long nip, size;
+   int rc, i;
  
  	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
  
@@ -231,8 +237,15 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)

goto error;
  
  	/* Setup template */

-   memcpy(buff, optprobe_template_entry,
-   TMPL_END_IDX * sizeof(kprobe_opcode_t));
+   /* We can optimize this via patch_instruction_window later */
+   size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
+   pr_devel("Copying template to %p, size %lu\n", buff, size);
+   for (i = 0; i < size; i++) {
+   rc = patch_instruction((unsigned int *)buff + i,
+   *((unsigned int *)(optprobe_template_entry) + i));
+   if (rc < 0)
+   goto error;
+   }
  
  	/*

 * Fixup the template with instructions to:
@@ -261,8 +274,10 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe 
*op, struct kprobe *p)
if (!branch_op_callback || !branch_emulate_step)
goto error;
  
-	buff[TMPL_CALL_HDLR_IDX] = branch_op_callback;

-   buff[TMPL_EMULATE_IDX] = branch_emulate_step;
+   patch_instruction((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
+   branch_op_callback);
+   patch_instruction((unsigned int *)buff + TMPL_EMULATE_IDX,
+   branch_emulate_step);
  
  	/*

 * 3. load instruction to be emulated into relevant register, and
@@ -272,8 +287,9 @@ int arch_prepare_optimized_kprobe(struct 

[PATCH v4 3/9] powerpc/kprobes/optprobes: Move over to patch_instruction

2017-06-27 Thread Balbir Singh
With text moving to read-only migrate optprobes to using
the patch_instruction infrastructure. Without this optprobes
will fail and complain.

Signed-off-by: Balbir Singh 
---
 arch/powerpc/kernel/optprobes.c | 58 ++---
 1 file changed, 37 insertions(+), 21 deletions(-)

diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index ec60ed0..1c7326c 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -158,12 +158,13 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe 
*op)
 void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
 {
/* addis r4,0,(insn)@h */
-   *addr++ = PPC_INST_ADDIS | ___PPC_RT(4) |
- ((val >> 16) & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ADDIS | ___PPC_RT(4) |
+   ((val >> 16) & 0xffff));
+   addr++;
 
/* ori r4,r4,(insn)@l */
-   *addr = PPC_INST_ORI | ___PPC_RA(4) | ___PPC_RS(4) |
-   (val & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ORI | ___PPC_RA(4) |
+   ___PPC_RS(4) | (val & 0xffff));
 }
 
 /*
@@ -173,24 +174,28 @@ void patch_imm32_load_insns(unsigned int val, 
kprobe_opcode_t *addr)
 void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
 {
/* lis r3,(op)@highest */
-   *addr++ = PPC_INST_ADDIS | ___PPC_RT(3) |
- ((val >> 48) & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ADDIS | ___PPC_RT(3) |
+   ((val >> 48) & 0xffff));
+   addr++;
 
/* ori r3,r3,(op)@higher */
-   *addr++ = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
- ((val >> 32) & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ORI | ___PPC_RA(3) |
+   ___PPC_RS(3) | ((val >> 32) & 0xffff));
+   addr++;
 
/* rldicr r3,r3,32,31 */
-   *addr++ = PPC_INST_RLDICR | ___PPC_RA(3) | ___PPC_RS(3) |
- __PPC_SH64(32) | __PPC_ME64(31);
+   patch_instruction((unsigned int *)addr, PPC_INST_RLDICR | ___PPC_RA(3) |
+   ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
+   addr++;
 
/* oris r3,r3,(op)@h */
-   *addr++ = PPC_INST_ORIS | ___PPC_RA(3) | ___PPC_RS(3) |
- ((val >> 16) & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ORIS | ___PPC_RA(3) |
+   ___PPC_RS(3) | ((val >> 16) & 0xffff));
+   addr++;
 
/* ori r3,r3,(op)@l */
-   *addr = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
-   (val & 0xffff);
+   patch_instruction((unsigned int *)addr, PPC_INST_ORI | ___PPC_RA(3) |
+   ___PPC_RS(3) | (val & 0xffff));
 }
 
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe 
*p)
@@ -198,7 +203,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe 
*op, struct kprobe *p)
kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
long b_offset;
-   unsigned long nip;
+   unsigned long nip, size;
+   int rc, i;
 
kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
 
@@ -231,8 +237,15 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe 
*op, struct kprobe *p)
goto error;
 
/* Setup template */
-   memcpy(buff, optprobe_template_entry,
-   TMPL_END_IDX * sizeof(kprobe_opcode_t));
+   /* We can optimize this via patch_instruction_window later */
+   size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
+   pr_devel("Copying template to %p, size %lu\n", buff, size);
+   for (i = 0; i < size; i++) {
+   rc = patch_instruction((unsigned int *)buff + i,
+   *((unsigned int *)(optprobe_template_entry) + i));
+   if (rc < 0)
+   goto error;
+   }
 
/*
 * Fixup the template with instructions to:
@@ -261,8 +274,10 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe 
*op, struct kprobe *p)
if (!branch_op_callback || !branch_emulate_step)
goto error;
 
-   buff[TMPL_CALL_HDLR_IDX] = branch_op_callback;
-   buff[TMPL_EMULATE_IDX] = branch_emulate_step;
+   patch_instruction((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
+   branch_op_callback);
+   patch_instruction((unsigned int *)buff + TMPL_EMULATE_IDX,
+   branch_emulate_step);
 
/*
 * 3. load instruction to be emulated into relevant register, and
@@ -272,8 +287,9 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe 
*op, struct kprobe *p)
/*
 * 4. branch back from