[PATCH 2/3] powerpc: Load Monitor Register Support

2016-04-18 Thread Jack Miller
This enables new registers, LMRR and LMSER, that can trigger an EBB in
userspace code when a monitored load (via the new ldmx instruction)
loads memory from a monitored space. This facility is controlled by a
new FSCR bit, LM.

This patch disables the control bit on CPU setup and enables that bit
when a facility unavailable exception is taken for using it. On context
switch, this bit is then used to determine whether the two relevant
registers are saved and restored. This is done lazily for performance
reasons.

Signed-off-by: Jack Miller 
---
 arch/powerpc/include/asm/processor.h  |  2 ++
 arch/powerpc/include/asm/reg.h        |  5 +++++
 arch/powerpc/kernel/cpu_setup_power.S |  3 ++-
 arch/powerpc/kernel/process.c         | 20 ++++++++++++++++++++
 arch/powerpc/kernel/traps.c           |  4 ++++
 5 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/processor.h 
b/arch/powerpc/include/asm/processor.h
index 009fab1..2bb822b 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -314,6 +314,8 @@ struct thread_struct {
unsigned long   mmcr2;
unsignedmmcr0;
unsignedused_ebb;
+   unsigned long   lmrr;
+   unsigned long   lmser;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7972c9f..ab98ca4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -282,6 +282,8 @@
 #define SPRN_HRMOR 0x139   /* Real mode offset register */
 #define SPRN_HSRR0 0x13A   /* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1 0x13B   /* Hypervisor Save/Restore 1 */
+#define SPRN_LMRR  0x32D   /* Load Monitor Region Register */
+#define SPRN_LMSER 0x32E   /* Load Monitor Section Enable Register */
 #define SPRN_IC        0x350   /* Virtual Instruction Count */
 #define SPRN_VTB   0x351   /* Virtual Time Base */
 #define SPRN_LDBAR 0x352   /* LD Base Address Register */
@@ -291,6 +293,7 @@
 #define SPRN_PMCR  0x374   /* Power Management Control Register */
 
 /* HFSCR and FSCR bit numbers are the same */
+#define FSCR_LM_LG 11  /* Enable Load Monitor Registers */
 #define FSCR_TAR_LG    8       /* Enable Target Address Register */
 #define FSCR_EBB_LG    7       /* Enable Event Based Branching */
 #define FSCR_TM_LG 5   /* Enable Transactional Memory */
@@ -300,10 +303,12 @@
 #define FSCR_VECVSX_LG 1   /* Enable VMX/VSX  */
 #define FSCR_FP_LG 0   /* Enable Floating Point */
 #define SPRN_FSCR  0x099   /* Facility Status & Control Register */
+#define   FSCR_LM      __MASK(FSCR_LM_LG)
 #define   FSCR_TAR     __MASK(FSCR_TAR_LG)
 #define   FSCR_EBB     __MASK(FSCR_EBB_LG)
 #define   FSCR_DSCR    __MASK(FSCR_DSCR_LG)
 #define SPRN_HFSCR     0xbe    /* HV=1 Facility Status & Control Register */
+#define   HFSCR_LM     __MASK(FSCR_LM_LG)
 #define   HFSCR_TAR    __MASK(FSCR_TAR_LG)
 #define   HFSCR_EBB    __MASK(FSCR_EBB_LG)
 #define   HFSCR_TM     __MASK(FSCR_TM_LG)
diff --git a/arch/powerpc/kernel/cpu_setup_power.S 
b/arch/powerpc/kernel/cpu_setup_power.S
index 584e119..a232930 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -157,7 +157,8 @@ __init_LPCR:
 
 __init_FSCR:
mfspr   r3,SPRN_FSCR
-   ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
+   ori r3,r3,FSCR_LM|FSCR_TAR|FSCR_DSCR|FSCR_EBB
+   xori    r3,r3,FSCR_LM
mtspr   SPRN_FSCR,r3
blr
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 00bf6f5..f0061ec 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1005,6 +1005,14 @@ static inline void save_sprs(struct thread_struct *t)
 */
t->tar = mfspr(SPRN_TAR);
}
+
+   if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+   /* Conditionally save Load Monitor registers, if enabled */
+   if (t->fscr & FSCR_LM) {
+   t->lmrr = mfspr(SPRN_LMRR);
+   t->lmser = mfspr(SPRN_LMSER);
+   }
+   }
 #endif
 }
 
@@ -1041,6 +1049,16 @@ static inline void restore_sprs(struct thread_struct 
*old_thread,
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
+
+   if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+   /* Conditionally restore Load Monitor registers, if enabled */
+   if (new_thread->fscr & FSCR_LM) {
+   if (old_thread->lmrr != new_thread->lmrr);
+   mtspr(SPRN_LMRR, new_thread->lmrr);
+   if (old_thread->lmser != new_thread->lmser);
+   mtspr(SPRN_LMSER, new_thread->lmser);
+   }
+   }
 #endif
 }
 
@@ -1566,6 +1584,8 @@ void start_thread(struct pt_regs *regs, unsigned long 
start, unsigned long sp)
regs->gp

Re: [PATCH 2/3] powerpc: Load Monitor Register Support

2016-04-18 Thread kbuild test robot
Hi Jack,

[auto build test ERROR on powerpc/next]
[also build test ERROR on v4.6-rc4 next-20160418]
[if your patch is applied to the wrong git tree, please drop us a note to help 
improving the system]

url:
https://github.com/0day-ci/linux/commits/Jack-Miller/powerpc-Complete-FSCR-context-switch/20160419-031650
base:   https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-allnoconfig (attached as .config)
reproduce:
wget 
https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross
 -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
make.cross ARCH=powerpc 

All errors (new ones prefixed by >>):

   arch/powerpc/kernel/process.c: In function 'start_thread':
>> arch/powerpc/kernel/process.c:1615:17: error: 'struct thread_struct' has no 
>> member named 'fscr'
 current->thread.fscr &= ~FSCR_LM;
^

vim +1615 arch/powerpc/kernel/process.c

  1609  if (cpu_has_feature(CPU_FTR_TM))
  1610  regs->msr |= MSR_TM;
  1611  current->thread.tm_tfhar = 0;
  1612  current->thread.tm_texasr = 0;
  1613  current->thread.tm_tfiar = 0;
  1614  #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
> 1615  current->thread.fscr &= ~FSCR_LM;
  1616  }
  1617  EXPORT_SYMBOL(start_thread);
  1618  

---
0-DAY kernel test infrastructureOpen Source Technology Center
https://lists.01.org/pipermail/kbuild-all   Intel Corporation


.config.gz
Description: Binary data
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH 2/3] powerpc: Load Monitor Register Support

2016-04-18 Thread Jack Miller
This enables new registers, LMRR and LMSER, that can trigger an EBB in
userspace code when a monitored load (via the new ldmx instruction)
loads memory from a monitored space. This facility is controlled by a
new FSCR bit, LM.

This patch disables the control bit on CPU setup and enables that bit
when a facility unavailable exception is taken for using it. On context
switch, this bit is then used to determine whether the two relevant
registers are saved and restored. This is done lazily for performance
reasons.

Signed-off-by: Jack Miller 
---
 arch/powerpc/include/asm/processor.h  |  2 ++
 arch/powerpc/include/asm/reg.h        |  5 +++++
 arch/powerpc/kernel/cpu_setup_power.S |  3 ++-
 arch/powerpc/kernel/process.c         | 19 +++++++++++++++++++
 arch/powerpc/kernel/traps.c           |  4 ++++
 5 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/processor.h 
b/arch/powerpc/include/asm/processor.h
index 009fab1..2bb822b 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -314,6 +314,8 @@ struct thread_struct {
unsigned long   mmcr2;
unsignedmmcr0;
unsignedused_ebb;
+   unsigned long   lmrr;
+   unsigned long   lmser;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7972c9f..ab98ca4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -282,6 +282,8 @@
 #define SPRN_HRMOR 0x139   /* Real mode offset register */
 #define SPRN_HSRR0 0x13A   /* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1 0x13B   /* Hypervisor Save/Restore 1 */
+#define SPRN_LMRR  0x32D   /* Load Monitor Region Register */
+#define SPRN_LMSER 0x32E   /* Load Monitor Section Enable Register */
 #define SPRN_IC0x350   /* Virtual Instruction Count */
 #define SPRN_VTB   0x351   /* Virtual Time Base */
 #define SPRN_LDBAR 0x352   /* LD Base Address Register */
@@ -291,6 +293,7 @@
 #define SPRN_PMCR  0x374   /* Power Management Control Register */
 
 /* HFSCR and FSCR bit numbers are the same */
+#define FSCR_LM_LG 11  /* Enable Load Monitor Registers */
 #define FSCR_TAR_LG8   /* Enable Target Address Register */
 #define FSCR_EBB_LG7   /* Enable Event Based Branching */
 #define FSCR_TM_LG 5   /* Enable Transactional Memory */
@@ -300,10 +303,12 @@
 #define FSCR_VECVSX_LG 1   /* Enable VMX/VSX  */
 #define FSCR_FP_LG 0   /* Enable Floating Point */
 #define SPRN_FSCR  0x099   /* Facility Status & Control Register */
+#define   FSCR_LM  __MASK(FSCR_LM_LG)
 #define   FSCR_TAR __MASK(FSCR_TAR_LG)
 #define   FSCR_EBB __MASK(FSCR_EBB_LG)
 #define   FSCR_DSCR__MASK(FSCR_DSCR_LG)
 #define SPRN_HFSCR 0xbe/* HV=1 Facility Status & Control Register */
+#define   HFSCR_LM __MASK(FSCR_LM_LG)
 #define   HFSCR_TAR__MASK(FSCR_TAR_LG)
 #define   HFSCR_EBB__MASK(FSCR_EBB_LG)
 #define   HFSCR_TM __MASK(FSCR_TM_LG)
diff --git a/arch/powerpc/kernel/cpu_setup_power.S 
b/arch/powerpc/kernel/cpu_setup_power.S
index 584e119..a232930 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -157,7 +157,8 @@ __init_LPCR:
 
 __init_FSCR:
mfspr   r3,SPRN_FSCR
-   ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
+   ori r3,r3,FSCR_LM|FSCR_TAR|FSCR_DSCR|FSCR_EBB
+   xori    r3,r3,FSCR_LM
mtspr   SPRN_FSCR,r3
blr
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 00bf6f5..3e91bd6 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1005,6 +1005,14 @@ static inline void save_sprs(struct thread_struct *t)
 */
t->tar = mfspr(SPRN_TAR);
}
+
+   if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+   /* Conditionally save Load Monitor registers, if enabled */
+   if (t->fscr & FSCR_LM) {
+   t->lmrr = mfspr(SPRN_LMRR);
+   t->lmser = mfspr(SPRN_LMSER);
+   }
+   }
 #endif
 }
 
@@ -1041,6 +1049,16 @@ static inline void restore_sprs(struct thread_struct 
*old_thread,
if (old_thread->tar != new_thread->tar)
mtspr(SPRN_TAR, new_thread->tar);
}
+
+   if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+   /* Conditionally restore Load Monitor registers, if enabled */
+   if (new_thread->fscr & FSCR_LM) {
+   if (old_thread->lmrr != new_thread->lmrr);
+   mtspr(SPRN_LMRR, new_thread->lmrr);
+   if (old_thread->lmser != new_thread->lmser);
+   mtspr(SPRN_LMSER, new_thread->lmser);
+   }
+   }
 #endif
 }
 
@@ -1592,6 +1610,7 @@ void start_thread(struct pt_regs *regs, unsigned long 
start, unsigned long sp)
current->thread.t

Re: [PATCH 2/3] powerpc: Load Monitor Register Support

2016-04-13 Thread Jack Miller
Thanks, yeah, that's more readable and more correct. I'll change it in
the next spin.

- Jack

On Tue, Apr 12, 2016 at 12:40 AM, Segher Boessenkool
 wrote:
> Hi,
>
> On Mon, Apr 11, 2016 at 01:57:44PM -0500, Jack Miller wrote:
>>  __init_FSCR:
>>   mfspr   r3,SPRN_FSCR
>> + andi.   r3,r3,(~FSCR_LM)@L
>>   ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
>>   mtspr   SPRN_FSCR,r3
>>   blr
>
> This clears the top 48 bits as well.  Shouldn't matter currently; but
> more robust (and easier to read, if you know the idiom) is
>
> ori r3,r3,FSCR_LM|FSCR_TAR|FSCR_DSCR|FSCR_EBB
> xori    r3,r3,FSCR_LM
>
>
> Segher
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Re: [PATCH 2/3] powerpc: Load Monitor Register Support

2016-04-11 Thread Segher Boessenkool
Hi,

On Mon, Apr 11, 2016 at 01:57:44PM -0500, Jack Miller wrote:
>  __init_FSCR:
>   mfspr   r3,SPRN_FSCR
> + andi.   r3,r3,(~FSCR_LM)@L
>   ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
>   mtspr   SPRN_FSCR,r3
>   blr

This clears the top 48 bits as well.  Shouldn't matter currently; but
more robust (and easier to read, if you know the idiom) is

ori r3,r3,FSCR_LM|FSCR_TAR|FSCR_DSCR|FSCR_EBB
xori    r3,r3,FSCR_LM


Segher
___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

[PATCH 2/3] powerpc: Load Monitor Register Support

2016-04-11 Thread Jack Miller
This enables new registers, LMRR and LMSER, that can trigger an EBB in
userspace code when a monitored load (via the new ldmx instruction)
loads memory from a monitored space. This facility is controlled by a
new FSCR bit, LM.

This patch disables the control bit on CPU setup and enables that bit
when a facility unavailable exception is taken for using it. On context
switch, this bit is then used to determine whether the two relevant
registers are saved and restored. This is done lazily for performance
reasons.

Signed-off-by: Jack Miller 
---
 arch/powerpc/include/asm/processor.h  |  2 ++
 arch/powerpc/include/asm/reg.h        |  5 +++++
 arch/powerpc/kernel/cpu_setup_power.S |  1 +
 arch/powerpc/kernel/process.c         | 19 +++++++++++++++++++
 arch/powerpc/kernel/traps.c           |  4 ++++
 5 files changed, 31 insertions(+)

diff --git a/arch/powerpc/include/asm/processor.h 
b/arch/powerpc/include/asm/processor.h
index 009fab1..2bb822b 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -314,6 +314,8 @@ struct thread_struct {
unsigned long   mmcr2;
unsignedmmcr0;
unsignedused_ebb;
+   unsigned long   lmrr;
+   unsigned long   lmser;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7972c9f..ab98ca4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -282,6 +282,8 @@
 #define SPRN_HRMOR 0x139   /* Real mode offset register */
 #define SPRN_HSRR0 0x13A   /* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1 0x13B   /* Hypervisor Save/Restore 1 */
+#define SPRN_LMRR  0x32D   /* Load Monitor Region Register */
+#define SPRN_LMSER 0x32E   /* Load Monitor Section Enable Register */
 #define SPRN_IC0x350   /* Virtual Instruction Count */
 #define SPRN_VTB   0x351   /* Virtual Time Base */
 #define SPRN_LDBAR 0x352   /* LD Base Address Register */
@@ -291,6 +293,7 @@
 #define SPRN_PMCR  0x374   /* Power Management Control Register */
 
 /* HFSCR and FSCR bit numbers are the same */
+#define FSCR_LM_LG 11  /* Enable Load Monitor Registers */
 #define FSCR_TAR_LG8   /* Enable Target Address Register */
 #define FSCR_EBB_LG7   /* Enable Event Based Branching */
 #define FSCR_TM_LG 5   /* Enable Transactional Memory */
@@ -300,10 +303,12 @@
 #define FSCR_VECVSX_LG 1   /* Enable VMX/VSX  */
 #define FSCR_FP_LG 0   /* Enable Floating Point */
 #define SPRN_FSCR  0x099   /* Facility Status & Control Register */
+#define   FSCR_LM  __MASK(FSCR_LM_LG)
 #define   FSCR_TAR __MASK(FSCR_TAR_LG)
 #define   FSCR_EBB __MASK(FSCR_EBB_LG)
 #define   FSCR_DSCR__MASK(FSCR_DSCR_LG)
 #define SPRN_HFSCR 0xbe/* HV=1 Facility Status & Control Register */
+#define   HFSCR_LM __MASK(FSCR_LM_LG)
 #define   HFSCR_TAR__MASK(FSCR_TAR_LG)
 #define   HFSCR_EBB__MASK(FSCR_EBB_LG)
 #define   HFSCR_TM __MASK(FSCR_TM_LG)
diff --git a/arch/powerpc/kernel/cpu_setup_power.S 
b/arch/powerpc/kernel/cpu_setup_power.S
index 584e119..0474856 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -157,6 +157,7 @@ __init_LPCR:
 
 __init_FSCR:
mfspr   r3,SPRN_FSCR
+   andi.   r3,r3,(~FSCR_LM)@L
ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
mtspr   SPRN_FSCR,r3
blr
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 0c7e797..e39af31 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1005,6 +1005,14 @@ static inline void save_sprs(struct thread_struct *t)
 */
t->tar = mfspr(SPRN_TAR);
}
+
+   if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+   /* Conditionally save Load Monitor registers, if enabled */
+   if (t->fscr & FSCR_LM) {
+   t->lmrr = mfspr(SPRN_LMRR);
+   t->lmser = mfspr(SPRN_LMSER);
+   }
+   }
 #endif
 }
 
@@ -1046,7 +1054,16 @@ static inline void restore_sprs(struct thread_struct 
*old_thread,
 
if (old_thread->fscr != new_thread->fscr)
mtspr(SPRN_FSCR, new_thread->fscr);
+   }
 
+   if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+   /* Conditionally restore Load Monitor registers, if enabled */
+   if (new_thread->fscr & FSCR_LM) {
+   if (old_thread->lmrr != new_thread->lmrr);
+   mtspr(SPRN_LMRR, new_thread->lmrr);
+   if (old_thread->lmser != new_thread->lmser);
+   mtspr(SPRN_LMSER, new_thread->lmser);
+   }
}
 #endif
 }
@@ -1573,6 +1590,8 @@ void start_thread(struct pt_regs *regs, unsigned long 
start, unsigned long sp)
regs->gpr[2] = 0;
regs->msr = MSR_USER32;
}
+
+