Re: [v3 2/2] [PowerPC] Allow use of SIMD in interrupts from kernel code

2019-05-14 Thread kbuild test robot
Hi Shawn,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on powerpc/next]
[also build test ERROR on v5.1 next-20190514]
[if your patch is applied to the wrong git tree, please drop us a note to help 
improve the system]

url:
https://github.com/0day-ci/linux/commits/Shawn-Landden/Add-simd-h-implementation/20190515-062235
base:   https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-mpc836x_mds_defconfig (attached as .config)
compiler: powerpc-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
GCC_VERSION=7.2.0 make.cross ARCH=powerpc 

If you fix the issue, kindly add following tag
Reported-by: kbuild test robot 

All errors (new ones prefixed by >>):

>> arch/powerpc/kernel/process.c:194:13: error: 'interrupted_kernel_fpu_idle' defined but not used [-Werror=unused-function]
static bool interrupted_kernel_fpu_idle(void)
^~~
   cc1: all warnings being treated as errors

vim +/interrupted_kernel_fpu_idle +194 arch/powerpc/kernel/process.c

   193  
 > 194  static bool interrupted_kernel_fpu_idle(void)
   195  {
   196  return !kernel_fpu_disabled();
   197  }
   198  

---
0-DAY kernel test infrastructure — Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all — Intel Corporation


.config.gz
Description: application/gzip


[v3 2/2] [PowerPC] Allow use of SIMD in interrupts from kernel code

2019-05-14 Thread Shawn Landden
This second patch is separate because it could be wrong,
like I am not sure about how kernel thread migration works,
and it is even allowing simd in preemptible kernel code.

Signed-off-by: Shawn Landden 
---
 arch/powerpc/include/asm/switch_to.h | 15 ++-
 arch/powerpc/kernel/process.c| 60 ++--
 2 files changed, 59 insertions(+), 16 deletions(-)

diff --git a/arch/powerpc/include/asm/switch_to.h 
b/arch/powerpc/include/asm/switch_to.h
index 5b03d8a82..c79f7d24a 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -30,10 +30,7 @@ extern void enable_kernel_fp(void);
 extern void flush_fp_to_thread(struct task_struct *);
 extern void giveup_fpu(struct task_struct *);
 extern void save_fpu(struct task_struct *);
-static inline void disable_kernel_fp(void)
-{
-   msr_check_and_clear(MSR_FP);
-}
+extern void disable_kernel_fp(void);
 #else
 static inline void save_fpu(struct task_struct *t) { }
 static inline void flush_fp_to_thread(struct task_struct *t) { }
@@ -44,10 +41,7 @@ extern void enable_kernel_altivec(void);
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
 extern void save_altivec(struct task_struct *);
-static inline void disable_kernel_altivec(void)
-{
-   msr_check_and_clear(MSR_VEC);
-}
+extern void disable_kernel_altivec(void);
 #else
 static inline void save_altivec(struct task_struct *t) { }
 static inline void __giveup_altivec(struct task_struct *t) { }
@@ -56,10 +50,7 @@ static inline void __giveup_altivec(struct task_struct *t) { }
 #ifdef CONFIG_VSX
 extern void enable_kernel_vsx(void);
 extern void flush_vsx_to_thread(struct task_struct *);
-static inline void disable_kernel_vsx(void)
-{
-   msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
-}
+extern void disable_kernel_vsx(void);
 #endif
 
 #ifdef CONFIG_SPE
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ef534831f..4ffc9c664 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -170,6 +170,29 @@ void __msr_check_and_clear(unsigned long bits)
 EXPORT_SYMBOL(__msr_check_and_clear);
 
 #ifdef CONFIG_PPC_FPU
+/*
+ * Track whether the kernel is using the FPU state
+ * currently.
+ *
+ * This flag is used:
+ *
+ *   - by IRQ context code to potentially use the FPU
+ * if it's unused.
+ *
+ *   - to debug kernel_fpu/altivec/vsx_begin()/end() correctness
+ */
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
+static bool kernel_fpu_disabled(void)
+{
+   return this_cpu_read(in_kernel_fpu);
+}
+
+static bool interrupted_kernel_fpu_idle(void)
+{
+   return !kernel_fpu_disabled();
+}
+
 static void __giveup_fpu(struct task_struct *tsk)
 {
unsigned long msr;
@@ -230,7 +253,8 @@ void enable_kernel_fp(void)
 {
unsigned long cpumsr;
 
-   WARN_ON(preemptible());
+   WARN_ON_ONCE(this_cpu_read(in_kernel_fpu));
+   this_cpu_write(in_kernel_fpu, true);
 
cpumsr = msr_check_and_set(MSR_FP);
 
@@ -251,6 +275,15 @@ void enable_kernel_fp(void)
 }
 EXPORT_SYMBOL(enable_kernel_fp);
 
+void disable_kernel_fp(void)
+{
+   WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+   this_cpu_write(in_kernel_fpu, false);
+
+   msr_check_and_clear(MSR_FP);
+}
+EXPORT_SYMBOL(disable_kernel_fp);
+
 static int restore_fp(struct task_struct *tsk)
 {
if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
@@ -295,7 +328,8 @@ void enable_kernel_altivec(void)
 {
unsigned long cpumsr;
 
-   WARN_ON(preemptible());
+   WARN_ON_ONCE(this_cpu_read(in_kernel_fpu));
+   this_cpu_write(in_kernel_fpu, true);
 
cpumsr = msr_check_and_set(MSR_VEC);
 
@@ -316,6 +350,14 @@ void enable_kernel_altivec(void)
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
 
+extern void disable_kernel_altivec(void)
+{
+   WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+   this_cpu_write(in_kernel_fpu, false);
+   msr_check_and_clear(MSR_VEC);
+}
+EXPORT_SYMBOL(disable_kernel_altivec);
+
 /*
  * Make sure the VMX/Altivec register state in the
  * the thread_struct is up to date for task tsk.
@@ -371,7 +413,8 @@ static bool interrupted_user_mode(void)
 bool may_use_simd(void)
 {
return !in_interrupt() ||
-   interrupted_user_mode();
+   interrupted_user_mode() ||
+   interrupted_kernel_fpu_idle();
 }
 EXPORT_SYMBOL(may_use_simd);
 
@@ -411,7 +454,8 @@ void enable_kernel_vsx(void)
 {
unsigned long cpumsr;
 
-   WARN_ON(preemptible());
+   WARN_ON_ONCE(this_cpu_read(in_kernel_fpu));
+   this_cpu_write(in_kernel_fpu, true);
 
cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
 
@@ -433,6 +477,14 @@ void enable_kernel_vsx(void)
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
+void disable_kernel_vsx(void)
+{
+   WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+   this_cpu_write(in_kernel_fpu, false);
+   msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);