[PATCH RFC 3/5] x86,fpu: add kernel fpu argument to __kernel_fpu_begin

Date: 2016-10-01
From: Rik van Riel <riel@redhat.com>

Most kernel FPU contexts are transient, but a KVM VCPU context
persists.  Add a kernel FPU argument to __kernel_fpu_begin, so
we can tell whether the KVM VCPU context was clobbered by
another kernel FPU context.

Signed-off-by: Rik van Riel <riel@redhat.com>
---
 arch/x86/include/asm/efi.h     | 2 +-
 arch/x86/include/asm/fpu/api.h | 2 +-
 arch/x86/kernel/fpu/core.c     | 6 +++---
 arch/x86/kvm/x86.c             | 8 ++++++--
 4 files changed, 11 insertions(+), 7 deletions(-)
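
For context (not part of the patch itself): recording the owner context in
fpu_fpregs_owner_ctx is what lets a persistent user such as KVM later check
whether its register state survived.  A minimal sketch of such a check,
assuming a follow-up uses the owner pointer along these lines (the helper
name is made up):

	/*
	 * Hypothetical helper, sketch only.  After this patch, the per-cpu
	 * fpu_fpregs_owner_ctx points at the kernel FPU context that last
	 * claimed the FPU registers (or NULL for a transient user).  If it
	 * still points at the vcpu's guest FPU context, no other kernel
	 * FPU user clobbered the registers in the meantime.
	 */
	static bool guest_fpregs_still_loaded(struct kvm_vcpu *vcpu)
	{
		return this_cpu_read(fpu_fpregs_owner_ctx) ==
		       &vcpu->arch.guest_fpu;
	}

When such a check fails, the caller would reload the guest state with
__copy_kernel_to_fpregs() before re-entering the guest; when it holds, the
reload can be skipped.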

diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index d0bb76d81402..603d2cdd6b82 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -71,7 +71,7 @@ struct efi_scratch {
 ({ \
efi_sync_low_kernel_mappings(); \
preempt_disable();  \
-   __kernel_fpu_begin();   \
+   __kernel_fpu_begin(NULL);   \
\
if (efi_scratch.use_pgd) {  \
efi_scratch.prev_cr3 = read_cr3();  \
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index edd7dc7ae4f7..f6704edf9904 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -20,7 +20,7 @@
  * All other cases use kernel_fpu_begin/end() which disable preemption
  * during kernel FPU usage.
  */
-extern void __kernel_fpu_begin(void);
+extern void __kernel_fpu_begin(struct fpu *fpu);
 extern void __kernel_fpu_end(void);
 extern void kernel_fpu_begin(void);
 extern void kernel_fpu_end(void);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index c4350f188be1..537eb65b6ae6 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -110,7 +110,7 @@ bool irq_fpu_usable(void)
 }
 EXPORT_SYMBOL(irq_fpu_usable);
 
-void __kernel_fpu_begin(void)
+void __kernel_fpu_begin(struct fpu *kernelfpu)
 {
	struct fpu *fpu = &current->thread.fpu;
 
@@ -118,7 +118,7 @@ void __kernel_fpu_begin(void)
 
kernel_fpu_disable();
 
-   this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+   this_cpu_write(fpu_fpregs_owner_ctx, kernelfpu);
 
if (fpu->fpregs_active) {
/*
@@ -150,7 +150,7 @@ EXPORT_SYMBOL(__kernel_fpu_end);
 void kernel_fpu_begin(void)
 {
preempt_disable();
-   __kernel_fpu_begin();
+   __kernel_fpu_begin(NULL);
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_begin);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 699f8726539a..55c82d066d3a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7332,6 +7332,8 @@ static void fx_init(struct kvm_vcpu *vcpu)
 
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
+   struct fpu *fpu;
+
if (vcpu->guest_fpu_loaded)
return;
 
@@ -7340,9 +7342,11 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 * and assume host would use all available bits.
 * Guest xcr0 would be loaded later.
 */
+   fpu = &vcpu->arch.guest_fpu;
+
vcpu->guest_fpu_loaded = 1;
-   __kernel_fpu_begin();
-   __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+   __kernel_fpu_begin(fpu);
+   __copy_kernel_to_fpregs(&fpu->state);
trace_kvm_fpu(1);
 }
 
-- 
2.7.4