4.12-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Paolo Bonzini <[email protected]>

commit 38cfd5e3df9c4f88e76b547eee2087ee5c042ae2 upstream.

The host pkru is restored right after vcpu exit (commit 1be0e61), so
KVM_GET_XSAVE will return the host PKRU value instead.  Fix this by
using the guest PKRU explicitly in fill_xsave and load_xsave.  This
part is based on a patch by Junkang Fu.

The host PKRU data may also not match the value in vcpu->arch.guest_fpu.state,
because it could have been changed by userspace since the last time
it was saved, so skip loading it in kvm_load_guest_fpu.

Reported-by: Junkang Fu <[email protected]>
Cc: Yang Zhang <[email protected]>
Fixes: 1be0e61c1f255faaeab04a390e00c8b9b9042870
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>

---
 arch/x86/include/asm/fpu/internal.h |    6 +++---
 arch/x86/kvm/x86.c                  |   17 ++++++++++++++---
 2 files changed, 17 insertions(+), 6 deletions(-)
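
A rough userspace sketch (illustrative only, not part of the patch) of where
the stale value is visible through KVM_GET_XSAVE; it assumes an already-created
vCPU fd ("vcpufd") and a "pkru_offset" taken from CPUID.(EAX=0xD, ECX=9),
i.e. the offset of state component 9 (XFEATURE_PKRU) inside the XSAVE area:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint32_t read_guest_pkru(int vcpufd, uint32_t pkru_offset)
{
	struct kvm_xsave xsave;
	uint32_t pkru;

	if (ioctl(vcpufd, KVM_GET_XSAVE, &xsave) < 0)
		return 0;
	/*
	 * Without this fix, these bytes hold the host PKRU, because the
	 * guest value has already been swapped out on vcpu exit.
	 */
	memcpy(&pkru, (uint8_t *)xsave.region + pkru_offset, sizeof(pkru));
	return pkru;
}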

--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate
        return 0;
 }
 
-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
+static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
 {
        if (use_xsave()) {
-               copy_kernel_to_xregs(&fpstate->xsave, -1);
+               copy_kernel_to_xregs(&fpstate->xsave, mask);
        } else {
                if (use_fxsr())
                        copy_kernel_to_fxregs(&fpstate->fxsave);
@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs
                        : : [addr] "m" (fpstate));
        }
 
-       __copy_kernel_to_fpregs(fpstate);
+       __copy_kernel_to_fpregs(fpstate, -1);
 }
 
 extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3236,7 +3236,12 @@ static void fill_xsave(u8 *dest, struct
                        u32 size, offset, ecx, edx;
                        cpuid_count(XSTATE_CPUID, index,
                                    &size, &offset, &ecx, &edx);
-                       memcpy(dest + offset, src, size);
+                       if (feature == XFEATURE_MASK_PKRU)
+                               memcpy(dest + offset, &vcpu->arch.pkru,
+                                      sizeof(vcpu->arch.pkru));
+                       else
+                               memcpy(dest + offset, src, size);
+
                }
 
                valid -= feature;
@@ -3274,7 +3279,11 @@ static void load_xsave(struct kvm_vcpu *
                        u32 size, offset, ecx, edx;
                        cpuid_count(XSTATE_CPUID, index,
                                    &size, &offset, &ecx, &edx);
-                       memcpy(dest, src + offset, size);
+                       if (feature == XFEATURE_MASK_PKRU)
+                               memcpy(&vcpu->arch.pkru, src + offset,
+                                      sizeof(vcpu->arch.pkru));
+                       else
+                               memcpy(dest, src + offset, size);
                }
 
                valid -= feature;
@@ -7616,7 +7625,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu
         */
        vcpu->guest_fpu_loaded = 1;
        __kernel_fpu_begin();
-       __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+       /* PKRU is separately restored in kvm_x86_ops->run.  */
+       __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+                               ~XFEATURE_MASK_PKRU);
        trace_kvm_fpu(1);
 }
 

