Commit-ID:  1d731e731c4cd7cbd3b1aa295f0932e7610da82f
Gitweb:     https://git.kernel.org/tip/1d731e731c4cd7cbd3b1aa295f0932e7610da82f
Author:     Sebastian Andrzej Siewior <bige...@linutronix.de>
AuthorDate: Wed, 3 Apr 2019 18:41:53 +0200
Committer:  Borislav Petkov <b...@suse.de>
CommitDate: Fri, 12 Apr 2019 20:04:49 +0200

x86/fpu: Add a fastpath to __fpu__restore_sig()

The previous commits refactor the restoration of the FPU registers so
that they can be loaded from in-kernel memory. This overhead can be
avoided if the load can be performed without a pagefault.

Attempt to restore FPU registers by invoking
copy_user_to_fpregs_zeroing(). If it fails try the slowpath which can
handle pagefaults.

 [ bp: Add a comment over the fastpath to be able to find one's way
   around the function. ]

Signed-off-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
Signed-off-by: Borislav Petkov <b...@suse.de>
Reviewed-by: Dave Hansen <dave.han...@intel.com>
Reviewed-by: Thomas Gleixner <t...@linutronix.de>
Cc: Andy Lutomirski <l...@kernel.org>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Jann Horn <ja...@google.com>
Cc: "Jason A. Donenfeld" <ja...@zx2c4.com>
Cc: kvm ML <k...@vger.kernel.org>
Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: Radim Krčmář <rkrc...@redhat.com>
Cc: Rik van Riel <r...@surriel.com>
Cc: x86-ml <x...@kernel.org>
Link: https://lkml.kernel.org/r/20190403164156.19645-25-bige...@linutronix.de
---
 arch/x86/kernel/fpu/signal.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 6df1f15e0cd5..a1bd7be70206 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -242,10 +242,10 @@ sanitize_restored_xstate(union fpregs_state *state,
 /*
  * Restore the extended state if present. Otherwise, restore the FP/SSE state.
  */
-static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
+static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
 {
        if (use_xsave()) {
-               if ((unsigned long)buf % 64 || fx_only) {
+               if (fx_only) {
                        u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
                        copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
                        return copy_user_to_fxregs(buf);
@@ -327,8 +327,27 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                if (ret)
                        goto err_out;
                envp = &env;
+       } else {
+               /*
+                * Attempt to restore the FPU registers directly from user
+                * memory. For that to succeed, the user access cannot cause
+                * page faults. If it does, fall back to the slow path below,
+                * going through the kernel buffer with the enabled pagefault
+                * handler.
+                */
+               fpregs_lock();
+               pagefault_disable();
+               ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only);
+               pagefault_enable();
+               if (!ret) {
+                       fpregs_mark_activate();
+                       fpregs_unlock();
+                       return 0;
+               }
+               fpregs_unlock();
        }
 
+
        if (use_xsave() && !fx_only) {
                u64 init_bv = xfeatures_mask & ~xfeatures;
 

Reply via email to