Remove the failure code and propagate this down to callers.

Note that this function still has an 'init' aspect, which must be
called.

Reviewed-by: Borislav Petkov <b...@alien8.de>
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Fenghua Yu <fenghua...@intel.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Oleg Nesterov <o...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/include/asm/fpu/internal.h |  2 +-
 arch/x86/kernel/fpu/core.c          | 37 +++++++------------------------------
 arch/x86/kernel/fpu/xsave.c         |  4 ++--
 arch/x86/kvm/x86.c                  |  4 ++--
 arch/x86/math-emu/fpu_entry.c       |  8 ++------
 5 files changed, 14 insertions(+), 41 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 9454f21f0edf..1d0c5cee29eb 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -44,7 +44,7 @@ extern void fpu__init_system_xstate(void);
 extern void fpu__init_cpu_xstate(void);
 extern void fpu__init_system(struct cpuinfo_x86 *c);
 
-extern int fpstate_alloc_init(struct fpu *fpu);
+extern void fpstate_alloc_init(struct fpu *fpu);
 extern void fpstate_init(struct fpu *fpu);
 extern void fpu__clear(struct task_struct *tsk);
 
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 7d42a54b5f23..567d789d7736 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -259,26 +259,17 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 }
 
 /*
- * Allocate the backing store for the current task's FPU registers
- * and initialize the registers themselves as well.
- *
- * Can fail.
+ * Initialize the current task's in-memory FPU context:
  */
-int fpstate_alloc_init(struct fpu *fpu)
+void fpstate_alloc_init(struct fpu *fpu)
 {
-       int ret;
-
-       if (WARN_ON_ONCE(fpu != &current->thread.fpu))
-               return -EINVAL;
-       if (WARN_ON_ONCE(fpu->fpstate_active))
-               return -EINVAL;
+       WARN_ON_ONCE(fpu != &current->thread.fpu);
+       WARN_ON_ONCE(fpu->fpstate_active);
 
        fpstate_init(fpu);
 
        /* Safe to do for the current task: */
        fpu->fpstate_active = 1;
-
-       return 0;
 }
 EXPORT_SYMBOL_GPL(fpstate_alloc_init);
 
@@ -340,20 +331,8 @@ void fpu__restore(void)
        struct task_struct *tsk = current;
        struct fpu *fpu = &tsk->thread.fpu;
 
-       if (!fpu->fpstate_active) {
-               local_irq_enable();
-               /*
-                * does a slab alloc which can sleep
-                */
-               if (fpstate_alloc_init(fpu)) {
-                       /*
-                        * ran out of memory!
-                        */
-                       do_group_exit(SIGKILL);
-                       return;
-               }
-               local_irq_disable();
-       }
+       if (!fpu->fpstate_active)
+               fpstate_alloc_init(fpu);
 
        /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
        kernel_fpu_disable();
@@ -379,9 +358,7 @@ void fpu__clear(struct task_struct *tsk)
                drop_fpu(fpu);
        } else {
                 if (!fpu->fpstate_active) {
-                       /* kthread execs. TODO: cleanup this horror. */
-                       if (WARN_ON(fpstate_alloc_init(fpu)))
-                               force_sig(SIGKILL, tsk);
+                       fpstate_alloc_init(fpu);
                        user_fpu_begin();
                        restore_init_xstate();
                }
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index c7d48eb0a194..dd2cef08a1a4 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -358,8 +358,8 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
        if (!access_ok(VERIFY_READ, buf, size))
                return -EACCES;
 
-       if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
-               return -1;
+       if (!fpu->fpstate_active)
+               fpstate_alloc_init(fpu);
 
        if (!static_cpu_has(X86_FEATURE_FPU))
                return fpregs_soft_set(current, NULL,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 68529251e897..707f4e27ee91 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6601,8 +6601,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        int r;
        sigset_t sigsaved;
 
-       if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
-               return -ENOMEM;
+       if (!fpu->fpstate_active)
+               fpstate_alloc_init(fpu);
 
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 5e003704ebfa..99ddfc274df3 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -149,12 +149,8 @@ void math_emulate(struct math_emu_info *info)
        struct desc_struct code_descriptor;
        struct fpu *fpu = &current->thread.fpu;
 
-       if (!fpu->fpstate_active) {
-               if (fpstate_alloc_init(fpu)) {
-                       do_group_exit(SIGKILL);
-                       return;
-               }
-       }
+       if (!fpu->fpstate_active)
+               fpstate_alloc_init(fpu);
 
 #ifdef RE_ENTRANT_CHECKING
        if (emulating) {
-- 
2.1.0

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to