Module Name: src
Committed By: riastradh
Date: Fri Mar 3 14:32:48 UTC 2023
Modified Files:
src/sys/arch/amd64/amd64: genassym.cf locore.S
src/sys/arch/x86/x86: fpu.c
Log Message:
Revert "x86: Add kthread_fpu_enter/exit support, take two."
The kthread_fpu_enter/exit changes broke some hardware; it is unclear
why. This will be investigated before the changes are fixed and reapplied.
To generate a diff of this commit:
cvs rdiff -u -r1.94 -r1.95 src/sys/arch/amd64/amd64/genassym.cf
cvs rdiff -u -r1.217 -r1.218 src/sys/arch/amd64/amd64/locore.S
cvs rdiff -u -r1.84 -r1.85 src/sys/arch/x86/x86/fpu.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/arch/amd64/amd64/genassym.cf
diff -u src/sys/arch/amd64/amd64/genassym.cf:1.94 src/sys/arch/amd64/amd64/genassym.cf:1.95
--- src/sys/arch/amd64/amd64/genassym.cf:1.94 Sat Feb 25 18:04:42 2023
+++ src/sys/arch/amd64/amd64/genassym.cf Fri Mar 3 14:32:48 2023
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.94 2023/02/25 18:04:42 riastradh Exp $
+# $NetBSD: genassym.cf,v 1.95 2023/03/03 14:32:48 riastradh Exp $
#
# Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -166,7 +166,6 @@ define L_MD_FLAGS offsetof(struct lwp,
define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending)
define LW_SYSTEM LW_SYSTEM
-define LW_SYSTEM_FPU LW_SYSTEM_FPU
define MDL_IRET MDL_IRET
define MDL_COMPAT32 MDL_COMPAT32
define MDL_FPU_IN_CPU MDL_FPU_IN_CPU
Index: src/sys/arch/amd64/amd64/locore.S
diff -u src/sys/arch/amd64/amd64/locore.S:1.217 src/sys/arch/amd64/amd64/locore.S:1.218
--- src/sys/arch/amd64/amd64/locore.S:1.217 Wed Mar 1 08:38:50 2023
+++ src/sys/arch/amd64/amd64/locore.S Fri Mar 3 14:32:48 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.S,v 1.217 2023/03/01 08:38:50 riastradh Exp $ */
+/* $NetBSD: locore.S,v 1.218 2023/03/03 14:32:48 riastradh Exp $ */
/*
* Copyright-o-rama!
@@ -1271,7 +1271,7 @@ ENTRY(cpu_switchto)
/* Don't bother with the rest if switching to a system process. */
testl $LW_SYSTEM,L_FLAG(%r12)
- jnz .Lswitch_system
+ jnz .Lswitch_return
/* Is this process using RAS (restartable atomic sequences)? */
movq L_PROC(%r12),%rdi
@@ -1360,21 +1360,6 @@ ENTRY(cpu_switchto)
popq %r12
popq %rbx
ret
-
-.Lswitch_system:
- /*
- * If it has LWP_SYSTEM_FPU set, meaning it's running in
- * kthread_fpu_enter/exit, we need to restore the FPU state
- * and enable FPU instructions with fpu_handle_deferred.
- *
- * No need to test MDL_FPU_IN_CPU via HANDLE_DEFERRED_FPU --
- * fpu_switch guarantees it is clear, so we can just call
- * fpu_handle_deferred unconditionally.
- */
- testl $LW_SYSTEM_FPU,L_FLAG(%r12)
- jz .Lswitch_return
- callq _C_LABEL(fpu_handle_deferred)
- jmp .Lswitch_return
END(cpu_switchto)
/*
Index: src/sys/arch/x86/x86/fpu.c
diff -u src/sys/arch/x86/x86/fpu.c:1.84 src/sys/arch/x86/x86/fpu.c:1.85
--- src/sys/arch/x86/x86/fpu.c:1.84 Fri Mar 3 14:32:38 2023
+++ src/sys/arch/x86/x86/fpu.c Fri Mar 3 14:32:48 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: fpu.c,v 1.84 2023/03/03 14:32:38 riastradh Exp $ */
+/* $NetBSD: fpu.c,v 1.85 2023/03/03 14:32:48 riastradh Exp $ */
/*
* Copyright (c) 2008, 2019 The NetBSD Foundation, Inc. All
@@ -96,7 +96,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.84 2023/03/03 14:32:38 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.85 2023/03/03 14:32:48 riastradh Exp $");
#include "opt_multiprocessor.h"
@@ -107,7 +107,6 @@ __KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.84
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/kernel.h>
-#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/xcall.h>
@@ -132,35 +131,13 @@ void fpu_switch(struct lwp *, struct lwp
uint32_t x86_fpu_mxcsr_mask __read_mostly = 0;
-/*
- * True if this a thread that is allowed to use the FPU -- either a
- * user thread, or a system thread with LW_SYSTEM_FPU enabled.
- */
-static inline bool
-lwp_can_haz_fpu(struct lwp *l)
-{
-
- return (l->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) != LW_SYSTEM;
-}
-
-/*
- * True if this is a system thread with its own private FPU state.
- */
-static inline bool
-lwp_system_fpu_p(struct lwp *l)
-{
-
- return (l->l_flag & (LW_SYSTEM|LW_SYSTEM_FPU)) ==
- (LW_SYSTEM|LW_SYSTEM_FPU);
-}
-
static inline union savefpu *
fpu_lwp_area(struct lwp *l)
{
struct pcb *pcb = lwp_getpcb(l);
union savefpu *area = &pcb->pcb_savefpu;
- KASSERT(lwp_can_haz_fpu(l));
+ KASSERT((l->l_flag & LW_SYSTEM) == 0);
if (l == curlwp) {
fpu_save();
}
@@ -178,7 +155,7 @@ fpu_save_lwp(struct lwp *l)
s = splvm();
if (l->l_md.md_flags & MDL_FPU_IN_CPU) {
- KASSERT(lwp_can_haz_fpu(l));
+ KASSERT((l->l_flag & LW_SYSTEM) == 0);
fpu_area_save(area, x86_xsave_features, !(l->l_proc->p_flag & PK_32));
l->l_md.md_flags &= ~MDL_FPU_IN_CPU;
}
@@ -337,7 +314,7 @@ fpu_switch(struct lwp *oldlwp, struct lw
cpu_index(ci), ci->ci_ilevel);
if (oldlwp->l_md.md_flags & MDL_FPU_IN_CPU) {
- KASSERT(lwp_can_haz_fpu(oldlwp));
+ KASSERT(!(oldlwp->l_flag & LW_SYSTEM));
pcb = lwp_getpcb(oldlwp);
fpu_area_save(&pcb->pcb_savefpu, x86_xsave_features,
!(oldlwp->l_proc->p_flag & PK_32));
@@ -353,11 +330,11 @@ fpu_lwp_fork(struct lwp *l1, struct lwp
union savefpu *fpu_save;
/* Kernel threads have no FPU. */
- if (__predict_false(!lwp_can_haz_fpu(l2))) {
+ if (__predict_false(l2->l_flag & LW_SYSTEM)) {
return;
}
/* For init(8). */
- if (__predict_false(!lwp_can_haz_fpu(l1))) {
+ if (__predict_false(l1->l_flag & LW_SYSTEM)) {
memset(&pcb2->pcb_savefpu, 0, x86_fpu_save_size);
return;
}
@@ -381,13 +358,6 @@ fpu_lwp_abandon(struct lwp *l)
/* -------------------------------------------------------------------------- */
-static const union savefpu safe_fpu __aligned(64) = {
- .sv_xmm = {
- .fx_mxcsr = __SAFE_MXCSR__,
- },
-};
-static const union savefpu zero_fpu __aligned(64);
-
/*
* fpu_kern_enter()
*
@@ -403,15 +373,15 @@ static const union savefpu zero_fpu __al
void
fpu_kern_enter(void)
{
+ static const union savefpu safe_fpu __aligned(64) = {
+ .sv_xmm = {
+ .fx_mxcsr = __SAFE_MXCSR__,
+ },
+ };
struct lwp *l = curlwp;
struct cpu_info *ci;
int s;
- if (lwp_system_fpu_p(l) && !cpu_intr_p()) {
- KASSERT(!cpu_softintr_p());
- return;
- }
-
s = splvm();
ci = curcpu();
@@ -457,16 +427,10 @@ fpu_kern_enter(void)
void
fpu_kern_leave(void)
{
- struct cpu_info *ci;
+ static const union savefpu zero_fpu __aligned(64);
+ struct cpu_info *ci = curcpu();
int s;
- if (lwp_system_fpu_p(curlwp) && !cpu_intr_p()) {
- KASSERT(!cpu_softintr_p());
- return;
- }
-
- ci = curcpu();
-
#if 0
/*
* Can't assert this because if the caller holds a spin lock at
@@ -495,24 +459,6 @@ fpu_kern_leave(void)
splx(s);
}
-void
-kthread_fpu_enter_md(void)
-{
-
- /* Enable the FPU by clearing CR0_TS, and enter a safe FPU state. */
- clts();
- fpu_area_restore(&safe_fpu, x86_xsave_features, /*is_64bit*/false);
-}
-
-void
-kthread_fpu_exit_md(void)
-{
-
- /* Zero the FPU state and disable the FPU by setting CR0_TS. */
- fpu_area_restore(&zero_fpu, x86_xsave_features, /*is_64bit*/false);
- stts();
-}
-
/* -------------------------------------------------------------------------- */
/*