Module Name:    src
Committed By:   ad
Date:           Thu Mar 26 20:19:06 UTC 2020

Modified Files:
        src/sys/kern: kern_lwp.c kern_softint.c
        src/sys/sys: intr.h userret.h

Log Message:
softint_overlay() (slow case) gains ~nothing but creates potential headaches.
In the interests of simplicity remove it and always use the kthreads.
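
For context, the MI soft interrupt API is not affected by this change; on
ports without __HAVE_FAST_SOFTINTS the handlers now simply always run in
the per-level softint kthreads instead of sometimes being overlaid onto
the interrupted user LWP.  A minimal sketch of a typical consumer (the
example_* names are illustrative, not from the tree):

	#include <sys/intr.h>

	static void *example_si;	/* cookie from softint_establish() */

	static void
	example_softintr(void *arg)
	{
		/* Runs in softint kthread context. */
	}

	void
	example_attach(void)
	{
		example_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
		    example_softintr, NULL);
	}

	/* Later, from a hard interrupt handler: */
	softint_schedule(example_si);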


To generate a diff of this commit:
cvs rdiff -u -r1.229 -r1.230 src/sys/kern/kern_lwp.c
cvs rdiff -u -r1.62 -r1.63 src/sys/kern/kern_softint.c
cvs rdiff -u -r1.19 -r1.20 src/sys/sys/intr.h
cvs rdiff -u -r1.32 -r1.33 src/sys/sys/userret.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/kern_lwp.c
diff -u src/sys/kern/kern_lwp.c:1.229 src/sys/kern/kern_lwp.c:1.230
--- src/sys/kern/kern_lwp.c:1.229	Sun Mar  8 17:04:45 2020
+++ src/sys/kern/kern_lwp.c	Thu Mar 26 20:19:06 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lwp.c,v 1.229 2020/03/08 17:04:45 ad Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.230 2020/03/26 20:19:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
@@ -211,7 +211,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.229 2020/03/08 17:04:45 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.230 2020/03/26 20:19:06 ad Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -1593,12 +1593,6 @@ lwp_userret(struct lwp *l)
 	KASSERT(l->l_stat == LSONPROC);
 	p = l->l_proc;
 
-#ifndef __HAVE_FAST_SOFTINTS
-	/* Run pending soft interrupts. */
-	if (l->l_cpu->ci_data.cpu_softints != 0)
-		softint_overlay();
-#endif
-
 	/*
 	 * It is safe to do this read unlocked on a MP system..
 	 */

Index: src/sys/kern/kern_softint.c
diff -u src/sys/kern/kern_softint.c:1.62 src/sys/kern/kern_softint.c:1.63
--- src/sys/kern/kern_softint.c:1.62	Sun Mar  8 15:05:18 2020
+++ src/sys/kern/kern_softint.c	Thu Mar 26 20:19:06 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_softint.c,v 1.62 2020/03/08 15:05:18 ad Exp $	*/
+/*	$NetBSD: kern_softint.c,v 1.63 2020/03/26 20:19:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
@@ -170,7 +170,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.62 2020/03/08 15:05:18 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.63 2020/03/26 20:19:06 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -196,7 +196,7 @@ typedef struct softint {
 	uintptr_t		si_machdep;
 	struct evcnt		si_evcnt;
 	struct evcnt		si_evcnt_block;
-	int			si_active;
+	volatile int		si_active;
 	char			si_name[8];
 	char			si_name_block[8+6];
 } softint_t;
@@ -546,11 +546,7 @@ softint_execute(softint_t *si, lwp_t *l,
 {
 	softhand_t *sh;
 
-#ifdef __HAVE_FAST_SOFTINTS
 	KASSERT(si->si_lwp == curlwp);
-#else
-	/* May be running in user context. */
-#endif
 	KASSERT(si->si_cpu == curcpu());
 	KASSERT(si->si_lwp->l_wchan == NULL);
 	KASSERT(si->si_active);
@@ -678,12 +674,22 @@ softint_trigger(uintptr_t machdep)
 	ci = curcpu();
 	ci->ci_data.cpu_softints |= machdep;
 	l = ci->ci_onproc;
+
+	/*
+	 * Arrange for mi_switch() to be called.  If called from interrupt
+	 * mode, we don't know if curlwp is executing in kernel or user, so
+	 * post an AST and have it take a trip through userret().  If not in
+	 * interrupt mode, curlwp is running in kernel and will notice the
+	 * resched soon enough; avoid the AST.
+	 */
 	if (l == ci->ci_data.cpu_idlelwp) {
 		atomic_or_uint(&ci->ci_want_resched,
 		    RESCHED_IDLE | RESCHED_UPREEMPT);
 	} else {
-		/* MI equivalent of aston() */
-		cpu_signotify(l);
+		atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
+		if (cpu_intr_p()) {
+			cpu_signotify(l);
+		}
 	}
 }
 
@@ -754,65 +760,6 @@ softint_picklwp(void)
 	return l;
 }
 
-/*
- * softint_overlay:
- *
- *	Slow path: called from lwp_userret() to run a soft interrupt
- *	within the context of a user thread.
- */
-void
-softint_overlay(void)
-{
-	struct cpu_info *ci;
-	u_int softints, oflag;
-	softint_t *si;
-	pri_t obase;
-	lwp_t *l;
-	int s;
-
-	l = curlwp;
-	KASSERT((l->l_pflag & LP_INTR) == 0);
-
-	/*
-	 * Arrange to elevate priority if the LWP blocks.  Also, bind LWP
-	 * to the CPU.  Note: disable kernel preemption before doing that.
-	 */
-	s = splhigh();
-	ci = l->l_cpu;
-	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
-
-	obase = l->l_kpribase;
-	l->l_kpribase = PRI_KERNEL_RT;
-	oflag = l->l_pflag;
-	l->l_pflag = oflag | LP_INTR | LP_BOUND;
-
-	while ((softints = ci->ci_data.cpu_softints) != 0) {
-		if ((softints & (1 << SOFTINT_SERIAL)) != 0) {
-			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_SERIAL);
-			softint_execute(&si[SOFTINT_SERIAL], l, s);
-			continue;
-		}
-		if ((softints & (1 << SOFTINT_NET)) != 0) {
-			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_NET);
-			softint_execute(&si[SOFTINT_NET], l, s);
-			continue;
-		}
-		if ((softints & (1 << SOFTINT_BIO)) != 0) {
-			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_BIO);
-			softint_execute(&si[SOFTINT_BIO], l, s);
-			continue;
-		}
-		if ((softints & (1 << SOFTINT_CLOCK)) != 0) {
-			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_CLOCK);
-			softint_execute(&si[SOFTINT_CLOCK], l, s);
-			continue;
-		}
-	}
-	l->l_pflag = oflag;
-	l->l_kpribase = obase;
-	splx(s);
-}
-
 #else	/*  !__HAVE_FAST_SOFTINTS */
 
 /*
@@ -892,20 +839,13 @@ softint_dispatch(lwp_t *pinned, int s)
 
 	/*
 	 * If we blocked while handling the interrupt, the pinned LWP is
-	 * gone so switch to the idle LWP.  It will select a new LWP to
-	 * run.
-	 *
-	 * We must drop the priority level as switching at IPL_HIGH could
-	 * deadlock the system.  We have already set si->si_active = 0,
-	 * which means another interrupt at this level can be triggered. 
-	 * That's not be a problem: we are lowering to level 's' which will
-	 * prevent softint_dispatch() from being reentered at level 's',
-	 * until the priority is finally dropped to IPL_NONE on entry to
-	 * the LWP chosen by mi_switch().
+	 * gone, so call mi_switch() to find another LWP to run.
+	 * softint_dispatch() won't be reentered until the priority is
+	 * finally dropped to IPL_NONE on entry to the LWP chosen by
+	 * mi_switch().
 	 */
 	l->l_stat = LSIDL;
 	if (l->l_switchto == NULL) {
-		splx(s);
 		lwp_lock(l);
 		spc_lock(l->l_cpu);
 		mi_switch(l);
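
The heart of the kern_softint.c change is the new wakeup path in
softint_trigger().  Paraphrased outside the diff (a sketch of the logic
above, not the literal committed code):

	/* A softint was requested: record it for this CPU. */
	ci->ci_data.cpu_softints |= machdep;

	if (l == ci->ci_data.cpu_idlelwp) {
		/* The idle LWP polls ci_want_resched; no AST needed. */
		atomic_or_uint(&ci->ci_want_resched,
		    RESCHED_IDLE | RESCHED_UPREEMPT);
	} else {
		/* Ask for user preemption so mi_switch() runs soon. */
		atomic_or_uint(&ci->ci_want_resched, RESCHED_UPREEMPT);
		if (cpu_intr_p()) {
			/*
			 * The interrupted LWP may be in user mode: post
			 * an AST so it passes through userret().
			 */
			cpu_signotify(l);
		}
		/* In-kernel curlwp will notice the resched on its own. */
	}

On these (!__HAVE_FAST_SOFTINTS) ports mi_switch() then hands the CPU to
the softint kthread chosen by softint_picklwp(), which the commit retains.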

Index: src/sys/sys/intr.h
diff -u src/sys/sys/intr.h:1.19 src/sys/sys/intr.h:1.20
--- src/sys/sys/intr.h:1.19	Mon Aug 17 06:16:03 2015
+++ src/sys/sys/intr.h	Thu Mar 26 20:19:06 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: intr.h,v 1.19 2015/08/17 06:16:03 knakahara Exp $	*/
+/*	$NetBSD: intr.h,v 1.20 2020/03/26 20:19:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 2007 The NetBSD Foundation, Inc.
@@ -50,7 +50,6 @@ void	softint_schedule_cpu(void *, struct
 /* MI hooks. */
 void	softint_init(struct cpu_info *);
 lwp_t	*softint_picklwp(void);
-void	softint_overlay(void);
 void	softint_block(lwp_t *);
 
 /* MD-MI interface. */

Index: src/sys/sys/userret.h
diff -u src/sys/sys/userret.h:1.32 src/sys/sys/userret.h:1.33
--- src/sys/sys/userret.h:1.32	Wed Jan 22 12:23:04 2020
+++ src/sys/sys/userret.h	Thu Mar 26 20:19:06 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: userret.h,v 1.32 2020/01/22 12:23:04 ad Exp $	*/
+/*	$NetBSD: userret.h,v 1.33 2020/03/26 20:19:06 ad Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2000, 2003, 2006, 2008, 2019, 2020
@@ -91,11 +91,7 @@ mi_userret(struct lwp *l)
 		preempt();
 		ci = l->l_cpu;
 	}
-#ifdef __HAVE_FAST_SOFTINTS
 	if (__predict_false(l->l_flag & LW_USERRET)) {
-#else
-	if (((l->l_flag & LW_USERRET) | ci->ci_data.cpu_softints) != 0) {
-#endif
 		KPREEMPT_ENABLE(l);
 		lwp_userret(l);
 		KPREEMPT_DISABLE(l);
