Module Name: src
Committed By: ad
Date: Sat Sep 23 20:23:07 UTC 2023
Modified Files:
src/sys/kern: kern_lwp.c kern_sleepq.c kern_synch.c
src/sys/sys: lwp.h
Log Message:
Sigh. Adjust the previous change to work as intended: the boosted LWP priority
didn't persist as far as the run queue, because l_syncobj gets reset
earlier than I recalled.
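For context, a minimal userland sketch of the mechanism this change introduces:
the boost is latched into the new l_boostpri field when the LWP blocks
(kern_sleepq.c), folded into the effective priority by lwp_eprio() so that it
survives onto the run queue even after l_syncobj has been reset, and cleared
again once the LWP is dispatched (kern_synch.c).  Everything below is
illustrative only: struct lwp_sketch, eprio_sketch() and the numeric values
chosen for PRI_NONE, MAXPRI_USER and PRI_KERNEL are stand-ins for the example,
not the kernel's definitions.

	#include <stdio.h>

	typedef int pri_t;

	/* Stand-in values; the real constants come from the kernel headers. */
	#define PRI_NONE	(-1)
	#define MAXPRI_USER	63
	#define PRI_KERNEL	64
	#define MAX(a, b)	((a) > (b) ? (a) : (b))

	struct lwp_sketch {
		pri_t	l_priority;	/* base scheduler priority */
		pri_t	l_boostpri;	/* boost latched when the LWP blocks */
		pri_t	l_auxprio;	/* priority inheritance/protect ceiling */
	};

	/* Mirrors the new lwp_eprio() calculation from the diff below. */
	static pri_t
	eprio_sketch(const struct lwp_sketch *l)
	{
		pri_t pri = l->l_priority;

		if (pri <= MAXPRI_USER && l->l_boostpri > MAXPRI_USER)
			pri = (pri >> 1) + l->l_boostpri;
		return MAX(l->l_auxprio, pri);
	}

	int
	main(void)
	{
		struct lwp_sketch l = { .l_priority = 40, .l_boostpri = PRI_NONE,
		    .l_auxprio = PRI_NONE };

		/*
		 * sleepq_block(): latch the boost before switching away.  It
		 * now persists even though l_syncobj is reset during wakeup.
		 */
		l.l_boostpri = PRI_KERNEL;
		printf("boosted eprio: %d\n", eprio_sketch(&l));  /* (40 >> 1) + 64 = 84 */

		/* nextlwp(): the boost is consumed once the LWP actually runs. */
		l.l_boostpri = PRI_NONE;
		printf("normal eprio:  %d\n", eprio_sketch(&l));  /* back to 40 */
		return 0;
	}

Built with any C compiler, this prints 84 while the boost is latched and 40
after it is cleared, which is the persistence the previous revision lost once
l_syncobj had been reset on the wakeup path.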
To generate a diff of this commit:
cvs rdiff -u -r1.256 -r1.257 src/sys/kern/kern_lwp.c
cvs rdiff -u -r1.75 -r1.76 src/sys/kern/kern_sleepq.c
cvs rdiff -u -r1.359 -r1.360 src/sys/kern/kern_synch.c
cvs rdiff -u -r1.221 -r1.222 src/sys/sys/lwp.h
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/kern/kern_lwp.c
diff -u src/sys/kern/kern_lwp.c:1.256 src/sys/kern/kern_lwp.c:1.257
--- src/sys/kern/kern_lwp.c:1.256 Sat Sep 23 18:48:04 2023
+++ src/sys/kern/kern_lwp.c Sat Sep 23 20:23:07 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_lwp.c,v 1.256 2023/09/23 18:48:04 ad Exp $ */
+/* $NetBSD: kern_lwp.c,v 1.257 2023/09/23 20:23:07 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020, 2023
@@ -217,7 +217,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.256 2023/09/23 18:48:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.257 2023/09/23 20:23:07 ad Exp $");
#include "opt_ddb.h"
#include "opt_lockdebug.h"
@@ -851,7 +851,13 @@ lwp_create(lwp_t *l1, proc_t *p2, vaddr_
return EAGAIN;
}
- l2->l_priority = l1->l_priority;
+ /*
+ * If vfork(), we want the LWP to run fast and on the same CPU
+ * as its parent, so that it can reuse the VM context and cache
+ * footprint on the local CPU.
+ */
+ l2->l_boostpri = ((flags & LWP_VFORK) ? PRI_KERNEL : PRI_USER);
+ l2->l_priority = l1->l_priority;
l2->l_inheritedprio = -1;
l2->l_protectprio = -1;
l2->l_auxprio = -1;
@@ -1666,7 +1672,6 @@ lwp_lendpri(lwp_t *l, pri_t pri)
pri_t
lwp_eprio(lwp_t *l)
{
- pri_t boostpri = l->l_syncobj->sobj_boostpri;
pri_t pri = l->l_priority;
KASSERT(mutex_owned(l->l_mutex));
@@ -1681,8 +1686,8 @@ lwp_eprio(lwp_t *l)
* boost and could be preempted very quickly by another LWP but that
* won't happen often enough to be a annoyance.
*/
- if (pri <= MAXPRI_USER && boostpri > PRI_USER)
- pri = (pri >> 1) + boostpri;
+ if (pri <= MAXPRI_USER && l->l_boostpri > MAXPRI_USER)
+ pri = (pri >> 1) + l->l_boostpri;
return MAX(l->l_auxprio, pri);
}
Index: src/sys/kern/kern_sleepq.c
diff -u src/sys/kern/kern_sleepq.c:1.75 src/sys/kern/kern_sleepq.c:1.76
--- src/sys/kern/kern_sleepq.c:1.75 Sat Sep 23 18:48:04 2023
+++ src/sys/kern/kern_sleepq.c Sat Sep 23 20:23:07 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_sleepq.c,v 1.75 2023/09/23 18:48:04 ad Exp $ */
+/* $NetBSD: kern_sleepq.c,v 1.76 2023/09/23 20:23:07 ad Exp $ */
/*-
* Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020, 2023
@@ -36,7 +36,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.75 2023/09/23 18:48:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.76 2023/09/23 20:23:07 ad Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@@ -368,6 +368,7 @@ sleepq_block(int timo, bool catch_p, syn
l->l_flag &= ~LW_STIMO;
callout_schedule(&l->l_timeout_ch, timo);
}
+ l->l_boostpri = l->l_syncobj->sobj_boostpri;
spc_lock(l->l_cpu);
mi_switch(l);
Index: src/sys/kern/kern_synch.c
diff -u src/sys/kern/kern_synch.c:1.359 src/sys/kern/kern_synch.c:1.360
--- src/sys/kern/kern_synch.c:1.359 Sat Sep 23 18:48:04 2023
+++ src/sys/kern/kern_synch.c Sat Sep 23 20:23:07 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_synch.c,v 1.359 2023/09/23 18:48:04 ad Exp $ */
+/* $NetBSD: kern_synch.c,v 1.360 2023/09/23 20:23:07 ad Exp $ */
/*-
* Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020, 2023
@@ -69,7 +69,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.359 2023/09/23 18:48:04 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.360 2023/09/23 20:23:07 ad Exp $");
#include "opt_kstack.h"
#include "opt_ddb.h"
@@ -561,6 +561,7 @@ nextlwp(struct cpu_info *ci, struct sche
KASSERT(newl->l_cpu == ci);
newl->l_stat = LSONPROC;
newl->l_pflag |= LP_RUNNING;
+ newl->l_boostpri = PRI_NONE;
spc->spc_curpriority = lwp_eprio(newl);
spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE);
lwp_setlock(newl, spc->spc_lwplock);
Index: src/sys/sys/lwp.h
diff -u src/sys/sys/lwp.h:1.221 src/sys/sys/lwp.h:1.222
--- src/sys/sys/lwp.h:1.221 Sat Sep 23 18:48:05 2023
+++ src/sys/sys/lwp.h Sat Sep 23 20:23:07 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: lwp.h,v 1.221 2023/09/23 18:48:05 ad Exp $ */
+/* $NetBSD: lwp.h,v 1.222 2023/09/23 20:23:07 ad Exp $ */
/*
* Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010, 2019, 2020, 2023
@@ -112,6 +112,7 @@ struct lwp {
u_int l_slpticksum; /* l: Sum of ticks spent sleeping */
int l_biglocks; /* l: biglock count before sleep */
int l_class; /* l: scheduling class */
+ pri_t l_boostpri; /* l: boosted priority after blocking */
pri_t l_priority; /* l: scheduler priority */
pri_t l_inheritedprio;/* l: inherited priority */
pri_t l_protectprio; /* l: for PTHREAD_PRIO_PROTECT */