Module Name: src
Committed By: ad
Date: Tue Sep 12 16:17:22 UTC 2023
Modified Files:
src/common/lib/libc/gen: radixtree.c
src/sys/kern: init_main.c kern_descrip.c kern_lwp.c kern_mutex_obj.c
kern_resource.c kern_rwlock_obj.c kern_turnstile.c subr_kcpuset.c
vfs_cwd.c vfs_init.c vfs_lockf.c
src/sys/rump/include/rump: rump_namei.h
src/sys/rump/librump/rumpkern: rump.c
src/sys/rump/librump/rumpvfs: rump_vfs.c
src/sys/sys: namei.h namei.src
src/sys/uvm: uvm_init.c uvm_map.c uvm_readahead.c
Log Message:
Back out recent change to replace pool_cache with the general allocator.
Will return to this when I have time again.
To generate a diff of this commit:
cvs rdiff -u -r1.30 -r1.31 src/common/lib/libc/gen/radixtree.c
cvs rdiff -u -r1.544 -r1.545 src/sys/kern/init_main.c
cvs rdiff -u -r1.259 -r1.260 src/sys/kern/kern_descrip.c
cvs rdiff -u -r1.253 -r1.254 src/sys/kern/kern_lwp.c
cvs rdiff -u -r1.12 -r1.13 src/sys/kern/kern_mutex_obj.c
cvs rdiff -u -r1.192 -r1.193 src/sys/kern/kern_resource.c
cvs rdiff -u -r1.10 -r1.11 src/sys/kern/kern_rwlock_obj.c
cvs rdiff -u -r1.47 -r1.48 src/sys/kern/kern_turnstile.c
cvs rdiff -u -r1.18 -r1.19 src/sys/kern/subr_kcpuset.c
cvs rdiff -u -r1.9 -r1.10 src/sys/kern/vfs_cwd.c
cvs rdiff -u -r1.62 -r1.63 src/sys/kern/vfs_init.c
cvs rdiff -u -r1.79 -r1.80 src/sys/kern/vfs_lockf.c
cvs rdiff -u -r1.50 -r1.51 src/sys/rump/include/rump/rump_namei.h
cvs rdiff -u -r1.358 -r1.359 src/sys/rump/librump/rumpkern/rump.c
cvs rdiff -u -r1.95 -r1.96 src/sys/rump/librump/rumpvfs/rump_vfs.c
cvs rdiff -u -r1.117 -r1.118 src/sys/sys/namei.h
cvs rdiff -u -r1.62 -r1.63 src/sys/sys/namei.src
cvs rdiff -u -r1.57 -r1.58 src/sys/uvm/uvm_init.c
cvs rdiff -u -r1.408 -r1.409 src/sys/uvm/uvm_map.c
cvs rdiff -u -r1.14 -r1.15 src/sys/uvm/uvm_readahead.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/common/lib/libc/gen/radixtree.c
diff -u src/common/lib/libc/gen/radixtree.c:1.30 src/common/lib/libc/gen/radixtree.c:1.31
--- src/common/lib/libc/gen/radixtree.c:1.30 Sun Sep 10 14:45:52 2023
+++ src/common/lib/libc/gen/radixtree.c Tue Sep 12 16:17:21 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: radixtree.c,v 1.30 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: radixtree.c,v 1.31 2023/09/12 16:17:21 ad Exp $ */
/*-
* Copyright (c)2011,2012,2013 YAMAMOTO Takashi,
@@ -43,7 +43,7 @@
*
* Intermediate nodes are automatically allocated and freed internally and
* basically users don't need to care about them. The allocation is done via
- * kmem_zalloc(9) for _KERNEL, malloc(3) for userland, and alloc() for
+ * pool_cache_get(9) for _KERNEL, malloc(3) for userland, and alloc() for
* _STANDALONE environment. Only radix_tree_insert_node function can allocate
* memory for intermediate nodes and thus can fail for ENOMEM.
*
@@ -112,17 +112,17 @@
#include <sys/cdefs.h>
#if defined(_KERNEL) || defined(_STANDALONE)
-__KERNEL_RCSID(0, "$NetBSD: radixtree.c,v 1.30 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: radixtree.c,v 1.31 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/errno.h>
-#include <sys/kmem.h>
+#include <sys/pool.h>
#include <sys/radixtree.h>
#include <lib/libkern/libkern.h>
#if defined(_STANDALONE)
#include <lib/libsa/stand.h>
#endif /* defined(_STANDALONE) */
#else /* defined(_KERNEL) || defined(_STANDALONE) */
-__RCSID("$NetBSD: radixtree.c,v 1.30 2023/09/10 14:45:52 ad Exp $");
+__RCSID("$NetBSD: radixtree.c,v 1.31 2023/09/12 16:17:21 ad Exp $");
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
@@ -303,6 +303,18 @@ radix_tree_node_init(struct radix_tree_n
}
#if defined(_KERNEL)
+pool_cache_t radix_tree_node_cache __read_mostly;
+
+static int
+radix_tree_node_ctor(void *dummy, void *item, int flags)
+{
+ struct radix_tree_node *n = item;
+
+ KASSERT(dummy == NULL);
+ radix_tree_node_init(n);
+ return 0;
+}
+
/*
* radix_tree_init:
*
@@ -313,7 +325,10 @@ void
radix_tree_init(void)
{
- /* nothing right now */
+ radix_tree_node_cache = pool_cache_init(sizeof(struct radix_tree_node),
+ coherency_unit, 0, PR_LARGECACHE, "radixnode", NULL, IPL_NONE,
+ radix_tree_node_ctor, NULL, NULL);
+ KASSERT(radix_tree_node_cache != NULL);
}
/*
@@ -331,10 +346,10 @@ radix_tree_await_memory(void)
int i;
for (i = 0; i < __arraycount(nodes); i++) {
- nodes[i] = kmem_alloc(sizeof(struct radix_tree_node), KM_SLEEP);
+ nodes[i] = pool_cache_get(radix_tree_node_cache, PR_WAITOK);
}
while (--i >= 0) {
- kmem_free(nodes[i], sizeof(struct radix_tree_node));
+ pool_cache_put(radix_tree_node_cache, nodes[i]);
}
}
@@ -409,10 +424,11 @@ radix_tree_alloc_node(void)
#if defined(_KERNEL)
/*
- * note that kmem_alloc can block.
+ * note that pool_cache_get can block.
*/
- n = kmem_alloc(sizeof(struct radix_tree_node), KM_SLEEP);
-#elif defined(_STANDALONE)
+ n = pool_cache_get(radix_tree_node_cache, PR_NOWAIT);
+#else /* defined(_KERNEL) */
+#if defined(_STANDALONE)
n = alloc(sizeof(*n));
#else /* defined(_STANDALONE) */
n = malloc(sizeof(*n));
@@ -420,6 +436,7 @@ radix_tree_alloc_node(void)
if (n != NULL) {
radix_tree_node_init(n);
}
+#endif /* defined(_KERNEL) */
KASSERT(n == NULL || radix_tree_sum_node(n) == 0);
return n;
}
@@ -430,7 +447,7 @@ radix_tree_free_node(struct radix_tree_n
KASSERT(radix_tree_sum_node(n) == 0);
#if defined(_KERNEL)
- kmem_free(n, sizeof(struct radix_tree_node));
+ pool_cache_put(radix_tree_node_cache, n);
#elif defined(_STANDALONE)
dealloc(n, sizeof(*n));
#else
Index: src/sys/kern/init_main.c
diff -u src/sys/kern/init_main.c:1.544 src/sys/kern/init_main.c:1.545
--- src/sys/kern/init_main.c:1.544 Sun Sep 10 14:45:52 2023
+++ src/sys/kern/init_main.c Tue Sep 12 16:17:21 2023
@@ -1,7 +1,7 @@
-/* $NetBSD: init_main.c,v 1.544 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: init_main.c,v 1.545 2023/09/12 16:17:21 ad Exp $ */
/*-
- * Copyright (c) 2008, 2009, 2019, 2023 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -97,7 +97,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.544 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: init_main.c,v 1.545 2023/09/12 16:17:21 ad Exp $");
#include "opt_cnmagic.h"
#include "opt_ddb.h"
@@ -327,6 +327,9 @@ main(void)
percpu_init();
+ /* Initialize lock caches. */
+ mutex_obj_init();
+
/* Initialize radix trees (used by numerous subsystems). */
radix_tree_init();
@@ -500,10 +503,14 @@ main(void)
fstrans_init();
vfsinit();
+ lf_init();
/* Initialize the file descriptor system. */
fd_sys_init();
+ /* Initialize cwd structures */
+ cwd_sys_init();
+
/* Initialize kqueue. */
kqueue_init();
Index: src/sys/kern/kern_descrip.c
diff -u src/sys/kern/kern_descrip.c:1.259 src/sys/kern/kern_descrip.c:1.260
--- src/sys/kern/kern_descrip.c:1.259 Sun Sep 10 14:45:52 2023
+++ src/sys/kern/kern_descrip.c Tue Sep 12 16:17:21 2023
@@ -1,7 +1,7 @@
-/* $NetBSD: kern_descrip.c,v 1.259 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: kern_descrip.c,v 1.260 2023/09/12 16:17:21 ad Exp $ */
/*-
- * Copyright (c) 2008, 2009, 2023 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -70,7 +70,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_descrip.c,v 1.259 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_descrip.c,v 1.260 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -106,11 +106,12 @@ kmutex_t filelist_lock __cacheline_alig
static pool_cache_t filedesc_cache __read_mostly;
static pool_cache_t file_cache __read_mostly;
+static pool_cache_t fdfile_cache __read_mostly;
static int file_ctor(void *, void *, int);
static void file_dtor(void *, void *);
-static void fdfile_ctor(fdfile_t *);
-static void fdfile_dtor(fdfile_t *);
+static int fdfile_ctor(void *, void *, int);
+static void fdfile_dtor(void *, void *);
static int filedesc_ctor(void *, void *, int);
static void filedesc_dtor(void *, void *);
static int filedescopen(dev_t, int, int, lwp_t *);
@@ -156,6 +157,11 @@ fd_sys_init(void)
0, "file", NULL, IPL_NONE, file_ctor, file_dtor, NULL);
KASSERT(file_cache != NULL);
+ fdfile_cache = pool_cache_init(sizeof(fdfile_t), coherency_unit, 0,
+ PR_LARGECACHE, "fdfile", NULL, IPL_NONE, fdfile_ctor, fdfile_dtor,
+ NULL);
+ KASSERT(fdfile_cache != NULL);
+
filedesc_cache = pool_cache_init(sizeof(filedesc_t), coherency_unit,
0, 0, "filedesc", NULL, IPL_NONE, filedesc_ctor, filedesc_dtor,
NULL);
@@ -782,8 +788,7 @@ fd_dup2(file_t *fp, unsigned newfd, int
while (newfd >= atomic_load_consume(&fdp->fd_dt)->dt_nfiles) {
fd_tryexpand(curproc);
}
- ff = kmem_alloc(sizeof(*ff), KM_SLEEP);
- fdfile_ctor(ff);
+ ff = pool_cache_get(fdfile_cache, PR_WAITOK);
/*
* If there is already a file open, close it. If the file is
@@ -819,8 +824,7 @@ fd_dup2(file_t *fp, unsigned newfd, int
/* Slot is now allocated. Insert copy of the file. */
fd_affix(curproc, fp, newfd);
if (ff != NULL) {
- cv_destroy(&ff->ff_closing);
- kmem_free(ff, sizeof(*ff));
+ pool_cache_put(fdfile_cache, ff);
}
return 0;
}
@@ -871,8 +875,6 @@ closef(file_t *fp)
/*
* Allocate a file descriptor for the process.
- *
- * Future idea for experimentation: replace all of this with radixtree.
*/
int
fd_alloc(proc_t *p, int want, int *result)
@@ -894,7 +896,6 @@ fd_alloc(proc_t *p, int want, int *resul
KASSERT(dt->dt_ff[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
lim = uimin((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles);
last = uimin(dt->dt_nfiles, lim);
-
for (;;) {
if ((i = want) < fdp->fd_freefile)
i = fdp->fd_freefile;
@@ -919,8 +920,7 @@ fd_alloc(proc_t *p, int want, int *resul
}
if (dt->dt_ff[i] == NULL) {
KASSERT(i >= NDFDFILE);
- dt->dt_ff[i] = kmem_alloc(sizeof(fdfile_t), KM_SLEEP);
- fdfile_ctor(dt->dt_ff[i]);
+ dt->dt_ff[i] = pool_cache_get(fdfile_cache, PR_WAITOK);
}
KASSERT(dt->dt_ff[i]->ff_file == NULL);
fd_used(fdp, i);
@@ -1267,17 +1267,21 @@ file_dtor(void *arg, void *obj)
mutex_destroy(&fp->f_lock);
}
-static void
-fdfile_ctor(fdfile_t *ff)
+static int
+fdfile_ctor(void *arg, void *obj, int flags)
{
+ fdfile_t *ff = obj;
memset(ff, 0, sizeof(*ff));
cv_init(&ff->ff_closing, "fdclose");
+
+ return 0;
}
static void
-fdfile_dtor(fdfile_t *ff)
+fdfile_dtor(void *arg, void *obj)
{
+ fdfile_t *ff = obj;
cv_destroy(&ff->ff_closing);
}
@@ -1363,7 +1367,8 @@ filedesc_ctor(void *arg, void *obj, int
CTASSERT(sizeof(fdp->fd_dfdfile[0]) >= sizeof(fdfile_t));
for (i = 0, ffp = fdp->fd_dt->dt_ff; i < NDFDFILE; i++, ffp++) {
- fdfile_ctor(*ffp = (fdfile_t *)fdp->fd_dfdfile[i]);
+ *ffp = (fdfile_t *)fdp->fd_dfdfile[i];
+ (void)fdfile_ctor(NULL, fdp->fd_dfdfile[i], PR_WAITOK);
}
return 0;
@@ -1376,7 +1381,7 @@ filedesc_dtor(void *arg, void *obj)
int i;
for (i = 0; i < NDFDFILE; i++) {
- fdfile_dtor((fdfile_t *)fdp->fd_dfdfile[i]);
+ fdfile_dtor(NULL, fdp->fd_dfdfile[i]);
}
mutex_destroy(&fdp->fd_lock);
@@ -1511,8 +1516,7 @@ fd_copy(void)
/* Allocate an fdfile_t to represent it. */
if (i >= NDFDFILE) {
- ff2 = kmem_alloc(sizeof(*ff2), KM_SLEEP);
- fdfile_ctor(ff2);
+ ff2 = pool_cache_get(fdfile_cache, PR_WAITOK);
*nffp = ff2;
} else {
ff2 = newdt->dt_ff[i];
@@ -1601,8 +1605,7 @@ fd_free(void)
KASSERT(!ff->ff_exclose);
KASSERT(!ff->ff_allocated);
if (fd >= NDFDFILE) {
- cv_destroy(&ff->ff_closing);
- kmem_free(ff, sizeof(*ff));
+ pool_cache_put(fdfile_cache, ff);
dt->dt_ff[fd] = NULL;
}
}
Index: src/sys/kern/kern_lwp.c
diff -u src/sys/kern/kern_lwp.c:1.253 src/sys/kern/kern_lwp.c:1.254
--- src/sys/kern/kern_lwp.c:1.253 Sun Sep 10 14:45:52 2023
+++ src/sys/kern/kern_lwp.c Tue Sep 12 16:17:21 2023
@@ -1,7 +1,7 @@
-/* $NetBSD: kern_lwp.c,v 1.253 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: kern_lwp.c,v 1.254 2023/09/12 16:17:21 ad Exp $ */
/*-
- * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020, 2023
+ * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
* The NetBSD Foundation, Inc.
* All rights reserved.
*
@@ -217,7 +217,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.253 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.254 2023/09/12 16:17:21 ad Exp $");
#include "opt_ddb.h"
#include "opt_lockdebug.h"
@@ -397,8 +397,7 @@ lwp_ctor(void *arg, void *obj, int flags
l->l_stat = LSIDL;
l->l_cpu = curcpu();
l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;
- l->l_ts = kmem_alloc(sizeof(*l->l_ts), flags == PR_WAITOK ?
- KM_SLEEP : KM_NOSLEEP);
+ l->l_ts = pool_get(&turnstile_pool, flags);
if (l->l_ts == NULL) {
return ENOMEM;
@@ -423,7 +422,7 @@ lwp_dtor(void *arg, void *obj)
* so if it comes up just drop it quietly and move on.
*/
if (l->l_ts != &turnstile0)
- kmem_free(l->l_ts, sizeof(*l->l_ts));
+ pool_put(&turnstile_pool, l->l_ts);
}
/*
Index: src/sys/kern/kern_mutex_obj.c
diff -u src/sys/kern/kern_mutex_obj.c:1.12 src/sys/kern/kern_mutex_obj.c:1.13
--- src/sys/kern/kern_mutex_obj.c:1.12 Sun Sep 10 14:45:52 2023
+++ src/sys/kern/kern_mutex_obj.c Tue Sep 12 16:17:21 2023
@@ -1,7 +1,7 @@
-/* $NetBSD: kern_mutex_obj.c,v 1.12 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: kern_mutex_obj.c,v 1.13 2023/09/12 16:17:21 ad Exp $ */
/*-
- * Copyright (c) 2008, 2019, 2023 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -30,12 +30,12 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.12 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_mutex_obj.c,v 1.13 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/mutex.h>
-#include <sys/kmem.h>
+#include <sys/pool.h>
/* Mutex cache */
#define MUTEX_OBJ_MAGIC 0x5aa3c85d
@@ -43,10 +43,41 @@ struct kmutexobj {
kmutex_t mo_lock;
u_int mo_magic;
u_int mo_refcnt;
- uint8_t mo_pad[COHERENCY_UNIT - sizeof(kmutex_t) -
- sizeof(u_int) * 2];
};
+static int mutex_obj_ctor(void *, void *, int);
+
+static pool_cache_t mutex_obj_cache __read_mostly;
+
+/*
+ * mutex_obj_init:
+ *
+ * Initialize the mutex object store.
+ */
+void
+mutex_obj_init(void)
+{
+
+ mutex_obj_cache = pool_cache_init(sizeof(struct kmutexobj),
+ coherency_unit, 0, 0, "mutex", NULL, IPL_NONE, mutex_obj_ctor,
+ NULL, NULL);
+}
+
+/*
+ * mutex_obj_ctor:
+ *
+ * Initialize a new lock for the cache.
+ */
+static int
+mutex_obj_ctor(void *arg, void *obj, int flags)
+{
+ struct kmutexobj * mo = obj;
+
+ mo->mo_magic = MUTEX_OBJ_MAGIC;
+
+ return 0;
+}
+
/*
* mutex_obj_alloc:
*
@@ -57,11 +88,9 @@ mutex_obj_alloc(kmutex_type_t type, int
{
struct kmutexobj *mo;
- mo = kmem_alloc(sizeof(*mo), KM_SLEEP);
- KASSERT(ALIGNED_POINTER(mo, coherency_unit));
+ mo = pool_cache_get(mutex_obj_cache, PR_WAITOK);
_mutex_init(&mo->mo_lock, type, ipl,
(uintptr_t)__builtin_return_address(0));
- mo->mo_magic = MUTEX_OBJ_MAGIC;
mo->mo_refcnt = 1;
return (kmutex_t *)mo;
@@ -77,12 +106,10 @@ mutex_obj_tryalloc(kmutex_type_t type, i
{
struct kmutexobj *mo;
- mo = kmem_alloc(sizeof(*mo), KM_NOSLEEP);
- KASSERT(ALIGNED_POINTER(mo, coherency_unit));
+ mo = pool_cache_get(mutex_obj_cache, PR_NOWAIT);
if (__predict_true(mo != NULL)) {
_mutex_init(&mo->mo_lock, type, ipl,
(uintptr_t)__builtin_return_address(0));
- mo->mo_magic = MUTEX_OBJ_MAGIC;
mo->mo_refcnt = 1;
}
@@ -134,7 +161,7 @@ mutex_obj_free(kmutex_t *lock)
}
membar_acquire();
mutex_destroy(&mo->mo_lock);
- kmem_free(mo, sizeof(*mo));
+ pool_cache_put(mutex_obj_cache, mo);
return true;
}
Index: src/sys/kern/kern_resource.c
diff -u src/sys/kern/kern_resource.c:1.192 src/sys/kern/kern_resource.c:1.193
--- src/sys/kern/kern_resource.c:1.192 Sun Sep 10 14:45:52 2023
+++ src/sys/kern/kern_resource.c Tue Sep 12 16:17:21 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: kern_resource.c,v 1.192 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: kern_resource.c,v 1.193 2023/09/12 16:17:21 ad Exp $ */
/*-
* Copyright (c) 1982, 1986, 1991, 1993
@@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.192 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_resource.c,v 1.193 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -65,6 +65,9 @@ __KERNEL_RCSID(0, "$NetBSD: kern_resourc
rlim_t maxdmap = MAXDSIZ;
rlim_t maxsmap = MAXSSIZ;
+static pool_cache_t plimit_cache __read_mostly;
+static pool_cache_t pstats_cache __read_mostly;
+
static kauth_listener_t resource_listener;
static struct sysctllog *proc_sysctllog;
@@ -138,6 +141,11 @@ void
resource_init(void)
{
+ plimit_cache = pool_cache_init(sizeof(struct plimit), 0, 0, 0,
+ "plimitpl", NULL, IPL_NONE, NULL, NULL, NULL);
+ pstats_cache = pool_cache_init(sizeof(struct pstats), 0, 0, 0,
+ "pstatspl", NULL, IPL_NONE, NULL, NULL, NULL);
+
resource_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
resource_listener_cb, NULL);
@@ -682,7 +690,7 @@ lim_copy(struct plimit *lim)
char *corename;
size_t alen, len;
- newlim = kmem_alloc(sizeof(*newlim), KM_SLEEP);
+ newlim = pool_cache_get(plimit_cache, PR_WAITOK);
mutex_init(&newlim->pl_lock, MUTEX_DEFAULT, IPL_NONE);
newlim->pl_writeable = false;
newlim->pl_refcnt = 1;
@@ -803,7 +811,7 @@ lim_free(struct plimit *lim)
}
sv_lim = lim->pl_sv_limit;
mutex_destroy(&lim->pl_lock);
- kmem_free(lim, sizeof(*lim));
+ pool_cache_put(plimit_cache, lim);
} while ((lim = sv_lim) != NULL);
}
@@ -813,7 +821,7 @@ pstatscopy(struct pstats *ps)
struct pstats *nps;
size_t len;
- nps = kmem_alloc(sizeof(*nps), KM_SLEEP);
+ nps = pool_cache_get(pstats_cache, PR_WAITOK);
len = (char *)&nps->pstat_endzero - (char *)&nps->pstat_startzero;
memset(&nps->pstat_startzero, 0, len);
@@ -828,7 +836,7 @@ void
pstatsfree(struct pstats *ps)
{
- kmem_free(ps, sizeof(*ps));
+ pool_cache_put(pstats_cache, ps);
}
/*
Index: src/sys/kern/kern_rwlock_obj.c
diff -u src/sys/kern/kern_rwlock_obj.c:1.10 src/sys/kern/kern_rwlock_obj.c:1.11
--- src/sys/kern/kern_rwlock_obj.c:1.10 Sun Sep 10 14:45:52 2023
+++ src/sys/kern/kern_rwlock_obj.c Tue Sep 12 16:17:21 2023
@@ -1,7 +1,7 @@
-/* $NetBSD: kern_rwlock_obj.c,v 1.10 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: kern_rwlock_obj.c,v 1.11 2023/09/12 16:17:21 ad Exp $ */
/*-
- * Copyright (c) 2008, 2009, 2019, 2023 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2009, 2019 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -30,11 +30,11 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.10 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_rwlock_obj.c,v 1.11 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
-#include <sys/kmem.h>
+#include <sys/pool.h>
#include <sys/rwlock.h>
/* Mutex cache */
@@ -43,10 +43,41 @@ struct krwobj {
krwlock_t ro_lock;
u_int ro_magic;
u_int ro_refcnt;
- uint8_t mo_pad[COHERENCY_UNIT - sizeof(krwlock_t) -
- sizeof(u_int) * 2];
};
+static int rw_obj_ctor(void *, void *, int);
+
+static pool_cache_t rw_obj_cache __read_mostly;
+
+/*
+ * rw_obj_init:
+ *
+ * Initialize the rw object store.
+ */
+void
+rw_obj_init(void)
+{
+
+ rw_obj_cache = pool_cache_init(sizeof(struct krwobj),
+ coherency_unit, 0, 0, "rwlock", NULL, IPL_NONE, rw_obj_ctor,
+ NULL, NULL);
+}
+
+/*
+ * rw_obj_ctor:
+ *
+ * Initialize a new lock for the cache.
+ */
+static int
+rw_obj_ctor(void *arg, void *obj, int flags)
+{
+ struct krwobj * ro = obj;
+
+ ro->ro_magic = RW_OBJ_MAGIC;
+
+ return 0;
+}
+
/*
* rw_obj_alloc:
*
@@ -57,10 +88,8 @@ rw_obj_alloc(void)
{
struct krwobj *ro;
- ro = kmem_alloc(sizeof(*ro), KM_SLEEP);
- KASSERT(ALIGNED_POINTER(ro, coherency_unit));
+ ro = pool_cache_get(rw_obj_cache, PR_WAITOK);
_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
- ro->ro_magic = RW_OBJ_MAGIC;
ro->ro_refcnt = 1;
return (krwlock_t *)ro;
@@ -76,11 +105,9 @@ rw_obj_tryalloc(void)
{
struct krwobj *ro;
- ro = kmem_alloc(sizeof(*ro), KM_NOSLEEP);
- KASSERT(ALIGNED_POINTER(ro, coherency_unit));
+ ro = pool_cache_get(rw_obj_cache, PR_NOWAIT);
if (__predict_true(ro != NULL)) {
_rw_init(&ro->ro_lock, (uintptr_t)__builtin_return_address(0));
- ro->ro_magic = RW_OBJ_MAGIC;
ro->ro_refcnt = 1;
}
@@ -124,7 +151,7 @@ rw_obj_free(krwlock_t *lock)
}
membar_acquire();
rw_destroy(&ro->ro_lock);
- kmem_free(ro, sizeof(*ro));
+ pool_cache_put(rw_obj_cache, ro);
return true;
}
Index: src/sys/kern/kern_turnstile.c
diff -u src/sys/kern/kern_turnstile.c:1.47 src/sys/kern/kern_turnstile.c:1.48
--- src/sys/kern/kern_turnstile.c:1.47 Sun Sep 10 14:45:52 2023
+++ src/sys/kern/kern_turnstile.c Tue Sep 12 16:17:21 2023
@@ -1,7 +1,7 @@
-/* $NetBSD: kern_turnstile.c,v 1.47 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: kern_turnstile.c,v 1.48 2023/09/12 16:17:21 ad Exp $ */
/*-
- * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020, 2023
+ * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020
* The NetBSD Foundation, Inc.
* All rights reserved.
*
@@ -61,10 +61,11 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.47 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.48 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/lockdebug.h>
+#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/sleepq.h>
#include <sys/sleeptab.h>
@@ -80,6 +81,7 @@ __KERNEL_RCSID(0, "$NetBSD: kern_turnsti
#define TS_HASH(obj) (((uintptr_t)(obj) >> 6) & TS_HASH_MASK)
static tschain_t turnstile_chains[TS_HASH_SIZE] __cacheline_aligned;
+struct pool turnstile_pool;
static union {
kmutex_t lock;
@@ -101,6 +103,9 @@ turnstile_init(void)
mutex_init(&turnstile_locks[i].lock, MUTEX_DEFAULT, IPL_SCHED);
}
+ pool_init(&turnstile_pool, sizeof(turnstile_t), coherency_unit,
+ 0, 0, "tstile", NULL, IPL_NONE);
+
turnstile_ctor(&turnstile0);
}
Index: src/sys/kern/subr_kcpuset.c
diff -u src/sys/kern/subr_kcpuset.c:1.18 src/sys/kern/subr_kcpuset.c:1.19
--- src/sys/kern/subr_kcpuset.c:1.18 Mon Sep 11 08:55:01 2023
+++ src/sys/kern/subr_kcpuset.c Tue Sep 12 16:17:21 2023
@@ -1,7 +1,7 @@
-/* $NetBSD: subr_kcpuset.c,v 1.18 2023/09/11 08:55:01 martin Exp $ */
+/* $NetBSD: subr_kcpuset.c,v 1.19 2023/09/12 16:17:21 ad Exp $ */
/*-
- * Copyright (c) 2011, 2023 The NetBSD Foundation, Inc.
+ * Copyright (c) 2011 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
@@ -41,7 +41,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.18 2023/09/11 08:55:01 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.19 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/types.h>
@@ -50,7 +50,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kcpuset
#include <sys/intr.h>
#include <sys/sched.h>
#include <sys/kcpuset.h>
-#include <sys/kmem.h>
+#include <sys/pool.h>
/* Number of CPUs to support. */
#define KC_MAXCPUS roundup2(MAXCPUS, 32)
@@ -97,7 +97,8 @@ static bool kc_initialised = false;
*/
static size_t kc_bitsize __read_mostly = KC_BITSIZE_EARLY;
static size_t kc_nfields __read_mostly = KC_NFIELDS_EARLY;
-static size_t kc_memsize __read_mostly;
+
+static pool_cache_t kc_cache __read_mostly;
static kcpuset_t * kcpuset_create_raw(bool);
@@ -114,10 +115,12 @@ kcpuset_sysinit(void)
/* Set a kcpuset_t sizes. */
kc_nfields = (KC_MAXCPUS >> KC_SHIFT);
kc_bitsize = sizeof(uint32_t) * kc_nfields;
- kc_memsize = sizeof(kcpuset_impl_t) + kc_bitsize;
KASSERT(kc_nfields != 0);
KASSERT(kc_bitsize != 0);
+ kc_cache = pool_cache_init(sizeof(kcpuset_impl_t) + kc_bitsize,
+ coherency_unit, 0, 0, "kcpuset", NULL, IPL_NONE, NULL, NULL, NULL);
+
/* First, pre-allocate kcpuset entries. */
for (i = 0; i < kc_last_idx; i++) {
kcp = kcpuset_create_raw(true);
@@ -193,7 +196,7 @@ kcpuset_create_raw(bool zero)
{
kcpuset_impl_t *kc;
- kc = kmem_alloc(kc_memsize, KM_SLEEP);
+ kc = pool_cache_get(kc_cache, PR_WAITOK);
kc->kc_refcnt = 1;
kc->kc_next = NULL;
@@ -227,7 +230,6 @@ kcpuset_clone(kcpuset_t **retkcp, const
void
kcpuset_destroy(kcpuset_t *kcp)
{
- const size_t size = kc_memsize;
kcpuset_impl_t *kc;
KASSERT(kc_initialised);
@@ -236,7 +238,7 @@ kcpuset_destroy(kcpuset_t *kcp)
do {
kc = KC_GETSTRUCT(kcp);
kcp = kc->kc_next;
- kmem_free(kc, size);
+ pool_cache_put(kc_cache, kc);
} while (kcp);
}
Index: src/sys/kern/vfs_cwd.c
diff -u src/sys/kern/vfs_cwd.c:1.9 src/sys/kern/vfs_cwd.c:1.10
--- src/sys/kern/vfs_cwd.c:1.9 Sun Sep 10 14:45:52 2023
+++ src/sys/kern/vfs_cwd.c Tue Sep 12 16:17:21 2023
@@ -1,7 +1,7 @@
-/* $NetBSD: vfs_cwd.c,v 1.9 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: vfs_cwd.c,v 1.10 2023/09/12 16:17:21 ad Exp $ */
/*-
- * Copyright (c) 2008, 2020, 2023 The NetBSD Foundation, Inc.
+ * Copyright (c) 2008, 2020 The NetBSD Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,14 +31,27 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_cwd.c,v 1.9 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_cwd.c,v 1.10 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/vnode.h>
-#include <sys/kmem.h>
+
+static int cwdi_ctor(void *, void *, int);
+static void cwdi_dtor(void *, void *);
+
+static pool_cache_t cwdi_cache;
+
+void
+cwd_sys_init(void)
+{
+
+ cwdi_cache = pool_cache_init(sizeof(struct cwdinfo), coherency_unit,
+ 0, 0, "cwdi", NULL, IPL_NONE, cwdi_ctor, cwdi_dtor, NULL);
+ KASSERT(cwdi_cache != NULL);
+}
/*
* Create an initial cwdinfo structure, using the same current and root
@@ -50,9 +63,7 @@ cwdinit(void)
struct cwdinfo *cwdi;
struct cwdinfo *copy;
- cwdi = kmem_alloc(sizeof(*cwdi), KM_SLEEP);
- KASSERT(ALIGNED_POINTER(cwdi, COHERENCY_UNIT));
- rw_init(&cwdi->cwdi_lock);
+ cwdi = pool_cache_get(cwdi_cache, PR_WAITOK);
copy = curproc->p_cwdi;
rw_enter(©->cwdi_lock, RW_READER);
@@ -65,14 +76,31 @@ cwdinit(void)
cwdi->cwdi_edir = copy->cwdi_edir;
if (cwdi->cwdi_edir)
vref(cwdi->cwdi_edir);
- rw_exit(©->cwdi_lock);
-
cwdi->cwdi_cmask = copy->cwdi_cmask;
cwdi->cwdi_refcnt = 1;
+ rw_exit(©->cwdi_lock);
return (cwdi);
}
+static int
+cwdi_ctor(void *arg, void *obj, int flags)
+{
+ struct cwdinfo *cwdi = obj;
+
+ rw_init(&cwdi->cwdi_lock);
+
+ return 0;
+}
+
+static void
+cwdi_dtor(void *arg, void *obj)
+{
+ struct cwdinfo *cwdi = obj;
+
+ rw_destroy(&cwdi->cwdi_lock);
+}
+
/*
* Make p2 share p1's cwdinfo.
*/
@@ -116,12 +144,11 @@ cwdfree(struct cwdinfo *cwdi)
membar_acquire();
vrele(cwdi->cwdi_cdir);
- rw_destroy(&cwdi->cwdi_lock);
if (cwdi->cwdi_rdir)
vrele(cwdi->cwdi_rdir);
if (cwdi->cwdi_edir)
vrele(cwdi->cwdi_edir);
- kmem_free(cwdi, sizeof(*cwdi));
+ pool_cache_put(cwdi_cache, cwdi);
}
void
Index: src/sys/kern/vfs_init.c
diff -u src/sys/kern/vfs_init.c:1.62 src/sys/kern/vfs_init.c:1.63
--- src/sys/kern/vfs_init.c:1.62 Sun Sep 10 14:45:52 2023
+++ src/sys/kern/vfs_init.c Tue Sep 12 16:17:21 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: vfs_init.c,v 1.62 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: vfs_init.c,v 1.63 2023/09/12 16:17:21 ad Exp $ */
/*-
* Copyright (c) 1998, 2000, 2008 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_init.c,v 1.62 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_init.c,v 1.63 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/types.h>
@@ -104,6 +104,8 @@ __KERNEL_RCSID(0, "$NetBSD: vfs_init.c,v
SDT_PROVIDER_DEFINE(vfs);
+pool_cache_t pnbuf_cache;
+
/*
* These vnodeopv_descs are listed here because they are not
* associated with any particular file system, and thus cannot
@@ -406,6 +408,13 @@ vfsinit(void)
sysctl_vfs_setup();
/*
+ * Initialize the namei pathname buffer pool and cache.
+ */
+ pnbuf_cache = pool_cache_init(MAXPATHLEN, 0, 0, 0, "pnbufpl",
+ NULL, IPL_NONE, NULL, NULL, NULL);
+ KASSERT(pnbuf_cache != NULL);
+
+ /*
* Initialize the vnode table
*/
vntblinit();
Index: src/sys/kern/vfs_lockf.c
diff -u src/sys/kern/vfs_lockf.c:1.79 src/sys/kern/vfs_lockf.c:1.80
--- src/sys/kern/vfs_lockf.c:1.79 Sun Sep 10 14:45:52 2023
+++ src/sys/kern/vfs_lockf.c Tue Sep 12 16:17:21 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: vfs_lockf.c,v 1.79 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: vfs_lockf.c,v 1.80 2023/09/12 16:17:21 ad Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1993
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.79 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,v 1.80 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -43,7 +43,7 @@ __KERNEL_RCSID(0, "$NetBSD: vfs_lockf.c,
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/vnode.h>
-#include <sys/kmem.h>
+#include <sys/pool.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/atomic.h>
@@ -69,7 +69,6 @@ struct lockf {
kcondvar_t lf_cv; /* Signalling */
short lf_flags; /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */
short lf_type; /* Lock type: F_RDLCK, F_WRLCK */
- uid_t lf_uid; /* User ID responsible */
off_t lf_start; /* The byte # of the start of the lock */
off_t lf_end; /* The byte # of the end of the lock (-1=EOF)*/
void *lf_id; /* process or file description holding lock */
@@ -77,13 +76,14 @@ struct lockf {
struct lockf *lf_next; /* Next lock on this vnode, or blocking lock */
struct locklist lf_blkhd; /* List of requests blocked on this lock */
TAILQ_ENTRY(lockf) lf_block;/* A request waiting for a lock */
- struct uidinfo *lf_uip; /* Cached pointer to uidinfo */
+ uid_t lf_uid; /* User ID responsible */
};
/* Maximum length of sleep chains to traverse to try and detect deadlock. */
#define MAXDEPTH 50
-static kmutex_t lockf_lock __cacheline_aligned;
+static pool_cache_t lockf_cache;
+static kmutex_t *lockf_lock;
static char lockstr[] = "lockf";
/*
@@ -205,20 +205,39 @@ lf_alloc(int allowfail)
return NULL;
}
- lock = kmem_alloc(sizeof(*lock), KM_SLEEP);
+ lock = pool_cache_get(lockf_cache, PR_WAITOK);
lock->lf_uid = uid;
- lock->lf_uip = uip;
- cv_init(&lock->lf_cv, lockstr);
return lock;
}
static void
lf_free(struct lockf *lock)
{
+ struct uidinfo *uip;
- atomic_dec_ulong(&lock->lf_uip->ui_lockcnt);
+ uip = uid_find(lock->lf_uid);
+ atomic_dec_ulong(&uip->ui_lockcnt);
+ pool_cache_put(lockf_cache, lock);
+}
+
+static int
+lf_ctor(void *arg, void *obj, int flag)
+{
+ struct lockf *lock;
+
+ lock = obj;
+ cv_init(&lock->lf_cv, lockstr);
+
+ return 0;
+}
+
+static void
+lf_dtor(void *arg, void *obj)
+{
+ struct lockf *lock;
+
+ lock = obj;
cv_destroy(&lock->lf_cv);
- kmem_free(lock, sizeof(*lock));
}
/*
@@ -792,7 +811,7 @@ lf_advlock(struct vop_advlock_args *ap,
struct flock *fl = ap->a_fl;
struct lockf *lock = NULL;
struct lockf *sparelock;
- kmutex_t *interlock = &lockf_lock;
+ kmutex_t *interlock = lockf_lock;
off_t start, end;
int error = 0;
@@ -954,3 +973,17 @@ quit:
return error;
}
+
+/*
+ * Initialize subsystem. XXX We use a global lock. This could be the
+ * vnode interlock, but the deadlock detection code may need to inspect
+ * locks belonging to other files.
+ */
+void
+lf_init(void)
+{
+
+ lockf_cache = pool_cache_init(sizeof(struct lockf), 0, 0, 0, "lockf",
+ NULL, IPL_NONE, lf_ctor, lf_dtor, NULL);
+ lockf_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
+}
Index: src/sys/rump/include/rump/rump_namei.h
diff -u src/sys/rump/include/rump/rump_namei.h:1.50 src/sys/rump/include/rump/rump_namei.h:1.51
--- src/sys/rump/include/rump/rump_namei.h:1.50 Sun Sep 10 14:46:19 2023
+++ src/sys/rump/include/rump/rump_namei.h Tue Sep 12 16:17:22 2023
@@ -1,11 +1,11 @@
-/* $NetBSD: rump_namei.h,v 1.50 2023/09/10 14:46:19 ad Exp $ */
+/* $NetBSD: rump_namei.h,v 1.51 2023/09/12 16:17:22 ad Exp $ */
/*
* WARNING: GENERATED FILE. DO NOT EDIT
* (edit namei.src and run make namei in src/sys/sys)
* by: NetBSD: gennameih.awk,v 1.5 2009/12/23 14:17:19 pooka Exp
- * from: NetBSD: namei.src,v 1.62 2023/09/10 14:45:53 ad Exp
+ * from: NetBSD: namei.src,v 1.61 2023/09/09 18:27:59 ad Exp
*/
#ifndef _RUMP_RUMP_NAMEI_H_
Index: src/sys/rump/librump/rumpkern/rump.c
diff -u src/sys/rump/librump/rumpkern/rump.c:1.358 src/sys/rump/librump/rumpkern/rump.c:1.359
--- src/sys/rump/librump/rumpkern/rump.c:1.358 Sun Sep 10 14:45:52 2023
+++ src/sys/rump/librump/rumpkern/rump.c Tue Sep 12 16:17:21 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: rump.c,v 1.358 2023/09/10 14:45:52 ad Exp $ */
+/* $NetBSD: rump.c,v 1.359 2023/09/12 16:17:21 ad Exp $ */
/*
* Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved.
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rump.c,v 1.358 2023/09/10 14:45:52 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rump.c,v 1.359 2023/09/12 16:17:21 ad Exp $");
#include <sys/systm.h>
#define ELFSIZE ARCH_ELFSIZE
@@ -293,6 +293,8 @@ rump_init_callback(void (*cpuinit_callba
uvm_ra_init();
uao_init();
+ mutex_obj_init();
+ rw_obj_init();
callout_startup();
kprintf_init();
Index: src/sys/rump/librump/rumpvfs/rump_vfs.c
diff -u src/sys/rump/librump/rumpvfs/rump_vfs.c:1.95 src/sys/rump/librump/rumpvfs/rump_vfs.c:1.96
--- src/sys/rump/librump/rumpvfs/rump_vfs.c:1.95 Sun Sep 10 14:45:53 2023
+++ src/sys/rump/librump/rumpvfs/rump_vfs.c Tue Sep 12 16:17:21 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: rump_vfs.c,v 1.95 2023/09/10 14:45:53 ad Exp $ */
+/* $NetBSD: rump_vfs.c,v 1.96 2023/09/12 16:17:21 ad Exp $ */
/*
* Copyright (c) 2008 Antti Kantee. All Rights Reserved.
@@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rump_vfs.c,v 1.95 2023/09/10 14:45:53 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rump_vfs.c,v 1.96 2023/09/12 16:17:21 ad Exp $");
#include <sys/param.h>
#include <sys/buf.h>
@@ -124,6 +124,8 @@ RUMP_COMPONENT(RUMP__FACTION_VFS)
fstrans_init();
vfsinit();
bufinit();
+ cwd_sys_init();
+ lf_init();
spec_init();
root_device = &rump_rootdev;
Index: src/sys/sys/namei.h
diff -u src/sys/sys/namei.h:1.117 src/sys/sys/namei.h:1.118
--- src/sys/sys/namei.h:1.117 Sun Sep 10 14:46:18 2023
+++ src/sys/sys/namei.h Tue Sep 12 16:17:21 2023
@@ -1,11 +1,11 @@
-/* $NetBSD: namei.h,v 1.117 2023/09/10 14:46:18 ad Exp $ */
+/* $NetBSD: namei.h,v 1.118 2023/09/12 16:17:21 ad Exp $ */
/*
* WARNING: GENERATED FILE. DO NOT EDIT
* (edit namei.src and run make namei in src/sys/sys)
* by: NetBSD: gennameih.awk,v 1.5 2009/12/23 14:17:19 pooka Exp
- * from: NetBSD: namei.src,v 1.62 2023/09/10 14:45:53 ad Exp
+ * from: NetBSD: namei.src,v 1.61 2023/09/09 18:27:59 ad Exp
*/
/*
@@ -252,13 +252,15 @@ struct namecache {
#endif /* __NAMECACHE_PRIVATE */
#ifdef _KERNEL
-#include <sys/kmem.h>
+#include <sys/pool.h>
struct mount;
struct cpu_info;
-#define PNBUF_GET() ((char *)kmem_alloc(MAXPATHLEN, KM_SLEEP))
-#define PNBUF_PUT(pnb) kmem_free((pnb), MAXPATHLEN)
+extern pool_cache_t pnbuf_cache; /* pathname buffer cache */
+
+#define PNBUF_GET() ((char *)pool_cache_get(pnbuf_cache, PR_WAITOK))
+#define PNBUF_PUT(pnb) pool_cache_put(pnbuf_cache, (void *)(pnb))
/*
* Typesafe flags for namei_simple/nameiat_simple.
Index: src/sys/sys/namei.src
diff -u src/sys/sys/namei.src:1.62 src/sys/sys/namei.src:1.63
--- src/sys/sys/namei.src:1.62 Sun Sep 10 14:45:53 2023
+++ src/sys/sys/namei.src Tue Sep 12 16:17:21 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: namei.src,v 1.62 2023/09/10 14:45:53 ad Exp $ */
+/* $NetBSD: namei.src,v 1.63 2023/09/12 16:17:21 ad Exp $ */
/*
* Copyright (c) 1985, 1989, 1991, 1993
@@ -244,13 +244,15 @@ struct namecache {
#endif /* __NAMECACHE_PRIVATE */
#ifdef _KERNEL
-#include <sys/kmem.h>
+#include <sys/pool.h>
struct mount;
struct cpu_info;
-#define PNBUF_GET() ((char *)kmem_alloc(MAXPATHLEN, KM_SLEEP))
-#define PNBUF_PUT(pnb) kmem_free((pnb), MAXPATHLEN)
+extern pool_cache_t pnbuf_cache; /* pathname buffer cache */
+
+#define PNBUF_GET() ((char *)pool_cache_get(pnbuf_cache, PR_WAITOK))
+#define PNBUF_PUT(pnb) pool_cache_put(pnbuf_cache, (void *)(pnb))
/*
* Typesafe flags for namei_simple/nameiat_simple.
Index: src/sys/uvm/uvm_init.c
diff -u src/sys/uvm/uvm_init.c:1.57 src/sys/uvm/uvm_init.c:1.58
--- src/sys/uvm/uvm_init.c:1.57 Sun Sep 10 14:45:53 2023
+++ src/sys/uvm/uvm_init.c Tue Sep 12 16:17:22 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_init.c,v 1.57 2023/09/10 14:45:53 ad Exp $ */
+/* $NetBSD: uvm_init.c,v 1.58 2023/09/12 16:17:22 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.57 2023/09/10 14:45:53 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_init.c,v 1.58 2023/09/12 16:17:22 ad Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -174,6 +174,7 @@ uvm_init(void)
* so initialize that first.
*/
+ rw_obj_init();
uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
UAO_FLAG_KERNSWAP);
Index: src/sys/uvm/uvm_map.c
diff -u src/sys/uvm/uvm_map.c:1.408 src/sys/uvm/uvm_map.c:1.409
--- src/sys/uvm/uvm_map.c:1.408 Sun Sep 10 14:45:53 2023
+++ src/sys/uvm/uvm_map.c Tue Sep 12 16:17:22 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_map.c,v 1.408 2023/09/10 14:45:53 ad Exp $ */
+/* $NetBSD: uvm_map.c,v 1.409 2023/09/12 16:17:22 ad Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.408 2023/09/10 14:45:53 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.409 2023/09/12 16:17:22 ad Exp $");
#include "opt_ddb.h"
#include "opt_pax.h"
@@ -144,6 +144,12 @@ UVMMAP_EVCNT_DEFINE(mlk_treeloop)
const char vmmapbsy[] = "vmmapbsy";
/*
+ * cache for vmspace structures.
+ */
+
+static struct pool_cache uvm_vmspace_cache;
+
+/*
* cache for dynamically-allocated map entries.
*/
@@ -925,6 +931,8 @@ uvm_map_init_caches(void)
pool_cache_bootstrap(&uvm_map_entry_cache, sizeof(struct vm_map_entry),
coherency_unit, 0, PR_LARGECACHE, "vmmpepl", NULL, IPL_NONE, NULL,
NULL, NULL);
+ pool_cache_bootstrap(&uvm_vmspace_cache, sizeof(struct vmspace),
+ 0, 0, 0, "vmsppl", NULL, IPL_NONE, NULL, NULL, NULL);
}
/*
@@ -4104,7 +4112,7 @@ uvmspace_alloc(vaddr_t vmin, vaddr_t vma
struct vmspace *vm;
UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
- vm = kmem_alloc(sizeof(*vm), KM_SLEEP);
+ vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
uvmspace_init(vm, NULL, vmin, vmax, topdown);
UVMHIST_LOG(maphist,"<- done (vm=%#jx)", (uintptr_t)vm, 0, 0, 0);
return (vm);
@@ -4360,7 +4368,7 @@ uvmspace_free(struct vmspace *vm)
rw_destroy(&map->lock);
cv_destroy(&map->cv);
pmap_destroy(map->pmap);
- kmem_free(vm, sizeof(*vm));
+ pool_cache_put(&uvm_vmspace_cache, vm);
}
static struct vm_map_entry *
Index: src/sys/uvm/uvm_readahead.c
diff -u src/sys/uvm/uvm_readahead.c:1.14 src/sys/uvm/uvm_readahead.c:1.15
--- src/sys/uvm/uvm_readahead.c:1.14 Sun Sep 10 14:45:53 2023
+++ src/sys/uvm/uvm_readahead.c Tue Sep 12 16:17:22 2023
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_readahead.c,v 1.14 2023/09/10 14:45:53 ad Exp $ */
+/* $NetBSD: uvm_readahead.c,v 1.15 2023/09/12 16:17:22 ad Exp $ */
/*-
* Copyright (c)2003, 2005, 2009 YAMAMOTO Takashi,
@@ -40,10 +40,10 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.14 2023/09/10 14:45:53 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_readahead.c,v 1.15 2023/09/12 16:17:22 ad Exp $");
#include <sys/param.h>
-#include <sys/kmem.h>
+#include <sys/pool.h>
#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
@@ -83,6 +83,8 @@ static off_t ra_startio(struct uvm_objec
static struct uvm_ractx *ra_allocctx(void);
static void ra_freectx(struct uvm_ractx *);
+static struct pool_cache ractx_cache;
+
/*
* uvm_ra_init: initialize readahead module.
*/
@@ -91,20 +93,22 @@ void
uvm_ra_init(void)
{
+ pool_cache_bootstrap(&ractx_cache, sizeof(struct uvm_ractx), 0, 0, 0,
+ "ractx", NULL, IPL_NONE, NULL, NULL, NULL);
}
static struct uvm_ractx *
ra_allocctx(void)
{
- return kmem_alloc(sizeof(struct uvm_ractx), KM_NOSLEEP);
+ return pool_cache_get(&ractx_cache, PR_NOWAIT);
}
static void
ra_freectx(struct uvm_ractx *ra)
{
- kmem_free(ra, sizeof(struct uvm_ractx));
+ pool_cache_put(&ractx_cache, ra);
}
/*