Module Name:    src
Committed By:   rmind
Date:           Fri May 13 22:22:56 UTC 2011

Modified Files:
        src/sys/kern: sysv_shm.c

Log Message:
- Replace shmmap_entry_pool with kmem(9); a dedicated pool is not worthwhile
  for these allocations (see the sketch below).
- Sprinkle __cacheline_aligned and __read_mostly.
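
For readers unfamiliar with the idiom, the conversion boils down to the
following pattern (a minimal sketch only; struct and variable names mirror
sysv_shm.c, the surrounding code is illustrative and not taken from the file):

	#include <sys/types.h>
	#include <sys/queue.h>
	#include <sys/kmem.h>

	struct shmmap_entry {
		SLIST_ENTRY(shmmap_entry) next;
		vaddr_t va;
		int shmid;
	};

	/*
	 * Before: fixed-size objects came from a private pool(9) cache:
	 *	shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
	 *	...
	 *	pool_put(&shmmap_entry_pool, shmmap_se);
	 *
	 * After: plain kmem(9).  The object size is passed on both
	 * allocation and free, so no per-type cache has to be set up
	 * in shminit() or torn down later.
	 */
	struct shmmap_entry *shmmap_se;

	shmmap_se = kmem_alloc(sizeof(struct shmmap_entry), KM_SLEEP);
	/* ... use shmmap_se ... */
	kmem_free(shmmap_se, sizeof(struct shmmap_entry));

The second item annotates the globals so that frequently written variables
get their own cache line while rarely written ones can be grouped, e.g.
(taken from the diff below):

	static kmutex_t		shm_lock	__cacheline_aligned;
	static int		shm_use_phys	__read_mostly;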


To generate a diff of this commit:
cvs rdiff -u -r1.118 -r1.119 src/sys/kern/sysv_shm.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/sysv_shm.c
diff -u src/sys/kern/sysv_shm.c:1.118 src/sys/kern/sysv_shm.c:1.119
--- src/sys/kern/sysv_shm.c:1.118	Tue Jul 27 14:25:23 2010
+++ src/sys/kern/sysv_shm.c	Fri May 13 22:22:55 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: sysv_shm.c,v 1.118 2010/07/27 14:25:23 jakllsch Exp $	*/
+/*	$NetBSD: sysv_shm.c,v 1.119 2011/05/13 22:22:55 rmind Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2007 The NetBSD Foundation, Inc.
@@ -61,7 +61,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.118 2010/07/27 14:25:23 jakllsch Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sysv_shm.c,v 1.119 2011/05/13 22:22:55 rmind Exp $");
 
 #define SYSVSHM
 
@@ -76,26 +76,25 @@
 #include <sys/mount.h>		/* XXX for <sys/syscallargs.h> */
 #include <sys/syscallargs.h>
 #include <sys/queue.h>
-#include <sys/pool.h>
 #include <sys/kauth.h>
 
 #include <uvm/uvm_extern.h>
 #include <uvm/uvm_object.h>
 
-int shm_nused;
-struct	shmid_ds *shmsegs;
-
 struct shmmap_entry {
 	SLIST_ENTRY(shmmap_entry) next;
 	vaddr_t va;
 	int shmid;
 };
 
-static kmutex_t		shm_lock;
-static kcondvar_t *	shm_cv;
-static struct pool	shmmap_entry_pool;
-static int		shm_last_free, shm_use_phys;
-static size_t		shm_committed;
+int			shm_nused		__cacheline_aligned;
+struct shmid_ds *	shmsegs			__read_mostly;
+
+static kmutex_t		shm_lock		__cacheline_aligned;
+static kcondvar_t *	shm_cv			__cacheline_aligned;
+static int		shm_last_free		__cacheline_aligned;
+static size_t		shm_committed		__cacheline_aligned;
+static int		shm_use_phys		__read_mostly;
 
 static kcondvar_t	shm_realloc_cv;
 static bool		shm_realloc_state;
@@ -230,7 +229,7 @@
 
 	/* 3. A shared shm map, copy to a fresh one and adjust refcounts */
 	SLIST_FOREACH(oshmmap_se, &oshmmap_s->entries, next) {
-		shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
+		shmmap_se = kmem_alloc(sizeof(struct shmmap_entry), KM_SLEEP);
 		shmmap_se->va = oshmmap_se->va;
 		shmmap_se->shmid = oshmmap_se->shmid;
 		SLIST_INSERT_HEAD(&shmmap_s->entries, shmmap_se, next);
@@ -354,9 +353,10 @@
 	mutex_exit(&shm_lock);
 
 	uvm_deallocate(&p->p_vmspace->vm_map, shmmap_se->va, size);
-	if (uobj != NULL)
+	if (uobj != NULL) {
 		uao_detach(uobj);
-	pool_put(&shmmap_entry_pool, shmmap_se);
+	}
+	kmem_free(shmmap_se, sizeof(struct shmmap_entry));
 
 	return 0;
 }
@@ -385,7 +385,7 @@
 	vsize_t size;
 
 	/* Allocate a new map entry and set it */
-	shmmap_se = pool_get(&shmmap_entry_pool, PR_WAITOK);
+	shmmap_se = kmem_alloc(sizeof(struct shmmap_entry), KM_SLEEP);
 	shmmap_se->shmid = SCARG(uap, shmid);
 
 	mutex_enter(&shm_lock);
@@ -475,8 +475,9 @@
 err:
 	cv_broadcast(&shm_realloc_cv);
 	mutex_exit(&shm_lock);
-	if (error && shmmap_se)
-		pool_put(&shmmap_entry_pool, shmmap_se);
+	if (error && shmmap_se) {
+		kmem_free(shmmap_se, sizeof(struct shmmap_entry));
+	}
 	return error;
 
 err_detach:
@@ -486,9 +487,10 @@
 	shm_realloc_disable--;
 	cv_broadcast(&shm_realloc_cv);
 	mutex_exit(&shm_lock);
-	if (uobj != NULL)
+	if (uobj != NULL) {
 		uao_detach(uobj);
-	pool_put(&shmmap_entry_pool, shmmap_se);
+	}
+	kmem_free(shmmap_se, sizeof(struct shmmap_entry));
 	return error;
 }
 
@@ -847,7 +849,7 @@
 		if (uobj != NULL) {
 			uao_detach(uobj);
 		}
-		pool_put(&shmmap_entry_pool, shmmap_se);
+		kmem_free(shmmap_se, sizeof(struct shmmap_entry));
 
 		if (SLIST_EMPTY(&shmmap_s->entries)) {
 			break;
@@ -945,8 +947,6 @@
 	int i;
 
 	mutex_init(&shm_lock, MUTEX_DEFAULT, IPL_NONE);
-	pool_init(&shmmap_entry_pool, sizeof(struct shmmap_entry), 0, 0, 0,
-	    "shmmp", &pool_allocator_nointr, IPL_NONE);
 	cv_init(&shm_realloc_cv, "shmrealc");
 
 	/* Allocate the wired memory for our structures */
