Module Name:    src
Committed By:   pooka
Date:           Wed Sep  1 19:37:59 UTC 2010

Modified Files:
        src/sys/rump/librump/rumpkern: Makefile.rumpkern emul.c rump.c
            rump_private.h scheduler.c sysproxy_socket.c threads.c
        src/sys/sys: lwp.h
Added Files:
        src/sys/rump/librump/rumpkern: lwproc.c

Log Message:
Implement rump_lwproc: the new lwp/proc management routines for
rump.  These move the management of the pid/lwpid space from the
application into the kernel, make the code more robust, and make it
possible to attach multiple lwps to non-proc0 processes.
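
For illustration only (not part of the commit): a minimal sketch of how
kernel-side code can drive the new interface, mirroring the pattern used
in sysproxy_socket.c further down.  The helper name run_in_new_proc and
the assumption that <rump/rump.h> declares the rump_lwproc_*() prototypes
are mine; only functions visible in this diff are called.

	#include <sys/param.h>
	#include <sys/lwp.h>
	#include <sys/proc.h>

	#include <rump/rump.h>	/* assumed to declare rump_lwproc_*() */

	/* hypothetical helper: run fn(arg) in the context of a fresh process */
	static void
	run_in_new_proc(void (*fn)(void *), void *arg)
	{
		struct lwp *mylwp = curlwp;

		/* allocate a new proc with one lwp and switch to it */
		if (rump_lwproc_newproc() != 0)
			return;

		(*fn)(arg);	/* runs with curproc != proc0 */

		/*
		 * Mark our lwp released and hop back to the original one;
		 * the released lwp is freed by rump_lwproc_switch().
		 */
		rump_lwproc_releaselwp();
		rump_lwproc_switch(mylwp);
	}
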


To generate a diff of this commit:
cvs rdiff -u -r1.95 -r1.96 src/sys/rump/librump/rumpkern/Makefile.rumpkern
cvs rdiff -u -r1.144 -r1.145 src/sys/rump/librump/rumpkern/emul.c
cvs rdiff -u -r0 -r1.1 src/sys/rump/librump/rumpkern/lwproc.c
cvs rdiff -u -r1.183 -r1.184 src/sys/rump/librump/rumpkern/rump.c
cvs rdiff -u -r1.54 -r1.55 src/sys/rump/librump/rumpkern/rump_private.h
cvs rdiff -u -r1.18 -r1.19 src/sys/rump/librump/rumpkern/scheduler.c
cvs rdiff -u -r1.7 -r1.8 src/sys/rump/librump/rumpkern/sysproxy_socket.c
cvs rdiff -u -r1.11 -r1.12 src/sys/rump/librump/rumpkern/threads.c
cvs rdiff -u -r1.137 -r1.138 src/sys/sys/lwp.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/rump/librump/rumpkern/Makefile.rumpkern
diff -u src/sys/rump/librump/rumpkern/Makefile.rumpkern:1.95 src/sys/rump/librump/rumpkern/Makefile.rumpkern:1.96
--- src/sys/rump/librump/rumpkern/Makefile.rumpkern:1.95	Mon Aug 30 09:44:40 2010
+++ src/sys/rump/librump/rumpkern/Makefile.rumpkern	Wed Sep  1 19:37:58 2010
@@ -1,4 +1,4 @@
-#	$NetBSD: Makefile.rumpkern,v 1.95 2010/08/30 09:44:40 pooka Exp $
+#	$NetBSD: Makefile.rumpkern,v 1.96 2010/09/01 19:37:58 pooka Exp $
 #
 
 .include "${RUMPTOP}/Makefile.rump"
@@ -15,9 +15,9 @@
 #
 # Source modules, first the ones specifically implemented for librump.
 # 
-SRCS=	rump.c rumpcopy.c emul.c intr.c klock.c kobj_rename.c	\
-	ltsleep.c memalloc.c scheduler.c signals.c sleepq.c	\
-	sysproxy_socket.c threads.c vm.c
+SRCS=	rump.c rumpcopy.c emul.c intr.c lwproc.c klock.c	\
+	kobj_rename.c ltsleep.c memalloc.c scheduler.c		\
+	signals.c sleepq.c sysproxy_socket.c threads.c vm.c
 SRCS+=	compat.c
 
 # Multiprocessor or uniprocessor locking.  TODO: select right

Index: src/sys/rump/librump/rumpkern/emul.c
diff -u src/sys/rump/librump/rumpkern/emul.c:1.144 src/sys/rump/librump/rumpkern/emul.c:1.145
--- src/sys/rump/librump/rumpkern/emul.c:1.144	Wed Jun 23 08:36:03 2010
+++ src/sys/rump/librump/rumpkern/emul.c	Wed Sep  1 19:37:58 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: emul.c,v 1.144 2010/06/23 08:36:03 pooka Exp $	*/
+/*	$NetBSD: emul.c,v 1.145 2010/09/01 19:37:58 pooka Exp $	*/
 
 /*
  * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
@@ -28,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.144 2010/06/23 08:36:03 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: emul.c,v 1.145 2010/09/01 19:37:58 pooka Exp $");
 
 #include <sys/param.h>
 #include <sys/null.h>
@@ -158,6 +158,25 @@
 	(*l->l_syncobj->sobj_unsleep)(l, cleanup);
 }
 
+void
+lwp_update_creds(struct lwp *l)
+{
+	struct proc *p;
+	kauth_cred_t oldcred;
+
+	p = l->l_proc;
+	oldcred = l->l_cred;
+	l->l_prflag &= ~LPR_CRMOD;
+
+	mutex_enter(p->p_lock);
+	kauth_cred_hold(p->p_cred);
+	l->l_cred = p->p_cred;
+	mutex_exit(p->p_lock);
+
+	if (oldcred != NULL)
+		kauth_cred_free(oldcred);
+}
+
 vaddr_t
 calc_cache_size(struct vm_map *map, int pct, int va_pct)
 {

Index: src/sys/rump/librump/rumpkern/rump.c
diff -u src/sys/rump/librump/rumpkern/rump.c:1.183 src/sys/rump/librump/rumpkern/rump.c:1.184
--- src/sys/rump/librump/rumpkern/rump.c:1.183	Mon Aug 30 10:49:40 2010
+++ src/sys/rump/librump/rumpkern/rump.c	Wed Sep  1 19:37:58 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: rump.c,v 1.183 2010/08/30 10:49:40 pooka Exp $	*/
+/*	$NetBSD: rump.c,v 1.184 2010/09/01 19:37:58 pooka Exp $	*/
 
 /*
  * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
@@ -28,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: rump.c,v 1.183 2010/08/30 10:49:40 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: rump.c,v 1.184 2010/09/01 19:37:58 pooka Exp $");
 
 #include <sys/systm.h>
 #define ELFSIZE ARCH_ELFSIZE
@@ -303,11 +303,12 @@
 	kauth_init();
 	rump_susercred = rump_cred_create(0, 0, 0, NULL);
 
-	l->l_cred = rump_cred_suserget();
-	l->l_proc = &proc0;
-
 	procinit();
 	proc0_init();
+
+	l->l_proc = &proc0;
+	lwp_update_creds(l);
+
 	lwpinit_specificdata();
 	lwp_initspecific(&lwp0);
 
@@ -483,138 +484,6 @@
 	return resid;
 }
 
-static pid_t nextpid = 1;
-struct lwp *
-rump_newproc_switch()
-{
-	struct lwp *l;
-	pid_t mypid;
-
-	mypid = atomic_inc_uint_nv(&nextpid);
-	if (__predict_false(mypid == 0))
-		mypid = atomic_inc_uint_nv(&nextpid);
-
-	l = rump_lwp_alloc(mypid, 0);
-	rump_lwp_switch(l);
-
-	return l;
-}
-
-struct lwp *
-rump_lwp_alloc_and_switch(pid_t pid, lwpid_t lid)
-{
-	struct lwp *l;
-
-	l = rump_lwp_alloc(pid, lid);
-	rump_lwp_switch(l);
-
-	return l;
-}
-
-struct lwp *
-rump_lwp_alloc(pid_t pid, lwpid_t lid)
-{
-	struct lwp *l;
-	struct proc *p;
-
-	l = kmem_zalloc(sizeof(*l), KM_SLEEP);
-	if (pid != 0) {
-		p = kmem_zalloc(sizeof(*p), KM_SLEEP);
-		if (rump_proc_vfs_init)
-			rump_proc_vfs_init(p);
-		p->p_stats = proc0.p_stats; /* XXX */
-		p->p_limit = lim_copy(proc0.p_limit);
-		p->p_pid = pid;
-		p->p_vmspace = &vmspace0;
-		p->p_emul = &emul_netbsd;
-		p->p_fd = fd_init(NULL);
-		p->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
-		p->p_pgrp = &rump_pgrp;
-		p->p_cred = l->l_cred = rump_cred_suserget();
-
-		chgproccnt(0, 1);
-		atomic_inc_uint(&nprocs);
-	} else {
-		p = &proc0;
-		l->l_cred = rump_susercred;
-	}
-
-	l->l_proc = p;
-	l->l_lid = lid;
-	l->l_fd = p->p_fd;
-	if (pid == 0)
-		fd_hold(l);
-	l->l_cpu = NULL;
-	l->l_target_cpu = rump_cpu;
-	lwp_initspecific(l);
-	LIST_INSERT_HEAD(&alllwp, l, l_list);
-
-	return l;
-}
-
-void
-rump_lwp_switch(struct lwp *newlwp)
-{
-	struct lwp *l = curlwp;
-
-	rumpuser_set_curlwp(NULL);
-	newlwp->l_cpu = newlwp->l_target_cpu = l->l_cpu;
-	newlwp->l_mutex = l->l_mutex;
-	l->l_mutex = NULL;
-	l->l_cpu = NULL;
-	rumpuser_set_curlwp(newlwp);
-	if (l->l_flag & LW_WEXIT)
-		rump_lwp_free(l);
-}
-
-/* XXX: this has effect only on non-pid0 lwps */
-void
-rump_lwp_release(struct lwp *l)
-{
-	struct proc *p;
-
-	p = l->l_proc;
-	if (p->p_pid != 0) {
-		mutex_obj_free(p->p_lock);
-		fd_free();
-		chgproccnt(kauth_cred_getuid(p->p_cred), -1);
-		if (rump_proc_vfs_release)
-			rump_proc_vfs_release(p);
-		rump_cred_put(l->l_cred);
-		limfree(p->p_limit);
-		kmem_free(p, sizeof(*p));
-
-		atomic_dec_uint(&nprocs);
-	} else {
-		fd_free();
-	}
-	KASSERT((l->l_flag & LW_WEXIT) == 0);
-	l->l_flag |= LW_WEXIT;
-}
-
-void
-rump_lwp_free(struct lwp *l)
-{
-
-	KASSERT(l->l_flag & LW_WEXIT);
-	KASSERT(l->l_mutex == NULL);
-	if (l->l_name)
-		kmem_free(l->l_name, MAXCOMLEN);
-	lwp_finispecific(l);
-	LIST_REMOVE(l, l_list);
-	kmem_free(l, sizeof(*l));
-}
-
-struct lwp *
-rump_lwp_curlwp(void)
-{
-	struct lwp *l = curlwp;
-
-	if (l->l_flag & LW_WEXIT)
-		return NULL;
-	return l;
-}
-
 /* rump private.  NEEDS WORK! */
 void
 rump_set_vmspace(struct vmspace *vm)
@@ -652,36 +521,6 @@
 	kauth_cred_free(cred);
 }
 
-kauth_cred_t
-rump_cred_suserget(void)
-{
-
-	kauth_cred_hold(rump_susercred);
-	return rump_susercred;
-}
-
-/*
- * Return the next system lwpid
- */
-lwpid_t
-rump_nextlid(void)
-{
-	lwpid_t retid;
-
-	mutex_enter(proc0.p_lock);
-	/*
-	 * Take next one, don't return 0
-	 * XXX: most likely we'll have collisions in case this
-	 * wraps around.
-	 */
-	if (++proc0.p_nlwpid == 0)
-		++proc0.p_nlwpid;
-	retid = proc0.p_nlwpid;
-	mutex_exit(proc0.p_lock);
-
-	return retid;
-}
-
 static int compcounter[RUMP_COMPONENT_MAX];
 
 static void
@@ -840,3 +679,18 @@
 		rumpuser_dprintf("%s / %s: %" PRIu64 "\n",
 		    ev->ev_group, ev->ev_name, ev->ev_count);
 }
+
+/*
+ * If you use this interface ... well ... all bets are off.
+ * The original purpose is for the p2k fs server library to be
+ * able to use the same pid/lid for VOPs as the host kernel.
+ */
+void
+rump_allbetsareoff_setid(pid_t pid, int lid)
+{
+	struct lwp *l = curlwp;
+	struct proc *p = l->l_proc;
+
+	l->l_lid = lid;
+	p->p_pid = pid;
+}

Index: src/sys/rump/librump/rumpkern/rump_private.h
diff -u src/sys/rump/librump/rumpkern/rump_private.h:1.54 src/sys/rump/librump/rumpkern/rump_private.h:1.55
--- src/sys/rump/librump/rumpkern/rump_private.h:1.54	Mon Jun 14 21:04:56 2010
+++ src/sys/rump/librump/rumpkern/rump_private.h	Wed Sep  1 19:37:58 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: rump_private.h,v 1.54 2010/06/14 21:04:56 pooka Exp $	*/
+/*	$NetBSD: rump_private.h,v 1.55 2010/09/01 19:37:58 pooka Exp $	*/
 
 /*
  * Copyright (c) 2007 Antti Kantee.  All Rights Reserved.
@@ -93,8 +93,6 @@
 void		rump_gettime(struct timespec *);
 void		rump_getuptime(struct timespec *);
 
-void		rump_lwp_free(struct lwp *);
-lwpid_t		rump_nextlid(void);
 void		rump_set_vmspace(struct vmspace *);
 
 typedef void	(*rump_proc_vfs_init_fn)(struct proc *);
@@ -110,6 +108,8 @@
 int		rump_sysproxy_copyout(const void *, void *, size_t);
 int		rump_sysproxy_copyin(const void *, void *, size_t);
 
+struct lwp *	rump__lwproc_allockernlwp(void);
+
 void	rump_cpus_bootstrap(int);
 void	rump_scheduler_init(void);
 void	rump_schedule(void);

Index: src/sys/rump/librump/rumpkern/scheduler.c
diff -u src/sys/rump/librump/rumpkern/scheduler.c:1.18 src/sys/rump/librump/rumpkern/scheduler.c:1.19
--- src/sys/rump/librump/rumpkern/scheduler.c:1.18	Sun Aug 15 20:23:04 2010
+++ src/sys/rump/librump/rumpkern/scheduler.c	Wed Sep  1 19:37:59 2010
@@ -1,4 +1,4 @@
-/*      $NetBSD: scheduler.c,v 1.18 2010/08/15 20:23:04 pooka Exp $	*/
+/*      $NetBSD: scheduler.c,v 1.19 2010/09/01 19:37:59 pooka Exp $	*/
 
 /*
  * Copyright (c) 2010 Antti Kantee.  All Rights Reserved.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.18 2010/08/15 20:23:04 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: scheduler.c,v 1.19 2010/09/01 19:37:59 pooka Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -78,7 +78,7 @@
 static struct rumpuser_cv *lwp0cv;
 static unsigned nextcpu;
 
-static bool lwp0busy = false;
+static bool lwp0isbusy = false;
 
 /*
  * Keep some stats.
@@ -187,6 +187,30 @@
 	    ts->tv_sec, ts->tv_nsec);
 }
 
+static void
+lwp0busy(void)
+{
+
+	/* busy lwp0 */
+	KASSERT(curlwp == NULL || curlwp->l_cpu == NULL);
+	rumpuser_mutex_enter_nowrap(lwp0mtx);
+	while (lwp0isbusy)
+		rumpuser_cv_wait_nowrap(lwp0cv, lwp0mtx);
+	lwp0isbusy = true;
+	rumpuser_mutex_exit(lwp0mtx);
+}
+
+static void
+lwp0rele(void)
+{
+
+	rumpuser_mutex_enter_nowrap(lwp0mtx);
+	KASSERT(lwp0isbusy == true);
+	lwp0isbusy = false;
+	rumpuser_cv_signal(lwp0cv);
+	rumpuser_mutex_exit(lwp0mtx);
+}
+
 void
 rump_schedule()
 {
@@ -199,31 +223,27 @@
 	 * for this case -- anyone who cares about performance will
 	 * start a real thread.
 	 */
-	l = rumpuser_get_curlwp();
-	if (l == NULL) {
-		/* busy lwp0 */
-		rumpuser_mutex_enter_nowrap(lwp0mtx);
-		while (lwp0busy)
-			rumpuser_cv_wait_nowrap(lwp0cv, lwp0mtx);
-		lwp0busy = true;
-		rumpuser_mutex_exit(lwp0mtx);
+	if (__predict_true((l = rumpuser_get_curlwp()) != NULL)) {
+		rump_schedule_cpu(l);
+		LWP_CACHE_CREDS(l, l->l_proc);
+	} else {
+		lwp0busy();
 
 		/* schedule cpu and use lwp0 */
 		rump_schedule_cpu(&lwp0);
 		rumpuser_set_curlwp(&lwp0);
-		l = rump_lwp_alloc(0, rump_nextlid());
 
-		/* release lwp0 */
-		rump_lwp_switch(l);
-		rumpuser_mutex_enter_nowrap(lwp0mtx);
-		lwp0busy = false;
-		rumpuser_cv_signal(lwp0cv);
-		rumpuser_mutex_exit(lwp0mtx);
+		/* allocate thread, switch to it, and release lwp0 */
+		l = rump__lwproc_allockernlwp();
+		rump_lwproc_switch(l);
+		lwp0rele();
 
-		/* mark new lwp as dead-on-exit */
-		rump_lwp_release(l);
-	} else {
-		rump_schedule_cpu(l);
+		/*
+		 * mark new thread dead-on-unschedule.  this
+		 * means that we'll be running with l_refcnt == 0.
+		 * relax, it's fine.
+		 */
+		rump_lwproc_releaselwp();
 	}
 }
 
@@ -331,29 +351,31 @@
 	l->l_mutex = NULL;
 
 	/*
-	 * If we're using a temp lwp, need to take lwp0 for rump_lwp_free().
-	 * (we could maybe cache idle lwp's to avoid constant bouncing)
+	 * Check special conditions:
+	 *  1) do we need to free the lwp which just unscheduled?
+	 *     (locking order: lwp0, cpu)
+	 *  2) do we want to clear curlwp for the current host thread
 	 */
-	if (l->l_flag & LW_WEXIT) {
-		rumpuser_set_curlwp(NULL);
+	if (__predict_false(l->l_flag & LW_WEXIT)) {
+		lwp0busy();
 
-		/* busy lwp0 */
-		rumpuser_mutex_enter_nowrap(lwp0mtx);
-		while (lwp0busy)
-			rumpuser_cv_wait_nowrap(lwp0cv, lwp0mtx);
-		lwp0busy = true;
-		rumpuser_mutex_exit(lwp0mtx);
+		/* Now that we have lwp0, we can schedule a CPU again */
+		rump_schedule_cpu(l);
 
-		rump_schedule_cpu(&lwp0);
-		rumpuser_set_curlwp(&lwp0);
-		rump_lwp_free(l);
+		/* switch to lwp0.  this frees the old thread */
+		KASSERT(l->l_flag & LW_WEXIT);
+		rump_lwproc_switch(&lwp0);
+
+		/* release lwp0 */
 		rump_unschedule_cpu(&lwp0);
+		lwp0.l_mutex = NULL;
+		lwp0.l_pflag &= ~LP_RUNNING;
+		lwp0rele();
 		rumpuser_set_curlwp(NULL);
 
-		rumpuser_mutex_enter_nowrap(lwp0mtx);
-		lwp0busy = false;
-		rumpuser_cv_signal(lwp0cv);
-		rumpuser_mutex_exit(lwp0mtx);
+	} else if (__predict_false(l->l_flag & LW_RUMP_CLEAR)) {
+		rumpuser_set_curlwp(NULL);
+		l->l_flag &= ~LW_RUMP_CLEAR;
 	}
 }
 

Index: src/sys/rump/librump/rumpkern/sysproxy_socket.c
diff -u src/sys/rump/librump/rumpkern/sysproxy_socket.c:1.7 src/sys/rump/librump/rumpkern/sysproxy_socket.c:1.8
--- src/sys/rump/librump/rumpkern/sysproxy_socket.c:1.7	Wed Aug 11 11:51:06 2010
+++ src/sys/rump/librump/rumpkern/sysproxy_socket.c	Wed Sep  1 19:37:59 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: sysproxy_socket.c,v 1.7 2010/08/11 11:51:06 pgoyette Exp $	*/
+/*	$NetBSD: sysproxy_socket.c,v 1.8 2010/09/01 19:37:59 pooka Exp $	*/
 
 /*
  * Copyright (c) 2009 Antti Kantee.  All Rights Reserved.
@@ -28,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sysproxy_socket.c,v 1.7 2010/08/11 11:51:06 pgoyette Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sysproxy_socket.c,v 1.8 2010/09/01 19:37:59 pooka Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -388,14 +388,14 @@
 
 	callp = rump_sysent + req->rpc_sysnum;
 	mylwp = curlwp;
-	l = rump_newproc_switch();
+	rump_lwproc_newproc();
 	rump_set_vmspace(&rump_sysproxy_vmspace);
 
 	resp.rpc_retval = 0; /* default */
 	resp.rpc_error = callp->sy_call(l, (void *)req->rpc_data,
 	    &resp.rpc_retval);
-	rump_lwp_release(l);
-	rump_lwp_switch(mylwp);
+	rump_lwproc_releaselwp();
+	rump_lwproc_switch(mylwp);
 	kmem_free(req, req->rpc_head.rpch_flen);
 
 	dosend(sock, qent, (uint8_t *)&resp, sizeof(resp), false);

Index: src/sys/rump/librump/rumpkern/threads.c
diff -u src/sys/rump/librump/rumpkern/threads.c:1.11 src/sys/rump/librump/rumpkern/threads.c:1.12
--- src/sys/rump/librump/rumpkern/threads.c:1.11	Thu Jun  3 19:36:21 2010
+++ src/sys/rump/librump/rumpkern/threads.c	Wed Sep  1 19:37:59 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: threads.c,v 1.11 2010/06/03 19:36:21 pooka Exp $	*/
+/*	$NetBSD: threads.c,v 1.12 2010/09/01 19:37:59 pooka Exp $	*/
 
 /*
  * Copyright (c) 2007-2009 Antti Kantee.  All Rights Reserved.
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.11 2010/06/03 19:36:21 pooka Exp $");
+__KERNEL_RCSID(0, "$NetBSD: threads.c,v 1.12 2010/09/01 19:37:59 pooka Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -139,7 +139,7 @@
 	k = malloc(sizeof(*k), M_TEMP, M_WAITOK);
 	k->f = func;
 	k->arg = arg;
-	k->mylwp = l = rump_lwp_alloc(0, rump_nextlid());
+	k->mylwp = l = rump__lwproc_allockernlwp();
 	l->l_flag |= LW_SYSTEM;
 	if (flags & KTHREAD_MPSAFE)
 		l->l_pflag |= LP_MPSAFE;
@@ -174,7 +174,7 @@
 
 	if ((curlwp->l_pflag & LP_MPSAFE) == 0)
 		KERNEL_UNLOCK_LAST(NULL);
-	rump_lwp_release(curlwp);
+	rump_lwproc_releaselwp();
 	/* unschedule includes membar */
 	rump_unschedule();
 	rumpuser_thread_exit();

Index: src/sys/sys/lwp.h
diff -u src/sys/sys/lwp.h:1.137 src/sys/sys/lwp.h:1.138
--- src/sys/sys/lwp.h:1.137	Thu Jul  8 12:23:31 2010
+++ src/sys/sys/lwp.h	Wed Sep  1 19:37:58 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: lwp.h,v 1.137 2010/07/08 12:23:31 rmind Exp $	*/
+/*	$NetBSD: lwp.h,v 1.138 2010/09/01 19:37:58 pooka Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010
@@ -240,6 +240,7 @@
 #define	LW_UNPARKED	0x10000000 /* Unpark op pending */
 #define	LW_SA_YIELD	0x40000000 /* LWP on VP is yielding */
 #define	LW_SA_IDLE	0x80000000 /* VP is idle */
+#define	LW_RUMP_CLEAR	LW_SA_IDLE /* clear curlwp in rump scheduler */
 
 /* The second set of flags is kept in l_pflag. */
 #define	LP_KTRACTIVE	0x00000001 /* Executing ktrace operation */

Added files:

Index: src/sys/rump/librump/rumpkern/lwproc.c
diff -u /dev/null src/sys/rump/librump/rumpkern/lwproc.c:1.1
--- /dev/null	Wed Sep  1 19:37:59 2010
+++ src/sys/rump/librump/rumpkern/lwproc.c	Wed Sep  1 19:37:58 2010
@@ -0,0 +1,339 @@
+/*      $NetBSD: lwproc.c,v 1.1 2010/09/01 19:37:58 pooka Exp $	*/
+
+/*
+ * Copyright (c) 2010 Antti Kantee.  All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: lwproc.c,v 1.1 2010/09/01 19:37:58 pooka Exp $");
+
+#include <sys/param.h>
+#include <sys/atomic.h>
+#include <sys/filedesc.h>
+#include <sys/kauth.h>
+#include <sys/kmem.h>
+#include <sys/lwp.h>
+#include <sys/pool.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/resourcevar.h>
+#include <sys/uidinfo.h>
+
+#include <rump/rumpuser.h>
+
+#include "rump_private.h"
+
+static void
+lwproc_proc_free(struct proc *p)
+{
+	kauth_cred_t cred;
+
+	mutex_enter(proc_lock);
+
+	KASSERT(p->p_nlwps == 0);
+	KASSERT(LIST_EMPTY(&p->p_lwps));
+	KASSERT(p->p_stat == SIDL || p->p_stat == SDEAD);
+
+	LIST_REMOVE(p, p_list);
+	LIST_REMOVE(p, p_sibling);
+	proc_free_pid(p->p_pid); /* decrements nprocs */
+	proc_leavepgrp(p); /* releases proc_lock */
+
+	cred = p->p_cred;
+	chgproccnt(kauth_cred_getuid(cred), -1);
+	if (rump_proc_vfs_release)
+		rump_proc_vfs_release(p);
+
+	limfree(p->p_limit);
+	pstatsfree(p->p_stats);
+	kauth_cred_free(p->p_cred);
+	proc_finispecific(p);
+
+	mutex_obj_free(p->p_lock);
+	mutex_destroy(&p->p_stmutex);
+	mutex_destroy(&p->p_auxlock);
+	rw_destroy(&p->p_reflock);
+	cv_destroy(&p->p_waitcv);
+	cv_destroy(&p->p_lwpcv);
+
+	proc_free_mem(p);
+}
+
+/*
+ * Allocate a new process.  Mostly mimic fork by
+ * copying the properties of the parent.  However, there are some
+ * differences.  For example, we never share the fd table.
+ *
+ * Switch to the new lwp and return a pointer to it.
+ */
+static struct proc *
+lwproc_newproc(struct proc *parent)
+{
+	uid_t uid = kauth_cred_getuid(parent->p_cred);
+	struct proc *p;
+
+	/* maxproc not enforced */
+	atomic_inc_uint(&nprocs);
+
+	/* allocate process */
+	p = proc_alloc();
+	memset(&p->p_startzero, 0,
+	    offsetof(struct proc, p_endzero)
+	      - offsetof(struct proc, p_startzero));
+	memcpy(&p->p_startcopy, &parent->p_startcopy,
+	    offsetof(struct proc, p_endcopy)
+	      - offsetof(struct proc, p_startcopy));
+
+	p->p_stats = pstatscopy(parent->p_stats);
+
+	/* not based on parent */
+	p->p_vmspace = &vmspace0;
+	p->p_emul = &emul_netbsd;
+	p->p_fd = fd_init(NULL);
+	lim_addref(parent->p_limit);
+	p->p_limit = parent->p_limit;
+
+	LIST_INIT(&p->p_lwps);
+	LIST_INIT(&p->p_children);
+
+	p->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
+	mutex_init(&p->p_stmutex, MUTEX_DEFAULT, IPL_NONE);
+	mutex_init(&p->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
+	rw_init(&p->p_reflock);
+	cv_init(&p->p_waitcv, "pwait");
+	cv_init(&p->p_lwpcv, "plwp");
+
+	p->p_pptr = parent;
+	p->p_ppid = parent->p_pid;
+
+	kauth_proc_fork(parent, p);
+
+	/* initialize cwd in rump kernels with vfs */
+	if (rump_proc_vfs_init)
+		rump_proc_vfs_init(p);
+
+	chgproccnt(uid, 1); /* not enforced */
+
+	/* publish proc various proc lists */
+	mutex_enter(proc_lock);
+	LIST_INSERT_HEAD(&allproc, p, p_list);
+	LIST_INSERT_HEAD(&parent->p_children, p, p_sibling);
+	LIST_INSERT_AFTER(parent, p, p_pglist);
+	mutex_exit(proc_lock);
+
+	return p;
+}
+
+static void
+lwproc_freelwp(struct lwp *l)
+{
+	struct proc *p;
+	bool freeproc;
+
+	p = l->l_proc;
+	mutex_enter(p->p_lock);
+
+	/* XXX: l_refcnt */
+	KASSERT(l->l_flag & LW_WEXIT);
+	KASSERT(l->l_refcnt == 0);
+
+	/* ok, zero references, continue with nuke */
+	LIST_REMOVE(l, l_sibling);
+	KASSERT(p->p_nlwps >= 1);
+	if (--p->p_nlwps == 0) {
+		KASSERT(p != &proc0);
+		p->p_stat = SDEAD;
+	}
+	freeproc = p->p_nlwps == 0;
+	cv_broadcast(&p->p_lwpcv); /* nobody sleeps on this in rump? */
+	kauth_cred_free(l->l_cred);
+	mutex_exit(p->p_lock);
+
+	mutex_enter(proc_lock);
+	LIST_REMOVE(l, l_list);
+	mutex_exit(proc_lock);
+
+	if (l->l_name)
+		kmem_free(l->l_name, MAXCOMLEN);
+	lwp_finispecific(l);
+
+	kmem_free(l, sizeof(*l));
+
+	if (p->p_stat == SDEAD)
+		lwproc_proc_free(p);	
+}
+
+/*
+ * called with p_lock held, releases lock before return
+ */
+static void
+lwproc_makelwp(struct proc *p, struct lwp *l, bool doswitch, bool procmake)
+{
+
+	p->p_nlwps++;
+	l->l_refcnt = 1;
+	l->l_proc = p;
+
+	l->l_lid = p->p_nlwpid++;
+	LIST_INSERT_HEAD(&p->p_lwps, l, l_sibling);
+	mutex_exit(p->p_lock);
+
+	lwp_update_creds(l);
+
+	l->l_fd = p->p_fd;
+	l->l_cpu = NULL;
+	l->l_target_cpu = rump_cpu; /* Initial target CPU always the same */
+
+	lwp_initspecific(l);
+
+	if (doswitch) {
+		rump_lwproc_switch(l);
+	}
+
+	/* filedesc already has refcount 1 when process is created */
+	if (!procmake) {
+		fd_hold(l);
+	}
+
+	mutex_enter(proc_lock);
+	LIST_INSERT_HEAD(&alllwp, l, l_list);
+	mutex_exit(proc_lock);
+}
+
+struct lwp *
+rump__lwproc_allockernlwp(void)
+{
+	struct proc *p;
+	struct lwp *l;
+
+	l = kmem_zalloc(sizeof(*l), KM_SLEEP);
+
+	p = &proc0;
+	mutex_enter(p->p_lock);
+	lwproc_makelwp(p, l, false, false);
+
+	return l;
+}
+
+int
+rump_lwproc_newlwp(pid_t pid)
+{
+	struct proc *p;
+	struct lwp *l;
+
+	l = kmem_zalloc(sizeof(*l), KM_SLEEP);
+	mutex_enter(proc_lock);
+	p = proc_find_raw(pid);
+	if (p == NULL) {
+		mutex_exit(proc_lock);
+		kmem_free(l, sizeof(*l));
+		return ESRCH;
+	}
+	mutex_enter(p->p_lock);
+	mutex_exit(proc_lock);
+	lwproc_makelwp(p, l, true, false);
+
+	return 0;
+}
+
+int
+rump_lwproc_newproc(void)
+{
+	struct proc *p;
+	struct lwp *l;
+
+	p = lwproc_newproc(curproc);
+	l = kmem_zalloc(sizeof(*l), KM_SLEEP);
+	mutex_enter(p->p_lock);
+	lwproc_makelwp(p, l, true, true);
+
+	return 0;
+}
+
+/*
+ * Switch to a new process/thread.  Release previous one if
+ * deemed to be exiting.
+ */
+void
+rump_lwproc_switch(struct lwp *newlwp)
+{
+	struct lwp *l = curlwp;
+
+	KASSERT(!(l->l_flag & LW_WEXIT) || newlwp);
+
+	if (__predict_false(newlwp && (newlwp->l_pflag & LP_RUNNING)))
+		panic("lwp %p (%d:%d) already running",
+		    newlwp, newlwp->l_proc->p_pid, newlwp->l_lid);
+
+	if (newlwp == NULL) {
+		l->l_pflag &= ~LP_RUNNING;
+		l->l_flag |= LW_RUMP_CLEAR;
+		return;
+	}
+
+	/* fd_free() must be called from curlwp context.  talk about ugh */
+	if (l->l_flag & LW_WEXIT) {
+		fd_free();
+	}
+
+	rumpuser_set_curlwp(NULL);
+
+	newlwp->l_cpu = newlwp->l_target_cpu = l->l_cpu;
+	newlwp->l_mutex = l->l_mutex;
+	newlwp->l_pflag |= LP_RUNNING;
+
+	rumpuser_set_curlwp(newlwp);
+
+	l->l_mutex = NULL;
+	l->l_cpu = NULL;
+	l->l_pflag &= ~LP_RUNNING;
+
+	if (l->l_flag & LW_WEXIT) {
+		lwproc_freelwp(l);
+	}
+}
+
+void
+rump_lwproc_releaselwp(void)
+{
+	struct proc *p;
+	struct lwp *l = curlwp;
+
+	p = l->l_proc;
+	mutex_enter(p->p_lock);
+	KASSERT(l->l_refcnt != 0);
+	l->l_refcnt--;
+	mutex_exit(p->p_lock);
+	l->l_flag |= LW_WEXIT; /* will be released when unscheduled */
+}
+
+struct lwp *
+rump_lwproc_curlwp(void)
+{
+	struct lwp *l = curlwp;
+
+	if (l->l_flag & LW_WEXIT)
+		return NULL;
+	return l;
+}
