Module Name:    src
Committed By:   dyoung
Date:           Tue Oct 25 22:26:18 UTC 2011

Modified Files:
        src/sys/net: if.c if.h

Log Message:
Document the ifioctl locking in comments.

Add a missing percpu_free(9) call.


To generate a diff of this commit:
cvs rdiff -u -r1.254 -r1.255 src/sys/net/if.c
cvs rdiff -u -r1.153 -r1.154 src/sys/net/if.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/net/if.c
diff -u src/sys/net/if.c:1.254 src/sys/net/if.c:1.255
--- src/sys/net/if.c:1.254	Wed Oct 19 21:29:51 2011
+++ src/sys/net/if.c	Tue Oct 25 22:26:18 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: if.c,v 1.254 2011/10/19 21:29:51 dyoung Exp $	*/
+/*	$NetBSD: if.c,v 1.255 2011/10/25 22:26:18 dyoung Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001, 2008 The NetBSD Foundation, Inc.
@@ -90,7 +90,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if.c,v 1.254 2011/10/19 21:29:51 dyoung Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if.c,v 1.255 2011/10/25 22:26:18 dyoung Exp $");
 
 #include "opt_inet.h"
 
@@ -297,6 +297,9 @@ int
 if_nullioctl(struct ifnet *ifp, u_long cmd, void *data)
 {
 
+	/* Wake ifioctl_detach(), who may wait for all threads to
+	 * quit the critical section.
+	 */
 	cv_signal(&ifp->if_ioctl_lock->il_emptied);
 	return ENXIO;
 }
@@ -504,7 +507,8 @@ if_attach(struct ifnet *ifp)
 	TAILQ_INIT(&ifp->if_addrlist);
 	TAILQ_INSERT_TAIL(&ifnet, ifp, if_list);
 
-	ifioctl_attach(ifp);	/* XXX ifioctl_attach can fail! */ 
+	if (ifioctl_attach(ifp) != 0)
+		panic("%s: ifioctl_attach() failed", __func__);
 
 	mutex_enter(&index_gen_mtx);
 	ifp->if_index_gen = index_gen++;
@@ -1702,6 +1706,11 @@ ifnet_lock_enter(struct ifnet_lock *il)
 {
 	uint64_t *nenter;
 
+	/* Before trying to acquire the mutex, increase the count of threads
+	 * who have entered or who wait to enter the critical section.
+	 * Avoid one costly locked memory transaction by keeping a count for
+	 * each CPU.
+	 */
 	nenter = percpu_getref(il->il_nenter);
 	(*nenter)++;
 	percpu_putref(il->il_nenter);
@@ -1711,6 +1720,9 @@ ifnet_lock_enter(struct ifnet_lock *il)
 static void
 ifnet_lock_exit(struct ifnet_lock *il)
 {
+	/* Increase the count of threads who have exited the critical
+	 * section.  Increase while we still hold the lock.
+	 */
 	il->il_nexit++;
 	mutex_exit(&il->il_lock);
 }
@@ -1857,6 +1869,10 @@ ifioctl(struct socket *so, u_long cmd, v
 	return error;
 }
 
+/* This callback adds to the sum in `arg' the number of
+ * threads on `ci' who have entered or who wait to enter the
+ * critical section.
+ */
 static void
 ifnet_lock_sum(void *p, void *arg, struct cpu_info *ci)
 {
@@ -1865,6 +1881,9 @@ ifnet_lock_sum(void *p, void *arg, struc
 	*sum += *nenter;
 }
 
+/* Return the number of threads who have entered or who wait
+ * to enter the critical section on all CPUs.
+ */
 static uint64_t
 ifnet_lock_entrances(struct ifnet_lock *il)
 {
@@ -1880,9 +1899,13 @@ ifioctl_attach(struct ifnet *ifp)
 {
 	struct ifnet_lock *il;
 
+	/* If the driver has not supplied its own if_ioctl, then
+	 * supply the default.
+	 */
 	if (ifp->if_ioctl == NULL)
 		ifp->if_ioctl = ifioctl_common;
 
+	/* Create an ifnet_lock for synchronizing ifioctls. */
 	if ((il = kmem_zalloc(sizeof(*il), KM_SLEEP)) == NULL)
 		return ENOMEM;
 
@@ -1900,6 +1923,11 @@ ifioctl_attach(struct ifnet *ifp)
 	return 0;
 }
 
+/*
+ * This must not be called until after `ifp' has been withdrawn from the
+ * ifnet tables so that ifioctl() cannot get a handle on it by calling
+ * ifunit().
+ */
 static void
 ifioctl_detach(struct ifnet *ifp)
 {
@@ -1907,11 +1935,25 @@ ifioctl_detach(struct ifnet *ifp)
 
 	il = ifp->if_ioctl_lock;
 	mutex_enter(&il->il_lock);
+	/* Install if_nullioctl to make sure that any thread that
+	 * subsequently enters the critical section will quit it
+	 * immediately and signal the condition variable that we
+	 * wait on, below.
+	 */
 	ifp->if_ioctl = if_nullioctl;
+	/* Sleep while threads are still in the critical section or
+	 * wait to enter it.
+	 */
 	while (ifnet_lock_entrances(il) != il->il_nexit)
 		cv_wait(&il->il_emptied, &il->il_lock);
+	/* At this point, we are the only thread still in the critical
+	 * section, and no new thread can get a handle on the ifioctl
+	 * lock, so it is safe to free its memory.
+	 */
 	mutex_exit(&il->il_lock);
 	ifp->if_ioctl_lock = NULL;
+	percpu_free(il->il_nenter, sizeof(uint64_t));
+	il->il_nenter = NULL;
 	kmem_free(il, sizeof(*il));
 }
 

Index: src/sys/net/if.h
diff -u src/sys/net/if.h:1.153 src/sys/net/if.h:1.154
--- src/sys/net/if.h:1.153	Wed Oct 19 21:29:51 2011
+++ src/sys/net/if.h	Tue Oct 25 22:26:18 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: if.h,v 1.153 2011/10/19 21:29:51 dyoung Exp $	*/
+/*	$NetBSD: if.h,v 1.154 2011/10/25 22:26:18 dyoung Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2001 The NetBSD Foundation, Inc.
@@ -209,10 +209,22 @@ struct ifnet_lock;
 #include <sys/percpu.h>
 
 struct ifnet_lock {
-	kmutex_t il_lock;
-	uint64_t il_nexit;
-	percpu_t *il_nenter;
-	kcondvar_t il_emptied;
+	kmutex_t il_lock;	/* Protects the critical section. */
+	uint64_t il_nexit;	/* Counts threads across all CPUs who
+				 * have exited the critical section.
+				 * Access to il_nexit is synchronized
+				 * by il_lock.
+				 */
+	percpu_t *il_nenter;	/* Counts threads on each CPU who have
+				 * entered or who wait to enter the
+				 * critical section protected by il_lock.
+				 * Synchronization is not required.
+				 */
+	kcondvar_t il_emptied;	/* The ifnet_lock user must arrange for
+				 * the last threads in the critical
+				 * section to signal this condition variable
+				 * before they leave.
+				 */
 };
 #endif /* _KERNEL */
 

Reply via email to the mailing list.