Module Name:    src
Committed By:   snj
Date:           Fri May 12 05:44:10 UTC 2017

Modified Files:
        src/sys/net [netbsd-7]: route.c
        src/sys/netinet [netbsd-7]: ip_flow.c
        src/sys/netinet6 [netbsd-7]: ip6_flow.c nd6.c

Log Message:
Pull up following revision(s) (requested by skrll/ozaki-r in ticket #1402):
        sys/net/route.c: revision 1.170 via patch
        sys/netinet/ip_flow.c: revision 1.73 via patch
        sys/netinet6/ip6_flow.c: revision 1.28 via patch
        sys/netinet6/nd6.c: revision 1.203 via patch
Run timers in workqueue
Timers (such as nd6_timer) typically free/destroy some data in callout
(softint) context. If we apply psz/psref to such data, we cannot do the
free/destroy processing there, because psz/psref synchronization cannot
be used in softint context. So run the timer callbacks in workqueue
works instead (normal LWP context).
Enqueueing a work twice (i.e., calling workqueue_enqueue again before
the previously enqueued work has been scheduled) isn't allowed. For
nd6_timer and rt_timer_timer this cannot happen, because callout_reset
is called only from the workqueue's work. OTOH, ip{,6}flow_slowtimo's
callout can fire before its work has started and completed, because the
callout is invoked periodically regardless of whether the work has
finished. To avoid this, add a flag for each protocol; the flag is set
true when a work is enqueued and set false after the work has finished.
workqueue_enqueue is called only if the flag is false.
Proposed on tech-net and tech-kern.
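
As an illustration of the pattern described above, here is a minimal,
hedged sketch of deferring a periodic callout to a workqueue with a
double-enqueue guard. All example_* names are hypothetical and not part
of this commit; see the actual diff below for the real rt_timer,
ip{,6}flow and nd6 changes.

/*
 * Minimal illustrative sketch (hypothetical example_* names) of the
 * callout -> workqueue pattern: the callout handler runs in softint
 * context and only hands the job off to a workqueue, where the work
 * runs in normal LWP context and may use psz/psref for destruction.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/workqueue.h>

static struct workqueue *example_wq;
static struct work       example_wk;
static callout_t         example_ch;
static bool              example_work_enqueued = false;

/* Runs in normal LWP context; a safe place to free/destroy data. */
static void
example_work(struct work *wk, void *arg)
{

        /* ... expire and free entries protected by psz/psref ... */

        example_work_enqueued = false;
}

/* Periodic callout (softint); fires regardless of work completion. */
static void
example_slowtimo(void *arg)
{
        bool enqueue = false;

        /* Avoid enqueueing the same struct work twice. */
        KERNEL_LOCK(1, NULL);
        if (!example_work_enqueued) {
                example_work_enqueued = true;
                enqueue = true;
        }
        KERNEL_UNLOCK_ONE(NULL);

        if (enqueue)
                workqueue_enqueue(example_wq, &example_wk, NULL);

        /* Rearm so the callout keeps firing once per second. */
        callout_reset(&example_ch, hz, example_slowtimo, NULL);
}

static void
example_init(void)
{
        int error;

        error = workqueue_create(&example_wq, "example_slowtimo",
            example_work, NULL, PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
        if (error != 0)
                panic("%s: workqueue_create failed (%d)", __func__, error);

        callout_init(&example_ch, CALLOUT_MPSAFE);
        callout_reset(&example_ch, hz, example_slowtimo, NULL);
}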


To generate a diff of this commit:
cvs rdiff -u -r1.132 -r1.132.2.1 src/sys/net/route.c
cvs rdiff -u -r1.64 -r1.64.2.1 src/sys/netinet/ip_flow.c
cvs rdiff -u -r1.23 -r1.23.2.1 src/sys/netinet6/ip6_flow.c
cvs rdiff -u -r1.152.2.3 -r1.152.2.4 src/sys/netinet6/nd6.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/net/route.c
diff -u src/sys/net/route.c:1.132 src/sys/net/route.c:1.132.2.1
--- src/sys/net/route.c:1.132	Fri Jun  6 01:27:32 2014
+++ src/sys/net/route.c	Fri May 12 05:44:10 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: route.c,v 1.132 2014/06/06 01:27:32 rmind Exp $	*/
+/*	$NetBSD: route.c,v 1.132.2.1 2017/05/12 05:44:10 snj Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2008 The NetBSD Foundation, Inc.
@@ -93,7 +93,7 @@
 #include "opt_route.h"
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: route.c,v 1.132 2014/06/06 01:27:32 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: route.c,v 1.132.2.1 2017/05/12 05:44:10 snj Exp $");
 
 #include <sys/param.h>
 #include <sys/kmem.h>
@@ -110,6 +110,7 @@ __KERNEL_RCSID(0, "$NetBSD: route.c,v 1.
 #include <sys/ioctl.h>
 #include <sys/pool.h>
 #include <sys/kauth.h>
+#include <sys/workqueue.h>
 
 #include <net/if.h>
 #include <net/if_dl.h>
@@ -133,6 +134,8 @@ struct pool rtentry_pool;
 struct pool rttimer_pool;
 
 struct callout rt_timer_ch; /* callout for rt_timer_timer() */
+struct workqueue	*rt_timer_wq;
+struct work		rt_timer_wk;
 
 #ifdef RTFLUSH_DEBUG
 static int _rtcache_debug = 0;
@@ -1031,14 +1034,22 @@ static int rt_init_done = 0;
  * that this is run when the first queue is added...
  */
 
+static void rt_timer_work(struct work *, void *);
+
 void
 rt_timer_init(void)
 {
+	int error;
+
 	assert(rt_init_done == 0);
 
 	LIST_INIT(&rttimer_queue_head);
 	callout_init(&rt_timer_ch, 0);
 	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
+	error = workqueue_create(&rt_timer_wq, "rt_timer",
+	    rt_timer_work, NULL, PRI_SOFTNET, IPL_SOFTNET, WQ_MPSAFE);
+	if (error)
+		panic("%s: workqueue_create failed (%d)\n", __func__, error);
 	rt_init_done = 1;
 }
 
@@ -1171,9 +1182,8 @@ rt_timer_add(struct rtentry *rt,
 	return 0;
 }
 
-/* ARGSUSED */
-void
-rt_timer_timer(void *arg)
+static void
+rt_timer_work(struct work *wk, void *arg)
 {
 	struct rttimer_queue *rtq;
 	struct rttimer *r;
@@ -1198,6 +1208,13 @@ rt_timer_timer(void *arg)
 	callout_reset(&rt_timer_ch, hz, rt_timer_timer, NULL);
 }
 
+void
+rt_timer_timer(void *arg)
+{
+
+	workqueue_enqueue(rt_timer_wq, &rt_timer_wk, NULL);
+}
+
 static struct rtentry *
 _rtcache_init(struct route *ro, int flag)
 {

Index: src/sys/netinet/ip_flow.c
diff -u src/sys/netinet/ip_flow.c:1.64 src/sys/netinet/ip_flow.c:1.64.2.1
--- src/sys/netinet/ip_flow.c:1.64	Thu May 22 22:01:12 2014
+++ src/sys/netinet/ip_flow.c	Fri May 12 05:44:10 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: ip_flow.c,v 1.64 2014/05/22 22:01:12 rmind Exp $	*/
+/*	$NetBSD: ip_flow.c,v 1.64.2.1 2017/05/12 05:44:10 snj Exp $	*/
 
 /*-
  * Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.64 2014/05/22 22:01:12 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 1.64.2.1 2017/05/12 05:44:10 snj Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -45,6 +45,7 @@ __KERNEL_RCSID(0, "$NetBSD: ip_flow.c,v 
 #include <sys/kernel.h>
 #include <sys/pool.h>
 #include <sys/sysctl.h>
+#include <sys/workqueue.h>
 
 #include <net/if.h>
 #include <net/if_dl.h>
@@ -96,6 +97,10 @@ static int ip_hashsize = IPFLOW_DEFAULT_
 
 static void ipflow_sysctl_init(struct sysctllog **);
 
+static void ipflow_slowtimo_work(struct work *, void *);
+static struct workqueue	*ipflow_slowtimo_wq;
+static struct work	ipflow_slowtimo_wk;
+
 static size_t 
 ipflow_hash(const struct ip *ip)
 {
@@ -130,6 +135,12 @@ ipflow_lookup(const struct ip *ip)
 void
 ipflow_poolinit(void)
 {
+	int error;
+
+	error = workqueue_create(&ipflow_slowtimo_wq, "ipflow_slowtimo",
+	    ipflow_slowtimo_work, NULL, PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
+	if (error != 0)
+		panic("%s: workqueue_create failed (%d)\n", __func__, error);
 
 	pool_init(&ipflow_pool, sizeof(struct ipflow), 0, 0, 0, "ipflowpl",
 	    NULL, IPL_NET);
@@ -390,8 +401,10 @@ ipflow_reap(bool just_one)
 	return NULL;
 }
 
-void
-ipflow_slowtimo(void)
+static bool ipflow_work_enqueued = false;
+
+static void
+ipflow_slowtimo_work(struct work *wk, void *arg)
 {
 	struct rtentry *rt;
 	struct ipflow *ipf, *next_ipf;
@@ -415,11 +428,28 @@ ipflow_slowtimo(void)
 			ipf->ipf_uses = 0;
 		}
 	}
+	ipflow_work_enqueued = false;
 	KERNEL_UNLOCK_ONE(NULL);
 	mutex_exit(softnet_lock);
 }
 
 void
+ipflow_slowtimo(void)
+{
+
+	/* Avoid enqueuing another work when one is already enqueued */
+	KERNEL_LOCK(1, NULL);
+	if (ipflow_work_enqueued) {
+		KERNEL_UNLOCK_ONE(NULL);
+		return;
+	}
+	ipflow_work_enqueued = true;
+	KERNEL_UNLOCK_ONE(NULL);
+
+	workqueue_enqueue(ipflow_slowtimo_wq, &ipflow_slowtimo_wk, NULL);
+}
+
+void
 ipflow_create(const struct route *ro, struct mbuf *m)
 {
 	const struct ip *const ip = mtod(m, const struct ip *);

Index: src/sys/netinet6/ip6_flow.c
diff -u src/sys/netinet6/ip6_flow.c:1.23 src/sys/netinet6/ip6_flow.c:1.23.2.1
--- src/sys/netinet6/ip6_flow.c:1.23	Tue May 20 20:23:56 2014
+++ src/sys/netinet6/ip6_flow.c	Fri May 12 05:44:10 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: ip6_flow.c,v 1.23 2014/05/20 20:23:56 bouyer Exp $	*/
+/*	$NetBSD: ip6_flow.c,v 1.23.2.1 2017/05/12 05:44:10 snj Exp $	*/
 
 /*-
  * Copyright (c) 2007 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ip6_flow.c,v 1.23 2014/05/20 20:23:56 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ip6_flow.c,v 1.23.2.1 2017/05/12 05:44:10 snj Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -52,6 +52,7 @@ __KERNEL_RCSID(0, "$NetBSD: ip6_flow.c,v
 #include <sys/kernel.h>
 #include <sys/pool.h>
 #include <sys/sysctl.h>
+#include <sys/workqueue.h>
 
 #include <net/if.h>
 #include <net/if_dl.h>
@@ -92,6 +93,10 @@ static struct ip6flowhead *ip6flowtable 
 static struct ip6flowhead ip6flowlist;
 static int ip6flow_inuse;
 
+static void ip6flow_slowtimo_work(struct work *, void *);
+static struct workqueue	*ip6flow_slowtimo_wq;
+static struct work	ip6flow_slowtimo_wk;
+
 /*
  * Insert an ip6flow into the list.
  */
@@ -182,6 +187,12 @@ ip6flow_init(int table_size)
 {
 	struct ip6flowhead *new_table;
 	size_t i;
+	int error;
+
+	error = workqueue_create(&ip6flow_slowtimo_wq, "ip6flow_slowtimo",
+	    ip6flow_slowtimo_work, NULL, PRI_SOFTNET, IPL_NET, WQ_MPSAFE);
+	if (error != 0)
+		panic("%s: workqueue_create failed (%d)\n", __func__, error);
 
 	new_table = (struct ip6flowhead *)malloc(sizeof(struct ip6flowhead) *
 	    table_size, M_RTABLE, M_NOWAIT);
@@ -415,8 +426,10 @@ ip6flow_reap(int just_one)
 	return NULL;
 }
 
+static bool ip6flow_work_enqueued = false;
+
 void
-ip6flow_slowtimo(void)
+ip6flow_slowtimo_work(struct work *wk, void *arg)
 {
 	struct ip6flow *ip6f, *next_ip6f;
 
@@ -436,11 +449,28 @@ ip6flow_slowtimo(void)
 			ip6f->ip6f_forwarded = 0;
 		}
 	}
+	ip6flow_work_enqueued = false;
 
 	KERNEL_UNLOCK_ONE(NULL);
 	mutex_exit(softnet_lock);
 }
 
+void
+ip6flow_slowtimo(void)
+{
+
+	/* Avoid enqueuing another work when one is already enqueued */
+	KERNEL_LOCK(1, NULL);
+	if (ip6flow_work_enqueued) {
+		KERNEL_UNLOCK_ONE(NULL);
+		return;
+	}
+	ip6flow_work_enqueued = true;
+	KERNEL_UNLOCK_ONE(NULL);
+
+	workqueue_enqueue(ip6flow_slowtimo_wq, &ip6flow_slowtimo_wk, NULL);
+}
+
 /*
  * We have successfully forwarded a packet using the normal
  * IPv6 stack. Now create/update a flow.

Index: src/sys/netinet6/nd6.c
diff -u src/sys/netinet6/nd6.c:1.152.2.3 src/sys/netinet6/nd6.c:1.152.2.4
--- src/sys/netinet6/nd6.c:1.152.2.3	Mon Apr  6 01:32:33 2015
+++ src/sys/netinet6/nd6.c	Fri May 12 05:44:10 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: nd6.c,v 1.152.2.3 2015/04/06 01:32:33 snj Exp $	*/
+/*	$NetBSD: nd6.c,v 1.152.2.4 2017/05/12 05:44:10 snj Exp $	*/
 /*	$KAME: nd6.c,v 1.279 2002/06/08 11:16:51 itojun Exp $	*/
 
 /*
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nd6.c,v 1.152.2.3 2015/04/06 01:32:33 snj Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nd6.c,v 1.152.2.4 2017/05/12 05:44:10 snj Exp $");
 
 #include "bridge.h"
 #include "carp.h"
@@ -53,6 +53,7 @@ __KERNEL_RCSID(0, "$NetBSD: nd6.c,v 1.15
 #include <sys/syslog.h>
 #include <sys/queue.h>
 #include <sys/cprng.h>
+#include <sys/workqueue.h>
 
 #include <net/if.h>
 #include <net/if_dl.h>
@@ -122,11 +123,14 @@ static void nd6_setmtu0(struct ifnet *, 
 static void nd6_slowtimo(void *);
 static int regen_tmpaddr(struct in6_ifaddr *);
 static struct llinfo_nd6 *nd6_free(struct rtentry *, int);
+static void nd6_timer_work(struct work *, void *);
 static void nd6_llinfo_timer(void *);
 static void clear_llinfo_pqueue(struct llinfo_nd6 *);
 
 callout_t nd6_slowtimo_ch;
 callout_t nd6_timer_ch;
+static struct workqueue	*nd6_timer_wq;
+static struct work	nd6_timer_wk;
 extern callout_t in6_tmpaddrtimer_ch;
 
 static int fill_drlist(void *, size_t *, size_t);
@@ -148,6 +152,7 @@ void
 nd6_init(void)
 {
 	static int nd6_init_done = 0;
+	int error;
 
 	if (nd6_init_done) {
 		log(LOG_NOTICE, "nd6_init called more than once(ignored)\n");
@@ -162,6 +167,11 @@ nd6_init(void)
 	callout_init(&nd6_slowtimo_ch, CALLOUT_MPSAFE);
 	callout_init(&nd6_timer_ch, CALLOUT_MPSAFE);
 
+	error = workqueue_create(&nd6_timer_wq, "nd6_timer",
+	    nd6_timer_work, NULL, PRI_SOFTNET, IPL_SOFTNET, 0);
+	if (error)
+		panic("%s: workqueue_create failed (%d)\n", __func__, error);
+
 	/* start timer */
 	callout_reset(&nd6_slowtimo_ch, ND6_SLOWTIMER_INTERVAL * hz,
 	    nd6_slowtimo, NULL);
@@ -541,7 +551,7 @@ nd6_llinfo_timer(void *arg)
  * ND6 timer routine to expire default route list and prefix list
  */
 void
-nd6_timer(void *ignored_arg)
+nd6_timer_work(struct work *wk, void *arg)
 {
 	struct nd_defrouter *next_dr, *dr;
 	struct nd_prefix *next_pr, *pr;
@@ -661,6 +671,13 @@ nd6_timer(void *ignored_arg)
 	mutex_exit(softnet_lock);
 }
 
+void
+nd6_timer(void *ignored_arg)
+{
+
+	workqueue_enqueue(nd6_timer_wq, &nd6_timer_wk, NULL);
+}
+
 /* ia6: deprecated/invalidated temporary address */
 static int
 regen_tmpaddr(struct in6_ifaddr *ia6)
