Module Name:    src
Committed By:   knakahara
Date:           Fri Feb 17 12:16:37 UTC 2017

Modified Files:
        src/sys/dev/pci: if_wm.c

Log Message:
Unify the Tx softint and Rx softint into a single per-queue softint, matching
the ixg(4) design.

This also makes it possible to remove wm_deferred_start(), which took every
txq's txq_lock in turn, so it reduces lock contention between the Tx
interrupt handler and the Tx softint handler.
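
For context, the per-queue softint pattern adopted here looks roughly like
the following minimal sketch. The struct and function names below are
invented for illustration; softint_establish(9), softint_schedule(9), and
mutex(9) are the real kernel interfaces used in the diff.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/mutex.h>

struct queue {
	kmutex_t	*q_lock;
	bool		 q_stopping;
	void		*q_si;		/* softint cookie, cf. wmq_si */
};

/*
 * Softint: Tx and Rx completion both run here under the queue lock
 * (the real driver uses separate Tx and Rx locks).
 */
static void
queue_softint(void *arg)
{
	struct queue *q = arg;

	mutex_enter(q->q_lock);
	if (!q->q_stopping) {
		/* txeof, deferred start, and rxeof would run here */
	}
	mutex_exit(q->q_lock);
}

/* Hard interrupt: do the minimum and hand off to the softint. */
static int
queue_hw_intr(void *arg)
{
	struct queue *q = arg;

	softint_schedule(q->q_si);
	return 1;
}

/* Setup: one softint per queue, as wm_setup_msix() now does. */
static int
queue_setup(struct queue *q)
{
	q->q_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
	    queue_softint, q);
	return (q->q_si == NULL) ? ENOMEM : 0;
}

Because Tx completion and the deferred start now happen in the same
per-queue handler, no code path needs to walk every txq taking each
txq_lock, which is what wm_deferred_start() did.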


To generate a diff of this commit:
cvs rdiff -u -r1.483 -r1.484 src/sys/dev/pci/if_wm.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/dev/pci/if_wm.c
diff -u src/sys/dev/pci/if_wm.c:1.483 src/sys/dev/pci/if_wm.c:1.484
--- src/sys/dev/pci/if_wm.c:1.483	Fri Feb 17 11:57:26 2017
+++ src/sys/dev/pci/if_wm.c	Fri Feb 17 12:16:37 2017
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_wm.c,v 1.483 2017/02/17 11:57:26 knakahara Exp $	*/
+/*	$NetBSD: if_wm.c,v 1.484 2017/02/17 12:16:37 knakahara Exp $	*/
 
 /*
  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
@@ -84,7 +84,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.483 2017/02/17 11:57:26 knakahara Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.484 2017/02/17 12:16:37 knakahara Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
@@ -404,6 +404,8 @@ struct wm_queue {
 
 	struct wm_txqueue wmq_txq;
 	struct wm_rxqueue wmq_rxq;
+
+	void *wmq_si;
 };
 
 struct wm_phyop {
@@ -709,8 +711,8 @@ static void	wm_nq_start_locked(struct if
 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
-static void	wm_deferred_start(struct ifnet *);
 static void	wm_deferred_start_locked(struct wm_txqueue *);
+static void	wm_handle_queue(void *);
 /* Interrupt */
 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
 static void	wm_rxeof(struct wm_rxqueue *);
@@ -1645,7 +1647,6 @@ wm_attach(device_t parent, device_t self
 	bool force_clear_smbi;
 	uint32_t link_mode;
 	uint32_t reg;
-	void (*deferred_start_func)(struct ifnet *) = NULL;
 
 	sc->sc_dev = self;
 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
@@ -2539,16 +2540,12 @@ alloc_retry:
 	ifp->if_ioctl = wm_ioctl;
 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
 		ifp->if_start = wm_nq_start;
-		if (sc->sc_nqueues > 1) {
+		if (sc->sc_nqueues > 1)
 			ifp->if_transmit = wm_nq_transmit;
-			deferred_start_func = wm_deferred_start;
-		}
 	} else {
 		ifp->if_start = wm_start;
-		if (sc->sc_nqueues > 1) {
+		if (sc->sc_nqueues > 1)
 			ifp->if_transmit = wm_transmit;
-			deferred_start_func = wm_deferred_start;
-		}
 	}
 	ifp->if_watchdog = wm_watchdog;
 	ifp->if_init = wm_init;
@@ -2649,7 +2646,6 @@ alloc_retry:
 	/* Attach the interface. */
 	if_initialize(ifp);
 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
-	if_deferred_start_init(ifp, deferred_start_func);
 	ether_ifattach(ifp, enaddr);
 	if_register(ifp);
 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
@@ -4680,8 +4676,19 @@ wm_setup_msix(struct wm_softc *sc)
 			    "for TX and RX interrupting at %s\n", intrstr);
 		}
 		sc->sc_ihs[intr_idx] = vih;
-		wmq->wmq_id= qidx;
+		wmq->wmq_id = qidx;
 		wmq->wmq_intr_idx = intr_idx;
+		wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
+		    wm_handle_queue, wmq);
+		if (wmq->wmq_si == NULL) {
+			aprint_error_dev(sc->sc_dev,
+			    "unable to establish queue[%d] handler\n",
+			    wmq->wmq_id);
+			pci_intr_disestablish(sc->sc_pc,
+			    sc->sc_ihs[wmq->wmq_intr_idx]);
+			sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
+			goto fail;
+		}
 
 		txrx_established++;
 		intr_idx++;
@@ -7085,11 +7092,11 @@ wm_nq_transmit(struct ifnet *ifp, struct
 	 * There are two situations in which this mutex_tryenter() can fail
 	 * at run time:
 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
-	 *     (2) contention with deferred if_start softint(wm_deferred_start())
+	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
 	 * In case (1), the last packet enqueued to txq->txq_interq is
-	 * dequeued by wm_deferred_start(). So, it does not get stuck.
+	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
 	 * In case (2), the last packet enqueued to txq->txq_interq is likewise
-	 * dequeued by wm_deferred_start(). So, it does not get stuck, either.
+	 * dequeued by wm_deferred_start_locked(), so it does not get stuck either.
 	 */
 	if (mutex_tryenter(txq->txq_lock)) {
 		if (!txq->txq_stopping)
@@ -7395,34 +7402,6 @@ wm_nq_send_common_locked(struct ifnet *i
 }
 
 static void
-wm_deferred_start(struct ifnet *ifp)
-{
-	struct wm_softc *sc = ifp->if_softc;
-	int qid = 0;
-
-	/*
-	 * Try to transmit on all Tx queues. Passing a txq somehow and
-	 * transmitting only on the txq may be better.
-	 */
-	for (; qid < sc->sc_nqueues; qid++) {
-		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
-
-		/*
-		 * We must mutex_enter(txq->txq_lock) instead of
-		 * mutex_tryenter(txq->txq_lock) here.
-		 * mutex_tryenter(txq->txq_lock) would fail as this txq's
-		 * txq_stopping flag is being set. In this case, this device
-		 * begin to stop, so we must not start any Tx processing.
-		 * However, it may start Tx processing for sc_queue[qid+1]
-		 * if we use mutex_tryenter() here.
-		 */
-		mutex_enter(txq->txq_lock);
-		wm_deferred_start_locked(txq);
-		mutex_exit(txq->txq_lock);
-	}
-}
-
-static void
 wm_deferred_start_locked(struct wm_txqueue *txq)
 {
 	struct wm_softc *sc = txq->txq_sc;
@@ -8234,9 +8213,9 @@ static int
 wm_intr_legacy(void *arg)
 {
 	struct wm_softc *sc = arg;
-	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
-	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
-	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+	struct wm_queue *wmq = &sc->sc_queue[0];
+	struct wm_txqueue *txq = &wmq->wmq_txq;
+	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
 	uint32_t icr, rndval = 0;
 	int handled = 0;
 
@@ -8314,7 +8293,7 @@ wm_intr_legacy(void *arg)
 
 	if (handled) {
 		/* Try to get more packets going. */
-		if_schedule_deferred_start(ifp);
+		softint_schedule(wmq->wmq_si);
 	}
 
 	return handled;
@@ -8353,7 +8332,6 @@ wm_txrxintr_msix(void *arg)
 	struct wm_txqueue *txq = &wmq->wmq_txq;
 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
 	struct wm_softc *sc = txq->txq_sc;
-	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 
 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
 
@@ -8371,17 +8349,7 @@ wm_txrxintr_msix(void *arg)
 
 	WM_Q_EVCNT_INCR(txq, txdw);
 	wm_txeof(sc, txq);
-
-	/* Try to get more packets going. */
-	if (pcq_peek(txq->txq_interq) != NULL)
-		if_schedule_deferred_start(ifp);
-	/*
-	 * There are still some upper layer processing which call
-	 * ifp->if_start(). e.g. ALTQ
-	 */
-	if (wmq->wmq_id == 0)
-		if_schedule_deferred_start(ifp);
-
+	/* Deferred start is done in wm_handle_queue(). */
 	mutex_exit(txq->txq_lock);
 
 	DPRINTF(WM_DEBUG_RX,
@@ -8397,11 +8365,40 @@ wm_txrxintr_msix(void *arg)
 	wm_rxeof(rxq);
 	mutex_exit(rxq->rxq_lock);
 
+	softint_schedule(wmq->wmq_si);
+
 	wm_txrxintr_enable(wmq);
 
 	return 1;
 }
 
+static void
+wm_handle_queue(void *arg)
+{
+	struct wm_queue *wmq = arg;
+	struct wm_txqueue *txq = &wmq->wmq_txq;
+	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
+	struct wm_softc *sc = txq->txq_sc;
+
+	mutex_enter(txq->txq_lock);
+	if (txq->txq_stopping) {
+		mutex_exit(txq->txq_lock);
+		return;
+	}
+	wm_txeof(sc, txq);
+	wm_deferred_start_locked(txq);
+	mutex_exit(txq->txq_lock);
+
+	mutex_enter(rxq->rxq_lock);
+	if (rxq->rxq_stopping) {
+		mutex_exit(rxq->rxq_lock);
+		return;
+	}
+	WM_Q_EVCNT_INCR(rxq, rxintr);
+	wm_rxeof(rxq);
+	mutex_exit(rxq->rxq_lock);
+}
+
 /*
  * wm_linkintr_msix:
  *

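As an aside, the reason the mutex_tryenter() in wm_nq_transmit() is safe
(see the comment in the hunk above) can be shown with a small hypothetical
sketch. The names below are invented; pcq(9), mutex(9), and m_freem() are
the real kernel primitives.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pcq.h>

struct txq {
	kmutex_t	*t_lock;
	pcq_t		*t_interq;	/* cf. txq->txq_interq */
	bool		 t_stopping;
};

/* Drain everything queued so far (hardware hand-off elided). */
static void
txq_send_locked(struct txq *t)
{
	struct mbuf *m;

	while ((m = pcq_get(t->t_interq)) != NULL)
		m_freem(m);	/* stand-in for filling the NIC ring */
}

static int
txq_transmit(struct txq *t, struct mbuf *m)
{
	/* Enqueue first, so any lock holder is guaranteed to see it. */
	if (!pcq_put(t->t_interq, m)) {
		m_freem(m);
		return ENOBUFS;
	}
	/*
	 * If the try-lock fails, the current holder (the interrupt
	 * handler or the per-queue softint) dequeues the packet for
	 * us, so losing this race is harmless.
	 */
	if (mutex_tryenter(t->t_lock)) {
		if (!t->t_stopping)
			txq_send_locked(t);
		mutex_exit(t->t_lock);
	}
	return 0;
}

The invariant is that the packet enters the pcq before the try-lock, so
whichever context holds the lock is guaranteed to observe and drain it.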