Module Name:    src
Committed By:   riastradh
Date:           Thu Apr 30 03:41:31 UTC 2020

Modified Files:
        src/sys/dev/pci: if_wm.c

Log Message:
Revert "- rnd_add_*() call must be serialized. Unserialized accesses broke the"

It is no longer necessary to serialize calls to rnd_add_data, so we
can use the same rndsource for every queue.
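For reference, the resulting pattern, condensed from the diff below
(not a verbatim excerpt), is a single krndsource_t in the softc,
attached once at device attach time and shared by every queue:

	/* softc: one random source for the whole device */
	krndsource_t rnd_source;

	/* wm_attach: register it once */
	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);

	/* wm_txeof/wm_rxeof: feed it from any queue's completion path */
	if (count != 0)
		rnd_add_uint32(&sc->rnd_source, count);

	/* wm_detach: unregister */
	rnd_detach_source(&sc->rnd_source);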

This also reverts some unrelated changes that were mixed in:

1. WM_NRXDESC was made unsigned.
2. The specific sample values fed to the rndsource were changed.

(1) will be redone in a separate commit.  (2) was buggy (wrote `='
where `|=' was meant) and is not needed; it's fine to do rnd_add_data
in a thread or softint too, not just in a hard interrupt handler.
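Concretely, the reverted sample construction in wm_intr_legacy stored
the RX ring index in the low bits of rndval and then clobbered it,
because the TX half wrote with `=' instead of `|=' (a condensed sketch
of the removed code, not a verbatim excerpt):

	uint32_t rndval = 0;

	/* Fill lower bits with RX index. */
	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
	...
	/* Intended `|=' to fill the upper bits with the TX index; */
	/* the plain `=' discards the RX bits stored above. */
	rndval = txq->txq_next * WM_NRXDESC;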


To generate a diff of this commit:
cvs rdiff -u -r1.674 -r1.675 src/sys/dev/pci/if_wm.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/dev/pci/if_wm.c
diff -u src/sys/dev/pci/if_wm.c:1.674 src/sys/dev/pci/if_wm.c:1.675
--- src/sys/dev/pci/if_wm.c:1.674	Thu Apr  9 06:55:51 2020
+++ src/sys/dev/pci/if_wm.c	Thu Apr 30 03:41:31 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_wm.c,v 1.674 2020/04/09 06:55:51 jdolecek Exp $	*/
+/*	$NetBSD: if_wm.c,v 1.675 2020/04/30 03:41:31 riastradh Exp $	*/
 
 /*
  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
@@ -82,7 +82,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.674 2020/04/09 06:55:51 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.675 2020/04/30 03:41:31 riastradh Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
@@ -239,7 +239,7 @@ static int wm_watchdog_timeout = WM_WATC
  * packet.  We allocate 256 receive descriptors, each with a 2k
  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
  */
-#define	WM_NRXDESC		256U
+#define	WM_NRXDESC		256
 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
@@ -472,7 +472,6 @@ struct wm_queue {
 	bool wmq_txrx_use_workqueue;
 	struct work wmq_cookie;
 	void *wmq_si;
-	krndsource_t rnd_source;	/* random source */
 };
 
 struct wm_phyop {
@@ -608,6 +607,8 @@ struct wm_softc {
 
 	int sc_mchash_type;		/* multicast filter offset */
 
+	krndsource_t rnd_source;	/* random source */
+
 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
 
 	kmutex_t *sc_core_lock;		/* lock for softc operations */
@@ -3041,6 +3042,8 @@ alloc_retry:
 	ether_ifattach(ifp, enaddr);
 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
 	if_register(ifp);
+	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
+	    RND_FLAG_DEFAULT);
 
 #ifdef WM_EVENT_COUNTERS
 	/* Attach event counters. */
@@ -3101,6 +3104,8 @@ wm_detach(device_t self, int flags __unu
 	evcnt_detach(&sc->sc_ev_rx_macctl);
 #endif /* WM_EVENT_COUNTERS */
 
+	rnd_detach_source(&sc->rnd_source);
+
 	/* Tell the firmware about the release */
 	WM_CORE_LOCK(sc);
 	wm_release_manageability(sc);
@@ -6998,15 +7003,6 @@ wm_alloc_txrx_queues(struct wm_softc *sc
 	if (error)
 		goto fail_2;
 
-	for (i = 0; i < sc->sc_nqueues; i++) {
-		char rndname[16];
-
-		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
-		    device_xname(sc->sc_dev), i);
-		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
-		    RND_TYPE_NET, RND_FLAG_DEFAULT);
-	}
-
 	return 0;
 
  fail_2:
@@ -7042,9 +7038,6 @@ wm_free_txrx_queues(struct wm_softc *sc)
 {
 	int i;
 
-	for (i = 0; i < sc->sc_nqueues; i++)
-		rnd_detach_source(&sc->sc_queue[i].rnd_source);
-
 	for (i = 0; i < sc->sc_nqueues; i++) {
 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
 
@@ -8720,6 +8713,9 @@ wm_txeof(struct wm_txqueue *txq, u_int l
 	DPRINTF(WM_DEBUG_TX,
 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
 
+	if (count != 0)
+		rnd_add_uint32(&sc->rnd_source, count);
+
 	/*
 	 * If there are no more pending transmissions, cancel the watchdog
 	 * timer.
@@ -9133,6 +9129,9 @@ wm_rxeof(struct wm_rxqueue *rxq, u_int l
 			break;
 	}
 
+	if (count != 0)
+		rnd_add_uint32(&sc->rnd_source, count);
+
 	DPRINTF(WM_DEBUG_RX,
 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
 
@@ -9610,8 +9609,6 @@ wm_intr_legacy(void *arg)
 		 * So, we can call wm_rxeof() in interrupt context.
 		 */
 		wm_rxeof(rxq, UINT_MAX);
-		/* Fill lower bits with RX index. See below for the upper. */
-		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
 
 		mutex_exit(rxq->rxq_lock);
 		mutex_enter(txq->txq_lock);
@@ -9630,8 +9627,6 @@ wm_intr_legacy(void *arg)
 		}
 #endif
 		wm_txeof(txq, UINT_MAX);
-		/* Fill upper bits with TX index. See above for the lower. */
-		rndval = txq->txq_next * WM_NRXDESC;
 
 		mutex_exit(txq->txq_lock);
 		WM_CORE_LOCK(sc);
@@ -9658,7 +9653,7 @@ wm_intr_legacy(void *arg)
 		}
 	}
 
-	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
+	rnd_add_uint32(&sc->rnd_source, rndval);
 
 	if (handled) {
 		/* Try to get more packets going. */
@@ -9716,7 +9711,6 @@ wm_txrxintr_msix(void *arg)
 	struct wm_softc *sc = txq->txq_sc;
 	u_int txlimit = sc->sc_tx_intr_process_limit;
 	u_int rxlimit = sc->sc_rx_intr_process_limit;
-	uint32_t rndval = 0;
 	bool txmore;
 	bool rxmore;
 
@@ -9736,8 +9730,6 @@ wm_txrxintr_msix(void *arg)
 
 	WM_Q_EVCNT_INCR(txq, txdw);
 	txmore = wm_txeof(txq, txlimit);
-	/* Fill upper bits with TX index. See below for the lower. */
-	rndval = txq->txq_next * WM_NRXDESC;
 	/* wm_deferred start() is done in wm_handle_queue(). */
 	mutex_exit(txq->txq_lock);
 
@@ -9752,20 +9744,10 @@ wm_txrxintr_msix(void *arg)
 
 	WM_Q_EVCNT_INCR(rxq, intr);
 	rxmore = wm_rxeof(rxq, rxlimit);
-
-	/* Fill lower bits with RX index. See above for the upper. */
-	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
 	mutex_exit(rxq->rxq_lock);
 
 	wm_itrs_writereg(sc, wmq);
 
-	/*
-	 * This function is called in the hardware interrupt context and
-	 * per-CPU, so it's not required to take a lock.
-	 */
-	if (rndval != 0)
-		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
-
 	if (txmore || rxmore) {
 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
 		wm_sched_handle_queue(sc, wmq);
