Module Name:    src
Committed By:   matt
Date:           Thu Nov  8 00:59:12 UTC 2012

Modified Files:
        src/sys/arch/arm/broadcom: bcm53xx_eth.c

Log Message:
Make sure to zero sc_work_flags in ifstop.
Return immediately in ifstart if IFF_RUNNING is not set.
Make RCVMAGIC checks conditional.
Release the softc lock when calling if_input.
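
For context, the if_input change amounts to the following pattern
(a minimal sketch, not the committed code: sc_lock is the driver's
adaptive softc mutex, and example_rx_input is a placeholder name):

	/*
	 * Hand a received mbuf to the network stack without holding
	 * the softc lock.  if_input may re-enter the driver (e.g. via
	 * its if_start), so keeping sc_lock held across the call
	 * risks deadlock or lock-order problems.
	 */
	static void
	example_rx_input(struct bcmeth_softc *sc, struct mbuf *m)
	{
		struct ifnet * const ifp = &sc->sc_if;

		KASSERT(mutex_owned(sc->sc_lock));
		mutex_exit(sc->sc_lock);	/* drop the softc lock */
		(*ifp->if_input)(ifp, m);	/* stack now owns the mbuf */
		mutex_enter(sc->sc_lock);	/* and retake it */
		/*
		 * Anything cached from the softc before the call (ring
		 * pointers, produce/consume indices) must be re-read
		 * here; another thread may have run meanwhile.
		 */
	}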


To generate a diff of this commit:
cvs rdiff -u -r1.15 -r1.16 src/sys/arch/arm/broadcom/bcm53xx_eth.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/arm/broadcom/bcm53xx_eth.c
diff -u src/sys/arch/arm/broadcom/bcm53xx_eth.c:1.15 src/sys/arch/arm/broadcom/bcm53xx_eth.c:1.16
--- src/sys/arch/arm/broadcom/bcm53xx_eth.c:1.15	Thu Nov  1 21:33:12 2012
+++ src/sys/arch/arm/broadcom/bcm53xx_eth.c	Thu Nov  8 00:59:12 2012
@@ -34,7 +34,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.15 2012/11/01 21:33:12 matt Exp $");
+__KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.c,v 1.16 2012/11/08 00:59:12 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/atomic.h>
@@ -61,7 +61,9 @@ __KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.
 #include <arm/broadcom/bcm53xx_reg.h>
 #include <arm/broadcom/bcm53xx_var.h>
 
-#define	BCMETH_RCVOFFSET	6
+//#define BCMETH_MPSAFE
+
+#define	BCMETH_RCVOFFSET	10
 #define	BCMETH_MAXTXMBUFS	128
 #define	BCMETH_NTXSEGS		30
 #define	BCMETH_MAXRXMBUFS	255
@@ -69,7 +71,9 @@ __KERNEL_RCSID(1, "$NetBSD: bcm53xx_eth.
 #define	BCMETH_NRXSEGS		1
 #define	BCMETH_RINGSIZE		PAGE_SIZE
 
+#if 0
 #define	BCMETH_RCVMAGIC		0xfeedface
+#endif
 
 static int bcmeth_ccb_match(device_t, cfdata_t, void *);
 static void bcmeth_ccb_attach(device_t, device_t, void *);
@@ -204,6 +208,9 @@ static void bcmeth_rxq_reset(struct bcme
     struct bcmeth_rxqueue *);
 
 static int bcmeth_intr(void *);
+#ifdef BCMETH_MPSAFETX
+static void bcmeth_soft_txintr(struct bcmeth_softc *);
+#endif
 static void bcmeth_soft_intr(void *);
 static void bcmeth_worker(struct work *, void *);
 
@@ -361,6 +368,9 @@ bcmeth_ccb_attach(device_t parent, devic
 	ifp->if_baudrate = IF_Mbps(1000);
 	ifp->if_capabilities = 0;
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+#ifdef BCMETH_MPSAFE
+	ifp->if_flags2 = IFF2_MPSAFE;
+#endif
 	ifp->if_ioctl = bcmeth_ifioctl;
 	ifp->if_start = bcmeth_ifstart;
 	ifp->if_watchdog = bcmeth_ifwatchdog;
@@ -541,6 +551,7 @@ bcmeth_ifstop(struct ifnet *ifp, int dis
 	KASSERT(!cpu_intr_p());
 
 	sc->sc_soft_flags = 0;
+	sc->sc_work_flags = 0;
 
 	/* Disable Rx processing */
 	bcmeth_write_4(sc, rxq->rxq_reg_rcvctl,
@@ -842,12 +853,16 @@ bcmeth_rx_buf_alloc(
 	}
 	KASSERT(((map->_dm_flags ^ sc->sc_dmat->_ranges[0].dr_flags) & _BUS_DMAMAP_COHERENT) == 0);
 	KASSERT(map->dm_mapsize == MCLBYTES);
+#ifdef BCMETH_RCVMAGIC
 	*mtod(m, uint32_t *) = BCMETH_RCVMAGIC;
 	bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-
 	bus_dmamap_sync(sc->sc_dmat, map, sizeof(uint32_t),
 	    map->dm_mapsize - sizeof(uint32_t), BUS_DMASYNC_PREREAD);
+#else
+	bus_dmamap_sync(sc->sc_dmat, map, 0, sizeof(uint32_t),
+	    BUS_DMASYNC_PREREAD);
+#endif
 
 	return m;
 }
@@ -955,10 +970,16 @@ bcmeth_rx_input(
 	/*
 	 * Let's give it to the network subsystem to deal with.
 	 */
+#ifdef BCMETH_MPSAFE
+	mutex_exit(sc->sc_lock);
+	(*ifp->if_input)(ifp, m);
+	mutex_enter(sc->sc_lock);
+#else
 	int s = splnet();
 	bpf_mtap(ifp, m);
 	(*ifp->if_input)(ifp, m);
 	splx(s);
+#endif
 }
 
 static void
@@ -1006,7 +1027,11 @@ bcmeth_rxq_consume(
 		 * Get the count of descriptors.  Fetch the correct number
 		 * of mbufs.
 		 */
+#ifdef BCMETH_RCVMAGIC
 		size_t desc_count = rxsts != BCMETH_RCVMAGIC ? __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1 : 1;
+#else
+		size_t desc_count = __SHIFTOUT(rxsts, RXSTS_DESC_COUNT) + 1;
+#endif
 		struct mbuf *m = rxq->rxq_mhead;
 		struct mbuf *m_last = m;
 		for (size_t i = 1; i < desc_count; i++) {
@@ -1027,6 +1052,7 @@ bcmeth_rxq_consume(
 			rxq->rxq_mtail = &rxq->rxq_mhead;
 		m_last->m_next = NULL;
 
+#ifdef BCMETH_RCVMAGIC
 		if (rxsts == BCMETH_RCVMAGIC) {	
 			ifp->if_ierrors++;
 			if ((m->m_ext.ext_paddr >> 28) == 8) {
@@ -1035,7 +1061,9 @@ bcmeth_rxq_consume(
 				sc->sc_ev_rx_badmagic_hi.ev_count++;
 			}
 			IF_ENQUEUE(&sc->sc_rx_bufcache, m);
-		} else if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
+		} else
+#endif /* BCMETH_RCVMAGIC */
+		if (rxsts & (RXSTS_CRC_ERROR|RXSTS_OVERSIZED|RXSTS_PKT_OVERFLOW)) {
 			aprint_error_dev(sc->sc_dev, "[%zu]: count=%zu rxsts=%#x\n",
 			    consumer - rxq->rxq_first, desc_count, rxsts);
 			/*
@@ -1059,7 +1087,34 @@ bcmeth_rxq_consume(
 			} else {
 				m_last->m_len = framelen & (MCLBYTES - 1);
 			}
+
+#ifdef BCMETH_MPSAFE
+			/*
+			 * Wrap at the last entry!
+			 */
+			if (++consumer == rxq->rxq_last) {
+				KASSERT(consumer[-1].rxdb_flags & RXDB_FLAG_ET);
+				rxq->rxq_consumer = rxq->rxq_first;
+			} else {
+				rxq->rxq_consumer = consumer;
+			}
+			rxq->rxq_inuse -= rxconsumed;
+#endif /* BCMETH_MPSAFE */
+
+			/*
+			 * Receive the packet (which releases our lock)
+			 */
 			bcmeth_rx_input(sc, m, rxsts);
+
+#ifdef BCMETH_MPSAFE
+			/*
+			 * Since we had to give up our lock, we need to
+			 * refresh these.
+			 */
+			consumer = rxq->rxq_consumer;
+			rxconsumed = 0;
+			continue;
+#endif /* BCMETH_MPSAFE */
 		}
 
 		/*
@@ -1362,6 +1417,64 @@ bcmeth_txq_produce(
 	return true;
 }
 
+static struct mbuf *
+bcmeth_copy_packet(struct mbuf *m)
+{
+	struct mbuf *mext = NULL;
+	size_t misalignment = 0;
+	size_t hlen = 0;
+
+	for (mext = m; mext != NULL; mext = mext->m_next) {
+		if (mext->m_flags & M_EXT) {
+			misalignment = mtod(mext, vaddr_t) & arm_dcache_align;
+			break;
+		}
+		hlen += m->m_len;
+	}
+
+	struct mbuf *n = m->m_next;
+	if (m != mext && hlen + misalignment <= MHLEN && false) {
+		KASSERT(m->m_pktdat <= m->m_data && m->m_data <= &m->m_pktdat[MHLEN - m->m_len]);
+		size_t oldoff = m->m_data - m->m_pktdat;
+		size_t off;
+		if (mext == NULL) {
+			off = (oldoff + hlen > MHLEN) ? 0 : oldoff;
+		} else {
+			off = MHLEN - (hlen + misalignment);
+		}
+		KASSERT(off + hlen + misalignment <= MHLEN);
+		if (((oldoff ^ off) & arm_dcache_align) != 0 || off < oldoff) {
+			memmove(&m->m_pktdat[off], m->m_data, m->m_len);
+			m->m_data = &m->m_pktdat[off];
+		}
+		m_copydata(n, 0, hlen - m->m_len, &m->m_data[m->m_len]);
+		m->m_len = hlen;
+		m->m_next = mext;
+		while (n != mext) {
+			n = m_free(n);
+		}
+		return m;
+	}
+
+	struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
+	if (m0 == NULL) {
+		return NULL;
+	}
+	M_COPY_PKTHDR(m0, m);
+	MCLAIM(m0, m->m_owner);
+	if (m0->m_pkthdr.len > MHLEN) {
+		MCLGET(m0, M_DONTWAIT);
+		if ((m0->m_flags & M_EXT) == 0) {
+			m_freem(m0);
+			return NULL;
+		}
+	}
+	m0->m_len = m->m_pkthdr.len;
+	m_copydata(m, 0, m0->m_len, mtod(m0, void *));
+	m_freem(m);
+	return m0;
+}
+
 static bool
 bcmeth_txq_enqueue(
 	struct bcmeth_softc *sc,
@@ -1386,24 +1499,11 @@ bcmeth_txq_enqueue(
 		 * consolidate it into a single mbuf.
 		 */
 		if (m->m_next != NULL && (sc->sc_if.if_flags & IFF_LINK2)) {
-			struct mbuf *m0 = m_gethdr(M_DONTWAIT, m->m_type);
+			struct mbuf *m0 = bcmeth_copy_packet(m);
 			if (m0 == NULL) {
 				txq->txq_next = m;
 				return true;
 			}
-			M_COPY_PKTHDR(m0, m);
-			MCLAIM(m0, m->m_owner);
-			if (m0->m_pkthdr.len > MHLEN) {
-				MCLGET(m0, M_DONTWAIT);
-				if ((m0->m_flags & M_EXT) == 0) {
-					m_freem(m0);
-					txq->txq_next = m;
-					return true;
-				}
-			}
-			m0->m_len = m->m_pkthdr.len;
-			m_copydata(m, 0, m0->m_len, mtod(m0, void *));
-			m_freem(m);
 			m = m0;
 		}
 		int error = bcmeth_txq_map_load(sc, txq, m);
@@ -1577,8 +1677,25 @@ bcmeth_ifstart(struct ifnet *ifp)
 {
 	struct bcmeth_softc * const sc = ifp->if_softc;
 
-	atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
-	softint_schedule(sc->sc_soft_ih);
+	if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) {
+		return;
+	}
+
+#ifdef BCMETH_MPSAFETX
+	if (cpu_intr_p()) {
+#endif
+		atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR);
+		softint_schedule(sc->sc_soft_ih);
+#ifdef BCMETH_MPSAFETX
+	} else {
+		/*
+		 * Either we are in a softintr thread already or some other
+		 * thread so just borrow it to do the send and save ourselves
+		 * the overhead of a fast soft int.
+		 */
+		bcmeth_soft_txintr(sc);
+	}
+#endif
 }
 
 int
@@ -1693,6 +1810,32 @@ bcmeth_intr(void *arg)
 	return rv;
 }
 
+#ifdef BCMETH_MPSAFETX
+void
+bcmeth_soft_txintr(struct bcmeth_softc *sc)
+{
+	mutex_enter(sc->sc_lock);
+	/*
+	 * Let's do what we came here for.  Consume transmitted
+	 * packets off the transmit ring.
+	 */
+	if (!bcmeth_txq_consume(sc, &sc->sc_txq)
+	    || !bcmeth_txq_enqueue(sc, &sc->sc_txq)) {
+		sc->sc_ev_tx_stall.ev_count++;
+		sc->sc_if.if_flags |= IFF_OACTIVE;
+	} else {
+		sc->sc_if.if_flags &= ~IFF_OACTIVE;
+	}
+	if (sc->sc_if.if_flags & IFF_RUNNING) {
+		mutex_spin_enter(sc->sc_hwlock);
+		sc->sc_intmask |= XMTINT_0;
+		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
+		mutex_spin_exit(sc->sc_hwlock);
+	}
+	mutex_exit(sc->sc_lock);
+}
+#endif /* BCMETH_MPSAFETX */
+
 void
 bcmeth_soft_intr(void *arg)
 {
@@ -1785,6 +1928,14 @@ bcmeth_worker(struct work *wk, void *arg
 
 	if (ifp->if_flags & IFF_RUNNING) {
 		bcmeth_rxq_produce(sc, &sc->sc_rxq);
+#if 0
+		uint32_t intstatus = bcmeth_read_4(sc, GMAC_INTSTATUS);
+		if (intstatus & RCVINT) {
+			bcmeth_write_4(sc, GMAC_INTSTATUS, RCVINT);
+			work_flags |= WORK_RXINTR;
+			continue;
+		}
+#endif
 		mutex_spin_enter(sc->sc_hwlock);
 		sc->sc_intmask |= intmask;
 		bcmeth_write_4(sc, GMAC_INTMASK, sc->sc_intmask);
