Greetings,
With help from dlg@, claudio@, oga@, miod@ & weerd@, I've been trying
to write a diff to add MCLGETI support for xl(4).
It's been working fine on -current for quite some time.
I've tried flooding it with tcpbench, ping -f, and siege.
I'm looking for feedback on how I can improve it.
Here's the diff:
xlreg.h
Index: src/sys/dev/ic/xlreg.h
===================================================================
RCS file: /cvs/src/sys/dev/ic/xlreg.h,v
retrieving revision 1.24
diff -u -p -r1.24 xlreg.h
--- src/sys/dev/ic/xlreg.h 7 Sep 2010 16:21:43 -0000 1.24
+++ src/sys/dev/ic/xlreg.h 12 Sep 2010 06:33:39 -0000
@@ -480,7 +480,10 @@ struct xl_chain_data {
struct xl_chain_onefrag xl_rx_chain[XL_RX_LIST_CNT];
struct xl_chain xl_tx_chain[XL_TX_LIST_CNT];
- struct xl_chain_onefrag *xl_rx_head;
+ struct xl_chain_onefrag *xl_rx_cons;
+ struct xl_chain_onefrag *xl_rx_prod;
+ int xl_rx_cnt;
+
/* 3c90x "boomerang" queuing stuff */
struct xl_chain *xl_tx_head;
xl.c
Index: src/sys/dev/ic/xl.c
===================================================================
RCS file: /cvs/src/sys/dev/ic/xl.c,v
retrieving revision 1.96
diff -u -p -r1.96 xl.c
--- src/sys/dev/ic/xl.c 7 Sep 2010 16:21:43 -0000 1.96
+++ src/sys/dev/ic/xl.c 12 Sep 2010 10:49:32 -0000
@@ -180,6 +180,7 @@ void xl_iff(struct xl_softc *);
void xl_iff_90x(struct xl_softc *);
void xl_iff_905b(struct xl_softc *);
int xl_list_rx_init(struct xl_softc *);
+void xl_fill_rx_ring(struct xl_softc *);
int xl_list_tx_init(struct xl_softc *);
int xl_list_tx_init_90xB(struct xl_softc *);
void xl_wait(struct xl_softc *);
@@ -1076,8 +1077,8 @@ xl_list_rx_init(struct xl_softc *sc)
for (i = 0; i < XL_RX_LIST_CNT; i++) {
cd->xl_rx_chain[i].xl_ptr =
(struct xl_list_onefrag *)&ld->xl_rx_list[i];
- if (xl_newbuf(sc, &cd->xl_rx_chain[i]) == ENOBUFS)
- return(ENOBUFS);
+ /*if (xl_newbuf(sc, &cd->xl_rx_chain[i]) == ENOBUFS)
+ return(ENOBUFS);*/
if (i == (XL_RX_LIST_CNT - 1))
n = 0;
else
@@ -1088,11 +1089,33 @@ xl_list_rx_init(struct xl_softc *sc)
ld->xl_rx_list[i].xl_next = htole32(next);
}
- cd->xl_rx_head = &cd->xl_rx_chain[0];
-
+ cd->xl_rx_prod = cd->xl_rx_cons = &cd->xl_rx_chain[0];
+ cd->xl_rx_cnt = 0;
+ xl_fill_rx_ring(sc);
return (0);
}
+void
+xl_fill_rx_ring(struct xl_softc *sc)
+{
+ struct xl_chain_data *cd;
+ struct xl_list_data *ld;
+
+ cd = &sc->xl_cdata;
+ ld = sc->xl_ldata;
+
+ while (cd->xl_rx_cnt < XL_RX_LIST_CNT) {
+ if (xl_newbuf(sc, cd->xl_rx_prod) == ENOBUFS){
+ /*printf("Not filling");*/
+ break;
+ }
+ cd->xl_rx_prod = cd->xl_rx_prod->xl_next;
+ cd->xl_rx_cnt++;
+ /*printf("filling works %d",cd->xl_rx_cnt);*/
+ }
+}
+
+
/*
* Initialize an RX descriptor and attach an MBUF cluster.
*/
@@ -1102,7 +1125,7 @@ xl_newbuf(struct xl_softc *sc, struct xl
struct mbuf *m_new = NULL;
bus_dmamap_t map;
- MGETHDR(m_new, M_DONTWAIT, MT_DATA);
+ /*MGETHDR(m_new, M_DONTWAIT, MT_DATA);
if (m_new == NULL)
return (ENOBUFS);
@@ -1110,6 +1133,13 @@ xl_newbuf(struct xl_softc *sc, struct xl
if (!(m_new->m_flags & M_EXT)) {
m_freem(m_new);
return (ENOBUFS);
+ }*/
+
+ m_new = MCLGETI(NULL, M_DONTWAIT, &sc->sc_arpcom.ac_if, MCLBYTES);
+
+ if (!m_new){
+ /*printf("\n no mbufs");*/
+ return (ENOBUFS);
}
m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
@@ -1119,17 +1149,24 @@ xl_newbuf(struct xl_softc *sc, struct xl
return (ENOBUFS);
}
+
+ /*if (bus_dmamap_load_mbuf(sc->sc_dmat, c->map, m_new,
BUS_DMA_NOWAIT)){
+ m_free(m_new);
+ return (ENOBUFS);
+ }*/
+
+
/* sync the old map, and unload it (if necessary) */
if (c->map->dm_nsegs != 0) {
bus_dmamap_sync(sc->sc_dmat, c->map,
0, c->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->sc_dmat, c->map);
}
-
+
map = c->map;
c->map = sc->sc_rx_sparemap;
sc->sc_rx_sparemap = map;
-
+
/* Force longword alignment for packet payload. */
m_adj(m_new, ETHER_ALIGN);
@@ -1156,8 +1193,8 @@ xl_rx_resync(struct xl_softc *sc)
struct xl_chain_onefrag *pos;
int i;
- pos = sc->xl_cdata.xl_rx_head;
-
+ pos = sc->xl_cdata.xl_rx_cons;
+ printf("\n resync");
for (i = 0; i < XL_RX_LIST_CNT; i++) {
bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
((caddr_t)pos->xl_ptr - sc->sc_listkva),
@@ -1172,8 +1209,8 @@ xl_rx_resync(struct xl_softc *sc)
if (i == XL_RX_LIST_CNT)
return (0);
- sc->xl_cdata.xl_rx_head = pos;
-
+ sc->xl_cdata.xl_rx_cons = pos;
+ /*xl_fill_rx_ring(sc);*/
return (EAGAIN);
}
@@ -1195,12 +1232,15 @@ xl_rxeof(struct xl_softc *sc)
again:
- while ((rxstat =
letoh32(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))
- != 0) {
- cur_rx = sc->xl_cdata.xl_rx_head;
- sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
+ while ((rxstat =
letoh32(sc->xl_cdata.xl_rx_cons->xl_ptr->xl_status))
+ != 0 && sc->xl_cdata.xl_rx_cnt > 0) {
+ cur_rx = sc->xl_cdata.xl_rx_cons;
+ m = cur_rx->xl_mbuf;
+ cur_rx->xl_mbuf = NULL;
+ sc->xl_cdata.xl_rx_cons = cur_rx->xl_next;
+ sc->xl_cdata.xl_rx_cnt--;
total_len = rxstat & XL_RXSTAT_LENMASK;
-
+ /*printf("\n RX_CNT %d",sc->xl_cdata.xl_rx_cnt);*/
bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
((caddr_t)cur_rx->xl_ptr - sc->sc_listkva),
sizeof(struct xl_list),
@@ -1222,8 +1262,10 @@ again:
* comes up in the ring.
*/
if (rxstat & XL_RXSTAT_UP_ERROR) {
+ printf("\n error occured");
ifp->if_ierrors++;
cur_rx->xl_ptr->xl_status = htole32(0);
+ m_freem(m);
continue;
}
@@ -1237,12 +1279,16 @@ again:
"packet dropped\n", sc->sc_dev.dv_xname);
ifp->if_ierrors++;
cur_rx->xl_ptr->xl_status = htole32(0);
+ m_freem(m);
continue;
}
/* No errors; receive the packet. */
- m = cur_rx->xl_mbuf;
-
+ /*m = cur_rx->xl_mbuf;*/
+ /*bus_dmamap_sync(sc->sc_dmat, cur_rx->map, 0,
+ cur_rx->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->sc_dmat, cur_rx->map);
+ */
/*
* Try to conjure up a new mbuf cluster. If that
* fails, it means we have an out of memory condition and
@@ -1250,11 +1296,11 @@ again:
* result in a lost packet, but there's little else we
* can do in this situation.
*/
- if (xl_newbuf(sc, cur_rx) == ENOBUFS) {
+ /*if (xl_newbuf(sc, cur_rx) == ENOBUFS) {
ifp->if_ierrors++;
cur_rx->xl_ptr->xl_status = htole32(0);
continue;
- }
+ }*/
ifp->if_ipackets++;
m->m_pkthdr.rcvif = ifp;
@@ -1286,7 +1332,7 @@ again:
ether_input_mbuf(ifp, m);
}
-
+ xl_fill_rx_ring(sc);
/*
* Handle the 'end of channel' condition. When the upload
* engine hits the end of the RX ring, it will stall. This
@@ -1301,15 +1347,20 @@ again:
*/
if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
+ /*printf("\n Upstall");*/
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
xl_wait(sc);
- CSR_WRITE_4(sc, XL_UPLIST_PTR,
+ /*CSR_WRITE_4(sc, XL_UPLIST_PTR,
sc->sc_listmap->dm_segs[0].ds_addr +
offsetof(struct xl_list_data, xl_rx_list[0]));
- sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
+ sc->xl_cdata.xl_rx_cons = &sc->xl_cdata.xl_rx_chain[0];*/
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
+ xl_fill_rx_ring(sc);
goto again;
}
+ /*else
+ xl_fill_rx_ring(sc);
+ */
}
/*
@@ -1519,10 +1570,10 @@ xl_intr(void *arg)
curpkts = ifp->if_ipackets;
xl_rxeof(sc);
- if (curpkts == ifp->if_ipackets) {
+ /*if (curpkts == ifp->if_ipackets) {
while (xl_rx_resync(sc))
xl_rxeof(sc);
- }
+ }*/
}
if (status & XL_STAT_DOWN_COMPLETE) {
@@ -2677,7 +2728,7 @@ xl_attach(struct xl_softc *sc)
*/
if_attach(ifp);
ether_ifattach(ifp);
-
+ m_clsetwms(ifp, MCLBYTES, 2, XL_RX_LIST_CNT - 1);
sc->sc_sdhook = shutdownhook_establish(xl_shutdown, sc);
}