Module Name: src
Committed By: martin
Date: Wed Feb 2 14:25:49 UTC 2022
Modified Files:
src/sys/dev/pci/ixgbe [netbsd-9]: ix_txrx.c ixgbe.c ixgbe_mbx.c
ixgbe_netbsd.h ixv.c
Log Message:
Pull up the following revisions (requested by msaitoh in ticket #1424):
sys/dev/pci/ixgbe/ix_txrx.c 1.95
sys/dev/pci/ixgbe/ixgbe.c 1.305 via patch
sys/dev/pci/ixgbe/ixgbe_mbx.c 1.19
sys/dev/pci/ixgbe/ixgbe_netbsd.h 1.15-1.16
sys/dev/pci/ixgbe/ixv.c 1.178 via patch
Use atomic_{load,store}_relaxed() for event counters.
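As context for the change: the pullup replaces direct ev_count accesses with the
new IXGBE_EVC_* helpers added to ixgbe_netbsd.h (see the diff below). A condensed
sketch of the pattern, assuming the usual NetBSD kernel headers (<sys/atomic.h>,
<sys/evcnt.h>) are already in scope:

	#ifdef __HAVE_ATOMIC64_LOADSTORE
	#define IXGBE_EVC_ADD(evp, val)					\
		atomic_store_relaxed(&((evp)->ev_count),		\
		    atomic_load_relaxed(&((evp)->ev_count)) + (val))
	#else
	#define IXGBE_EVC_ADD(evp, val)	((evp)->ev_count += (val))
	#endif

	/* before */	txr->pcq_drops.ev_count++;
	/* after  */	IXGBE_EVC_ADD(&txr->pcq_drops, 1);

Note that only the individual load and store are relaxed-atomic; the increment
itself is not an atomic read-modify-write. That is presumably sufficient here
because each counter is only updated from one context at a time, and the relaxed
accessors mainly avoid torn 64-bit reads and writes when the counters are read
concurrently (e.g. via vmstat -e) on 32-bit ports.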
To generate a diff of this commit:
cvs rdiff -u -r1.54.2.7 -r1.54.2.8 src/sys/dev/pci/ixgbe/ix_txrx.c
cvs rdiff -u -r1.199.2.19 -r1.199.2.20 src/sys/dev/pci/ixgbe/ixgbe.c
cvs rdiff -u -r1.11.2.3 -r1.11.2.4 src/sys/dev/pci/ixgbe/ixgbe_mbx.c
cvs rdiff -u -r1.11.4.2 -r1.11.4.3 src/sys/dev/pci/ixgbe/ixgbe_netbsd.h
cvs rdiff -u -r1.125.2.16 -r1.125.2.17 src/sys/dev/pci/ixgbe/ixv.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
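As an illustration of the same conversion in ixgbe_update_stats_counters(), the
local READ_COPY_SET() macro in ixgbe.c is dropped in favour of the new
IXGBE_EVC_REGADD()/IXGBE_EVC_REGADD2() helpers. Condensed from the diff below, a
register-backed counter update now reads:

	/* before */	stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
	/* after  */	IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc);

	/*
	 * IXGBE_EVC_REGADD2() additionally keeps the register value in a
	 * local variable of the same name (e.g. crcerrs) for later use,
	 * as READ_COPY_SET() used to do.
	 */
	IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs);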
Modified files:
Index: src/sys/dev/pci/ixgbe/ix_txrx.c
diff -u src/sys/dev/pci/ixgbe/ix_txrx.c:1.54.2.7 src/sys/dev/pci/ixgbe/ix_txrx.c:1.54.2.8
--- src/sys/dev/pci/ixgbe/ix_txrx.c:1.54.2.7 Sat Nov 20 15:16:53 2021
+++ src/sys/dev/pci/ixgbe/ix_txrx.c Wed Feb 2 14:25:49 2022
@@ -1,4 +1,4 @@
-/* $NetBSD: ix_txrx.c,v 1.54.2.7 2021/11/20 15:16:53 martin Exp $ */
+/* $NetBSD: ix_txrx.c,v 1.54.2.8 2022/02/02 14:25:49 martin Exp $ */
/******************************************************************************
@@ -64,7 +64,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.54.2.7 2021/11/20 15:16:53 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ix_txrx.c,v 1.54.2.8 2022/02/02 14:25:49 martin Exp $");
#include "opt_inet.h"
#include "opt_inet6.h"
@@ -247,7 +247,7 @@ ixgbe_mq_start(struct ifnet *ifp, struct
if (__predict_false(!pcq_put(txr->txr_interq, m))) {
m_freem(m);
- txr->pcq_drops.ev_count++;
+ IXGBE_EVC_ADD(&txr->pcq_drops, 1);
return ENOBUFS;
}
if (IXGBE_TX_TRYLOCK(txr)) {
@@ -475,7 +475,7 @@ retry:
/* Make certain there are enough descriptors */
if (txr->tx_avail < (map->dm_nsegs + 2)) {
txr->txr_no_space = true;
- txr->no_desc_avail.ev_count++;
+ IXGBE_EVC_ADD(&txr->no_desc_avail, 1);
ixgbe_dmamap_unload(txr->txtag, txbuf->map);
return EAGAIN;
}
@@ -546,7 +546,7 @@ retry:
* Advance the Transmit Descriptor Tail (Tdt), this tells the
* hardware that this frame is available to transmit.
*/
- ++txr->total_packets.ev_count;
+ IXGBE_EVC_ADD(&txr->total_packets, 1);
IXGBE_WRITE_REG(&adapter->hw, txr->tail, i);
/*
@@ -583,7 +583,7 @@ ixgbe_drain(struct ifnet *ifp, struct tx
while ((m = pcq_get(txr->txr_interq)) != NULL) {
m_freem(m);
- txr->pcq_drops.ev_count++;
+ IXGBE_EVC_ADD(&txr->pcq_drops, 1);
}
}
@@ -846,7 +846,7 @@ ixgbe_tx_ctx_setup(struct tx_ring *txr,
int rv = ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status);
if (rv != 0)
- ++adapter->tso_err.ev_count;
+ IXGBE_EVC_ADD(&adapter->tso_err, 1);
return rv;
}
@@ -1088,7 +1088,7 @@ ixgbe_tso_setup(struct tx_ring *txr, str
*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
- ++txr->tso_tx.ev_count;
+ IXGBE_EVC_ADD(&txr->tso_tx, 1);
return (0);
} /* ixgbe_tso_setup */
@@ -1360,7 +1360,7 @@ ixgbe_refresh_mbufs(struct rx_ring *rxr,
if (__predict_false(rxbuf->buf == NULL)) {
mp = ixgbe_getcl();
if (mp == NULL) {
- rxr->no_mbuf.ev_count++;
+ IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
goto update;
}
mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
@@ -1549,7 +1549,7 @@ ixgbe_setup_receive_ring(struct rx_ring
rxbuf->flags = 0;
rxbuf->buf = ixgbe_getcl();
if (rxbuf->buf == NULL) {
- rxr->no_mbuf.ev_count++;
+ IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
error = ENOBUFS;
goto fail;
}
@@ -1582,11 +1582,11 @@ ixgbe_setup_receive_ring(struct rx_ring
rxr->next_to_refresh = adapter->num_rx_desc - 1; /* Fully allocated */
rxr->lro_enabled = FALSE;
rxr->discard_multidesc = false;
- rxr->rx_copies.ev_count = 0;
+ IXGBE_EVC_STORE(&rxr->rx_copies, 0);
#if 0 /* NetBSD */
- rxr->rx_bytes.ev_count = 0;
+ IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
#if 1 /* Fix inconsistency */
- rxr->rx_packets.ev_count = 0;
+ IXGBE_EVC_STORE(&rxr->rx_packets, 0);
#endif
#endif
rxr->vtag_strip = FALSE;
@@ -1917,7 +1917,7 @@ ixgbe_rxeof(struct ix_queue *que)
if (adapter->feat_en & IXGBE_FEATURE_VF)
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#endif
- rxr->rx_discarded.ev_count++;
+ IXGBE_EVC_ADD(&rxr->rx_discarded, 1);
ixgbe_rx_discard(rxr, i);
discard_multidesc = false;
goto next_desc;
@@ -1933,14 +1933,14 @@ ixgbe_rxeof(struct ix_queue *que)
/* For short packet. See below. */
sendmp = m_gethdr(M_NOWAIT, MT_DATA);
if (__predict_false(sendmp == NULL)) {
- rxr->no_mbuf.ev_count++;
+ IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
discard = true;
}
} else {
/* For long packet. */
newmp = ixgbe_getcl();
if (__predict_false(newmp == NULL)) {
- rxr->no_mbuf.ev_count++;
+ IXGBE_EVC_ADD(&rxr->no_mbuf, 1);
discard = true;
}
}
@@ -2044,7 +2044,7 @@ ixgbe_rxeof(struct ix_queue *que)
sendmp->m_data += ETHER_ALIGN;
memcpy(mtod(sendmp, void *),
mtod(mp, void *), len);
- rxr->rx_copies.ev_count++;
+ IXGBE_EVC_ADD(&rxr->rx_copies, 1);
rbuf->flags |= IXGBE_RX_COPY;
} else {
/* Non short packet */
@@ -2073,10 +2073,10 @@ ixgbe_rxeof(struct ix_queue *que)
} else { /* Sending this frame */
m_set_rcvif(sendmp, ifp);
++rxr->packets;
- rxr->rx_packets.ev_count++;
+ IXGBE_EVC_ADD(&rxr->rx_packets, 1);
/* capture data for AIM */
rxr->bytes += sendmp->m_pkthdr.len;
- rxr->rx_bytes.ev_count += sendmp->m_pkthdr.len;
+ IXGBE_EVC_ADD(&rxr->rx_bytes, sendmp->m_pkthdr.len);
/* Process vlan info */
if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
vtag = le16toh(cur->wb.upper.vlan);
@@ -2217,23 +2217,23 @@ ixgbe_rx_checksum(u32 staterr, struct mb
/* IPv4 checksum */
if (status & IXGBE_RXD_STAT_IPCS) {
- stats->ipcs.ev_count++;
+ IXGBE_EVC_ADD(&stats->ipcs, 1);
if (!(errors & IXGBE_RXD_ERR_IPE)) {
/* IP Checksum Good */
mp->m_pkthdr.csum_flags = M_CSUM_IPv4;
} else {
- stats->ipcs_bad.ev_count++;
+ IXGBE_EVC_ADD(&stats->ipcs_bad, 1);
mp->m_pkthdr.csum_flags = M_CSUM_IPv4|M_CSUM_IPv4_BAD;
}
}
/* TCP/UDP/SCTP checksum */
if (status & IXGBE_RXD_STAT_L4CS) {
- stats->l4cs.ev_count++;
+ IXGBE_EVC_ADD(&stats->l4cs, 1);
int type = M_CSUM_TCPv4|M_CSUM_TCPv6|M_CSUM_UDPv4|M_CSUM_UDPv6;
if (!(errors & IXGBE_RXD_ERR_TCPE)) {
mp->m_pkthdr.csum_flags |= type;
} else {
- stats->l4cs_bad.ev_count++;
+ IXGBE_EVC_ADD(&stats->l4cs_bad, 1);
mp->m_pkthdr.csum_flags |= type | M_CSUM_TCP_UDP_BAD;
}
}
Index: src/sys/dev/pci/ixgbe/ixgbe.c
diff -u src/sys/dev/pci/ixgbe/ixgbe.c:1.199.2.19 src/sys/dev/pci/ixgbe/ixgbe.c:1.199.2.20
--- src/sys/dev/pci/ixgbe/ixgbe.c:1.199.2.19 Tue Feb 1 11:35:45 2022
+++ src/sys/dev/pci/ixgbe/ixgbe.c Wed Feb 2 14:25:49 2022
@@ -1,4 +1,4 @@
-/* $NetBSD: ixgbe.c,v 1.199.2.19 2022/02/01 11:35:45 martin Exp $ */
+/* $NetBSD: ixgbe.c,v 1.199.2.20 2022/02/02 14:25:49 martin Exp $ */
/******************************************************************************
@@ -64,7 +64,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.199.2.19 2022/02/01 11:35:45 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ixgbe.c,v 1.199.2.20 2022/02/02 14:25:49 martin Exp $");
#ifdef _KERNEL_OPT
#include "opt_inet.h"
@@ -1583,35 +1583,27 @@ ixgbe_update_stats_counters(struct adapt
struct ifnet *ifp = adapter->ifp;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_hw_stats *stats = &adapter->stats.pf;
- u32 missed_rx = 0, bprc, lxon, lxoff;
+ u32 missed_rx = 0, bprc, lxontxc, lxofftxc;
u64 total, total_missed_rx = 0;
uint64_t crcerrs, illerrc, rlec, ruc, rfc, roc, rjc;
unsigned int queue_counters;
int i;
-#define READ_COPY_SET(hw, stats, regname, evname) \
- do { \
- (evname) = IXGBE_READ_REG((hw), regname); \
- (stats)->evname.ev_count += (evname); \
- } while (/*CONSTCOND*/0)
-
- READ_COPY_SET(hw, stats, IXGBE_CRCERRS, crcerrs);
- READ_COPY_SET(hw, stats, IXGBE_ILLERRC, illerrc);
+ IXGBE_EVC_REGADD2(hw, stats, IXGBE_CRCERRS, crcerrs);
+ IXGBE_EVC_REGADD2(hw, stats, IXGBE_ILLERRC, illerrc);
- stats->errbc.ev_count += IXGBE_READ_REG(hw, IXGBE_ERRBC);
- stats->mspdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_ERRBC, errbc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_MSPDC, mspdc);
if (hw->mac.type >= ixgbe_mac_X550)
- stats->mbsdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MBSDC);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_MBSDC, mbsdc);
/* 16 registers exist */
queue_counters = uimin(__arraycount(stats->qprc), adapter->num_queues);
for (i = 0; i < queue_counters; i++) {
- stats->qprc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
- stats->qptc[i].ev_count += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
- if (hw->mac.type >= ixgbe_mac_82599EB) {
- stats->qprdc[i].ev_count
- += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
- }
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRC(i), qprc[i]);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_QPTC(i), qptc[i]);
+ if (hw->mac.type >= ixgbe_mac_82599EB)
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_QPRDC(i), qprdc[i]);
}
/* 8 registers exist */
@@ -1621,120 +1613,108 @@ ixgbe_update_stats_counters(struct adapt
/* MPC */
mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
/* global total per queue */
- stats->mpc[i].ev_count += mp;
+ IXGBE_EVC_ADD(&stats->mpc[i], mp);
/* running comprehensive total for stats display */
total_missed_rx += mp;
if (hw->mac.type == ixgbe_mac_82598EB)
- stats->rnbc[i].ev_count
- += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_RNBC(i), rnbc[i]);
- stats->pxontxc[i].ev_count
- += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
- stats->pxofftxc[i].ev_count
- += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONTXC(i), pxontxc[i]);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFTXC(i), pxofftxc[i]);
if (hw->mac.type >= ixgbe_mac_82599EB) {
- stats->pxonrxc[i].ev_count
- += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
- stats->pxoffrxc[i].ev_count
- += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
- stats->pxon2offc[i].ev_count
- += IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONRXCNT(i), pxonrxc[i]);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFRXCNT(i), pxoffrxc[i]);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PXON2OFFCNT(i),
+ pxon2offc[i]);
} else {
- stats->pxonrxc[i].ev_count
- += IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
- stats->pxoffrxc[i].ev_count
- += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PXONRXC(i), pxonrxc[i]);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PXOFFRXC(i), pxoffrxc[i]);
}
}
- stats->mpctotal.ev_count += total_missed_rx;
+ IXGBE_EVC_ADD(&stats->mpctotal, total_missed_rx);
/* Document says M[LR]FC are valid when link is up and 10Gbps */
if ((adapter->link_active == LINK_STATE_UP)
&& (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL)) {
- stats->mlfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MLFC);
- stats->mrfc.ev_count += IXGBE_READ_REG(hw, IXGBE_MRFC);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_MLFC, mlfc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_MRFC, mrfc);
}
- READ_COPY_SET(hw, stats, IXGBE_RLEC, rlec);
+ IXGBE_EVC_REGADD2(hw, stats, IXGBE_RLEC, rlec);
/* Hardware workaround, gprc counts missed packets */
- stats->gprc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx;
+ IXGBE_EVC_ADD(&stats->gprc,
+ IXGBE_READ_REG(hw, IXGBE_GPRC) - missed_rx);
- lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
- stats->lxontxc.ev_count += lxon;
- lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
- stats->lxofftxc.ev_count += lxoff;
- total = lxon + lxoff;
+ IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXONTXC, lxontxc);
+ IXGBE_EVC_REGADD2(hw, stats, IXGBE_LXOFFTXC, lxofftxc);
+ total = lxontxc + lxofftxc;
if (hw->mac.type != ixgbe_mac_82598EB) {
- stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCL) +
- ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
- stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
+ IXGBE_EVC_ADD(&stats->gorc, IXGBE_READ_REG(hw, IXGBE_GORCL) +
+ ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32));
+ IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCL) +
((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32)
- - total * ETHER_MIN_LEN;
- stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORL) +
- ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
- stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
- stats->lxoffrxc.ev_count
- += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+ - total * ETHER_MIN_LEN);
+ IXGBE_EVC_ADD(&stats->tor, IXGBE_READ_REG(hw, IXGBE_TORL) +
+ ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32));
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXCNT, lxonrxc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXCNT, lxoffrxc);
} else {
- stats->lxonrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- stats->lxoffrxc.ev_count += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_LXONRXC, lxonrxc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_LXOFFRXC, lxoffrxc);
/* 82598 only has a counter in the high register */
- stats->gorc.ev_count += IXGBE_READ_REG(hw, IXGBE_GORCH);
- stats->gotc.ev_count += IXGBE_READ_REG(hw, IXGBE_GOTCH)
- - total * ETHER_MIN_LEN;
- stats->tor.ev_count += IXGBE_READ_REG(hw, IXGBE_TORH);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_GORCH, gorc);
+ IXGBE_EVC_ADD(&stats->gotc, IXGBE_READ_REG(hw, IXGBE_GOTCH)
+ - total * ETHER_MIN_LEN);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_TORH, tor);
}
/*
* Workaround: mprc hardware is incorrectly counting
* broadcasts, so for now we subtract those.
*/
- bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
- stats->bprc.ev_count += bprc;
- stats->mprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPRC)
- - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0);
-
- stats->prc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC64);
- stats->prc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC127);
- stats->prc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC255);
- stats->prc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC511);
- stats->prc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1023);
- stats->prc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PRC1522);
-
- stats->gptc.ev_count += IXGBE_READ_REG(hw, IXGBE_GPTC) - total;
- stats->mptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MPTC) - total;
- stats->ptc64.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC64) - total;
-
- READ_COPY_SET(hw, stats, IXGBE_RUC, ruc);
- READ_COPY_SET(hw, stats, IXGBE_RFC, rfc);
- READ_COPY_SET(hw, stats, IXGBE_ROC, roc);
- READ_COPY_SET(hw, stats, IXGBE_RJC, rjc);
-
-#undef READ_COPY_SET
-
- stats->mngprc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
- stats->mngpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
- stats->mngptc.ev_count += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
- stats->tpr.ev_count += IXGBE_READ_REG(hw, IXGBE_TPR);
- stats->tpt.ev_count += IXGBE_READ_REG(hw, IXGBE_TPT);
- stats->ptc127.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC127);
- stats->ptc255.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC255);
- stats->ptc511.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC511);
- stats->ptc1023.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1023);
- stats->ptc1522.ev_count += IXGBE_READ_REG(hw, IXGBE_PTC1522);
- stats->bptc.ev_count += IXGBE_READ_REG(hw, IXGBE_BPTC);
- stats->xec.ev_count += IXGBE_READ_REG(hw, IXGBE_XEC);
- stats->fccrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCCRC);
- stats->fclast.ev_count += IXGBE_READ_REG(hw, IXGBE_FCLAST);
+ IXGBE_EVC_REGADD2(hw, stats, IXGBE_BPRC, bprc);
+ IXGBE_EVC_ADD(&stats->mprc, IXGBE_READ_REG(hw, IXGBE_MPRC)
+ - ((hw->mac.type == ixgbe_mac_82598EB) ? bprc : 0));
+
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC64, prc64);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC127, prc127);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC255, prc255);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC511, prc511);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1023, prc1023);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PRC1522, prc1522);
+
+ IXGBE_EVC_ADD(&stats->gptc, IXGBE_READ_REG(hw, IXGBE_GPTC) - total);
+ IXGBE_EVC_ADD(&stats->mptc, IXGBE_READ_REG(hw, IXGBE_MPTC) - total);
+ IXGBE_EVC_ADD(&stats->ptc64, IXGBE_READ_REG(hw, IXGBE_PTC64) - total);
+
+ IXGBE_EVC_REGADD2(hw, stats, IXGBE_RUC, ruc);
+ IXGBE_EVC_REGADD2(hw, stats, IXGBE_RFC, rfc);
+ IXGBE_EVC_REGADD2(hw, stats, IXGBE_ROC, roc);
+ IXGBE_EVC_REGADD2(hw, stats, IXGBE_RJC, rjc);
+
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPRC, mngprc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPDC, mngpdc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_MNGPTC, mngptc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_TPR, tpr);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_TPT, tpt);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC127, ptc127);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC255, ptc255);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC511, ptc511);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1023, ptc1023);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_PTC1522, ptc1522);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_BPTC, bptc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_XEC, xec);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_FCCRC, fccrc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_FCLAST, fclast);
/* Only read FCOE on 82599 */
if (hw->mac.type != ixgbe_mac_82598EB) {
- stats->fcoerpdc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
- stats->fcoeprc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
- stats->fcoeptc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
- stats->fcoedwrc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
- stats->fcoedwtc.ev_count += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOERPDC, fcoerpdc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPRC, fcoeprc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEPTC, fcoeptc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWRC, fcoedwrc);
+ IXGBE_EVC_REGADD(hw, stats, IXGBE_FCOEDWTC, fcoedwtc);
}
/* Fill out the OS statistics structure */
@@ -2098,47 +2078,47 @@ ixgbe_clear_evcnt(struct adapter *adapte
struct ixgbe_hw_stats *stats = &adapter->stats.pf;
int i;
- adapter->efbig_tx_dma_setup.ev_count = 0;
- adapter->mbuf_defrag_failed.ev_count = 0;
- adapter->efbig2_tx_dma_setup.ev_count = 0;
- adapter->einval_tx_dma_setup.ev_count = 0;
- adapter->other_tx_dma_setup.ev_count = 0;
- adapter->eagain_tx_dma_setup.ev_count = 0;
- adapter->enomem_tx_dma_setup.ev_count = 0;
- adapter->tso_err.ev_count = 0;
- adapter->watchdog_events.ev_count = 0;
- adapter->link_irq.ev_count = 0;
- adapter->link_sicount.ev_count = 0;
- adapter->mod_sicount.ev_count = 0;
- adapter->msf_sicount.ev_count = 0;
- adapter->phy_sicount.ev_count = 0;
+ IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, 0);
+ IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->tso_err, 0);
+ IXGBE_EVC_STORE(&adapter->watchdog_events, 0);
+ IXGBE_EVC_STORE(&adapter->link_irq, 0);
+ IXGBE_EVC_STORE(&adapter->link_sicount, 0);
+ IXGBE_EVC_STORE(&adapter->mod_sicount, 0);
+ IXGBE_EVC_STORE(&adapter->msf_sicount, 0);
+ IXGBE_EVC_STORE(&adapter->phy_sicount, 0);
for (i = 0; i < IXGBE_TC_COUNTER_NUM; i++) {
if (i < __arraycount(stats->mpc)) {
- stats->mpc[i].ev_count = 0;
+ IXGBE_EVC_STORE(&stats->mpc[i], 0);
if (hw->mac.type == ixgbe_mac_82598EB)
- stats->rnbc[i].ev_count = 0;
+ IXGBE_EVC_STORE(&stats->rnbc[i], 0);
}
if (i < __arraycount(stats->pxontxc)) {
- stats->pxontxc[i].ev_count = 0;
- stats->pxonrxc[i].ev_count = 0;
- stats->pxofftxc[i].ev_count = 0;
- stats->pxoffrxc[i].ev_count = 0;
+ IXGBE_EVC_STORE(&stats->pxontxc[i], 0);
+ IXGBE_EVC_STORE(&stats->pxonrxc[i], 0);
+ IXGBE_EVC_STORE(&stats->pxofftxc[i], 0);
+ IXGBE_EVC_STORE(&stats->pxoffrxc[i], 0);
if (hw->mac.type >= ixgbe_mac_82599EB)
- stats->pxon2offc[i].ev_count = 0;
+ IXGBE_EVC_STORE(&stats->pxon2offc[i], 0);
}
}
txr = adapter->tx_rings;
for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
- adapter->queues[i].irqs.ev_count = 0;
- adapter->queues[i].handleq.ev_count = 0;
- adapter->queues[i].req.ev_count = 0;
- txr->no_desc_avail.ev_count = 0;
- txr->total_packets.ev_count = 0;
- txr->tso_tx.ev_count = 0;
+ IXGBE_EVC_STORE(&adapter->queues[i].irqs, 0);
+ IXGBE_EVC_STORE(&adapter->queues[i].handleq, 0);
+ IXGBE_EVC_STORE(&adapter->queues[i].req, 0);
+ IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
+ IXGBE_EVC_STORE(&txr->total_packets, 0);
+ IXGBE_EVC_STORE(&txr->tso_tx, 0);
#ifndef IXGBE_LEGACY_TX
- txr->pcq_drops.ev_count = 0;
+ IXGBE_EVC_STORE(&txr->pcq_drops, 0);
#endif
txr->q_efbig_tx_dma_setup = 0;
txr->q_mbuf_defrag_failed = 0;
@@ -2150,75 +2130,75 @@ ixgbe_clear_evcnt(struct adapter *adapte
txr->q_tso_err = 0;
if (i < __arraycount(stats->qprc)) {
- stats->qprc[i].ev_count = 0;
- stats->qptc[i].ev_count = 0;
- stats->qbrc[i].ev_count = 0;
- stats->qbtc[i].ev_count = 0;
+ IXGBE_EVC_STORE(&stats->qprc[i], 0);
+ IXGBE_EVC_STORE(&stats->qptc[i], 0);
+ IXGBE_EVC_STORE(&stats->qbrc[i], 0);
+ IXGBE_EVC_STORE(&stats->qbtc[i], 0);
if (hw->mac.type >= ixgbe_mac_82599EB)
- stats->qprdc[i].ev_count = 0;
+ IXGBE_EVC_STORE(&stats->qprdc[i], 0);
}
- rxr->rx_packets.ev_count = 0;
- rxr->rx_bytes.ev_count = 0;
- rxr->rx_copies.ev_count = 0;
- rxr->no_mbuf.ev_count = 0;
- rxr->rx_discarded.ev_count = 0;
- }
- stats->ipcs.ev_count = 0;
- stats->l4cs.ev_count = 0;
- stats->ipcs_bad.ev_count = 0;
- stats->l4cs_bad.ev_count = 0;
- stats->intzero.ev_count = 0;
- stats->legint.ev_count = 0;
- stats->crcerrs.ev_count = 0;
- stats->illerrc.ev_count = 0;
- stats->errbc.ev_count = 0;
- stats->mspdc.ev_count = 0;
+ IXGBE_EVC_STORE(&rxr->rx_packets, 0);
+ IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
+ IXGBE_EVC_STORE(&rxr->rx_copies, 0);
+ IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
+ IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
+ }
+ IXGBE_EVC_STORE(&stats->ipcs, 0);
+ IXGBE_EVC_STORE(&stats->l4cs, 0);
+ IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
+ IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
+ IXGBE_EVC_STORE(&stats->intzero, 0);
+ IXGBE_EVC_STORE(&stats->legint, 0);
+ IXGBE_EVC_STORE(&stats->crcerrs, 0);
+ IXGBE_EVC_STORE(&stats->illerrc, 0);
+ IXGBE_EVC_STORE(&stats->errbc, 0);
+ IXGBE_EVC_STORE(&stats->mspdc, 0);
if (hw->mac.type >= ixgbe_mac_X550)
- stats->mbsdc.ev_count = 0;
- stats->mpctotal.ev_count = 0;
- stats->mlfc.ev_count = 0;
- stats->mrfc.ev_count = 0;
- stats->rlec.ev_count = 0;
- stats->lxontxc.ev_count = 0;
- stats->lxonrxc.ev_count = 0;
- stats->lxofftxc.ev_count = 0;
- stats->lxoffrxc.ev_count = 0;
+ IXGBE_EVC_STORE(&stats->mbsdc, 0);
+ IXGBE_EVC_STORE(&stats->mpctotal, 0);
+ IXGBE_EVC_STORE(&stats->mlfc, 0);
+ IXGBE_EVC_STORE(&stats->mrfc, 0);
+ IXGBE_EVC_STORE(&stats->rlec, 0);
+ IXGBE_EVC_STORE(&stats->lxontxc, 0);
+ IXGBE_EVC_STORE(&stats->lxonrxc, 0);
+ IXGBE_EVC_STORE(&stats->lxofftxc, 0);
+ IXGBE_EVC_STORE(&stats->lxoffrxc, 0);
/* Packet Reception Stats */
- stats->tor.ev_count = 0;
- stats->gorc.ev_count = 0;
- stats->tpr.ev_count = 0;
- stats->gprc.ev_count = 0;
- stats->mprc.ev_count = 0;
- stats->bprc.ev_count = 0;
- stats->prc64.ev_count = 0;
- stats->prc127.ev_count = 0;
- stats->prc255.ev_count = 0;
- stats->prc511.ev_count = 0;
- stats->prc1023.ev_count = 0;
- stats->prc1522.ev_count = 0;
- stats->ruc.ev_count = 0;
- stats->rfc.ev_count = 0;
- stats->roc.ev_count = 0;
- stats->rjc.ev_count = 0;
- stats->mngprc.ev_count = 0;
- stats->mngpdc.ev_count = 0;
- stats->xec.ev_count = 0;
+ IXGBE_EVC_STORE(&stats->tor, 0);
+ IXGBE_EVC_STORE(&stats->gorc, 0);
+ IXGBE_EVC_STORE(&stats->tpr, 0);
+ IXGBE_EVC_STORE(&stats->gprc, 0);
+ IXGBE_EVC_STORE(&stats->mprc, 0);
+ IXGBE_EVC_STORE(&stats->bprc, 0);
+ IXGBE_EVC_STORE(&stats->prc64, 0);
+ IXGBE_EVC_STORE(&stats->prc127, 0);
+ IXGBE_EVC_STORE(&stats->prc255, 0);
+ IXGBE_EVC_STORE(&stats->prc511, 0);
+ IXGBE_EVC_STORE(&stats->prc1023, 0);
+ IXGBE_EVC_STORE(&stats->prc1522, 0);
+ IXGBE_EVC_STORE(&stats->ruc, 0);
+ IXGBE_EVC_STORE(&stats->rfc, 0);
+ IXGBE_EVC_STORE(&stats->roc, 0);
+ IXGBE_EVC_STORE(&stats->rjc, 0);
+ IXGBE_EVC_STORE(&stats->mngprc, 0);
+ IXGBE_EVC_STORE(&stats->mngpdc, 0);
+ IXGBE_EVC_STORE(&stats->xec, 0);
/* Packet Transmission Stats */
- stats->gotc.ev_count = 0;
- stats->tpt.ev_count = 0;
- stats->gptc.ev_count = 0;
- stats->bptc.ev_count = 0;
- stats->mptc.ev_count = 0;
- stats->mngptc.ev_count = 0;
- stats->ptc64.ev_count = 0;
- stats->ptc127.ev_count = 0;
- stats->ptc255.ev_count = 0;
- stats->ptc511.ev_count = 0;
- stats->ptc1023.ev_count = 0;
- stats->ptc1522.ev_count = 0;
+ IXGBE_EVC_STORE(&stats->gotc, 0);
+ IXGBE_EVC_STORE(&stats->tpt, 0);
+ IXGBE_EVC_STORE(&stats->gptc, 0);
+ IXGBE_EVC_STORE(&stats->bptc, 0);
+ IXGBE_EVC_STORE(&stats->mptc, 0);
+ IXGBE_EVC_STORE(&stats->mngptc, 0);
+ IXGBE_EVC_STORE(&stats->ptc64, 0);
+ IXGBE_EVC_STORE(&stats->ptc127, 0);
+ IXGBE_EVC_STORE(&stats->ptc255, 0);
+ IXGBE_EVC_STORE(&stats->ptc511, 0);
+ IXGBE_EVC_STORE(&stats->ptc1023, 0);
+ IXGBE_EVC_STORE(&stats->ptc1522, 0);
}
/************************************************************************
@@ -2778,7 +2758,7 @@ ixgbe_msix_que(void *arg)
return 0;
ixgbe_disable_queue(adapter, que->msix);
- ++que->irqs.ev_count;
+ IXGBE_EVC_ADD(&que->irqs, 1);
/*
* Don't change "que->txrx_use_workqueue" from this point to avoid
@@ -3126,7 +3106,7 @@ ixgbe_msix_link(void *arg)
u32 eicr, eicr_mask;
s32 retval;
- ++adapter->link_irq.ev_count;
+ IXGBE_EVC_ADD(&adapter->link_irq, 1);
/* Pause other interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER);
@@ -4560,14 +4540,14 @@ ixgbe_local_timer1(void *arg)
v6 += txr->q_enomem_tx_dma_setup;
v7 += txr->q_tso_err;
}
- adapter->efbig_tx_dma_setup.ev_count = v0;
- adapter->mbuf_defrag_failed.ev_count = v1;
- adapter->efbig2_tx_dma_setup.ev_count = v2;
- adapter->einval_tx_dma_setup.ev_count = v3;
- adapter->other_tx_dma_setup.ev_count = v4;
- adapter->eagain_tx_dma_setup.ev_count = v5;
- adapter->enomem_tx_dma_setup.ev_count = v6;
- adapter->tso_err.ev_count = v7;
+ IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, v0);
+ IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, v1);
+ IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, v2);
+ IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, v3);
+ IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, v4);
+ IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, v5);
+ IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, v6);
+ IXGBE_EVC_STORE(&adapter->tso_err, v7);
/*
* Check the TX queues status
@@ -4625,7 +4605,7 @@ out:
watchdog:
device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
adapter->ifp->if_flags &= ~IFF_RUNNING;
- adapter->watchdog_events.ev_count++;
+ IXGBE_EVC_ADD(&adapter->watchdog_events, 1);
ixgbe_init_locked(adapter);
} /* ixgbe_local_timer */
@@ -4701,7 +4681,7 @@ ixgbe_handle_mod(void *context)
u32 err, cage_full = 0;
IXGBE_CORE_LOCK(adapter);
- ++adapter->mod_sicount.ev_count;
+ IXGBE_EVC_ADD(&adapter->mod_sicount, 1);
if (adapter->hw.need_crosstalk_fix) {
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
@@ -4769,7 +4749,7 @@ ixgbe_handle_msf(void *context)
bool negotiate;
IXGBE_CORE_LOCK(adapter);
- ++adapter->msf_sicount.ev_count;
+ IXGBE_EVC_ADD(&adapter->msf_sicount, 1);
/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
adapter->phy_layer = ixgbe_get_supported_physical_layer(hw);
@@ -4798,7 +4778,7 @@ ixgbe_handle_phy(void *context)
struct ixgbe_hw *hw = &adapter->hw;
int error;
- ++adapter->phy_sicount.ev_count;
+ IXGBE_EVC_ADD(&adapter->phy_sicount, 1);
error = hw->phy.ops.handle_lasi(hw);
if (error == IXGBE_ERR_OVERTEMP)
device_printf(adapter->dev,
@@ -5130,15 +5110,15 @@ ixgbe_legacy_irq(void *arg)
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (eicr == 0) {
- adapter->stats.pf.intzero.ev_count++;
+ IXGBE_EVC_ADD(&adapter->stats.pf.intzero, 1);
IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims_orig);
return 0;
}
- adapter->stats.pf.legint.ev_count++;
+ IXGBE_EVC_ADD(&adapter->stats.pf.legint, 1);
/* Queue (0) intr */
if ((eicr & IXGBE_EIMC_RTX_QUEUE) != 0) {
- ++que->irqs.ev_count;
+ IXGBE_EVC_ADD(&que->irqs, 1);
/*
* The same as ixgbe_msix_que() about
@@ -5198,7 +5178,7 @@ ixgbe_legacy_irq(void *arg)
softint_schedule(adapter->phy_si);
if (more) {
- que->req.ev_count++;
+ IXGBE_EVC_ADD(&que->req, 1);
ixgbe_sched_handle_que(adapter, que);
} else
ixgbe_enable_intr(adapter);
@@ -6504,7 +6484,7 @@ ixgbe_handle_que(void *context)
struct ifnet *ifp = adapter->ifp;
bool more = false;
- que->handleq.ev_count++;
+ IXGBE_EVC_ADD(&que->handleq, 1);
if (ifp->if_flags & IFF_RUNNING) {
more = ixgbe_rxeof(que);
@@ -6522,7 +6502,7 @@ ixgbe_handle_que(void *context)
}
if (more) {
- que->req.ev_count++;
+ IXGBE_EVC_ADD(&que->req, 1);
ixgbe_sched_handle_que(adapter, que);
} else if (que->res != NULL) {
/* Re-enable this interrupt */
@@ -6994,7 +6974,7 @@ ixgbe_handle_link(void *context)
struct ixgbe_hw *hw = &adapter->hw;
IXGBE_CORE_LOCK(adapter);
- ++adapter->link_sicount.ev_count;
+ IXGBE_EVC_ADD(&adapter->link_sicount, 1);
ixgbe_check_link(hw, &adapter->link_speed, &adapter->link_up, 0);
ixgbe_update_link_status(adapter);
Index: src/sys/dev/pci/ixgbe/ixgbe_mbx.c
diff -u src/sys/dev/pci/ixgbe/ixgbe_mbx.c:1.11.2.3 src/sys/dev/pci/ixgbe/ixgbe_mbx.c:1.11.2.4
--- src/sys/dev/pci/ixgbe/ixgbe_mbx.c:1.11.2.3 Mon Jan 31 17:36:25 2022
+++ src/sys/dev/pci/ixgbe/ixgbe_mbx.c Wed Feb 2 14:25:49 2022
@@ -1,4 +1,4 @@
-/* $NetBSD: ixgbe_mbx.c,v 1.11.2.3 2022/01/31 17:36:25 martin Exp $ */
+/* $NetBSD: ixgbe_mbx.c,v 1.11.2.4 2022/02/02 14:25:49 martin Exp $ */
/******************************************************************************
SPDX-License-Identifier: BSD-3-Clause
@@ -36,9 +36,10 @@
/*$FreeBSD: head/sys/dev/ixgbe/ixgbe_mbx.c 326022 2017-11-20 19:36:21Z pfg $*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ixgbe_mbx.c,v 1.11.2.3 2022/01/31 17:36:25 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ixgbe_mbx.c,v 1.11.2.4 2022/02/02 14:25:49 martin Exp $");
#include "ixgbe_type.h"
+#include "ixgbe_netbsd.h"
#include "ixgbe_mbx.h"
static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id);
@@ -314,7 +315,7 @@ static void ixgbe_clear_msg_vf(struct ix
u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) {
- hw->mbx.stats.reqs.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.reqs, 1);
hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
}
}
@@ -324,7 +325,7 @@ static void ixgbe_clear_ack_vf(struct ix
u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) {
- hw->mbx.stats.acks.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.acks, 1);
hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
}
}
@@ -334,7 +335,7 @@ static void ixgbe_clear_rst_vf(struct ix
u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) {
- hw->mbx.stats.rsts.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.rsts, 1);
hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
IXGBE_VFMAILBOX_RSTD);
}
@@ -371,7 +372,7 @@ static s32 ixgbe_check_for_msg_vf(struct
DEBUGFUNC("ixgbe_check_for_msg_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
- hw->mbx.stats.reqs.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.reqs, 1);
return IXGBE_SUCCESS;
}
@@ -393,7 +394,7 @@ static s32 ixgbe_check_for_ack_vf(struct
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
/* TODO: should this be autocleared? */
ixgbe_clear_ack_vf(hw);
- hw->mbx.stats.acks.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.acks, 1);
return IXGBE_SUCCESS;
}
@@ -416,7 +417,7 @@ static s32 ixgbe_check_for_rst_vf(struct
IXGBE_VFMAILBOX_RSTD)) {
/* TODO: should this be autocleared? */
ixgbe_clear_rst_vf(hw);
- hw->mbx.stats.rsts.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.rsts, 1);
return IXGBE_SUCCESS;
}
@@ -531,7 +532,7 @@ static s32 ixgbe_write_mbx_vf_legacy(str
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
/* update stats */
- hw->mbx.stats.msgs_tx.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.msgs_tx, 1);
/* interrupt the PF to tell it a message has been sent */
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
@@ -573,7 +574,7 @@ static s32 ixgbe_write_mbx_vf(struct ixg
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
/* update stats */
- hw->mbx.stats.msgs_tx.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.msgs_tx, 1);
/* interrupt the PF to tell it a message has been sent */
vf_mailbox = ixgbe_read_mailbox_vf(hw);
@@ -620,7 +621,7 @@ static s32 ixgbe_read_mbx_vf_legacy(stru
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
/* update stats */
- hw->mbx.stats.msgs_rx.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.msgs_rx, 1);
return IXGBE_SUCCESS;
}
@@ -661,7 +662,7 @@ static s32 ixgbe_read_mbx_vf(struct ixgb
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
/* update stats */
- hw->mbx.stats.msgs_rx.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.msgs_rx, 1);
return IXGBE_SUCCESS;
}
@@ -691,11 +692,11 @@ void ixgbe_init_mbx_params_vf(struct ixg
mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf;
mbx->ops[0].clear = NULL;
- mbx->stats.msgs_tx.ev_count = 0;
- mbx->stats.msgs_rx.ev_count = 0;
- mbx->stats.reqs.ev_count = 0;
- mbx->stats.acks.ev_count = 0;
- mbx->stats.rsts.ev_count = 0;
+ IXGBE_EVC_STORE(&mbx->stats.msgs_tx, 0);
+ IXGBE_EVC_STORE(&mbx->stats.msgs_rx, 0);
+ IXGBE_EVC_STORE(&mbx->stats.reqs, 0);
+ IXGBE_EVC_STORE(&mbx->stats.acks, 0);
+ IXGBE_EVC_STORE(&mbx->stats.rsts, 0);
}
/**
@@ -732,7 +733,7 @@ static void ixgbe_clear_msg_pf(struct ix
pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
if (pfmbicr & (IXGBE_PFMBICR_VFREQ_VF1 << vf_shift))
- hw->mbx.stats.reqs.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.reqs, 1);
IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index),
IXGBE_PFMBICR_VFREQ_VF1 << vf_shift);
@@ -747,7 +748,7 @@ static void ixgbe_clear_ack_pf(struct ix
pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
if (pfmbicr & (IXGBE_PFMBICR_VFACK_VF1 << vf_shift))
- hw->mbx.stats.acks.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.acks, 1);
IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index),
IXGBE_PFMBICR_VFACK_VF1 << vf_shift);
@@ -842,7 +843,7 @@ static s32 ixgbe_check_for_rst_pf(struct
if (vflre & (1 << vf_shift)) {
ret_val = IXGBE_SUCCESS;
IXGBE_WRITE_REG(hw, IXGBE_PFVFLREC(index), (1 << vf_shift));
- hw->mbx.stats.rsts.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.rsts, 1);
}
return ret_val;
@@ -946,7 +947,7 @@ static s32 ixgbe_write_mbx_pf_legacy(str
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_STS);
/* update stats */
- hw->mbx.stats.msgs_tx.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.msgs_tx, 1);
return IXGBE_SUCCESS;
}
@@ -991,7 +992,7 @@ static s32 ixgbe_write_mbx_pf(struct ixg
ixgbe_poll_for_ack(hw, vf_id);
/* update stats */
- hw->mbx.stats.msgs_tx.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.msgs_tx, 1);
out:
hw->mbx.ops[vf_id].release(hw, vf_id);
@@ -1032,7 +1033,7 @@ static s32 ixgbe_read_mbx_pf_legacy(stru
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_ACK);
/* update stats */
- hw->mbx.stats.msgs_rx.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.msgs_rx, 1);
return IXGBE_SUCCESS;
}
@@ -1074,7 +1075,7 @@ static s32 ixgbe_read_mbx_pf(struct ixgb
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
/* update stats */
- hw->mbx.stats.msgs_rx.ev_count++;
+ IXGBE_EVC_ADD(&hw->mbx.stats.msgs_rx, 1);
return IXGBE_SUCCESS;
}
@@ -1148,11 +1149,11 @@ void ixgbe_init_mbx_params_pf(struct ixg
mbx->size = IXGBE_VFMAILBOX_SIZE;
/* Initialize counters with zeroes */
- mbx->stats.msgs_tx.ev_count = 0;
- mbx->stats.msgs_rx.ev_count = 0;
- mbx->stats.reqs.ev_count = 0;
- mbx->stats.acks.ev_count = 0;
- mbx->stats.rsts.ev_count = 0;
+ IXGBE_EVC_STORE(&mbx->stats.msgs_tx, 0);
+ IXGBE_EVC_STORE(&mbx->stats.msgs_rx, 0);
+ IXGBE_EVC_STORE(&mbx->stats.reqs, 0);
+ IXGBE_EVC_STORE(&mbx->stats.acks, 0);
+ IXGBE_EVC_STORE(&mbx->stats.rsts, 0);
/* No matter of VF number, we initialize params for all 64 VFs. */
/* TODO: 1. Add a define for max VF and refactor SHARED to get rid
Index: src/sys/dev/pci/ixgbe/ixgbe_netbsd.h
diff -u src/sys/dev/pci/ixgbe/ixgbe_netbsd.h:1.11.4.2 src/sys/dev/pci/ixgbe/ixgbe_netbsd.h:1.11.4.3
--- src/sys/dev/pci/ixgbe/ixgbe_netbsd.h:1.11.4.2 Wed Sep 15 16:30:50 2021
+++ src/sys/dev/pci/ixgbe/ixgbe_netbsd.h Wed Feb 2 14:25:49 2022
@@ -1,4 +1,4 @@
-/* $NetBSD: ixgbe_netbsd.h,v 1.11.4.2 2021/09/15 16:30:50 martin Exp $ */
+/* $NetBSD: ixgbe_netbsd.h,v 1.11.4.3 2022/02/02 14:25:49 martin Exp $ */
/*
* Copyright (c) 2011 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -50,6 +50,35 @@
#define IFCAP_HWCSUM (IFCAP_RXCSUM|IFCAP_TXCSUM)
+
+/* Helper macros for evcnt(9). */
+#ifdef __HAVE_ATOMIC64_LOADSTORE
+#define IXGBE_EVC_LOAD(evp) \
+ atomic_load_relaxed(&((evp)->ev_count))
+#define IXGBE_EVC_STORE(evp, val) \
+ atomic_store_relaxed(&((evp)->ev_count), (val))
+#define IXGBE_EVC_ADD(evp, val) \
+ atomic_store_relaxed(&((evp)->ev_count), \
+ atomic_load_relaxed(&((evp)->ev_count)) + (val))
+#else
+#define IXGBE_EVC_LOAD(evp) ((evp)->ev_count)
+#define IXGBE_EVC_STORE(evp, val) ((evp)->ev_count = (val))
+#define IXGBE_EVC_ADD(evp, val) ((evp)->ev_count += (val))
+#endif
+
+#define IXGBE_EVC_REGADD(hw, stats, regname, evname) \
+ IXGBE_EVC_ADD(&(stats)->evname, IXGBE_READ_REG((hw), (regname)))
+
+/*
+ * Copy a register value to variable "evname" for later use.
+ * "evname" is also the name of the evcnt.
+ */
+#define IXGBE_EVC_REGADD2(hw, stats, regname, evname) \
+ do { \
+ (evname) = IXGBE_READ_REG((hw), (regname)); \
+ IXGBE_EVC_ADD(&(stats)->evname, (evname)); \
+ } while (/*CONSTCOND*/0)
+
struct ixgbe_dma_tag {
bus_dma_tag_t dt_dmat;
bus_size_t dt_alignment;
Index: src/sys/dev/pci/ixgbe/ixv.c
diff -u src/sys/dev/pci/ixgbe/ixv.c:1.125.2.16 src/sys/dev/pci/ixgbe/ixv.c:1.125.2.17
--- src/sys/dev/pci/ixgbe/ixv.c:1.125.2.16 Mon Jan 31 17:36:25 2022
+++ src/sys/dev/pci/ixgbe/ixv.c Wed Feb 2 14:25:49 2022
@@ -1,4 +1,4 @@
-/* $NetBSD: ixv.c,v 1.125.2.16 2022/01/31 17:36:25 martin Exp $ */
+/* $NetBSD: ixv.c,v 1.125.2.17 2022/02/02 14:25:49 martin Exp $ */
/******************************************************************************
@@ -35,7 +35,7 @@
/*$FreeBSD: head/sys/dev/ixgbe/if_ixv.c 331224 2018-03-19 20:55:05Z erj $*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.125.2.16 2022/01/31 17:36:25 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ixv.c,v 1.125.2.17 2022/02/02 14:25:49 martin Exp $");
#ifdef _KERNEL_OPT
#include "opt_inet.h"
@@ -877,7 +877,7 @@ ixv_msix_que(void *arg)
u32 newitr = 0;
ixv_disable_queue(adapter, que->msix);
- ++que->irqs.ev_count;
+ IXGBE_EVC_ADD(&que->irqs, 1);
#ifdef __NetBSD__
/* Don't run ixgbe_rxeof in interrupt context */
@@ -963,7 +963,7 @@ ixv_msix_mbx(void *arg)
struct adapter *adapter = arg;
struct ixgbe_hw *hw = &adapter->hw;
- ++adapter->link_irq.ev_count;
+ IXGBE_EVC_ADD(&adapter->link_irq, 1);
/* NetBSD: We use auto-clear, so it's not required to write VTEICR */
/* Link status change */
@@ -1294,14 +1294,14 @@ ixv_local_timer_locked(void *arg)
v6 += txr->q_enomem_tx_dma_setup;
v7 += txr->q_tso_err;
}
- adapter->efbig_tx_dma_setup.ev_count = v0;
- adapter->mbuf_defrag_failed.ev_count = v1;
- adapter->efbig2_tx_dma_setup.ev_count = v2;
- adapter->einval_tx_dma_setup.ev_count = v3;
- adapter->other_tx_dma_setup.ev_count = v4;
- adapter->eagain_tx_dma_setup.ev_count = v5;
- adapter->enomem_tx_dma_setup.ev_count = v6;
- adapter->tso_err.ev_count = v7;
+ IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, v0);
+ IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, v1);
+ IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, v2);
+ IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, v3);
+ IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, v4);
+ IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, v5);
+ IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, v6);
+ IXGBE_EVC_STORE(&adapter->tso_err, v7);
/*
* Check the TX queues status
@@ -1352,7 +1352,7 @@ ixv_local_timer_locked(void *arg)
watchdog:
device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
adapter->ifp->if_flags &= ~IFF_RUNNING;
- adapter->watchdog_events.ev_count++;
+ IXGBE_EVC_ADD(&adapter->watchdog_events, 1);
ixv_init_locked(adapter);
} /* ixv_local_timer */
@@ -2377,20 +2377,20 @@ ixv_init_stats(struct adapter *adapter)
#define UPDATE_STAT_32(reg, last, count) \
{ \
u32 current = IXGBE_READ_REG(hw, (reg)); \
- count.ev_count += current - last; \
+ IXGBE_EVC_ADD(&count, current - (last)); \
(last) = current; \
}
-#define UPDATE_STAT_36(lsb, msb, last, count) \
-{ \
- u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \
- u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \
- u64 current = ((cur_msb << 32) | cur_lsb); \
- if (current < (last)) \
- count.ev_count += current + __BIT(36) - (last); \
- else \
- count.ev_count += current - (last); \
- (last) = current; \
+#define UPDATE_STAT_36(lsb, msb, last, count) \
+ { \
+ u64 cur_lsb = IXGBE_READ_REG(hw, (lsb)); \
+ u64 cur_msb = IXGBE_READ_REG(hw, (msb)); \
+ u64 current = ((cur_msb << 32) | cur_lsb); \
+ if (current < (last)) \
+ IXGBE_EVC_ADD(&count, current + __BIT(36) - (last)); \
+ else \
+ IXGBE_EVC_ADD(&count, current - (last)); \
+ (last) = current; \
}
/************************************************************************
@@ -2730,26 +2730,26 @@ ixv_clear_evcnt(struct adapter *adapter)
int i;
/* Driver Statistics */
- adapter->efbig_tx_dma_setup.ev_count = 0;
- adapter->mbuf_defrag_failed.ev_count = 0;
- adapter->efbig2_tx_dma_setup.ev_count = 0;
- adapter->einval_tx_dma_setup.ev_count = 0;
- adapter->other_tx_dma_setup.ev_count = 0;
- adapter->eagain_tx_dma_setup.ev_count = 0;
- adapter->enomem_tx_dma_setup.ev_count = 0;
- adapter->watchdog_events.ev_count = 0;
- adapter->tso_err.ev_count = 0;
- adapter->link_irq.ev_count = 0;
+ IXGBE_EVC_STORE(&adapter->efbig_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->mbuf_defrag_failed, 0);
+ IXGBE_EVC_STORE(&adapter->efbig2_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->einval_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->other_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->eagain_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->enomem_tx_dma_setup, 0);
+ IXGBE_EVC_STORE(&adapter->watchdog_events, 0);
+ IXGBE_EVC_STORE(&adapter->tso_err, 0);
+ IXGBE_EVC_STORE(&adapter->link_irq, 0);
for (i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
- adapter->queues[i].irqs.ev_count = 0;
- adapter->queues[i].handleq.ev_count = 0;
- adapter->queues[i].req.ev_count = 0;
- txr->tso_tx.ev_count = 0;
- txr->no_desc_avail.ev_count = 0;
- txr->total_packets.ev_count = 0;
+ IXGBE_EVC_STORE(&adapter->queues[i].irqs, 0);
+ IXGBE_EVC_STORE(&adapter->queues[i].handleq, 0);
+ IXGBE_EVC_STORE(&adapter->queues[i].req, 0);
+ IXGBE_EVC_STORE(&txr->tso_tx, 0);
+ IXGBE_EVC_STORE(&txr->no_desc_avail, 0);
+ IXGBE_EVC_STORE(&txr->total_packets, 0);
#ifndef IXGBE_LEGACY_TX
- txr->pcq_drops.ev_count = 0;
+ IXGBE_EVC_STORE(&txr->pcq_drops, 0);
#endif
txr->q_efbig_tx_dma_setup = 0;
txr->q_mbuf_defrag_failed = 0;
@@ -2760,37 +2760,37 @@ ixv_clear_evcnt(struct adapter *adapter)
txr->q_enomem_tx_dma_setup = 0;
txr->q_tso_err = 0;
- rxr->rx_packets.ev_count = 0;
- rxr->rx_bytes.ev_count = 0;
- rxr->rx_copies.ev_count = 0;
- rxr->no_mbuf.ev_count = 0;
- rxr->rx_discarded.ev_count = 0;
+ IXGBE_EVC_STORE(&rxr->rx_packets, 0);
+ IXGBE_EVC_STORE(&rxr->rx_bytes, 0);
+ IXGBE_EVC_STORE(&rxr->rx_copies, 0);
+ IXGBE_EVC_STORE(&rxr->no_mbuf, 0);
+ IXGBE_EVC_STORE(&rxr->rx_discarded, 0);
}
/* MAC stats get their own sub node */
- stats->ipcs.ev_count = 0;
- stats->l4cs.ev_count = 0;
- stats->ipcs_bad.ev_count = 0;
- stats->l4cs_bad.ev_count = 0;
+ IXGBE_EVC_STORE(&stats->ipcs, 0);
+ IXGBE_EVC_STORE(&stats->l4cs, 0);
+ IXGBE_EVC_STORE(&stats->ipcs_bad, 0);
+ IXGBE_EVC_STORE(&stats->l4cs_bad, 0);
/*
* Packet Reception Stats.
* Call ixv_init_stats() to save last VF counters' values.
*/
ixv_init_stats(adapter);
- stats->vfgprc.ev_count = 0;
- stats->vfgorc.ev_count = 0;
- stats->vfmprc.ev_count = 0;
- stats->vfgptc.ev_count = 0;
- stats->vfgotc.ev_count = 0;
+ IXGBE_EVC_STORE(&stats->vfgprc, 0);
+ IXGBE_EVC_STORE(&stats->vfgorc, 0);
+ IXGBE_EVC_STORE(&stats->vfmprc, 0);
+ IXGBE_EVC_STORE(&stats->vfgptc, 0);
+ IXGBE_EVC_STORE(&stats->vfgotc, 0);
/* Mailbox Stats */
- hw->mbx.stats.msgs_tx.ev_count = 0;
- hw->mbx.stats.msgs_rx.ev_count = 0;
- hw->mbx.stats.acks.ev_count = 0;
- hw->mbx.stats.reqs.ev_count = 0;
- hw->mbx.stats.rsts.ev_count = 0;
+ IXGBE_EVC_STORE(&hw->mbx.stats.msgs_tx, 0);
+ IXGBE_EVC_STORE(&hw->mbx.stats.msgs_rx, 0);
+ IXGBE_EVC_STORE(&hw->mbx.stats.acks, 0);
+ IXGBE_EVC_STORE(&hw->mbx.stats.reqs, 0);
+ IXGBE_EVC_STORE(&hw->mbx.stats.rsts, 0);
} /* ixv_clear_evcnt */
@@ -3189,7 +3189,7 @@ ixv_handle_que(void *context)
struct ifnet *ifp = adapter->ifp;
bool more;
- que->handleq.ev_count++;
+ IXGBE_EVC_ADD(&que->handleq, 1);
if (ifp->if_flags & IFF_RUNNING) {
more = ixgbe_rxeof(que);
@@ -3205,7 +3205,7 @@ ixv_handle_que(void *context)
ixgbe_legacy_start_locked(ifp, txr);
IXGBE_TX_UNLOCK(txr);
if (more) {
- que->req.ev_count++;
+ IXGBE_EVC_ADD(&que->req, 1);
if (adapter->txrx_use_workqueue) {
/*
* "enqueued flag" is not required here