Use the new common code for statistics.
This also fixes a bug where this driver was not accounting
for bytes.

Signed-off-by: Stephen Hemminger <step...@networkplumber.org>
---
 drivers/net/null/rte_eth_null.c | 80 +++++++--------------------------
 1 file changed, 17 insertions(+), 63 deletions(-)
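
Reviewer note (not part of the commit message): the per-queue
struct rte_eth_counters from ethdev_swstats.h tracks packets and
bytes, and each rte_eth_count_mbuf() call records the packet and,
as I read it, its byte length as well, which is what restores the
byte accounting mentioned above. For illustration only, the
counting pattern amounts to roughly the following; the demo_*
names are hypothetical stand-ins, not the real ethdev_swstats.h
API:

	#include <stdint.h>

	/* Simplified stand-in for struct rte_eth_counters (illustrative only). */
	struct demo_counters {
		uint64_t packets;
		uint64_t bytes;
	};

	/* Roughly what counting one mbuf amounts to: one packet, pkt_len bytes. */
	static inline void
	demo_count_pkt(struct demo_counters *c, uint32_t pkt_len)
	{
		c->packets += 1;
		c->bytes += pkt_len;	/* the byte count the old rx_pkts/tx_pkts fields lacked */
	}

The common helpers then report and clear these counters through
rte_eth_counters_stats_get()/rte_eth_counters_reset(), which is why
eth_stats_get() and eth_stats_reset() below reduce to single calls
taking the offsetof() of the per-queue counter fields.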

diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 7c46004f1e..7786982732 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -8,6 +8,7 @@
 #include <rte_mbuf.h>
 #include <ethdev_driver.h>
 #include <ethdev_vdev.h>
+#include <ethdev_swstats.h>
 #include <rte_malloc.h>
 #include <rte_memcpy.h>
 #include <bus_vdev_driver.h>
@@ -37,8 +38,8 @@ struct null_queue {
        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;
 
-       uint64_t rx_pkts;
-       uint64_t tx_pkts;
+       struct rte_eth_counters tx_stats;
+       struct rte_eth_counters rx_stats;
 };
 
 struct pmd_options {
@@ -99,11 +100,9 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
+               rte_eth_count_mbuf(&h->rx_stats, bufs[i]);
        }
 
-       /* NOTE: review for potential ordering optimization */
-       __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
-
        return i;
 }
 
@@ -127,11 +126,9 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
+               rte_eth_count_mbuf(&h->rx_stats, bufs[i]);
        }
 
-       /* NOTE: review for potential ordering optimization */
-       __atomic_fetch_add(&h->rx_pkts, i, __ATOMIC_SEQ_CST);
-
        return i;
 }
 
@@ -151,11 +148,10 @@ eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
        if ((q == NULL) || (bufs == NULL))
                return 0;
 
-       for (i = 0; i < nb_bufs; i++)
+       for (i = 0; i < nb_bufs; i++) {
+               rte_eth_count_mbuf(&h->tx_stats, bufs[i]);
                rte_pktmbuf_free(bufs[i]);
-
-       /* NOTE: review for potential ordering optimization */
-       __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
+       }
 
        return i;
 }
@@ -174,12 +170,10 @@ eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
+               rte_eth_count_mbuf(&h->tx_stats, bufs[i]);
                rte_pktmbuf_free(bufs[i]);
        }
 
-       /* NOTE: review for potential ordering optimization */
-       __atomic_fetch_add(&h->tx_pkts, i, __ATOMIC_SEQ_CST);
-
        return i;
 }
 
@@ -322,60 +316,20 @@ eth_dev_info(struct rte_eth_dev *dev,
 }
 
 static int
-eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-       unsigned int i, num_stats;
-       unsigned long rx_total = 0, tx_total = 0;
-       const struct pmd_internals *internal;
-
-       if ((dev == NULL) || (igb_stats == NULL))
-               return -EINVAL;
-
-       internal = dev->data->dev_private;
-       num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
-                       RTE_MIN(dev->data->nb_rx_queues,
-                               RTE_DIM(internal->rx_null_queues)));
-       for (i = 0; i < num_stats; i++) {
-               /* NOTE: review for atomic access */
-               igb_stats->q_ipackets[i] =
-                       internal->rx_null_queues[i].rx_pkts;
-               rx_total += igb_stats->q_ipackets[i];
-       }
-
-       num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
-                       RTE_MIN(dev->data->nb_tx_queues,
-                               RTE_DIM(internal->tx_null_queues)));
-       for (i = 0; i < num_stats; i++) {
-               /* NOTE: review for atomic access */
-               igb_stats->q_opackets[i] =
-                       internal->tx_null_queues[i].tx_pkts;
-               tx_total += igb_stats->q_opackets[i];
-       }
-
-       igb_stats->ipackets = rx_total;
-       igb_stats->opackets = tx_total;
-
-       return 0;
+       return rte_eth_counters_stats_get(dev,
+                                         offsetof(struct null_queue, tx_stats),
+                                         offsetof(struct null_queue, rx_stats),
+                                         stats);
 }
 
 static int
 eth_stats_reset(struct rte_eth_dev *dev)
 {
-       unsigned int i;
-       struct pmd_internals *internal;
-
-       if (dev == NULL)
-               return -EINVAL;
-
-       internal = dev->data->dev_private;
-       for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
-               /* NOTE: review for atomic access */
-               internal->rx_null_queues[i].rx_pkts = 0;
-       for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++)
-               /* NOTE: review for atomic access */
-               internal->tx_null_queues[i].tx_pkts = 0;
-
-       return 0;
+       return rte_eth_counters_reset(dev,
+                                     offsetof(struct null_queue, tx_stats),
+                                     offsetof(struct null_queue, rx_stats));
 }
 
 static void
-- 
2.43.0
