From: Igor Romanov <igor.roma...@oktetlabs.ru>

Gather statistics of enqueued and dequeued packets in the Rx and Tx burst
callbacks, and report them via the stats_get callback.

Signed-off-by: Igor Romanov <igor.roma...@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <andrew.rybche...@oktetlabs.ru>
Reviewed-by: Andy Moreton <amore...@xilinx.com>
Reviewed-by: Ivan Malov <ivan.ma...@oktetlabs.ru>
---
 drivers/net/sfc/sfc_repr.c | 60 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 58 insertions(+), 2 deletions(-)

diff --git a/drivers/net/sfc/sfc_repr.c b/drivers/net/sfc/sfc_repr.c
index a436b7e5e1..4fd81c3f6b 100644
--- a/drivers/net/sfc/sfc_repr.c
+++ b/drivers/net/sfc/sfc_repr.c
@@ -32,9 +32,14 @@ struct sfc_repr_shared {
        uint16_t                switch_port_id;
 };
 
+struct sfc_repr_queue_stats {
+       union sfc_pkts_bytes            packets_bytes;
+};
+
 struct sfc_repr_rxq {
        /* Datapath members */
        struct rte_ring                 *ring;
+       struct sfc_repr_queue_stats     stats;
 
        /* Non-datapath members */
        struct sfc_repr_shared          *shared;
@@ -45,6 +50,7 @@ struct sfc_repr_txq {
        /* Datapath members */
        struct rte_ring                 *ring;
        efx_mport_id_t                  egress_mport;
+       struct sfc_repr_queue_stats     stats;
 
        /* Non-datapath members */
        struct sfc_repr_shared          *shared;
@@ -173,15 +179,30 @@ sfc_repr_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
        struct sfc_repr_rxq *rxq = rx_queue;
        void **objs = (void *)&rx_pkts[0];
+       unsigned int n_rx;
 
        /* mbufs port is already filled correctly by representors proxy */
-       return rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);
+       n_rx = rte_ring_sc_dequeue_burst(rxq->ring, objs, nb_pkts, NULL);
+
+       if (n_rx > 0) {
+               unsigned int n_bytes = 0;
+               unsigned int i = 0;
+
+               do {
+                       n_bytes += rx_pkts[i]->pkt_len;
+               } while (++i < n_rx);
+
+               sfc_pkts_bytes_add(&rxq->stats.packets_bytes, n_rx, n_bytes);
+       }
+
+       return n_rx;
 }
 
 static uint16_t
 sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
        struct sfc_repr_txq *txq = tx_queue;
+       unsigned int n_bytes = 0;
        unsigned int n_tx;
        void **objs;
        uint16_t i;
@@ -201,6 +222,7 @@ sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                m->ol_flags |= sfc_dp_mport_override;
                *RTE_MBUF_DYNFIELD(m, sfc_dp_mport_offset,
                                   efx_mport_id_t *) = txq->egress_mport;
+               n_bytes += tx_pkts[i]->pkt_len;
        }
 
        objs = (void *)&tx_pkts[0];
@@ -210,14 +232,18 @@ sfc_repr_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
         * Remove m-port override flag from packets that were not enqueued
         * Setting the flag only for enqueued packets after the burst is
         * not possible since the ownership of enqueued packets is
-        * transferred to representor proxy.
+        * transferred to representor proxy. The same logic applies to
+        * counting the enqueued packets' bytes.
         */
        for (i = n_tx; i < nb_pkts; ++i) {
                struct rte_mbuf *m = tx_pkts[i];
 
                m->ol_flags &= ~sfc_dp_mport_override;
+               n_bytes -= m->pkt_len;
        }
 
+       sfc_pkts_bytes_add(&txq->stats.packets_bytes, n_tx, n_bytes);
+
        return n_tx;
 }
 
@@ -849,6 +875,35 @@ sfc_repr_dev_close(struct rte_eth_dev *dev)
        return 0;
 }
 
+static int
+sfc_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+       union sfc_pkts_bytes queue_stats;
+       uint16_t i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               struct sfc_repr_rxq *rxq = dev->data->rx_queues[i];
+
+               sfc_pkts_bytes_get(&rxq->stats.packets_bytes,
+                                  &queue_stats);
+
+               stats->ipackets += queue_stats.pkts;
+               stats->ibytes += queue_stats.bytes;
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               struct sfc_repr_txq *txq = dev->data->tx_queues[i];
+
+               sfc_pkts_bytes_get(&txq->stats.packets_bytes,
+                                  &queue_stats);
+
+               stats->opackets += queue_stats.pkts;
+               stats->obytes += queue_stats.bytes;
+       }
+
+       return 0;
+}
+
 static const struct eth_dev_ops sfc_repr_dev_ops = {
        .dev_configure                  = sfc_repr_dev_configure,
        .dev_start                      = sfc_repr_dev_start,
@@ -856,6 +911,7 @@ static const struct eth_dev_ops sfc_repr_dev_ops = {
        .dev_close                      = sfc_repr_dev_close,
        .dev_infos_get                  = sfc_repr_dev_infos_get,
        .link_update                    = sfc_repr_dev_link_update,
+       .stats_get                      = sfc_repr_stats_get,
        .rx_queue_setup                 = sfc_repr_rx_queue_setup,
        .rx_queue_release               = sfc_repr_rx_queue_release,
        .tx_queue_setup                 = sfc_repr_tx_queue_setup,
-- 
2.30.2

Reply via email to