The stats collection was not aggregating the stats for rings beyond
RTE_ETHDEV_QUEUE_STAT_CNTRS when the application supports more rings
than that limit but did not increase the stats counter limit.
Added checks so that stats from queues beyond the limit are still
included in the aggregated totals.

Bugzilla ID: 1836
Cc: [email protected]
Fixes: 57d5e5bc86e4 ("net/bnxt: add statistics")

Signed-off-by: Kishore Padmanabha <[email protected]>
---
 drivers/net/bnxt/bnxt_stats.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)
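
Note (not part of the commit message): the change follows a common pattern,
sketched below as standalone C with hypothetical names (ring_totals,
agg_stats, fill_rx_stats); only RTE_ETHDEV_QUEUE_STAT_CNTRS is a real DPDK
macro, defined here with an assumed default so the sketch compiles on its
own. Every ring contributes to the device-wide totals, while per-queue
counters are written only for the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues.

/*
 * Standalone sketch of the aggregation pattern (hypothetical names).
 */
#include <stdint.h>

#ifndef RTE_ETHDEV_QUEUE_STAT_CNTRS
#define RTE_ETHDEV_QUEUE_STAT_CNTRS 16  /* assumed default for this sketch */
#endif

struct ring_totals {
        uint64_t packets;
        uint64_t bytes;
};

struct agg_stats {
        uint64_t ipackets;      /* aggregated over all rings */
        uint64_t ibytes;
        uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];  /* per-queue view */
        uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
};

static void
fill_rx_stats(struct agg_stats *stats, const struct ring_totals *rings,
              unsigned int ring_count)
{
        unsigned int i;

        for (i = 0; i < ring_count; i++) {
                /* Aggregate counters include every ring, even past the limit. */
                stats->ipackets += rings[i].packets;
                stats->ibytes += rings[i].bytes;

                /* Per-queue counters exist only for the first N queues. */
                if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
                        stats->q_ipackets[i] = rings[i].packets;
                        stats->q_ibytes[i] = rings[i].bytes;
                }
        }
}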

diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 3ed1dc8101db..88cfbaf9ff4b 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -659,7 +659,7 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
        num_q_stats = RTE_MIN(bp->rx_cp_nr_rings,
                              (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
-       for (i = 0; i < num_q_stats; i++) {
+       for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                struct bnxt_ring_stats_ext ring_stats = {0};
@@ -675,7 +675,8 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
                if (unlikely(rc))
                        return rc;
 
-               bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, qstats, i, true);
+               bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats,
+                                           i < num_q_stats ? qstats : NULL, i, true);
                bnxt_stats->rx_nombuf +=
                                rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail,
                                                         rte_memory_order_relaxed);
@@ -684,7 +685,7 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
        num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
                              (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
-       for (i = 0; i < num_q_stats; i++) {
+       for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                struct bnxt_ring_stats_ext ring_stats = {0};
@@ -697,7 +698,8 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
                if (unlikely(rc))
                        return rc;
 
-               bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, qstats, i, false);
+               bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats,
+                                           i < num_q_stats ? qstats : NULL, i, false);
        }
 
        return rc;
@@ -724,7 +726,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
        num_q_stats = RTE_MIN(bp->rx_cp_nr_rings,
                              (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
-       for (i = 0; i < num_q_stats; i++) {
+       for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                struct bnxt_ring_stats ring_stats = {0};
@@ -739,7 +741,8 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
                if (unlikely(rc))
                        return rc;
 
-               bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, qstats, i, true);
+               bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats,
+                                       i < num_q_stats ? qstats : NULL, i, true);
                bnxt_stats->rx_nombuf +=
                                rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail,
                                                         rte_memory_order_relaxed);
@@ -748,7 +751,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
        num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
                              (unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS);
 
-       for (i = 0; i < num_q_stats; i++) {
+       for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                struct bnxt_ring_stats ring_stats = {0};
@@ -761,7 +764,8 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
                if (unlikely(rc))
                        return rc;
 
-               bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, qstats, i, false);
+               bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats,
+                                       i < num_q_stats ? qstats : NULL, i, false);
                bnxt_stats->oerrors +=
                                rte_atomic_load_explicit(&txq->tx_mbuf_drop,
                                                         rte_memory_order_relaxed);
-- 
2.45.4
