Introduce a new API to retrieve per-queue statistics from the drivers.
The API objectives:
- easily add some common per-queue statistics and have them exposed
  through the user xstats API while the user stats API is left untouched
- remove the limit on the per-queue statistics count (inherited
  from ixgbe) and avoid recurring bugs caused by stats array overflows

Signed-off-by: David Marchand <david.march...@redhat.com>
---
 lib/librte_ethdev/rte_ethdev.c        | 191 ++++++++++++++++++++++++++++------
 lib/librte_ethdev/rte_ethdev_core.h   |  13 +++
 lib/librte_ethdev/rte_ethdev_driver.h |  18 ++++
 3 files changed, 192 insertions(+), 30 deletions(-)

diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index 85c1794..058fbd1 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -88,21 +88,30 @@ struct rte_eth_xstats_name_off {
 
 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
 
-static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
+static const struct rte_eth_xstats_name_off legacy_rxq_stats_map[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
 };
+#define RTE_NB_LEGACY_RXQ_STATS RTE_DIM(legacy_rxq_stats_map)
+static const struct rte_eth_xstats_name_off rxq_stats_map[] = {
+       {"packets", offsetof(struct pmd_eth_rxq_stats, packets)},
+       {"bytes", offsetof(struct pmd_eth_rxq_stats, bytes)},
+       {"errors", offsetof(struct pmd_eth_rxq_stats, errors)},
+};
+#define RTE_NB_RXQ_STATS RTE_DIM(rxq_stats_map)
 
-#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /      \
-               sizeof(rte_rxq_stats_strings[0]))
-
-static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
+static const struct rte_eth_xstats_name_off legacy_txq_stats_map[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
 };
-#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /      \
-               sizeof(rte_txq_stats_strings[0]))
+#define RTE_NB_LEGACY_TXQ_STATS RTE_DIM(legacy_txq_stats_map)
+static const struct rte_eth_xstats_name_off txq_stats_map[] = {
+       {"packets", offsetof(struct pmd_eth_txq_stats, packets)},
+       {"bytes", offsetof(struct pmd_eth_txq_stats, bytes)},
+       {"errors", offsetof(struct pmd_eth_txq_stats, errors)},
+};
+#define RTE_NB_TXQ_STATS RTE_DIM(txq_stats_map)
 
 #define RTE_RX_OFFLOAD_BIT2STR(_name)  \
        { DEV_RX_OFFLOAD_##_name, #_name }
@@ -1937,6 +1946,10 @@ struct rte_eth_dev *
 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
 {
        struct rte_eth_dev *dev;
+       unsigned int nb_rxqs;
+       unsigned int nb_txqs;
+       unsigned int qid;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1945,7 +1958,44 @@ struct rte_eth_dev *
 
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
-       return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
+       ret = eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
+       if (ret)
+               goto out;
+
+       if (!dev->dev_ops->rxq_stats_get)
+               goto skip_rxq;
+       nb_rxqs = RTE_MIN(dev->data->nb_rx_queues,
+                         RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       for (qid = 0; qid < nb_rxqs; qid++) {
+               struct pmd_eth_rxq_stats rxq_stats;
+
+               memset(&rxq_stats, 0, sizeof(rxq_stats));
+               if (dev->dev_ops->rxq_stats_get(dev, qid, &rxq_stats))
+                       continue;
+
+               stats->q_ipackets[qid] = rxq_stats.packets;
+               stats->q_ibytes[qid] = rxq_stats.bytes;
+               stats->q_errors[qid] = rxq_stats.errors;
+       }
+
+skip_rxq:
+       if (!dev->dev_ops->txq_stats_get)
+               goto out;
+       nb_txqs = RTE_MIN(dev->data->nb_tx_queues,
+                         RTE_ETHDEV_QUEUE_STAT_CNTRS);
+       for (qid = 0; qid < nb_txqs; qid++) {
+               struct pmd_eth_txq_stats txq_stats;
+
+               memset(&txq_stats, 0, sizeof(txq_stats));
+               if (dev->dev_ops->txq_stats_get(dev, qid, &txq_stats))
+                       continue;
+
+               stats->q_opackets[qid] = txq_stats.packets;
+               stats->q_obytes[qid] = txq_stats.bytes;
+       }
+
+out:
+       return ret;
 }
 
 int
@@ -1969,12 +2019,24 @@ struct rte_eth_dev *
        uint16_t nb_rxqs, nb_txqs;
        int count;
 
-       nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-       nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-
        count = RTE_NB_STATS;
-       count += nb_rxqs * RTE_NB_RXQ_STATS;
-       count += nb_txqs * RTE_NB_TXQ_STATS;
+       if (dev->dev_ops->rxq_stats_get) {
+               nb_rxqs = dev->data->nb_rx_queues;
+               count += nb_rxqs * RTE_NB_RXQ_STATS;
+       } else {
+               nb_rxqs = RTE_MIN(dev->data->nb_rx_queues,
+                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
+               count += nb_rxqs * RTE_NB_LEGACY_RXQ_STATS;
+       }
+
+       if (dev->dev_ops->txq_stats_get) {
+               nb_txqs = dev->data->nb_tx_queues;
+               count += nb_txqs * RTE_NB_TXQ_STATS;
+       } else {
+               nb_txqs = RTE_MIN(dev->data->nb_tx_queues,
+                                 RTE_ETHDEV_QUEUE_STAT_CNTRS);
+               count += nb_txqs * RTE_NB_LEGACY_TXQ_STATS;
+       }
 
        return count;
 }
@@ -2065,27 +2127,59 @@ struct rte_eth_dev *
                        "%s", rte_stats_strings[idx].name);
                cnt_used_entries++;
        }
+
+       if (dev->dev_ops->rxq_stats_get) {
+               num_q = dev->data->nb_rx_queues;
+               for (id_queue = 0; id_queue < num_q; id_queue++) {
+                       for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
+                               snprintf(xstats_names[cnt_used_entries].name,
+                                       sizeof(xstats_names[0].name),
+                                       "rx_q%u%s",
+                                       id_queue, rxq_stats_map[idx].name);
+                               cnt_used_entries++;
+                       }
+               }
+               goto skip_legacy_rxq;
+       }
+
        num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
-               for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
+               for (idx = 0; idx < RTE_NB_LEGACY_RXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
                                "rx_q%u%s",
-                               id_queue, rte_rxq_stats_strings[idx].name);
+                               id_queue, legacy_rxq_stats_map[idx].name);
                        cnt_used_entries++;
                }
+       }
 
+skip_legacy_rxq:
+       if (dev->dev_ops->txq_stats_get) {
+               num_q = dev->data->nb_tx_queues;
+               for (id_queue = 0; id_queue < num_q; id_queue++) {
+                       for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
+                               snprintf(xstats_names[cnt_used_entries].name,
+                                       sizeof(xstats_names[0].name),
+                                       "tx_q%u%s",
+                                       id_queue, txq_stats_map[idx].name);
+                               cnt_used_entries++;
+                       }
+               }
+               goto skip_legacy_txq;
        }
+
        num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
-               for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
+               for (idx = 0; idx < RTE_NB_LEGACY_TXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
                                "tx_q%u%s",
-                               id_queue, rte_txq_stats_strings[idx].name);
+                               id_queue, legacy_txq_stats_map[idx].name);
                        cnt_used_entries++;
                }
        }
+
+skip_legacy_txq:
        return cnt_used_entries;
 }
 
@@ -2252,9 +2346,6 @@ struct rte_eth_dev *
 
        dev = &rte_eth_devices[port_id];
 
-       nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-       nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-
        /* global stats */
        for (i = 0; i < RTE_NB_STATS; i++) {
                stats_ptr = RTE_PTR_ADD(&eth_stats,
@@ -2264,26 +2355,71 @@ struct rte_eth_dev *
        }
 
        /* per-rxq stats */
+       if (dev->dev_ops->rxq_stats_get) {
+               nb_rxqs = dev->data->nb_rx_queues;
+               for (q = 0; q < nb_rxqs; q++) {
+                       struct pmd_eth_rxq_stats rxq_stats;
+
+                       memset(&rxq_stats, 0, sizeof(rxq_stats));
+                       if (dev->dev_ops->rxq_stats_get(dev, q, &rxq_stats)) {
+                               count += RTE_NB_RXQ_STATS;
+                               continue;
+                       }
+                       for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
+                               stats_ptr = RTE_PTR_ADD(&rxq_stats,
+                                               rxq_stats_map[i].offset);
+                               val = *stats_ptr;
+                               xstats[count++].value = val;
+                       }
+               }
+               goto skip_legacy_rxq;
+       }
+
+       nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (q = 0; q < nb_rxqs; q++) {
-               for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
+               for (i = 0; i < RTE_NB_LEGACY_RXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
-                                       rte_rxq_stats_strings[i].offset +
+                                       legacy_rxq_stats_map[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        xstats[count++].value = val;
                }
        }
 
+skip_legacy_rxq:
        /* per-txq stats */
+       if (dev->dev_ops->txq_stats_get) {
+               nb_txqs = dev->data->nb_tx_queues;
+               for (q = 0; q < nb_txqs; q++) {
+                       struct pmd_eth_txq_stats txq_stats;
+
+                       memset(&txq_stats, 0, sizeof(txq_stats));
+                       if (dev->dev_ops->txq_stats_get(dev, q, &txq_stats)) {
+                               count += RTE_NB_TXQ_STATS;
+                               continue;
+                       }
+                       for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
+                               stats_ptr = RTE_PTR_ADD(&txq_stats,
+                                               txq_stats_map[i].offset);
+                               val = *stats_ptr;
+                               xstats[count++].value = val;
+                       }
+               }
+               goto skip_legacy_txq;
+       }
+
+       nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (q = 0; q < nb_txqs; q++) {
-               for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
+               for (i = 0; i < RTE_NB_LEGACY_TXQ_STATS; i++) {
                        stats_ptr = RTE_PTR_ADD(&eth_stats,
-                                       rte_txq_stats_strings[i].offset +
+                                       legacy_txq_stats_map[i].offset +
                                        q * sizeof(uint64_t));
                        val = *stats_ptr;
                        xstats[count++].value = val;
                }
        }
+
+skip_legacy_txq:
        return count;
 }
 
@@ -2387,19 +2523,14 @@ struct rte_eth_dev *
        struct rte_eth_dev *dev;
        unsigned int count = 0, i;
        signed int xcount = 0;
-       uint16_t nb_rxqs, nb_txqs;
        int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
        dev = &rte_eth_devices[port_id];
 
-       nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-       nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
-
        /* Return generic statistics */
-       count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
-               (nb_txqs * RTE_NB_TXQ_STATS);
+       count = get_xstats_basic_count(dev);
 
        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL) {
diff --git a/lib/librte_ethdev/rte_ethdev_core.h b/lib/librte_ethdev/rte_ethdev_core.h
index 8f03f83..63375fe 100644
--- a/lib/librte_ethdev/rte_ethdev_core.h
+++ b/lib/librte_ethdev/rte_ethdev_core.h
@@ -97,6 +97,16 @@ typedef int (*eth_xstats_get_names_by_id_t)(struct rte_eth_dev *dev,
        unsigned int size);
 /**< @internal Get names of extended stats of an Ethernet device. */
 
+struct pmd_eth_rxq_stats;
+typedef int (*eth_rxq_stats_get)(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+       struct pmd_eth_rxq_stats *rx_queue_stats);
+/**< @internal Get statistics for a rx queue of an Ethernet device. */
+
+struct pmd_eth_txq_stats;
+typedef int (*eth_txq_stats_get)(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+       struct pmd_eth_txq_stats *tx_queue_stats);
+/**< @internal Get statistics for a tx queue of an Ethernet device. */
+
 typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
@@ -501,6 +511,9 @@ struct eth_dev_ops {
        eth_xstats_get_names_by_id_t xstats_get_names_by_id;
        /**< Get name of extended device statistics by ID. */
 
+       eth_rxq_stats_get rxq_stats_get; /**< Stats per rxq */
+       eth_txq_stats_get txq_stats_get; /**< Stats per txq */
+
        eth_tm_ops_get_t tm_ops_get;
        /**< Get Traffic Management (TM) operations. */
 
diff --git a/lib/librte_ethdev/rte_ethdev_driver.h b/lib/librte_ethdev/rte_ethdev_driver.h
index c2ac263..33a4b22 100644
--- a/lib/librte_ethdev/rte_ethdev_driver.h
+++ b/lib/librte_ethdev/rte_ethdev_driver.h
@@ -331,6 +331,24 @@ typedef int (*ethdev_bus_specific_init)(struct rte_eth_dev *ethdev,
 int __rte_experimental
 rte_eth_dev_destroy(struct rte_eth_dev *ethdev, ethdev_uninit_t ethdev_uninit);
 
+/**
+ * @internal
+ *
+ * Internal structures used by a PMD to provide the per rx/tx queue
+ * statistics to the ethdev layer.
+ */
+struct pmd_eth_rxq_stats {
+       uint64_t packets;
+       uint64_t bytes;
+       uint64_t errors;
+};
+
+struct pmd_eth_txq_stats {
+       uint64_t packets;
+       uint64_t bytes;
+       uint64_t errors;
+};
+
 #ifdef __cplusplus
 }
 #endif
-- 
1.8.3.1

Reply via email to