Replace the use of rte_atomic.h types and functions, instead use GCC
supplied C11 memory model builtins.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 drivers/net/ring/rte_eth_ring.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index e8bc9b6..fb7f0a0 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -44,8 +44,8 @@ enum dev_action {
 
 struct ring_queue {
        struct rte_ring *rng;
-       rte_atomic64_t rx_pkts;
-       rte_atomic64_t tx_pkts;
+       uint64_t rx_pkts;
+       uint64_t tx_pkts;
 };
 
 struct pmd_internals {
@@ -80,9 +80,9 @@ struct pmd_internals {
        const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
                        ptrs, nb_bufs, NULL);
        if (r->rng->flags & RING_F_SC_DEQ)
-               r->rx_pkts.cnt += nb_rx;
+               r->rx_pkts += nb_rx;
        else
-               rte_atomic64_add(&(r->rx_pkts), nb_rx);
+               __atomic_fetch_add(&r->rx_pkts, nb_rx, __ATOMIC_RELAXED);
        return nb_rx;
 }
 
@@ -94,9 +94,9 @@ struct pmd_internals {
        const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
                        ptrs, nb_bufs, NULL);
        if (r->rng->flags & RING_F_SP_ENQ)
-               r->tx_pkts.cnt += nb_tx;
+               r->tx_pkts += nb_tx;
        else
-               rte_atomic64_add(&(r->tx_pkts), nb_tx);
+               __atomic_fetch_add(&r->tx_pkts, nb_tx, __ATOMIC_RELAXED);
        return nb_tx;
 }
 
@@ -184,13 +184,13 @@ struct pmd_internals {
 
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
-               stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
+               stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts;
                rx_total += stats->q_ipackets[i];
        }
 
        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
-               stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
+               stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts;
                tx_total += stats->q_opackets[i];
        }
 
@@ -207,9 +207,9 @@ struct pmd_internals {
        struct pmd_internals *internal = dev->data->dev_private;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++)
-               internal->rx_ring_queues[i].rx_pkts.cnt = 0;
+               internal->rx_ring_queues[i].rx_pkts = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++)
-               internal->tx_ring_queues[i].tx_pkts.cnt = 0;
+               internal->tx_ring_queues[i].tx_pkts = 0;
 
        return 0;
 }
-- 
1.8.3.1

Reply via email to