Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
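Note for reviewers (below the ---, so not part of the commit message):
the conversion is mechanical. The counter gains an RTE_ATOMIC() type
qualifier, __atomic_fetch_add/__atomic_load_n become
rte_atomic_fetch_add_explicit/rte_atomic_load_explicit, and the
__ATOMIC_* constants become rte_memory_order_* values. A minimal
standalone sketch of the pattern, assuming a DPDK release (23.11 or
later) where <rte_stdatomic.h> provides the optional stdatomic
wrappers; the alloc_fail/count_fail/read_fail names are illustrative
only, not taken from the driver:

	#include <stdint.h>
	#include <rte_stdatomic.h>

	/* was: uint64_t alloc_fail; */
	static RTE_ATOMIC(uint64_t) alloc_fail;

	static inline void count_fail(void)
	{
		/* was: __atomic_fetch_add(&alloc_fail, 1, __ATOMIC_RELAXED); */
		rte_atomic_fetch_add_explicit(&alloc_fail, 1,
		    rte_memory_order_relaxed);
	}

	static inline uint64_t read_fail(void)
	{
		/* was: __atomic_load_n(&alloc_fail, __ATOMIC_RELAXED); */
		return rte_atomic_load_explicit(&alloc_fail,
		    rte_memory_order_relaxed);
	}

Relaxed ordering remains sufficient here because rx_mbuf_alloc_fail is
a statistics counter with no ordering relationship to other data.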
 drivers/net/bnxt/bnxt_cpr.h           |  4 ++--
 drivers/net/bnxt/bnxt_rxq.h           |  2 +-
 drivers/net/bnxt/bnxt_rxr.c           | 13 ++++++++-----
 drivers/net/bnxt/bnxt_rxtx_vec_neon.c |  2 +-
 drivers/net/bnxt/bnxt_stats.c         |  4 ++--
 5 files changed, 14 insertions(+), 11 deletions(-)
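
The two fence call sites (bnxt_cpr.h and bnxt_rxtx_vec_neon.c) keep the
rte_atomic_thread_fence() wrapper and only swap the ordering constant,
since the wrapper accepts rte_memory_order values in releases with the
stdatomic API. A minimal sketch of the acquire pattern used for the
completion ring valid check; entry_is_valid, valid_word, and the 0x1
mask are illustrative stand-ins for the driver's bnxt_cpr_cmp_valid(),
info3_v, and CMPL_BASE_V:

	#include <stdbool.h>
	#include <stdint.h>
	#include <rte_atomic.h>
	#include <rte_byteorder.h>

	static inline bool
	entry_is_valid(const uint32_t *valid_word, bool expected)
	{
		bool valid = !!(rte_le_to_cpu_32(*valid_word) & 0x1);

		if (valid == expected) {
			/* Keep later descriptor loads from being hoisted
			 * above the valid check; was
			 * rte_atomic_thread_fence(__ATOMIC_ACQUIRE). */
			rte_atomic_thread_fence(rte_memory_order_acquire);
			return true;
		}
		return false;
	}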

diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index c7b3480..43f06fd 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -107,7 +107,7 @@ struct bnxt_cp_ring_info {
 
 /**
  * Check validity of a completion ring entry. If the entry is valid, include a
- * C11 __ATOMIC_ACQUIRE fence to ensure that subsequent loads of fields in the
+ * C11 rte_memory_order_acquire fence to ensure that subsequent loads of fields in the
  * completion are not hoisted by the compiler or by the CPU to come before the
  * loading of the "valid" field.
  *
@@ -130,7 +130,7 @@ struct bnxt_cp_ring_info {
        expected = !(raw_cons & ring_size);
        valid = !!(rte_le_to_cpu_32(c->info3_v) & CMPL_BASE_V);
        if (valid == expected) {
-               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+               rte_atomic_thread_fence(rte_memory_order_acquire);
                return true;
        }
        return false;
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 77bc382..36e0ac3 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -40,7 +40,7 @@ struct bnxt_rx_queue {
        struct bnxt_rx_ring_info        *rx_ring;
        struct bnxt_cp_ring_info        *cp_ring;
        struct rte_mbuf                 fake_mbuf;
-       uint64_t                        rx_mbuf_alloc_fail;
+       RTE_ATOMIC(uint64_t)            rx_mbuf_alloc_fail;
        uint8_t                         need_realloc;
        const struct rte_memzone *mz;
 };
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 3542975..ca5d2c6 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -49,7 +49,8 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
        rx_buf = &rxr->rx_buf_ring[prod];
        mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
        if (!mbuf) {
-               __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+                   rte_memory_order_relaxed);
                /* If buff has failed already, setting this again won't hurt */
                rxq->need_realloc = 1;
                return -ENOMEM;
@@ -86,7 +87,8 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
 
        mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
        if (!mbuf) {
-               __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+                   rte_memory_order_relaxed);
                /* If buff has failed already, setting this again won't hurt */
                rxq->need_realloc = 1;
                return -ENOMEM;
@@ -465,7 +467,8 @@ static inline struct rte_mbuf *bnxt_tpa_end(
        struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
        RTE_ASSERT(new_data != NULL);
        if (!new_data) {
-               __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+                   rte_memory_order_relaxed);
                return NULL;
        }
        tpa_info->mbuf = new_data;
@@ -1677,8 +1680,8 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
                                rxr->tpa_info[i].mbuf =
                                        __bnxt_alloc_rx_data(rxq->mb_pool);
                                if (!rxr->tpa_info[i].mbuf) {
-                                       __atomic_fetch_add(&rxq->rx_mbuf_alloc_fail, 1,
-                                                       __ATOMIC_RELAXED);
+                                       rte_atomic_fetch_add_explicit(&rxq->rx_mbuf_alloc_fail, 1,
+                                                       rte_memory_order_relaxed);
                                        return -ENOMEM;
                                }
                        }
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 775400f..04864e0 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -240,7 +240,7 @@
                rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
 
                /* Use acquire fence to order loads of descriptor words. */
-               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+               rte_atomic_thread_fence(rte_memory_order_acquire);
                /* Reload lower 64b of descriptors to make it ordered after info3_v. */
                rxcmp1[3] = vreinterpretq_u32_u64(vld1q_lane_u64
                                ((void *)&cpr->cp_desc_ring[cons + 7],
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 6a6feab..479f819 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -663,7 +663,7 @@ static int bnxt_stats_get_ext(struct rte_eth_dev *eth_dev,
 
                bnxt_fill_rte_eth_stats_ext(bnxt_stats, &ring_stats, i, true);
                bnxt_stats->rx_nombuf +=
-                               __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+                   rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
        }
 
        num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
@@ -724,7 +724,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
 
                bnxt_fill_rte_eth_stats(bnxt_stats, &ring_stats, i, true);
                bnxt_stats->rx_nombuf +=
-                               __atomic_load_n(&rxq->rx_mbuf_alloc_fail, __ATOMIC_RELAXED);
+                   rte_atomic_load_explicit(&rxq->rx_mbuf_alloc_fail, rte_memory_order_relaxed);
        }
 
        num_q_stats = RTE_MIN(bp->tx_cp_nr_rings,
-- 
1.8.3.1
