Replace the use of gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
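A minimal sketch of the substitution pattern applied throughout this patch
(illustrative only; the refcnt variable and helper below are hypothetical and
not part of the diff):

        #include <rte_stdatomic.h>

        /* Before: plain field updated through gcc builtins, e.g.
         *      uint32_t refcnt;
         *      __atomic_fetch_add(&refcnt, 1, __ATOMIC_RELAXED);
         * After: the field carries the RTE_ATOMIC() specifier and is accessed
         * through the rte_atomic_*_explicit() wrappers.
         */
        static RTE_ATOMIC(uint32_t) refcnt;

        static inline uint32_t
        refcnt_acquire(void)
        {
                /* Relaxed ordering, mirroring the original __ATOMIC_RELAXED. */
                return rte_atomic_fetch_add_explicit(&refcnt, 1,
                                                     rte_memory_order_relaxed) + 1;
        }

Note also that the "weak" argument of __atomic_compare_exchange_n() has no
counterpart and is simply dropped when converting to
rte_atomic_compare_exchange_strong_explicit().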
 drivers/net/mlx5/linux/mlx5_ethdev_os.c |   6 +-
 drivers/net/mlx5/linux/mlx5_verbs.c     |   9 ++-
 drivers/net/mlx5/mlx5.c                 |   9 ++-
 drivers/net/mlx5/mlx5.h                 |  66 ++++++++---------
 drivers/net/mlx5/mlx5_flow.c            |  37 +++++-----
 drivers/net/mlx5/mlx5_flow.h            |   8 +-
 drivers/net/mlx5/mlx5_flow_aso.c        |  43 ++++++-----
 drivers/net/mlx5/mlx5_flow_dv.c         | 126 ++++++++++++++++----------------
 drivers/net/mlx5/mlx5_flow_flex.c       |  14 ++--
 drivers/net/mlx5/mlx5_flow_hw.c         |  61 +++++++++-------
 drivers/net/mlx5/mlx5_flow_meter.c      |  30 ++++----
 drivers/net/mlx5/mlx5_flow_quota.c      |  32 ++++----
 drivers/net/mlx5/mlx5_hws_cnt.c         |  71 +++++++++---------
 drivers/net/mlx5/mlx5_hws_cnt.h         |  10 +--
 drivers/net/mlx5/mlx5_rx.h              |  14 ++--
 drivers/net/mlx5/mlx5_rxq.c             |  30 ++++----
 drivers/net/mlx5/mlx5_trigger.c         |   2 +-
 drivers/net/mlx5/mlx5_tx.h              |  18 ++---
 drivers/net/mlx5/mlx5_txpp.c            |  84 ++++++++++-----------
 drivers/net/mlx5/mlx5_txq.c             |  12 +--
 drivers/net/mlx5/mlx5_utils.c           |  10 +--
 drivers/net/mlx5/mlx5_utils.h           |   4 +-
 22 files changed, 351 insertions(+), 345 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index 40ea9d2..70bba6c 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -1918,9 +1918,9 @@ int mlx5_txpp_map_hca_bar(struct rte_eth_dev *dev)
                return -ENOTSUP;
        }
        /* Check there is no concurrent mapping in other thread. */
-       if (!__atomic_compare_exchange_n(&ppriv->hca_bar, &expected,
-                                        base, false,
-                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+       if (!rte_atomic_compare_exchange_strong_explicit(&ppriv->hca_bar, &expected,
+                                        base,
+                                        rte_memory_order_relaxed, rte_memory_order_relaxed))
                rte_mem_unmap(base, MLX5_ST_SZ_BYTES(initial_seg));
        return 0;
 }
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index b54f3cc..63da8f4 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -1117,7 +1117,7 @@
                return 0;
        }
        /* Only need to check refcnt, 0 after "sh" is allocated. */
-       if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
+       if (!!(rte_atomic_fetch_add_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed))) {
                MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
                priv->lb_used = 1;
                return 0;
@@ -1163,7 +1163,7 @@
                claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
                sh->self_lb.ibv_cq = NULL;
        }
-       __atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1, rte_memory_order_relaxed);
        return -rte_errno;
 #else
        RTE_SET_USED(dev);
@@ -1186,8 +1186,9 @@
 
        if (!priv->lb_used)
                return;
-       MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
-       if (!(__atomic_fetch_sub(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED) - 1)) {
+       MLX5_ASSERT(rte_atomic_load_explicit(&sh->self_lb.refcnt, rte_memory_order_relaxed));
+       if (!(rte_atomic_fetch_sub_explicit(&sh->self_lb.refcnt, 1,
+           rte_memory_order_relaxed) - 1)) {
                if (sh->self_lb.qp) {
                        claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
                        sh->self_lb.qp = NULL;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d1a6382..2ff94db 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -855,8 +855,8 @@
                ct_pool = mng->pools[idx];
                for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
                        ct = &ct_pool->actions[i];
-                       val = __atomic_fetch_sub(&ct->refcnt, 1,
-                                                __ATOMIC_RELAXED);
+                       val = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1,
+                                                rte_memory_order_relaxed);
                        MLX5_ASSERT(val == 1);
                        if (val > 1)
                                cnt++;
@@ -1082,7 +1082,8 @@
                DRV_LOG(ERR, "Dynamic flex parser is not supported on HWS");
                return -ENOTSUP;
        }
-       if (__atomic_fetch_add(&priv->sh->srh_flex_parser.refcnt, 1, __ATOMIC_RELAXED) + 1 > 1)
+       if (rte_atomic_fetch_add_explicit(&priv->sh->srh_flex_parser.refcnt, 1,
+           rte_memory_order_relaxed) + 1 > 1)
                return 0;
        priv->sh->srh_flex_parser.flex.devx_fp = mlx5_malloc(MLX5_MEM_ZERO,
                        sizeof(struct mlx5_flex_parser_devx), 0, SOCKET_ID_ANY);
@@ -1173,7 +1174,7 @@
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_internal_flex_parser_profile *fp = &priv->sh->srh_flex_parser;
 
-       if (__atomic_fetch_sub(&fp->refcnt, 1, __ATOMIC_RELAXED) - 1)
+       if (rte_atomic_fetch_sub_explicit(&fp->refcnt, 1, rte_memory_order_relaxed) - 1)
                return;
        mlx5_devx_cmd_destroy(fp->flex.devx_fp->devx_obj);
        mlx5_free(fp->flex.devx_fp);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0091a24..77c84b8 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -378,7 +378,7 @@ struct mlx5_drop {
 struct mlx5_lb_ctx {
        struct ibv_qp *qp; /* QP object. */
        void *ibv_cq; /* Completion queue. */
-       uint16_t refcnt; /* Reference count for representors. */
+       RTE_ATOMIC(uint16_t) refcnt; /* Reference count for representors. */
 };
 
 /* HW steering queue job descriptor type. */
@@ -481,10 +481,10 @@ enum mlx5_counter_type {
 
 /* Counter age parameter. */
 struct mlx5_age_param {
-       uint16_t state; /**< Age state (atomically accessed). */
+       RTE_ATOMIC(uint16_t) state; /**< Age state (atomically accessed). */
        uint16_t port_id; /**< Port id of the counter. */
        uint32_t timeout:24; /**< Aging timeout in seconds. */
-       uint32_t sec_since_last_hit;
+       RTE_ATOMIC(uint32_t) sec_since_last_hit;
        /**< Time in seconds since last hit (atomically accessed). */
        void *context; /**< Flow counter age context. */
 };
@@ -497,7 +497,7 @@ struct flow_counter_stats {
 /* Shared counters information for counters. */
 struct mlx5_flow_counter_shared {
        union {
-               uint32_t refcnt; /* Only for shared action management. */
+               RTE_ATOMIC(uint32_t) refcnt; /* Only for shared action management. */
                uint32_t id; /* User counter ID for legacy sharing. */
        };
 };
@@ -588,7 +588,7 @@ struct mlx5_counter_stats_raw {
 
 /* Counter global management structure. */
 struct mlx5_flow_counter_mng {
-       volatile uint16_t n_valid; /* Number of valid pools. */
+       volatile RTE_ATOMIC(uint16_t) n_valid; /* Number of valid pools. */
        uint16_t last_pool_idx; /* Last used pool index */
        int min_id; /* The minimum counter ID in the pools. */
        int max_id; /* The maximum counter ID in the pools. */
@@ -654,7 +654,7 @@ struct mlx5_aso_sq {
 struct mlx5_aso_age_action {
        LIST_ENTRY(mlx5_aso_age_action) next;
        void *dr_action;
-       uint32_t refcnt;
+       RTE_ATOMIC(uint32_t) refcnt;
        /* Following fields relevant only when action is active. */
        uint16_t offset; /* Offset of ASO Flow Hit flag in DevX object. */
        struct mlx5_age_param age_params;
@@ -688,7 +688,7 @@ struct mlx5_geneve_tlv_option_resource {
        rte_be16_t option_class; /* geneve tlv opt class.*/
        uint8_t option_type; /* geneve tlv opt type.*/
        uint8_t length; /* geneve tlv opt length. */
-       uint32_t refcnt; /* geneve tlv object reference counter */
+       RTE_ATOMIC(uint32_t) refcnt; /* geneve tlv object reference counter */
 };
 
 
@@ -903,7 +903,7 @@ struct mlx5_flow_meter_policy {
        uint16_t group;
        /* The group. */
        rte_spinlock_t sl;
-       uint32_t ref_cnt;
+       RTE_ATOMIC(uint32_t) ref_cnt;
        /* Use count. */
        struct rte_flow_pattern_template *hws_item_templ;
        /* Hardware steering item templates. */
@@ -1038,7 +1038,7 @@ struct mlx5_flow_meter_profile {
                struct mlx5_flow_meter_srtcm_rfc2697_prm srtcm_prm;
                /**< srtcm_rfc2697 struct. */
        };
-       uint32_t ref_cnt; /**< Use count. */
+       RTE_ATOMIC(uint32_t) ref_cnt; /**< Use count. */
        uint32_t g_support:1; /**< If G color will be generated. */
        uint32_t y_support:1; /**< If Y color will be generated. */
        uint32_t initialized:1; /**< Initialized. */
@@ -1078,7 +1078,7 @@ struct mlx5_aso_mtr {
        enum mlx5_aso_mtr_type type;
        struct mlx5_flow_meter_info fm;
        /**< Pointer to the next aso flow meter structure. */
-       uint8_t state; /**< ASO flow meter state. */
+       RTE_ATOMIC(uint8_t) state; /**< ASO flow meter state. */
        uint32_t offset;
        enum rte_color init_color;
 };
@@ -1124,7 +1124,7 @@ struct mlx5_flow_mtr_mng {
        /* Default policy table. */
        uint32_t def_policy_id;
        /* Default policy id. */
-       uint32_t def_policy_ref_cnt;
+       RTE_ATOMIC(uint32_t) def_policy_ref_cnt;
        /** def_policy meter use count. */
        struct mlx5_flow_tbl_resource *drop_tbl[MLX5_MTR_DOMAIN_MAX];
        /* Meter drop table. */
@@ -1197,8 +1197,8 @@ struct mlx5_txpp_wq {
 
 /* Tx packet pacing internal timestamp. */
 struct mlx5_txpp_ts {
-       uint64_t ci_ts;
-       uint64_t ts;
+       RTE_ATOMIC(uint64_t) ci_ts;
+       RTE_ATOMIC(uint64_t) ts;
 };
 
 /* Tx packet pacing structure. */
@@ -1221,12 +1221,12 @@ struct mlx5_dev_txpp {
        struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
        uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
        /* Statistics counters. */
-       uint64_t err_miss_int; /* Missed service interrupt. */
-       uint64_t err_rearm_queue; /* Rearm Queue errors. */
-       uint64_t err_clock_queue; /* Clock Queue errors. */
-       uint64_t err_ts_past; /* Timestamp in the past. */
-       uint64_t err_ts_future; /* Timestamp in the distant future. */
-       uint64_t err_ts_order; /* Timestamp not in ascending order. */
+       RTE_ATOMIC(uint64_t) err_miss_int; /* Missed service interrupt. */
+       RTE_ATOMIC(uint64_t) err_rearm_queue; /* Rearm Queue errors. */
+       RTE_ATOMIC(uint64_t) err_clock_queue; /* Clock Queue errors. */
+       RTE_ATOMIC(uint64_t) err_ts_past; /* Timestamp in the past. */
+       RTE_ATOMIC(uint64_t) err_ts_future; /* Timestamp in the distant future. */
+       RTE_ATOMIC(uint64_t) err_ts_order; /* Timestamp not in ascending order. */
 };
 
 /* Sample ID information of eCPRI flex parser structure. */
@@ -1287,16 +1287,16 @@ struct mlx5_aso_ct_action {
        void *dr_action_orig;
        /* General action object for reply dir. */
        void *dr_action_rply;
-       uint32_t refcnt; /* Action used count in device flows. */
+       RTE_ATOMIC(uint32_t) refcnt; /* Action used count in device flows. */
        uint32_t offset; /* Offset of ASO CT in DevX objects bulk. */
        uint16_t peer; /* The only peer port index could also use this CT. */
-       enum mlx5_aso_ct_state state; /* ASO CT state. */
+       RTE_ATOMIC(enum mlx5_aso_ct_state) state; /* ASO CT state. */
        bool is_original; /* The direction of the DR action to be used. */
 };
 
 /* CT action object state update. */
 #define MLX5_ASO_CT_UPDATE_STATE(c, s) \
-       __atomic_store_n(&((c)->state), (s), __ATOMIC_RELAXED)
+       rte_atomic_store_explicit(&((c)->state), (s), rte_memory_order_relaxed)
 
 #ifdef PEDANTIC
 #pragma GCC diagnostic ignored "-Wpedantic"
@@ -1370,7 +1370,7 @@ struct mlx5_flex_pattern_field {
 /* Port flex item context. */
 struct mlx5_flex_item {
        struct mlx5_flex_parser_devx *devx_fp; /* DevX flex parser object. */
-       uint32_t refcnt; /* Atomically accessed refcnt by flows. */
+       RTE_ATOMIC(uint32_t) refcnt; /* Atomically accessed refcnt by flows. */
        enum rte_flow_item_flex_tunnel_mode tunnel_mode; /* Tunnel mode. */
        uint32_t mapnum; /* Number of pattern translation entries. */
        struct mlx5_flex_pattern_field map[MLX5_FLEX_ITEM_MAPPING_NUM];
@@ -1383,7 +1383,7 @@ struct mlx5_flex_item {
 #define MLX5_SRV6_SAMPLE_NUM 5
 /* Mlx5 internal flex parser profile structure. */
 struct mlx5_internal_flex_parser_profile {
-       uint32_t refcnt;
+       RTE_ATOMIC(uint32_t) refcnt;
        struct mlx5_flex_item flex; /* Hold map info for modify field. */
 };
 
@@ -1512,9 +1512,9 @@ struct mlx5_dev_ctx_shared {
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        struct mlx5_send_to_kernel_action send_to_kernel_action[MLX5DR_TABLE_TYPE_MAX];
 #endif
-       struct mlx5_hlist *encaps_decaps; /* Encap/decap action hash list. */
-       struct mlx5_hlist *modify_cmds;
-       struct mlx5_hlist *tag_table;
+       RTE_ATOMIC(struct mlx5_hlist *) encaps_decaps; /* Encap/decap action hash list. */
+       RTE_ATOMIC(struct mlx5_hlist *) modify_cmds;
+       RTE_ATOMIC(struct mlx5_hlist *) tag_table;
        struct mlx5_list *port_id_action_list; /* Port ID action list. */
        struct mlx5_list *push_vlan_action_list; /* Push VLAN actions. */
        struct mlx5_list *sample_action_list; /* List of sample actions. */
@@ -1525,7 +1525,7 @@ struct mlx5_dev_ctx_shared {
        /* SW steering counters management structure. */
        void *default_miss_action; /* Default miss action. */
        struct mlx5_indexed_pool *ipool[MLX5_IPOOL_MAX];
-       struct mlx5_indexed_pool *mdh_ipools[MLX5_MAX_MODIFY_NUM];
+       RTE_ATOMIC(struct mlx5_indexed_pool *) mdh_ipools[MLX5_MAX_MODIFY_NUM];
        /* Shared interrupt handler section. */
        struct rte_intr_handle *intr_handle; /* Interrupt handler for device. */
        struct rte_intr_handle *intr_handle_devx; /* DEVX interrupt handler. */
@@ -1570,7 +1570,7 @@ struct mlx5_dev_ctx_shared {
  * Caution, secondary process may rebuild the struct during port start.
  */
 struct mlx5_proc_priv {
-       void *hca_bar;
+       RTE_ATOMIC(void *) hca_bar;
        /* Mapped HCA PCI BAR area. */
        size_t uar_table_sz;
        /* Size of UAR register table. */
@@ -1635,7 +1635,7 @@ struct mlx5_rxq_obj {
 /* Indirection table. */
 struct mlx5_ind_table_obj {
        LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
-       uint32_t refcnt; /* Reference counter. */
+       RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
        union {
                void *ind_table; /**< Indirection table. */
                struct mlx5_devx_obj *rqt; /* DevX RQT object. */
@@ -1826,7 +1826,7 @@ enum mlx5_quota_state {
 };
 
 struct mlx5_quota {
-       uint8_t state; /* object state */
+       RTE_ATOMIC(uint8_t) state; /* object state */
        uint8_t mode;  /* metering mode */
        /**
         * Keep track of application update types.
@@ -1955,7 +1955,7 @@ struct mlx5_priv {
        uint32_t flex_item_map; /* Map of allocated flex item elements. */
        uint32_t nb_queue; /* HW steering queue number. */
        struct mlx5_hws_cnt_pool *hws_cpool; /* HW steering's counter pool. */
-       uint32_t hws_mark_refcnt; /* HWS mark action reference counter. */
+       RTE_ATOMIC(uint32_t) hws_mark_refcnt; /* HWS mark action reference counter. */
        struct rte_pmd_mlx5_flow_engine_mode_info mode_info; /* Process set flow engine info. */
        struct mlx5_flow_hw_attr *hw_attr; /* HW Steering port configuration. */
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
@@ -2007,7 +2007,7 @@ struct mlx5_priv {
 
 #endif
        struct rte_eth_dev *shared_host; /* Host device for HW steering. */
-       uint16_t shared_refcnt; /* HW steering host reference counter. */
+       RTE_ATOMIC(uint16_t) shared_refcnt; /* HW steering host reference counter. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f31fdfb..1954975 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -4623,8 +4623,8 @@ struct mlx5_translated_action_handle {
                        shared_rss = mlx5_ipool_get
                                (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
                                                                           idx);
-                       __atomic_fetch_add(&shared_rss->refcnt, 1,
-                                          __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1,
+                                          rte_memory_order_relaxed);
                        return idx;
                default:
                        break;
@@ -7459,7 +7459,7 @@ struct mlx5_list_entry *
        if (tunnel) {
                flow->tunnel = 1;
                flow->tunnel_id = tunnel->tunnel_id;
-               __atomic_fetch_add(&tunnel->refctn, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed);
                mlx5_free(default_miss_ctx.queue);
        }
        mlx5_flow_pop_thread_workspace();
@@ -7470,10 +7470,10 @@ struct mlx5_list_entry *
        flow_mreg_del_copy_action(dev, flow);
        flow_drv_destroy(dev, flow);
        if (rss_desc->shared_rss)
-               __atomic_fetch_sub(&((struct mlx5_shared_action_rss *)
+               rte_atomic_fetch_sub_explicit(&((struct mlx5_shared_action_rss *)
                        mlx5_ipool_get
                        (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
-                       rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
+                       rss_desc->shared_rss))->refcnt, 1, rte_memory_order_relaxed);
        mlx5_ipool_free(priv->flows[type], idx);
        rte_errno = ret; /* Restore rte_errno. */
        ret = rte_errno;
@@ -7976,7 +7976,8 @@ struct rte_flow *
 
                tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
                RTE_VERIFY(tunnel);
-               if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+               if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1,
+                   rte_memory_order_relaxed) - 1))
                        mlx5_flow_tunnel_free(dev, tunnel);
        }
        flow_mreg_del_copy_action(dev, flow);
@@ -9456,7 +9457,7 @@ struct mlx5_flow_workspace*
 {
        uint32_t pools_n, us;
 
-       pools_n = __atomic_load_n(&sh->sws_cmng.n_valid, __ATOMIC_RELAXED);
+       pools_n = rte_atomic_load_explicit(&sh->sws_cmng.n_valid, rte_memory_order_relaxed);
        us = MLX5_POOL_QUERY_FREQ_US / pools_n;
        DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
        if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
@@ -9558,17 +9559,17 @@ struct mlx5_flow_workspace*
        for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
                cnt = MLX5_POOL_GET_CNT(pool, i);
                age_param = MLX5_CNT_TO_AGE(cnt);
-               if (__atomic_load_n(&age_param->state,
-                                   __ATOMIC_RELAXED) != AGE_CANDIDATE)
+               if (rte_atomic_load_explicit(&age_param->state,
+                                   rte_memory_order_relaxed) != AGE_CANDIDATE)
                        continue;
                if (cur->data[i].hits != prev->data[i].hits) {
-                       __atomic_store_n(&age_param->sec_since_last_hit, 0,
-                                        __ATOMIC_RELAXED);
+                       rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0,
+                                        rte_memory_order_relaxed);
                        continue;
                }
-               if (__atomic_fetch_add(&age_param->sec_since_last_hit,
+               if (rte_atomic_fetch_add_explicit(&age_param->sec_since_last_hit,
                                       time_delta,
-                                      __ATOMIC_RELAXED) + time_delta <= age_param->timeout)
+                                      rte_memory_order_relaxed) + time_delta <= age_param->timeout)
                        continue;
                /**
                 * Hold the lock first, or if between the
@@ -9579,10 +9580,10 @@ struct mlx5_flow_workspace*
                priv = rte_eth_devices[age_param->port_id].data->dev_private;
                age_info = GET_PORT_AGE_INFO(priv);
                rte_spinlock_lock(&age_info->aged_sl);
-               if (__atomic_compare_exchange_n(&age_param->state, &expected,
-                                               AGE_TMOUT, false,
-                                               __ATOMIC_RELAXED,
-                                               __ATOMIC_RELAXED)) {
+               if (rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+                                               AGE_TMOUT,
+                                               rte_memory_order_relaxed,
+                                               rte_memory_order_relaxed)) {
                        TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
                        MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
                }
@@ -11407,7 +11408,7 @@ struct tunnel_db_element_release_ctx {
 {
        struct tunnel_db_element_release_ctx *ctx = x;
        ctx->ret = 0;
-       if (!(__atomic_fetch_sub(&tunnel->refctn, 1, __ATOMIC_RELAXED) - 1))
+       if (!(rte_atomic_fetch_sub_explicit(&tunnel->refctn, 1, rte_memory_order_relaxed) - 1))
                mlx5_flow_tunnel_free(dev, tunnel);
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 34b5e0f..edfa76f 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1049,7 +1049,7 @@ struct mlx5_flow_tunnel {
        LIST_ENTRY(mlx5_flow_tunnel) chain;
        struct rte_flow_tunnel app_tunnel;      /** app tunnel copy */
        uint32_t tunnel_id;                     /** unique tunnel ID */
-       uint32_t refctn;
+       RTE_ATOMIC(uint32_t) refctn;
        struct rte_flow_action action;
        struct rte_flow_item item;
        struct mlx5_hlist *groups;              /** tunnel groups */
@@ -1470,7 +1470,7 @@ struct rte_flow_pattern_template {
        struct mlx5dr_match_template *mt; /* mlx5 match template. */
        uint64_t item_flags; /* Item layer flags. */
        uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
-       uint32_t refcnt;  /* Reference counter. */
+       RTE_ATOMIC(uint32_t) refcnt;  /* Reference counter. */
        /*
         * If true, then rule pattern should be prepended with
         * represented_port pattern item.
@@ -1502,7 +1502,7 @@ struct rte_flow_actions_template {
        uint16_t reformat_off; /* Offset of DR reformat action. */
        uint16_t mhdr_off; /* Offset of DR modify header action. */
        uint16_t recom_off;  /* Offset of DR IPv6 routing push remove action. */
-       uint32_t refcnt; /* Reference counter. */
+       RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
        uint8_t flex_item; /* flex item index. */
 };
 
@@ -1855,7 +1855,7 @@ struct rte_flow_template_table {
 /* Shared RSS action structure */
 struct mlx5_shared_action_rss {
        ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
-       uint32_t refcnt; /**< Atomically accessed refcnt. */
+       RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */
        struct rte_flow_action_rss origin; /**< Original rte RSS action. */
        uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
        struct mlx5_ind_table_obj *ind_tbl;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index ab9eb21..a94b228 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -619,7 +619,7 @@
                        uint8_t *u8addr;
                        uint8_t hit;
 
-                       if (__atomic_load_n(&ap->state, __ATOMIC_RELAXED) !=
+                       if (rte_atomic_load_explicit(&ap->state, rte_memory_order_relaxed) !=
                                            AGE_CANDIDATE)
                                continue;
                        byte = 63 - (j / 8);
@@ -627,13 +627,13 @@
                        u8addr = (uint8_t *)addr;
                        hit = (u8addr[byte] >> offset) & 0x1;
                        if (hit) {
-                               __atomic_store_n(&ap->sec_since_last_hit, 0,
-                                                __ATOMIC_RELAXED);
+                               rte_atomic_store_explicit(&ap->sec_since_last_hit, 0,
+                                                rte_memory_order_relaxed);
                        } else {
                                struct mlx5_priv *priv;
 
-                               __atomic_fetch_add(&ap->sec_since_last_hit,
-                                                  diff, __ATOMIC_RELAXED);
+                               rte_atomic_fetch_add_explicit(&ap->sec_since_last_hit,
+                                                  diff, rte_memory_order_relaxed);
                                /* If timeout passed add to aged-out list. */
                                if (ap->sec_since_last_hit <= ap->timeout)
                                        continue;
@@ -641,12 +641,11 @@
                                rte_eth_devices[ap->port_id].data->dev_private;
                                age_info = GET_PORT_AGE_INFO(priv);
                                rte_spinlock_lock(&age_info->aged_sl);
-                               if (__atomic_compare_exchange_n(&ap->state,
+                               if (rte_atomic_compare_exchange_strong_explicit(&ap->state,
                                                                &expected,
                                                                AGE_TMOUT,
-                                                               false,
-                                                              __ATOMIC_RELAXED,
-                                                           __ATOMIC_RELAXED)) {
+                                                              rte_memory_order_relaxed,
+                                                           rte_memory_order_relaxed)) {
                                        LIST_INSERT_HEAD(&age_info->aged_aso,
                                                         act, next);
                                        MLX5_AGE_SET(age_info,
@@ -946,10 +945,10 @@
                for (i = 0; i < n; ++i) {
                        aso_mtr = sq->elts[(sq->tail + i) & mask].mtr;
                        MLX5_ASSERT(aso_mtr);
-                       verdict = __atomic_compare_exchange_n(&aso_mtr->state,
+                       verdict = rte_atomic_compare_exchange_strong_explicit(&aso_mtr->state,
                                                    &exp_state, ASO_METER_READY,
-                                                   false, __ATOMIC_RELAXED,
-                                                   __ATOMIC_RELAXED);
+                                                   rte_memory_order_relaxed,
+                                                   rte_memory_order_relaxed);
                        MLX5_ASSERT(verdict);
                }
                sq->tail += n;
@@ -1005,10 +1004,10 @@
                        mtr = mlx5_ipool_get(priv->hws_mpool->idx_pool,
                                             MLX5_INDIRECT_ACTION_IDX_GET(job->action));
                        MLX5_ASSERT(mtr);
-                       verdict = __atomic_compare_exchange_n(&mtr->state,
+                       verdict = rte_atomic_compare_exchange_strong_explicit(&mtr->state,
                                                    &exp_state, ASO_METER_READY,
-                                                   false, __ATOMIC_RELAXED,
-                                                   __ATOMIC_RELAXED);
+                                                   rte_memory_order_relaxed,
+                                                   rte_memory_order_relaxed);
                        MLX5_ASSERT(verdict);
                        flow_hw_job_put(priv, job, CTRL_QUEUE_ID(priv));
                }
@@ -1103,7 +1102,7 @@
        struct mlx5_aso_sq *sq;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        uint32_t poll_cqe_times = MLX5_MTR_POLL_WQE_CQE_TIMES;
-       uint8_t state = __atomic_load_n(&mtr->state, __ATOMIC_RELAXED);
+       uint8_t state = rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed);
        poll_cq_t poll_mtr_cq =
                is_tmpl_api ? mlx5_aso_poll_cq_mtr_hws : mlx5_aso_poll_cq_mtr_sws;
 
@@ -1112,7 +1111,7 @@
        sq = mlx5_aso_mtr_select_sq(sh, MLX5_HW_INV_QUEUE, mtr, &need_lock);
        do {
                poll_mtr_cq(priv, sq);
-               if (__atomic_load_n(&mtr->state, __ATOMIC_RELAXED) ==
+               if (rte_atomic_load_explicit(&mtr->state, rte_memory_order_relaxed) ==
                                            ASO_METER_READY)
                        return 0;
                /* Waiting for CQE ready. */
@@ -1411,7 +1410,7 @@
        uint16_t wqe_idx;
        struct mlx5_aso_ct_pool *pool;
        enum mlx5_aso_ct_state state =
-                               __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+                               rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
 
        if (state == ASO_CONNTRACK_FREE) {
                DRV_LOG(ERR, "Fail: No context to query");
@@ -1620,12 +1619,12 @@
                sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
        else
                sq = __mlx5_aso_ct_get_sq_in_sws(sh, ct);
-       if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+       if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
            ASO_CONNTRACK_READY)
                return 0;
        do {
                mlx5_aso_ct_completion_handle(sh, sq, need_lock);
-               if (__atomic_load_n(&ct->state, __ATOMIC_RELAXED) ==
+       if (rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed) ==
                    ASO_CONNTRACK_READY)
                        return 0;
                /* Waiting for CQE ready, consider should block or sleep. */
@@ -1791,7 +1790,7 @@
        bool need_lock = !!(queue == MLX5_HW_INV_QUEUE);
        uint32_t poll_cqe_times = MLX5_CT_POLL_WQE_CQE_TIMES;
        enum mlx5_aso_ct_state state =
-                               __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+                               rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
 
        if (sh->config.dv_flow_en == 2)
                sq = __mlx5_aso_ct_get_sq_in_hws(queue, pool);
@@ -1807,7 +1806,7 @@
        }
        do {
                mlx5_aso_ct_completion_handle(sh, sq, need_lock);
-               state = __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+               state = rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
                if (state == ASO_CONNTRACK_READY ||
                    state == ASO_CONNTRACK_QUERY)
                        return 0;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d434c67..f9c56af 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -313,7 +313,7 @@ enum mlx5_l3_tunnel_detection {
 }
 
 static inline struct mlx5_hlist *
-flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, struct mlx5_hlist **phl,
+flow_dv_hlist_prepare(struct mlx5_dev_ctx_shared *sh, RTE_ATOMIC(struct mlx5_hlist *) *phl,
                     const char *name, uint32_t size, bool direct_key,
                     bool lcores_share, void *ctx,
                     mlx5_list_create_cb cb_create,
@@ -327,7 +327,7 @@ enum mlx5_l3_tunnel_detection {
        struct mlx5_hlist *expected = NULL;
        char s[MLX5_NAME_SIZE];
 
-       hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+       hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
        if (likely(hl))
                return hl;
        snprintf(s, sizeof(s), "%s_%s", sh->ibdev_name, name);
@@ -341,11 +341,11 @@ enum mlx5_l3_tunnel_detection {
                                   "cannot allocate resource memory");
                return NULL;
        }
-       if (!__atomic_compare_exchange_n(phl, &expected, hl, false,
-                                        __ATOMIC_SEQ_CST,
-                                        __ATOMIC_SEQ_CST)) {
+       if (!rte_atomic_compare_exchange_strong_explicit(phl, &expected, hl,
+                                        rte_memory_order_seq_cst,
+                                        rte_memory_order_seq_cst)) {
                mlx5_hlist_destroy(hl);
-               hl = __atomic_load_n(phl, __ATOMIC_SEQ_CST);
+               hl = rte_atomic_load_explicit(phl, rte_memory_order_seq_cst);
        }
        return hl;
 }
@@ -6139,8 +6139,8 @@ struct mlx5_list_entry *
 static struct mlx5_indexed_pool *
 flow_dv_modify_ipool_get(struct mlx5_dev_ctx_shared *sh, uint8_t index)
 {
-       struct mlx5_indexed_pool *ipool = __atomic_load_n
-                                    (&sh->mdh_ipools[index], __ATOMIC_SEQ_CST);
+       struct mlx5_indexed_pool *ipool = rte_atomic_load_explicit
+                                    (&sh->mdh_ipools[index], rte_memory_order_seq_cst);
 
        if (!ipool) {
                struct mlx5_indexed_pool *expected = NULL;
@@ -6165,13 +6165,13 @@ struct mlx5_list_entry *
                ipool = mlx5_ipool_create(&cfg);
                if (!ipool)
                        return NULL;
-               if (!__atomic_compare_exchange_n(&sh->mdh_ipools[index],
-                                                &expected, ipool, false,
-                                                __ATOMIC_SEQ_CST,
-                                                __ATOMIC_SEQ_CST)) {
+               if (!rte_atomic_compare_exchange_strong_explicit(&sh->mdh_ipools[index],
+                                                &expected, ipool,
+                                                rte_memory_order_seq_cst,
+                                                rte_memory_order_seq_cst)) {
                        mlx5_ipool_destroy(ipool);
-                       ipool = __atomic_load_n(&sh->mdh_ipools[index],
-                                               __ATOMIC_SEQ_CST);
+                       ipool = rte_atomic_load_explicit(&sh->mdh_ipools[index],
+                                               rte_memory_order_seq_cst);
                }
        }
        return ipool;
@@ -6992,9 +6992,9 @@ struct mlx5_list_entry *
 
        age_info = GET_PORT_AGE_INFO(priv);
        age_param = flow_dv_counter_idx_get_age(dev, counter);
-       if (!__atomic_compare_exchange_n(&age_param->state, &expected,
-                                        AGE_FREE, false, __ATOMIC_RELAXED,
-                                        __ATOMIC_RELAXED)) {
+       if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+                                        AGE_FREE, rte_memory_order_relaxed,
+                                        rte_memory_order_relaxed)) {
                /**
                 * We need the lock even it is age timeout,
                 * since counter may still in process.
@@ -7002,7 +7002,7 @@ struct mlx5_list_entry *
                rte_spinlock_lock(&age_info->aged_sl);
                TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
                rte_spinlock_unlock(&age_info->aged_sl);
-               __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
        }
 }
 
@@ -7038,8 +7038,8 @@ struct mlx5_list_entry *
                 * indirect action API, shared info is 1 before the reduction,
                 * so this condition is failed and function doesn't return here.
                 */
-               if (__atomic_fetch_sub(&cnt->shared_info.refcnt, 1,
-                                      __ATOMIC_RELAXED) - 1)
+               if (rte_atomic_fetch_sub_explicit(&cnt->shared_info.refcnt, 1,
+                                      rte_memory_order_relaxed) - 1)
                        return;
        }
        cnt->pool = pool;
@@ -10203,8 +10203,8 @@ struct mlx5_list_entry *
                        geneve_opt_v->option_type &&
                        geneve_opt_resource->length ==
                        geneve_opt_v->option_len) {
-                       __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
-                                          __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&geneve_opt_resource->refcnt, 1,
+                                          rte_memory_order_relaxed);
                } else {
                        ret = rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -10243,8 +10243,8 @@ struct mlx5_list_entry *
                geneve_opt_resource->option_class = geneve_opt_v->option_class;
                geneve_opt_resource->option_type = geneve_opt_v->option_type;
                geneve_opt_resource->length = geneve_opt_v->option_len;
-               __atomic_store_n(&geneve_opt_resource->refcnt, 1,
-                               __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&geneve_opt_resource->refcnt, 1,
+                               rte_memory_order_relaxed);
        }
 exit:
        rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
@@ -12192,8 +12192,8 @@ struct mlx5_list_entry *
                (void *)(uintptr_t)(dev_flow->flow_idx);
        age_param->timeout = age->timeout;
        age_param->port_id = dev->data->port_id;
-       __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
-       __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&age_param->sec_since_last_hit, 0, rte_memory_order_relaxed);
+       rte_atomic_store_explicit(&age_param->state, AGE_CANDIDATE, rte_memory_order_relaxed);
        return counter;
 }
 
@@ -13241,9 +13241,9 @@ struct mlx5_list_entry *
        uint16_t expected = AGE_CANDIDATE;
 
        age_info = GET_PORT_AGE_INFO(priv);
-       if (!__atomic_compare_exchange_n(&age_param->state, &expected,
-                                        AGE_FREE, false, __ATOMIC_RELAXED,
-                                        __ATOMIC_RELAXED)) {
+       if (!rte_atomic_compare_exchange_strong_explicit(&age_param->state, &expected,
+                                        AGE_FREE, rte_memory_order_relaxed,
+                                        rte_memory_order_relaxed)) {
                /**
                 * We need the lock even it is age timeout,
                 * since age action may still in process.
@@ -13251,7 +13251,7 @@ struct mlx5_list_entry *
                rte_spinlock_lock(&age_info->aged_sl);
                LIST_REMOVE(age, next);
                rte_spinlock_unlock(&age_info->aged_sl);
-               __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&age_param->state, AGE_FREE, rte_memory_order_relaxed);
        }
 }
 
@@ -13275,7 +13275,7 @@ struct mlx5_list_entry *
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
        struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
-       uint32_t ret = __atomic_fetch_sub(&age->refcnt, 1, __ATOMIC_RELAXED) - 1;
+       uint32_t ret = rte_atomic_fetch_sub_explicit(&age->refcnt, 1, rte_memory_order_relaxed) - 1;
 
        if (!ret) {
                flow_dv_aso_age_remove_from_age(dev, age);
@@ -13451,7 +13451,7 @@ struct mlx5_list_entry *
                        return 0; /* 0 is an error. */
                }
        }
-       __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&age_free->refcnt, 1, rte_memory_order_relaxed);
        return pool->index | ((age_free->offset + 1) << 16);
 }
 
@@ -13481,10 +13481,10 @@ struct mlx5_list_entry *
        aso_age->age_params.context = context;
        aso_age->age_params.timeout = timeout;
        aso_age->age_params.port_id = dev->data->port_id;
-       __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
-                        __ATOMIC_RELAXED);
-       __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
-                        __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&aso_age->age_params.sec_since_last_hit, 0,
+                        rte_memory_order_relaxed);
+       rte_atomic_store_explicit(&aso_age->age_params.state, AGE_CANDIDATE,
+                        rte_memory_order_relaxed);
 }
 
 static void
@@ -13666,12 +13666,12 @@ struct mlx5_list_entry *
        uint32_t ret;
        struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
        enum mlx5_aso_ct_state state =
-                       __atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+                       rte_atomic_load_explicit(&ct->state, rte_memory_order_relaxed);
 
        /* Cannot release when CT is in the ASO SQ. */
        if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
                return -1;
-       ret = __atomic_fetch_sub(&ct->refcnt, 1, __ATOMIC_RELAXED) - 1;
+       ret = rte_atomic_fetch_sub_explicit(&ct->refcnt, 1, rte_memory_order_relaxed) - 1;
        if (!ret) {
                if (ct->dr_action_orig) {
 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -13861,7 +13861,7 @@ struct mlx5_list_entry *
        pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
        ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
        /* 0: inactive, 1: created, 2+: used by flows. */
-       __atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&ct->refcnt, 1, rte_memory_order_relaxed);
        reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
        if (!ct->dr_action_orig) {
 #ifdef HAVE_MLX5_DR_ACTION_ASO_CT
@@ -14813,8 +14813,8 @@ struct mlx5_list_entry *
                        age_act = flow_aso_age_get_by_idx(dev, owner_idx);
                        if (flow->age == 0) {
                                flow->age = owner_idx;
-                               __atomic_fetch_add(&age_act->refcnt, 1,
-                                                  __ATOMIC_RELAXED);
+                               rte_atomic_fetch_add_explicit(&age_act->refcnt, 1,
+                                                  rte_memory_order_relaxed);
                        }
                        age_act_pos = actions_n++;
                        action_flags |= MLX5_FLOW_ACTION_AGE;
@@ -14851,9 +14851,9 @@ struct mlx5_list_entry *
                        } else {
                                if (flow->counter == 0) {
                                        flow->counter = owner_idx;
-                                       __atomic_fetch_add
+                                       rte_atomic_fetch_add_explicit
                                                (&cnt_act->shared_info.refcnt,
-                                                1, __ATOMIC_RELAXED);
+                                                1, rte_memory_order_relaxed);
                                }
                                /* Save information first, will apply later. */
                                action_flags |= MLX5_FLOW_ACTION_COUNT;
@@ -15185,8 +15185,8 @@ struct mlx5_list_entry *
                                flow->indirect_type =
                                                MLX5_INDIRECT_ACTION_TYPE_CT;
                                flow->ct = owner_idx;
-                               __atomic_fetch_add(&ct->refcnt, 1,
-                                                  __ATOMIC_RELAXED);
+                               rte_atomic_fetch_add_explicit(&ct->refcnt, 1,
+                                                  rte_memory_order_relaxed);
                        }
                        actions_n++;
                        action_flags |= MLX5_FLOW_ACTION_CT;
@@ -15855,7 +15855,7 @@ struct mlx5_list_entry *
 
        shared_rss = mlx5_ipool_get
                        (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
-       __atomic_fetch_sub(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_sub_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
 }
 
 void
@@ -16038,8 +16038,8 @@ struct mlx5_list_entry *
                                sh->geneve_tlv_option_resource;
        rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
        if (geneve_opt_resource) {
-               if (!(__atomic_fetch_sub(&geneve_opt_resource->refcnt, 1,
-                                        __ATOMIC_RELAXED) - 1)) {
+               if (!(rte_atomic_fetch_sub_explicit(&geneve_opt_resource->refcnt, 1,
+                                        rte_memory_order_relaxed) - 1)) {
                        claim_zero(mlx5_devx_cmd_destroy
                                        (geneve_opt_resource->obj));
                        mlx5_free(sh->geneve_tlv_option_resource);
@@ -16448,7 +16448,7 @@ struct mlx5_list_entry *
        /* Update queue with indirect table queue memoyr. */
        origin->queue = shared_rss->ind_tbl->queues;
        rte_spinlock_init(&shared_rss->action_rss_sl);
-       __atomic_fetch_add(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&shared_rss->refcnt, 1, rte_memory_order_relaxed);
        rte_spinlock_lock(&priv->shared_act_sl);
        ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
                     &priv->rss_shared_actions, idx, shared_rss, next);
@@ -16494,9 +16494,9 @@ struct mlx5_list_entry *
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid shared action");
-       if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
-                                        0, 0, __ATOMIC_ACQUIRE,
-                                        __ATOMIC_RELAXED))
+       if (!rte_atomic_compare_exchange_strong_explicit(&shared_rss->refcnt, &old_refcnt,
+                                        0, rte_memory_order_acquire,
+                                        rte_memory_order_relaxed))
                return rte_flow_error_set(error, EBUSY,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL,
@@ -16632,10 +16632,10 @@ struct rte_flow_action_handle *
                return __flow_dv_action_rss_release(dev, idx, error);
        case MLX5_INDIRECT_ACTION_TYPE_COUNT:
                cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
-               if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
-                                                &no_flow_refcnt, 1, false,
-                                                __ATOMIC_ACQUIRE,
-                                                __ATOMIC_RELAXED))
+               if (!rte_atomic_compare_exchange_strong_explicit(&cnt->shared_info.refcnt,
+                                                &no_flow_refcnt, 1,
+                                                rte_memory_order_acquire,
+                                                rte_memory_order_relaxed))
                        return rte_flow_error_set(error, EBUSY,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
@@ -17595,13 +17595,13 @@ struct rte_flow_action_handle *
        case MLX5_INDIRECT_ACTION_TYPE_AGE:
                age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
                resp = data;
-               resp->aged = __atomic_load_n(&age_param->state,
-                                             __ATOMIC_RELAXED) == AGE_TMOUT ?
+               resp->aged = rte_atomic_load_explicit(&age_param->state,
+                                             rte_memory_order_relaxed) == AGE_TMOUT ?
                                                                          1 : 0;
                resp->sec_since_last_hit_valid = !resp->aged;
                if (resp->sec_since_last_hit_valid)
-                       resp->sec_since_last_hit = __atomic_load_n
-                            (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+                       resp->sec_since_last_hit = rte_atomic_load_explicit
+                            (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
                return 0;
        case MLX5_INDIRECT_ACTION_TYPE_COUNT:
                return flow_dv_query_count(dev, idx, data, error);
@@ -17678,12 +17678,12 @@ struct rte_flow_action_handle *
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "age data not available");
        }
-       resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
+       resp->aged = rte_atomic_load_explicit(&age_param->state, rte_memory_order_relaxed) ==
                                     AGE_TMOUT ? 1 : 0;
        resp->sec_since_last_hit_valid = !resp->aged;
        if (resp->sec_since_last_hit_valid)
-               resp->sec_since_last_hit = __atomic_load_n
-                            (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
+               resp->sec_since_last_hit = rte_atomic_load_explicit
+                            (&age_param->sec_since_last_hit, rte_memory_order_relaxed);
        return 0;
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 4ae03a2..8a02247 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -86,7 +86,7 @@
                        MLX5_ASSERT(!item->refcnt);
                        MLX5_ASSERT(!item->devx_fp);
                        item->devx_fp = NULL;
-                       __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+                       rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
                        priv->flex_item_map |= 1u << idx;
                }
        }
@@ -107,7 +107,7 @@
                MLX5_ASSERT(!item->refcnt);
                MLX5_ASSERT(!item->devx_fp);
                item->devx_fp = NULL;
-               __atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
+               rte_atomic_store_explicit(&item->refcnt, 0, rte_memory_order_release);
                priv->flex_item_map &= ~(1u << idx);
                rte_spinlock_unlock(&priv->flex_item_sl);
        }
@@ -379,7 +379,7 @@
                return ret;
        }
        if (acquire)
-               __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+               rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
        return ret;
 }
 
@@ -414,7 +414,7 @@
                rte_errno = -EINVAL;
                return -EINVAL;
        }
-       __atomic_fetch_sub(&flex->refcnt, 1, __ATOMIC_RELEASE);
+       rte_atomic_fetch_sub_explicit(&flex->refcnt, 1, rte_memory_order_release);
        return 0;
 }
 
@@ -1337,7 +1337,7 @@ struct rte_flow_item_flex_handle *
        }
        flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
        /* Mark initialized flex item valid. */
-       __atomic_fetch_add(&flex->refcnt, 1, __ATOMIC_RELEASE);
+       rte_atomic_fetch_add_explicit(&flex->refcnt, 1, rte_memory_order_release);
        return (struct rte_flow_item_flex_handle *)flex;
 
 error:
@@ -1378,8 +1378,8 @@ struct rte_flow_item_flex_handle *
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "invalid flex item handle value");
        }
-       if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
-                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
+       if (!rte_atomic_compare_exchange_strong_explicit(&flex->refcnt, &old_refcnt, 0,
+                                        rte_memory_order_acquire, rte_memory_order_relaxed)) {
                rte_spinlock_unlock(&priv->flex_item_sl);
                return rte_flow_error_set(error, EBUSY,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 35f1ed7..7f8d234 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -715,7 +715,8 @@ static int flow_hw_translate_group(struct rte_eth_dev *dev,
        }
 
        if (acts->mark)
-               if (!(__atomic_fetch_sub(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED) - 1))
+               if (!(rte_atomic_fetch_sub_explicit(&priv->hws_mark_refcnt, 1,
+                   rte_memory_order_relaxed) - 1))
                        flow_hw_rxq_flag_set(dev, false);
 
        if (acts->jump) {
@@ -2298,7 +2299,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
                                goto err;
                        acts->rule_acts[dr_pos].action =
                                priv->hw_tag[!!attr->group];
-                       __atomic_fetch_add(&priv->hws_mark_refcnt, 1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&priv->hws_mark_refcnt, 1,
+                           rte_memory_order_relaxed);
                        flow_hw_rxq_flag_set(dev, true);
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
@@ -4537,8 +4539,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
        uint8_t i;
 
        for (i = 0; i < nb_action_templates; i++) {
-               uint32_t refcnt = __atomic_add_fetch(&action_templates[i]->refcnt, 1,
-                                                    __ATOMIC_RELAXED);
+               uint32_t refcnt = rte_atomic_fetch_add_explicit(&action_templates[i]->refcnt, 1,
+                                                    rte_memory_order_relaxed) + 1;
 
                if (refcnt <= 1) {
                        rte_flow_error_set(error, EINVAL,
@@ -4576,8 +4578,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 at_error:
        while (i--) {
                __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
-               __atomic_sub_fetch(&action_templates[i]->refcnt,
-                                  1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+                                  1, rte_memory_order_relaxed);
        }
        return rte_errno;
 }
@@ -4748,8 +4750,8 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
                }
                if (item_templates[i]->item_flags & MLX5_FLOW_ITEM_COMPARE)
                        matcher_attr.mode = MLX5DR_MATCHER_RESOURCE_MODE_HTABLE;
-               ret = __atomic_fetch_add(&item_templates[i]->refcnt, 1,
-                                        __ATOMIC_RELAXED) + 1;
+               ret = rte_atomic_fetch_add_explicit(&item_templates[i]->refcnt, 1,
+                                        rte_memory_order_relaxed) + 1;
                if (ret <= 1) {
                        rte_errno = EINVAL;
                        goto it_error;
@@ -4800,14 +4802,14 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
 at_error:
        for (i = 0; i < nb_action_templates; i++) {
                __flow_hw_action_template_destroy(dev, &tbl->ats[i].acts);
-               __atomic_fetch_sub(&action_templates[i]->refcnt,
-                                  1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(&action_templates[i]->refcnt,
+                                  1, rte_memory_order_relaxed);
        }
        i = nb_item_templates;
 it_error:
        while (i--)
-               __atomic_fetch_sub(&item_templates[i]->refcnt,
-                                  1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(&item_templates[i]->refcnt,
+                                  1, rte_memory_order_relaxed);
 error:
        err = rte_errno;
        if (tbl) {
@@ -5039,12 +5041,12 @@ static rte_be32_t vlan_hdr_to_be32(const struct rte_flow_action *actions)
        }
        LIST_REMOVE(table, next);
        for (i = 0; i < table->nb_item_templates; i++)
-               __atomic_fetch_sub(&table->its[i]->refcnt,
-                                  1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(&table->its[i]->refcnt,
+                                  1, rte_memory_order_relaxed);
        for (i = 0; i < table->nb_action_templates; i++) {
                __flow_hw_action_template_destroy(dev, &table->ats[i].acts);
-               __atomic_fetch_sub(&table->ats[i].action_template->refcnt,
-                                  1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_sub_explicit(&table->ats[i].action_template->refcnt,
+                                  1, rte_memory_order_relaxed);
        }
        flow_hw_destroy_table_multi_pattern_ctx(table);
        if (table->matcher_info[0].matcher)
@@ -7287,7 +7289,7 @@ enum mlx5_hw_indirect_list_relative_position {
        if (!at->tmpl)
                goto error;
        at->action_flags = action_flags;
-       __atomic_fetch_add(&at->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&at->refcnt, 1, rte_memory_order_relaxed);
        LIST_INSERT_HEAD(&priv->flow_hw_at, at, next);
        return at;
 error:
@@ -7323,7 +7325,7 @@ enum mlx5_hw_indirect_list_relative_position {
        uint64_t flag = MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE |
                        MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH;
 
-       if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+       if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
                DRV_LOG(WARNING, "Action template %p is still in use.",
                        (void *)template);
                return rte_flow_error_set(error, EBUSY,
@@ -7897,7 +7899,7 @@ enum mlx5_hw_indirect_list_relative_position {
                        break;
                }
        }
-       __atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
        rte_errno = pattern_template_validate(dev, &it, 1);
        if (rte_errno)
                goto error;
@@ -7933,7 +7935,7 @@ enum mlx5_hw_indirect_list_relative_position {
 {
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (__atomic_load_n(&template->refcnt, __ATOMIC_RELAXED) > 1) {
+       if (rte_atomic_load_explicit(&template->refcnt, rte_memory_order_relaxed) > 1) {
                DRV_LOG(WARNING, "Item template %p is still in use.",
                        (void *)template);
                return rte_flow_error_set(error, EBUSY,
@@ -10513,7 +10515,8 @@ struct mlx5_list_entry *
                }
                dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
                priv->shared_host = host_dev;
-               __atomic_fetch_add(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&host_priv->shared_refcnt, 1,
+                   rte_memory_order_relaxed);
        }
        dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
        /* rte_errno has been updated by HWS layer. */
@@ -10698,7 +10701,8 @@ struct mlx5_list_entry *
        if (priv->shared_host) {
                struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
 
-               __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+                   rte_memory_order_relaxed);
                priv->shared_host = NULL;
        }
        if (priv->hw_q) {
@@ -10814,7 +10818,8 @@ struct mlx5_list_entry *
        priv->hw_q = NULL;
        if (priv->shared_host) {
                struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
-               __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(&host_priv->shared_refcnt, 1,
+                   rte_memory_order_relaxed);
                priv->shared_host = NULL;
        }
        mlx5_free(priv->hw_attr);
@@ -10872,8 +10877,8 @@ struct mlx5_list_entry *
                                NULL,
                                "Invalid CT destruction index");
        }
-       __atomic_store_n(&ct->state, ASO_CONNTRACK_FREE,
-                                __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&ct->state, ASO_CONNTRACK_FREE,
+                                rte_memory_order_relaxed);
        mlx5_ipool_free(pool->cts, idx);
        return 0;
 }
@@ -11575,7 +11580,7 @@ struct mlx5_hw_q_job *
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "age data not available");
-       switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+       switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
        case HWS_AGE_AGED_OUT_REPORTED:
        case HWS_AGE_AGED_OUT_NOT_REPORTED:
                resp->aged = 1;
@@ -11595,8 +11600,8 @@ struct mlx5_hw_q_job *
        }
        resp->sec_since_last_hit_valid = !resp->aged;
        if (resp->sec_since_last_hit_valid)
-               resp->sec_since_last_hit = __atomic_load_n
-                                (&param->sec_since_last_hit, __ATOMIC_RELAXED);
+               resp->sec_since_last_hit = rte_atomic_load_explicit
+                                (&param->sec_since_last_hit, rte_memory_order_relaxed);
        return 0;
 }
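
The conversions in this file follow a fixed pattern: __atomic_fetch_add()/__atomic_fetch_sub() map one-to-one onto rte_atomic_fetch_add_explicit()/rte_atomic_fetch_sub_explicit(), while the single __atomic_add_fetch() call is rewritten as a fetch-add plus one, because the fetch form returns the value held before the update. A minimal sketch of the same pattern outside the driver (names are illustrative only, not taken from mlx5):

#include <stdint.h>
#include <rte_stdatomic.h>

struct obj {
	RTE_ATOMIC(uint32_t) refcnt;
};

/* Take a reference; fetch_add returns the old value, so the new count is
 * the returned value plus one (this is the __atomic_add_fetch replacement).
 */
static inline uint32_t
obj_ref(struct obj *o)
{
	return rte_atomic_fetch_add_explicit(&o->refcnt, 1,
					     rte_memory_order_relaxed) + 1;
}

/* Drop a reference and return the remaining count. */
static inline uint32_t
obj_unref(struct obj *o)
{
	return rte_atomic_fetch_sub_explicit(&o->refcnt, 1,
					     rte_memory_order_relaxed) - 1;
}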
 
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index 4045c4c..f8eff60 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -2055,9 +2055,9 @@ struct mlx5_flow_meter_policy *
                        NULL, "Meter profile id not valid.");
        /* Meter policy must exist. */
        if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
-               __atomic_fetch_add
+               rte_atomic_fetch_add_explicit
                        (&priv->sh->mtrmng->def_policy_ref_cnt,
-                       1, __ATOMIC_RELAXED);
+                       1, rte_memory_order_relaxed);
                domain_bitmap = MLX5_MTR_ALL_DOMAIN_BIT;
                if (!priv->sh->config.dv_esw_en)
                        domain_bitmap &= ~MLX5_MTR_DOMAIN_TRANSFER_BIT;
@@ -2137,7 +2137,7 @@ struct mlx5_flow_meter_policy *
        fm->is_enable = params->meter_enable;
        fm->shared = !!shared;
        fm->color_aware = !!params->use_prev_mtr_color;
-       __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
        if (params->meter_policy_id == priv->sh->mtrmng->def_policy_id) {
                fm->def_policy = 1;
                fm->flow_ipool = mlx5_ipool_create(&flow_ipool_cfg);
@@ -2166,7 +2166,7 @@ struct mlx5_flow_meter_policy *
        }
        fm->active_state = params->meter_enable;
        if (mtr_policy)
-               __atomic_fetch_add(&mtr_policy->ref_cnt, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&mtr_policy->ref_cnt, 1, rte_memory_order_relaxed);
        return 0;
 error:
        mlx5_flow_destroy_mtr_tbls(dev, fm);
@@ -2271,8 +2271,8 @@ struct mlx5_flow_meter_policy *
                                          NULL, "Failed to create devx meter.");
        }
        fm->active_state = params->meter_enable;
-       __atomic_fetch_add(&fm->profile->ref_cnt, 1, __ATOMIC_RELAXED);
-       __atomic_fetch_add(&policy->ref_cnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&fm->profile->ref_cnt, 1, rte_memory_order_relaxed);
+       rte_atomic_fetch_add_explicit(&policy->ref_cnt, 1, rte_memory_order_relaxed);
        return 0;
 }
 #endif
@@ -2295,7 +2295,7 @@ struct mlx5_flow_meter_policy *
        if (fmp == NULL)
                return -1;
        /* Update dependencies. */
-       __atomic_fetch_sub(&fmp->ref_cnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_sub_explicit(&fmp->ref_cnt, 1, rte_memory_order_relaxed);
        fm->profile = NULL;
        /* Remove from list. */
        if (!priv->sh->meter_aso_en) {
@@ -2313,15 +2313,15 @@ struct mlx5_flow_meter_policy *
        }
        mlx5_flow_destroy_mtr_tbls(dev, fm);
        if (fm->def_policy)
-               __atomic_fetch_sub(&priv->sh->mtrmng->def_policy_ref_cnt,
-                               1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(&priv->sh->mtrmng->def_policy_ref_cnt,
+                               1, rte_memory_order_relaxed);
        if (priv->sh->meter_aso_en) {
                if (!fm->def_policy) {
                        mtr_policy = mlx5_flow_meter_policy_find(dev,
                                                fm->policy_id, NULL);
                        if (mtr_policy)
-                               __atomic_fetch_sub(&mtr_policy->ref_cnt,
-                                               1, __ATOMIC_RELAXED);
+                               rte_atomic_fetch_sub_explicit(&mtr_policy->ref_cnt,
+                                               1, rte_memory_order_relaxed);
                        fm->policy_id = 0;
                }
                fm->def_policy = 0;
@@ -2424,13 +2424,13 @@ struct mlx5_flow_meter_policy *
                                          RTE_MTR_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "Meter object is being used.");
        /* Destroy the meter profile. */
-       __atomic_fetch_sub(&fm->profile->ref_cnt,
-                                               1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_sub_explicit(&fm->profile->ref_cnt,
+                                               1, rte_memory_order_relaxed);
        /* Destroy the meter policy. */
        policy = mlx5_flow_meter_policy_find(dev,
                        fm->policy_id, NULL);
-       __atomic_fetch_sub(&policy->ref_cnt,
-                                               1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_sub_explicit(&policy->ref_cnt,
+                                               1, rte_memory_order_relaxed);
        memset(fm, 0, sizeof(struct mlx5_flow_meter_info));
        return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_flow_quota.c b/drivers/net/mlx5/mlx5_flow_quota.c
index 14a2a8b..6ad0e8a 100644
--- a/drivers/net/mlx5/mlx5_flow_quota.c
+++ b/drivers/net/mlx5/mlx5_flow_quota.c
@@ -218,9 +218,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
                struct mlx5_quota *quota_obj =
                        sq->elts[(sq->tail + i) & mask].quota_obj;
 
-               __atomic_compare_exchange_n(&quota_obj->state, &state,
-                                           MLX5_QUOTA_STATE_READY, false,
-                                           __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+               rte_atomic_compare_exchange_strong_explicit(&quota_obj->state, &state,
+                                           MLX5_QUOTA_STATE_READY,
+                                           rte_memory_order_relaxed, rte_memory_order_relaxed);
        }
 }
 
@@ -278,7 +278,7 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
                rte_spinlock_lock(&sq->sqsl);
                mlx5_quota_cmd_completion_handle(sq);
                rte_spinlock_unlock(&sq->sqsl);
-               if (__atomic_load_n(&quota_obj->state, __ATOMIC_RELAXED) ==
+               if (rte_atomic_load_explicit(&quota_obj->state, rte_memory_order_relaxed) ==
                    MLX5_QUOTA_STATE_READY)
                        return 0;
        } while (poll_cqe_times -= MLX5_ASO_WQE_CQE_RESPONSE_DELAY);
@@ -470,9 +470,9 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
 mlx5_quota_check_ready(struct mlx5_quota *qobj, struct rte_flow_error *error)
 {
        uint8_t state = MLX5_QUOTA_STATE_READY;
-       bool verdict = __atomic_compare_exchange_n
-               (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
-                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+       bool verdict = rte_atomic_compare_exchange_strong_explicit
+               (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+                rte_memory_order_relaxed, rte_memory_order_relaxed);
 
        if (!verdict)
                return rte_flow_error_set(error, EBUSY,
@@ -507,8 +507,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
        ret = mlx5_quota_cmd_wqe(dev, qobj, mlx5_quota_wqe_query, qix, work_queue,
                                 async_job ? async_job : &sync_job, push, NULL);
        if (ret) {
-               __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
-                                __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+                                rte_memory_order_relaxed);
                return rte_flow_error_set(error, EAGAIN,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
        }
@@ -557,8 +557,8 @@ typedef void (*quota_wqe_cmd_t)(volatile struct mlx5_aso_wqe *restrict,
                                 async_job ? async_job : &sync_job, push,
                                 (void *)(uintptr_t)update->conf);
        if (ret) {
-               __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_READY,
-                                __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_READY,
+                                rte_memory_order_relaxed);
                return rte_flow_error_set(error, EAGAIN,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL, "try again");
        }
@@ -593,9 +593,9 @@ struct rte_flow_action_handle *
                                   NULL, "quota: failed to allocate quota 
object");
                return NULL;
        }
-       verdict = __atomic_compare_exchange_n
-               (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT, false,
-                __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+       verdict = rte_atomic_compare_exchange_strong_explicit
+               (&qobj->state, &state, MLX5_QUOTA_STATE_WAIT,
+                rte_memory_order_relaxed, rte_memory_order_relaxed);
        if (!verdict) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   NULL, "quota: new quota object has invalid 
state");
@@ -616,8 +616,8 @@ struct rte_flow_action_handle *
                                 (void *)(uintptr_t)conf);
        if (ret) {
                mlx5_ipool_free(qctx->quota_ipool, id);
-               __atomic_store_n(&qobj->state, MLX5_QUOTA_STATE_FREE,
-                                __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&qobj->state, MLX5_QUOTA_STATE_FREE,
+                                rte_memory_order_relaxed);
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
                                   NULL, "quota: WR failure");
                return 0;
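
The quota state machine relies on compare-and-swap. With the gcc builtin the fourth argument selects the weak variant; with the rte stdatomic API that choice is encoded in the function name, so __atomic_compare_exchange_n(..., false, ...) becomes the _strong_ function as above. A minimal sketch of the same claim-a-state pattern, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <rte_stdatomic.h>

/* Atomically move *state from "from" to "to". Returns false, with the
 * observed value left in "expected", if another thread changed it first.
 */
static bool
state_claim(RTE_ATOMIC(uint8_t) *state, uint8_t from, uint8_t to)
{
	uint8_t expected = from;

	return rte_atomic_compare_exchange_strong_explicit(state, &expected, to,
			rte_memory_order_relaxed, rte_memory_order_relaxed);
}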
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index c31f2f3..1b625e0 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -149,7 +149,7 @@
                }
                if (param->timeout == 0)
                        continue;
-               switch (__atomic_load_n(&param->state, __ATOMIC_RELAXED)) {
+               switch (rte_atomic_load_explicit(&param->state, rte_memory_order_relaxed)) {
                case HWS_AGE_AGED_OUT_NOT_REPORTED:
                case HWS_AGE_AGED_OUT_REPORTED:
                        /* Already aged-out, no action is needed. */
@@ -171,8 +171,8 @@
                hits = rte_be_to_cpu_64(stats[i].hits);
                if (param->nb_cnts == 1) {
                        if (hits != param->accumulator_last_hits) {
-                               __atomic_store_n(&param->sec_since_last_hit, 0,
-                                                __ATOMIC_RELAXED);
+                               rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+                                                rte_memory_order_relaxed);
                                param->accumulator_last_hits = hits;
                                continue;
                        }
@@ -184,8 +184,8 @@
                        param->accumulator_cnt = 0;
                        if (param->accumulator_last_hits !=
                                                param->accumulator_hits) {
-                               __atomic_store_n(&param->sec_since_last_hit,
-                                                0, __ATOMIC_RELAXED);
+                               rte_atomic_store_explicit(&param->sec_since_last_hit,
+                                                0, rte_memory_order_relaxed);
                                param->accumulator_last_hits =
                                                        param->accumulator_hits;
                                param->accumulator_hits = 0;
@@ -193,9 +193,9 @@
                        }
                        param->accumulator_hits = 0;
                }
-               if (__atomic_fetch_add(&param->sec_since_last_hit, time_delta,
-                                      __ATOMIC_RELAXED) + time_delta <=
-                  __atomic_load_n(&param->timeout, __ATOMIC_RELAXED))
+               if (rte_atomic_fetch_add_explicit(&param->sec_since_last_hit, time_delta,
+                                      rte_memory_order_relaxed) + time_delta <=
+                  rte_atomic_load_explicit(&param->timeout, rte_memory_order_relaxed))
                        continue;
                /* Prepare the relevant ring for this AGE parameter */
                if (priv->hws_strict_queue)
@@ -203,10 +203,10 @@
                else
                        r = age_info->hw_age.aged_list;
                /* Changing the state atomically and insert it into the ring. */
-               if (__atomic_compare_exchange_n(&param->state, &expected1,
+               if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected1,
                                                HWS_AGE_AGED_OUT_NOT_REPORTED,
-                                               false, __ATOMIC_RELAXED,
-                                               __ATOMIC_RELAXED)) {
+                                               rte_memory_order_relaxed,
+                                               rte_memory_order_relaxed)) {
                        int ret = rte_ring_enqueue_burst_elem(r, &age_idx,
                                                              sizeof(uint32_t),
                                                              1, NULL);
@@ -221,11 +221,10 @@
                         */
                        expected2 = HWS_AGE_AGED_OUT_NOT_REPORTED;
                        if (ret == 0 &&
-                           !__atomic_compare_exchange_n(&param->state,
+                           !rte_atomic_compare_exchange_strong_explicit(&param->state,
                                                         &expected2, expected1,
-                                                        false,
-                                                        __ATOMIC_RELAXED,
-                                                        __ATOMIC_RELAXED) &&
+                                                        rte_memory_order_relaxed,
+                                                        rte_memory_order_relaxed) &&
                            expected2 == HWS_AGE_FREE)
                                mlx5_hws_age_param_free(priv,
                                                        param->own_cnt_index,
@@ -235,10 +234,10 @@
                        if (!priv->hws_strict_queue)
                                MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
                } else {
-                       __atomic_compare_exchange_n(&param->state, &expected2,
+                       rte_atomic_compare_exchange_strong_explicit(&param->state, &expected2,
                                                  HWS_AGE_AGED_OUT_NOT_REPORTED,
-                                                 false, __ATOMIC_RELAXED,
-                                                 __ATOMIC_RELAXED);
+                                                 rte_memory_order_relaxed,
+                                                 rte_memory_order_relaxed);
                }
        }
        /* The event is irrelevant in strict queue mode. */
@@ -796,8 +795,8 @@ struct mlx5_hws_cnt_pool *
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "invalid AGE parameter index");
-       switch (__atomic_exchange_n(&param->state, HWS_AGE_FREE,
-                                   __ATOMIC_RELAXED)) {
+       switch (rte_atomic_exchange_explicit(&param->state, HWS_AGE_FREE,
+                                   rte_memory_order_relaxed)) {
        case HWS_AGE_CANDIDATE:
        case HWS_AGE_AGED_OUT_REPORTED:
                mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
@@ -862,8 +861,8 @@ struct mlx5_hws_cnt_pool *
                                   "cannot allocate AGE parameter");
                return 0;
        }
-       MLX5_ASSERT(__atomic_load_n(&param->state,
-                                   __ATOMIC_RELAXED) == HWS_AGE_FREE);
+       MLX5_ASSERT(rte_atomic_load_explicit(&param->state,
+                                   rte_memory_order_relaxed) == HWS_AGE_FREE);
        if (shared) {
                param->nb_cnts = 0;
                param->accumulator_hits = 0;
@@ -914,9 +913,9 @@ struct mlx5_hws_cnt_pool *
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "invalid AGE parameter index");
        if (update_ade->timeout_valid) {
-               uint32_t old_timeout = __atomic_exchange_n(&param->timeout,
+               uint32_t old_timeout = rte_atomic_exchange_explicit(&param->timeout,
                                                           update_ade->timeout,
-                                                          __ATOMIC_RELAXED);
+                                                          rte_memory_order_relaxed);
 
                if (old_timeout == 0)
                        sec_since_last_hit_reset = true;
@@ -935,8 +934,8 @@ struct mlx5_hws_cnt_pool *
                state_update = true;
        }
        if (sec_since_last_hit_reset)
-               __atomic_store_n(&param->sec_since_last_hit, 0,
-                                __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&param->sec_since_last_hit, 0,
+                                rte_memory_order_relaxed);
        if (state_update) {
                uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
 
@@ -945,13 +944,13 @@ struct mlx5_hws_cnt_pool *
                 *  - AGED_OUT_NOT_REPORTED -> CANDIDATE_INSIDE_RING
                 *  - AGED_OUT_REPORTED -> CANDIDATE
                 */
-               if (!__atomic_compare_exchange_n(&param->state, &expected,
+               if (!rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
                                                 HWS_AGE_CANDIDATE_INSIDE_RING,
-                                                false, __ATOMIC_RELAXED,
-                                                __ATOMIC_RELAXED) &&
+                                                rte_memory_order_relaxed,
+                                                rte_memory_order_relaxed) &&
                    expected == HWS_AGE_AGED_OUT_REPORTED)
-                       __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
-                                        __ATOMIC_RELAXED);
+                       rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+                                        rte_memory_order_relaxed);
        }
        return 0;
 }
@@ -976,9 +975,9 @@ struct mlx5_hws_cnt_pool *
        uint16_t expected = HWS_AGE_AGED_OUT_NOT_REPORTED;
 
        MLX5_ASSERT(param != NULL);
-       if (__atomic_compare_exchange_n(&param->state, &expected,
-                                       HWS_AGE_AGED_OUT_REPORTED, false,
-                                       __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+       if (rte_atomic_compare_exchange_strong_explicit(&param->state, &expected,
+                                       HWS_AGE_AGED_OUT_REPORTED,
+                                       rte_memory_order_relaxed, rte_memory_order_relaxed))
                return param->context;
        switch (expected) {
        case HWS_AGE_FREE:
@@ -990,8 +989,8 @@ struct mlx5_hws_cnt_pool *
                mlx5_hws_age_param_free(priv, param->own_cnt_index, ipool, idx);
                break;
        case HWS_AGE_CANDIDATE_INSIDE_RING:
-               __atomic_store_n(&param->state, HWS_AGE_CANDIDATE,
-                                __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&param->state, HWS_AGE_CANDIDATE,
+                                rte_memory_order_relaxed);
                break;
        case HWS_AGE_CANDIDATE:
                /*
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.h b/drivers/net/mlx5/mlx5_hws_cnt.h
index e005960..481442f 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.h
+++ b/drivers/net/mlx5/mlx5_hws_cnt.h
@@ -101,7 +101,7 @@ struct mlx5_hws_cnt_pool {
        LIST_ENTRY(mlx5_hws_cnt_pool) next;
        struct mlx5_hws_cnt_pool_cfg cfg __rte_cache_aligned;
        struct mlx5_hws_cnt_dcs_mng dcs_mng __rte_cache_aligned;
-       uint32_t query_gen __rte_cache_aligned;
+       RTE_ATOMIC(uint32_t) query_gen __rte_cache_aligned;
        struct mlx5_hws_cnt *pool;
        struct mlx5_hws_cnt_raw_data_mng *raw_mng;
        struct rte_ring *reuse_list;
@@ -134,10 +134,10 @@ enum {
 
 /* HWS counter age parameter. */
 struct mlx5_hws_age_param {
-       uint32_t timeout; /* Aging timeout in seconds (atomically accessed). */
-       uint32_t sec_since_last_hit;
+       RTE_ATOMIC(uint32_t) timeout; /* Aging timeout in seconds (atomically accessed). */
+       RTE_ATOMIC(uint32_t) sec_since_last_hit;
        /* Time in seconds since last hit (atomically accessed). */
-       uint16_t state; /* AGE state (atomically accessed). */
+       RTE_ATOMIC(uint16_t) state; /* AGE state (atomically accessed). */
        uint64_t accumulator_last_hits;
        /* Last total value of hits for comparing. */
        uint64_t accumulator_hits;
@@ -426,7 +426,7 @@ struct mlx5_hws_age_param {
        iidx = mlx5_hws_cnt_iidx(hpool, *cnt_id);
        hpool->pool[iidx].in_used = false;
        hpool->pool[iidx].query_gen_when_free =
-               __atomic_load_n(&hpool->query_gen, __ATOMIC_RELAXED);
+               rte_atomic_load_explicit(&hpool->query_gen, rte_memory_order_relaxed);
        if (likely(queue != NULL) && cpool->cfg.host_cpool == NULL)
                qcache = hpool->cache->qcache[*queue];
        if (unlikely(qcache == NULL)) {
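
The header side is the other half of the conversion: every field accessed through rte_atomic_*_explicit() is declared with RTE_ATOMIC(), so the code builds both with the default toolchain atomics and when DPDK enables standard C11 atomics (where the macro expands to _Atomic(type)). A minimal sketch with illustrative names:

#include <stdint.h>
#include <rte_stdatomic.h>

struct age_param_sketch {
	RTE_ATOMIC(uint32_t) timeout;            /* written by the control path */
	RTE_ATOMIC(uint32_t) sec_since_last_hit; /* updated by the aging poll */
	RTE_ATOMIC(uint16_t) state;              /* small state machine */
	uint64_t accumulator_hits;               /* owned by a single thread */
};

/* Reset the idle time when a hit is observed. */
static inline void
age_param_hit(struct age_param_sketch *p)
{
	rte_atomic_store_explicit(&p->sec_since_last_hit, 0,
				  rte_memory_order_relaxed);
}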
diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h
index 2fce908..c627113 100644
--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h
@@ -173,7 +173,7 @@ struct mlx5_rxq_ctrl {
 /* RX queue private data. */
 struct mlx5_rxq_priv {
        uint16_t idx; /* Queue index. */
-       uint32_t refcnt; /* Reference counter. */
+       RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
        struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
        LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
        struct mlx5_priv *priv; /* Back pointer to private data. */
@@ -188,7 +188,7 @@ struct mlx5_rxq_priv {
 /* External RX queue descriptor. */
 struct mlx5_external_rxq {
        uint32_t hw_id; /* Queue index in the Hardware. */
-       uint32_t refcnt; /* Reference counter. */
+       RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
 };
 
 /* mlx5_rxq.c */
@@ -412,7 +412,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
        struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_idx];
        void *addr;
 
-       if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) > 1) {
+       if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) > 1) {
                MLX5_ASSERT(rep != NULL);
                /* Replace MPRQ buf. */
                (*rxq->mprq_bufs)[rq_idx] = rep;
@@ -524,9 +524,9 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
                void *buf_addr;
 
                /* Increment the refcnt of the whole chunk. */
-               __atomic_fetch_add(&buf->refcnt, 1, __ATOMIC_RELAXED);
-               MLX5_ASSERT(__atomic_load_n(&buf->refcnt,
-                           __ATOMIC_RELAXED) <= strd_n + 1);
+               rte_atomic_fetch_add_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
+               MLX5_ASSERT(rte_atomic_load_explicit(&buf->refcnt,
+                           rte_memory_order_relaxed) <= strd_n + 1);
                buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
                /*
                 * MLX5 device doesn't use iova but it is necessary in a
@@ -666,7 +666,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
        if (!priv->ext_rxqs || queue_idx < RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN)
                return false;
        rxq = &priv->ext_rxqs[queue_idx - RTE_PMD_MLX5_EXTERNAL_RX_QUEUE_ID_MIN];
-       return !!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED);
+       return !!rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed);
 }
 
 #define LWM_COOKIE_RXQID_OFFSET 0
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dd51687..f67aaa6 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -416,7 +416,7 @@
                rte_errno = EINVAL;
                return -rte_errno;
        }
-       return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
+       return (rte_atomic_load_explicit(&rxq->refcnt, rte_memory_order_relaxed) == 1);
 }
 
 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
@@ -1319,7 +1319,7 @@
 
        memset(_m, 0, sizeof(*buf));
        buf->mp = mp;
-       __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
        for (j = 0; j != strd_n; ++j) {
                shinfo = &buf->shinfos[j];
                shinfo->free_cb = mlx5_mprq_buf_free_cb;
@@ -2037,7 +2037,7 @@ struct mlx5_rxq_priv *
        struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
 
        if (rxq != NULL)
-               __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
        return rxq;
 }
 
@@ -2059,7 +2059,7 @@ struct mlx5_rxq_priv *
 
        if (rxq == NULL)
                return 0;
-       return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+       return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
 }
 
 /**
@@ -2138,7 +2138,7 @@ struct mlx5_external_rxq *
 {
        struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
 
-       __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed);
        return rxq;
 }
 
@@ -2158,7 +2158,7 @@ struct mlx5_external_rxq *
 {
        struct mlx5_external_rxq *rxq = mlx5_ext_rxq_get(dev, idx);
 
-       return __atomic_fetch_sub(&rxq->refcnt, 1, __ATOMIC_RELAXED) - 1;
+       return rte_atomic_fetch_sub_explicit(&rxq->refcnt, 1, rte_memory_order_relaxed) - 1;
 }
 
 /**
@@ -2447,8 +2447,8 @@ struct mlx5_ind_table_obj *
                    (memcmp(ind_tbl->queues, queues,
                            ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
                     == 0)) {
-                       __atomic_fetch_add(&ind_tbl->refcnt, 1,
-                                          __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1,
+                                          rte_memory_order_relaxed);
                        break;
                }
        }
@@ -2479,7 +2479,7 @@ struct mlx5_ind_table_obj *
        unsigned int ret;
 
        rte_rwlock_write_lock(&priv->ind_tbls_lock);
-       ret = __atomic_fetch_sub(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) - 1;
+       ret = rte_atomic_fetch_sub_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed) - 1;
        if (!ret)
                LIST_REMOVE(ind_tbl, next);
        rte_rwlock_write_unlock(&priv->ind_tbls_lock);
@@ -2561,7 +2561,7 @@ struct mlx5_ind_table_obj *
                }
                return ret;
        }
-       __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&ind_tbl->refcnt, 1, rte_memory_order_relaxed);
        return 0;
 }
 
@@ -2626,7 +2626,7 @@ struct mlx5_ind_table_obj *
 {
        uint32_t refcnt;
 
-       refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
+       refcnt = rte_atomic_load_explicit(&ind_tbl->refcnt, rte_memory_order_relaxed);
        if (refcnt <= 1)
                return 0;
        /*
@@ -3258,8 +3258,8 @@ struct mlx5_hrxq *
        ext_rxq = mlx5_external_rx_queue_get_validate(port_id, dpdk_idx);
        if (ext_rxq == NULL)
                return -rte_errno;
-       if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &unmapped, 1, false,
-                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+       if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &unmapped, 1,
+                                        rte_memory_order_relaxed, rte_memory_order_relaxed)) {
                if (ext_rxq->hw_id != hw_idx) {
                        DRV_LOG(ERR, "Port %u external RxQ index %u "
                                "is already mapped to HW index (requesting is "
@@ -3296,8 +3296,8 @@ struct mlx5_hrxq *
                rte_errno = EINVAL;
                return -rte_errno;
        }
-       if (!__atomic_compare_exchange_n(&ext_rxq->refcnt, &mapped, 0, false,
-                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+       if (!rte_atomic_compare_exchange_strong_explicit(&ext_rxq->refcnt, &mapped, 0,
+                                        rte_memory_order_relaxed, rte_memory_order_relaxed)) {
                DRV_LOG(ERR, "Port %u external RxQ index %u doesn't exist.",
                        port_id, dpdk_idx);
                rte_errno = EINVAL;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f8d6728..c241a1d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1441,7 +1441,7 @@
        rte_delay_us_sleep(1000 * priv->rxqs_n);
        DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
        if (priv->sh->config.dv_flow_en == 2) {
-               if (!__atomic_load_n(&priv->hws_mark_refcnt, __ATOMIC_RELAXED))
+               if (!rte_atomic_load_explicit(&priv->hws_mark_refcnt, rte_memory_order_relaxed))
                        flow_hw_rxq_flag_set(dev, false);
        } else {
                mlx5_flow_stop_default(dev);
diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h
index b1e8ea1..0e44df5 100644
--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h
@@ -179,7 +179,7 @@ struct mlx5_txq_data {
 __extension__
 struct mlx5_txq_ctrl {
        LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
-       uint32_t refcnt; /* Reference counter. */
+       RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */
        unsigned int socket; /* CPU socket ID for allocations. */
        bool is_hairpin; /* Whether TxQ type is Hairpin. */
        unsigned int max_inline_data; /* Max inline data. */
@@ -339,8 +339,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                 * the service thread, data should be re-read.
                 */
                rte_compiler_barrier();
-               ci = __atomic_load_n(&sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
-               ts = __atomic_load_n(&sh->txpp.ts.ts, __ATOMIC_RELAXED);
+               ci = rte_atomic_load_explicit(&sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
+               ts = rte_atomic_load_explicit(&sh->txpp.ts.ts, rte_memory_order_relaxed);
                rte_compiler_barrier();
                if (!((ts ^ ci) << (64 - MLX5_CQ_INDEX_WIDTH)))
                        break;
@@ -350,8 +350,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
        mts -= ts;
        if (unlikely(mts >= UINT64_MAX / 2)) {
                /* We have negative integer, mts is in the past. */
-               __atomic_fetch_add(&sh->txpp.err_ts_past,
-                                  1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_past,
+                                  1, rte_memory_order_relaxed);
                return -1;
        }
        tick = sh->txpp.tick;
@@ -360,8 +360,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
        mts = (mts + tick - 1) / tick;
        if (unlikely(mts >= (1 << MLX5_CQ_INDEX_WIDTH) / 2 - 1)) {
                /* We have mts is too distant future. */
-               __atomic_fetch_add(&sh->txpp.err_ts_future,
-                                  1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&sh->txpp.err_ts_future,
+                                  1, rte_memory_order_relaxed);
                return -1;
        }
        mts <<= 64 - MLX5_CQ_INDEX_WIDTH;
@@ -1743,8 +1743,8 @@ int mlx5_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                /* Convert the timestamp into completion to wait. */
                ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
                if (txq->ts_last && ts < txq->ts_last)
-                       __atomic_fetch_add(&txq->sh->txpp.err_ts_order,
-                                          1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&txq->sh->txpp.err_ts_order,
+                                          1, rte_memory_order_relaxed);
                txq->ts_last = ts;
                wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
                sh = txq->sh;
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 5a5df2d..4e26fa2 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -538,12 +538,12 @@
                uint64_t *ps;
 
                rte_compiler_barrier();
-               tm = __atomic_load_n(cqe + 0, __ATOMIC_RELAXED);
-               op = __atomic_load_n(cqe + 1, __ATOMIC_RELAXED);
+               tm = rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed);
+               op = rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed);
                rte_compiler_barrier();
-               if (tm != __atomic_load_n(cqe + 0, __ATOMIC_RELAXED))
+               if (tm != rte_atomic_load_explicit(cqe + 0, rte_memory_order_relaxed))
                        continue;
-               if (op != __atomic_load_n(cqe + 1, __ATOMIC_RELAXED))
+               if (op != rte_atomic_load_explicit(cqe + 1, rte_memory_order_relaxed))
                        continue;
                ps = (uint64_t *)ts;
                ps[0] = tm;
@@ -561,8 +561,8 @@
        ci = ci << (64 - MLX5_CQ_INDEX_WIDTH);
        ci |= (ts << MLX5_CQ_INDEX_WIDTH) >> MLX5_CQ_INDEX_WIDTH;
        rte_compiler_barrier();
-       __atomic_store_n(&sh->txpp.ts.ts, ts, __ATOMIC_RELAXED);
-       __atomic_store_n(&sh->txpp.ts.ci_ts, ci, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&sh->txpp.ts.ts, ts, rte_memory_order_relaxed);
+       rte_atomic_store_explicit(&sh->txpp.ts.ci_ts, ci, rte_memory_order_relaxed);
        rte_wmb();
 }
 
@@ -590,8 +590,8 @@
                         */
                        DRV_LOG(DEBUG,
                                "Clock Queue error sync lost (%X).", opcode);
-                               __atomic_fetch_add(&sh->txpp.err_clock_queue,
-                                  1, __ATOMIC_RELAXED);
+                               rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+                                  1, rte_memory_order_relaxed);
                        sh->txpp.sync_lost = 1;
                }
                return;
@@ -633,10 +633,10 @@
        if (!sh->txpp.clock_queue.sq_ci && !sh->txpp.ts_n)
                return;
        MLX5_ASSERT(sh->txpp.ts_p < MLX5_TXPP_REARM_SQ_SIZE);
-       __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ts,
-                        sh->txpp.ts.ts, __ATOMIC_RELAXED);
-       __atomic_store_n(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
-                        sh->txpp.ts.ci_ts, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ts,
+                        sh->txpp.ts.ts, rte_memory_order_relaxed);
+       rte_atomic_store_explicit(&sh->txpp.tsa[sh->txpp.ts_p].ci_ts,
+                        sh->txpp.ts.ci_ts, rte_memory_order_relaxed);
        if (++sh->txpp.ts_p >= MLX5_TXPP_REARM_SQ_SIZE)
                sh->txpp.ts_p = 0;
        if (sh->txpp.ts_n < MLX5_TXPP_REARM_SQ_SIZE)
@@ -677,8 +677,8 @@
                /* Check whether we have missed interrupts. */
                if (cq_ci - wq->cq_ci != 1) {
                        DRV_LOG(DEBUG, "Rearm Queue missed interrupt.");
-                       __atomic_fetch_add(&sh->txpp.err_miss_int,
-                                          1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&sh->txpp.err_miss_int,
+                                          1, rte_memory_order_relaxed);
                        /* Check sync lost on wqe index. */
                        if (cq_ci - wq->cq_ci >=
                                (((1UL << MLX5_WQ_INDEX_WIDTH) /
@@ -693,8 +693,8 @@
                /* Fire new requests to Rearm Queue. */
                if (error) {
                        DRV_LOG(DEBUG, "Rearm Queue error sync lost.");
-                       __atomic_fetch_add(&sh->txpp.err_rearm_queue,
-                                          1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&sh->txpp.err_rearm_queue,
+                                          1, rte_memory_order_relaxed);
                        sh->txpp.sync_lost = 1;
                }
        }
@@ -987,8 +987,8 @@
                mlx5_atomic_read_cqe((rte_int128_t *)&cqe->timestamp, &to.u128);
                if (to.cts.op_own >> 4) {
                        DRV_LOG(DEBUG, "Clock Queue error sync lost.");
-                       __atomic_fetch_add(&sh->txpp.err_clock_queue,
-                                          1, __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&sh->txpp.err_clock_queue,
+                                          1, rte_memory_order_relaxed);
                        sh->txpp.sync_lost = 1;
                        return -EIO;
                }
@@ -1031,12 +1031,12 @@ int mlx5_txpp_xstats_reset(struct rte_eth_dev *dev)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
 
-       __atomic_store_n(&sh->txpp.err_miss_int, 0, __ATOMIC_RELAXED);
-       __atomic_store_n(&sh->txpp.err_rearm_queue, 0, __ATOMIC_RELAXED);
-       __atomic_store_n(&sh->txpp.err_clock_queue, 0, __ATOMIC_RELAXED);
-       __atomic_store_n(&sh->txpp.err_ts_past, 0, __ATOMIC_RELAXED);
-       __atomic_store_n(&sh->txpp.err_ts_future, 0, __ATOMIC_RELAXED);
-       __atomic_store_n(&sh->txpp.err_ts_order, 0, __ATOMIC_RELAXED);
+       rte_atomic_store_explicit(&sh->txpp.err_miss_int, 0, rte_memory_order_relaxed);
+       rte_atomic_store_explicit(&sh->txpp.err_rearm_queue, 0, rte_memory_order_relaxed);
+       rte_atomic_store_explicit(&sh->txpp.err_clock_queue, 0, rte_memory_order_relaxed);
+       rte_atomic_store_explicit(&sh->txpp.err_ts_past, 0, rte_memory_order_relaxed);
+       rte_atomic_store_explicit(&sh->txpp.err_ts_future, 0, rte_memory_order_relaxed);
+       rte_atomic_store_explicit(&sh->txpp.err_ts_order, 0, rte_memory_order_relaxed);
        return 0;
 }
 
@@ -1081,16 +1081,16 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
        do {
                uint64_t ts, ci;
 
-               ts = __atomic_load_n(&txpp->tsa[idx].ts, __ATOMIC_RELAXED);
-               ci = __atomic_load_n(&txpp->tsa[idx].ci_ts, __ATOMIC_RELAXED);
+               ts = rte_atomic_load_explicit(&txpp->tsa[idx].ts, rte_memory_order_relaxed);
+               ci = rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts, rte_memory_order_relaxed);
                rte_compiler_barrier();
                if ((ci ^ ts) << MLX5_CQ_INDEX_WIDTH != 0)
                        continue;
-               if (__atomic_load_n(&txpp->tsa[idx].ts,
-                                   __ATOMIC_RELAXED) != ts)
+               if (rte_atomic_load_explicit(&txpp->tsa[idx].ts,
+                                   rte_memory_order_relaxed) != ts)
                        continue;
-               if (__atomic_load_n(&txpp->tsa[idx].ci_ts,
-                                   __ATOMIC_RELAXED) != ci)
+               if (rte_atomic_load_explicit(&txpp->tsa[idx].ci_ts,
+                                   rte_memory_order_relaxed) != ci)
                        continue;
                tsa->ts = ts;
                tsa->ci_ts = ci;
@@ -1210,23 +1210,23 @@ int mlx5_txpp_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                for (i = 0; i < n_txpp; ++i)
                        stats[n_used + i].id = n_used + i;
                stats[n_used + 0].value =
-                               __atomic_load_n(&sh->txpp.err_miss_int,
-                                               __ATOMIC_RELAXED);
+                               rte_atomic_load_explicit(&sh->txpp.err_miss_int,
+                                               rte_memory_order_relaxed);
                stats[n_used + 1].value =
-                               __atomic_load_n(&sh->txpp.err_rearm_queue,
-                                               __ATOMIC_RELAXED);
+                               rte_atomic_load_explicit(&sh->txpp.err_rearm_queue,
+                                               rte_memory_order_relaxed);
                stats[n_used + 2].value =
-                               __atomic_load_n(&sh->txpp.err_clock_queue,
-                                               __ATOMIC_RELAXED);
+                               rte_atomic_load_explicit(&sh->txpp.err_clock_queue,
+                                               rte_memory_order_relaxed);
                stats[n_used + 3].value =
-                               __atomic_load_n(&sh->txpp.err_ts_past,
-                                               __ATOMIC_RELAXED);
+                               rte_atomic_load_explicit(&sh->txpp.err_ts_past,
+                                               rte_memory_order_relaxed);
                stats[n_used + 4].value =
-                               __atomic_load_n(&sh->txpp.err_ts_future,
-                                               __ATOMIC_RELAXED);
+                               rte_atomic_load_explicit(&sh->txpp.err_ts_future,
+                                               rte_memory_order_relaxed);
                stats[n_used + 5].value =
-                               __atomic_load_n(&sh->txpp.err_ts_order,
-                                               __ATOMIC_RELAXED);
+                               rte_atomic_load_explicit(&sh->txpp.err_ts_order,
+                                               rte_memory_order_relaxed);
                stats[n_used + 6].value = mlx5_txpp_xstats_jitter(&sh->txpp);
                stats[n_used + 7].value = mlx5_txpp_xstats_wander(&sh->txpp);
                stats[n_used + 8].value = sh->txpp.sync_lost;
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 14f55e8..da4236f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1108,7 +1108,7 @@ struct mlx5_txq_ctrl *
                rte_errno = ENOMEM;
                goto error;
        }
-       __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
        tmpl->is_hairpin = false;
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
        return tmpl;
@@ -1153,7 +1153,7 @@ struct mlx5_txq_ctrl *
        tmpl->txq.idx = idx;
        tmpl->hairpin_conf = *hairpin_conf;
        tmpl->is_hairpin = true;
-       __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&tmpl->refcnt, 1, rte_memory_order_relaxed);
        LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
        return tmpl;
 }
@@ -1178,7 +1178,7 @@ struct mlx5_txq_ctrl *
 
        if (txq_data) {
                ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
-               __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&ctrl->refcnt, 1, rte_memory_order_relaxed);
        }
        return ctrl;
 }
@@ -1203,7 +1203,7 @@ struct mlx5_txq_ctrl *
        if (priv->txqs == NULL || (*priv->txqs)[idx] == NULL)
                return 0;
        txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-       if (__atomic_fetch_sub(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) - 1 > 1)
+       if (rte_atomic_fetch_sub_explicit(&txq_ctrl->refcnt, 1, rte_memory_order_relaxed) - 1 > 1)
                return 1;
        if (txq_ctrl->obj) {
                priv->obj_ops.txq_obj_release(txq_ctrl->obj);
@@ -1219,7 +1219,7 @@ struct mlx5_txq_ctrl *
                txq_free_elts(txq_ctrl);
                dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
-       if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+       if (!rte_atomic_load_explicit(&txq_ctrl->refcnt, rte_memory_order_relaxed)) {
                if (!txq_ctrl->is_hairpin)
                        mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
                LIST_REMOVE(txq_ctrl, next);
@@ -1249,7 +1249,7 @@ struct mlx5_txq_ctrl *
        if (!(*priv->txqs)[idx])
                return -1;
        txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
-       return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
+       return (rte_atomic_load_explicit(&txq->refcnt, rte_memory_order_relaxed) == 1);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index e28db2e..fc03cc0 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -203,7 +203,7 @@ struct mlx5_indexed_pool *
        struct mlx5_indexed_cache *gc, *lc, *olc = NULL;
 
        lc = pool->cache[cidx]->lc;
-       gc = __atomic_load_n(&pool->gc, __ATOMIC_RELAXED);
+       gc = rte_atomic_load_explicit(&pool->gc, rte_memory_order_relaxed);
        if (gc && lc != gc) {
                mlx5_ipool_lock(pool);
                if (lc && !(--lc->ref_cnt))
@@ -266,8 +266,8 @@ struct mlx5_indexed_pool *
                pool->cache[cidx]->len = fetch_size - 1;
                return pool->cache[cidx]->idx[pool->cache[cidx]->len];
        }
-       trunk_idx = lc ? __atomic_load_n(&lc->n_trunk_valid,
-                        __ATOMIC_ACQUIRE) : 0;
+       trunk_idx = lc ? rte_atomic_load_explicit(&lc->n_trunk_valid,
+                        rte_memory_order_acquire) : 0;
        trunk_n = lc ? lc->n_trunk : 0;
        cur_max_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx);
        /* Check if index reach maximum. */
@@ -332,11 +332,11 @@ struct mlx5_indexed_pool *
                lc = p;
                lc->ref_cnt = 1;
                pool->cache[cidx]->lc = lc;
-               __atomic_store_n(&pool->gc, p, __ATOMIC_RELAXED);
+               rte_atomic_store_explicit(&pool->gc, p, rte_memory_order_relaxed);
        }
        /* Add trunk to trunks array. */
        lc->trunks[trunk_idx] = trunk;
-       __atomic_fetch_add(&lc->n_trunk_valid, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&lc->n_trunk_valid, 1, rte_memory_order_relaxed);
        /* Enqueue half of the index to global. */
        ts_idx = mlx5_trunk_idx_offset_get(pool, trunk_idx) + 1;
        fetch_size = trunk->free >> 1;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index f3c0d76..3146092 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -240,7 +240,7 @@ struct mlx5_indexed_trunk {
 
 struct mlx5_indexed_cache {
        struct mlx5_indexed_trunk **trunks;
-       volatile uint32_t n_trunk_valid; /* Trunks allocated. */
+       volatile RTE_ATOMIC(uint32_t) n_trunk_valid; /* Trunks allocated. */
        uint32_t n_trunk; /* Trunk pointer array size. */
        uint32_t ref_cnt;
        uint32_t len;
@@ -266,7 +266,7 @@ struct mlx5_indexed_pool {
                        uint32_t free_list; /* Index to first free trunk. */
                };
                struct {
-                       struct mlx5_indexed_cache *gc;
+                       RTE_ATOMIC(struct mlx5_indexed_cache *) gc;
                        /* Global cache. */
                        struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
                        /* Local cache. */
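
RTE_ATOMIC() wraps pointer types as well, which is what the indexed-pool global cache field above relies on. A minimal sketch with hypothetical names:

#include <rte_stdatomic.h>

struct cache_sketch;

struct pool_sketch {
	RTE_ATOMIC(struct cache_sketch *) gc; /* global cache pointer */
};

static inline struct cache_sketch *
pool_gc_load(struct pool_sketch *p)
{
	return rte_atomic_load_explicit(&p->gc, rte_memory_order_relaxed);
}

static inline void
pool_gc_store(struct pool_sketch *p, struct cache_sketch *c)
{
	rte_atomic_store_explicit(&p->gc, c, rte_memory_order_relaxed);
}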
-- 
1.8.3.1
