Replace the use of GCC builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx calls from the optional rte stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 drivers/common/mlx5/linux/mlx5_nl.c     |  5 +--
 drivers/common/mlx5/mlx5_common.h       |  2 +-
 drivers/common/mlx5/mlx5_common_mr.c    | 16 ++++-----
 drivers/common/mlx5/mlx5_common_mr.h    |  2 +-
 drivers/common/mlx5/mlx5_common_utils.c | 32 +++++++++---------
 drivers/common/mlx5/mlx5_common_utils.h |  6 ++--
 drivers/common/mlx5/mlx5_malloc.c       | 58 ++++++++++++++++-----------------
 7 files changed, 61 insertions(+), 60 deletions(-)

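For reference, a minimal before/after sketch of the conversion pattern applied
throughout this patch (hypothetical names, illustration only, not code from the
files below): variables accessed atomically are declared with RTE_ATOMIC(), and
each __atomic_xxx builtin becomes the matching rte_atomic_xxx_explicit() call
taking an rte_memory_order_xxx argument.

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_stdatomic.h>

    /* Hypothetical reference counter, illustration only. */
    static RTE_ATOMIC(uint32_t) example_refcnt;

    static inline uint32_t
    example_ref_acquire(void)
    {
        /* was: __atomic_fetch_add(&example_refcnt, 1, __ATOMIC_RELAXED) + 1 */
        return rte_atomic_fetch_add_explicit(&example_refcnt, 1,
                                             rte_memory_order_relaxed) + 1;
    }

    static inline bool
    example_ref_release(void)
    {
        /* was: __atomic_fetch_sub(&example_refcnt, 1, __ATOMIC_RELAXED) - 1 == 0 */
        return rte_atomic_fetch_sub_explicit(&example_refcnt, 1,
                                             rte_memory_order_relaxed) - 1 == 0;
    }

With the stdatomic option enabled these map to C11 atomic operations; otherwise
they fall back to the GCC builtins, so generated code is expected to be unchanged.
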
diff --git a/drivers/common/mlx5/linux/mlx5_nl.c b/drivers/common/mlx5/linux/mlx5_nl.c
index 28a1f56..bf6dd19 100644
--- a/drivers/common/mlx5/linux/mlx5_nl.c
+++ b/drivers/common/mlx5/linux/mlx5_nl.c
@@ -175,10 +175,11 @@ struct mlx5_nl_port_info {
        uint16_t state; /**< IB device port state (out). */
 };
 
-uint32_t atomic_sn;
+RTE_ATOMIC(uint32_t) atomic_sn;
 
 /* Generate Netlink sequence number. */
-#define MLX5_NL_SN_GENERATE (__atomic_fetch_add(&atomic_sn, 1, __ATOMIC_RELAXED) + 1)
+#define MLX5_NL_SN_GENERATE (rte_atomic_fetch_add_explicit(&atomic_sn, 1, \
+       rte_memory_order_relaxed) + 1)
 
 /**
  * Opens a Netlink socket.
diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 9c80277..14c70ed 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -195,7 +195,7 @@ enum mlx5_cqe_status {
        /* Prevent speculative reading of other fields in CQE until
         * CQE is valid.
         */
-       rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+       rte_atomic_thread_fence(rte_memory_order_acquire);
 
        if (unlikely(op_code == MLX5_CQE_RESP_ERR ||
                     op_code == MLX5_CQE_REQ_ERR))
diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index 85ec10d..50922ad 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -35,7 +35,7 @@ struct mlx5_range {
 /** Memory region for a mempool. */
 struct mlx5_mempool_mr {
        struct mlx5_pmd_mr pmd_mr;
-       uint32_t refcnt; /**< Number of mempools sharing this MR. */
+       RTE_ATOMIC(uint32_t) refcnt; /**< Number of mempools sharing this MR. */
 };
 
 /* Mempool registration. */
@@ -56,11 +56,11 @@ struct mlx5_mempool_reg {
 {
        struct mlx5_mprq_buf *buf = opaque;
 
-       if (__atomic_load_n(&buf->refcnt, __ATOMIC_RELAXED) == 1) {
+       if (rte_atomic_load_explicit(&buf->refcnt, rte_memory_order_relaxed) == 1) {
                rte_mempool_put(buf->mp, buf);
-       } else if (unlikely(__atomic_fetch_sub(&buf->refcnt, 1,
-                                              __ATOMIC_RELAXED) - 1 == 0)) {
-               __atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
+       } else if (unlikely(rte_atomic_fetch_sub_explicit(&buf->refcnt, 1,
+                                              rte_memory_order_relaxed) - 1 == 0)) {
+               rte_atomic_store_explicit(&buf->refcnt, 1, rte_memory_order_relaxed);
                rte_mempool_put(buf->mp, buf);
        }
 }
@@ -1650,7 +1650,7 @@ struct mlx5_mempool_get_extmem_data {
        unsigned int i;
 
        for (i = 0; i < mpr->mrs_n; i++)
-               __atomic_fetch_add(&mpr->mrs[i].refcnt, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&mpr->mrs[i].refcnt, 1, rte_memory_order_relaxed);
 }
 
 /**
@@ -1665,8 +1665,8 @@ struct mlx5_mempool_get_extmem_data {
        bool ret = false;
 
        for (i = 0; i < mpr->mrs_n; i++)
-               ret |= __atomic_fetch_sub(&mpr->mrs[i].refcnt, 1,
-                                         __ATOMIC_RELAXED) - 1 == 0;
+               ret |= rte_atomic_fetch_sub_explicit(&mpr->mrs[i].refcnt, 1,
+                                         rte_memory_order_relaxed) - 1 == 0;
        return ret;
 }
 
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index 8789d40..5bdf48a 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -93,7 +93,7 @@ struct mlx5_mr_share_cache {
 /* Multi-Packet RQ buffer header. */
 struct mlx5_mprq_buf {
        struct rte_mempool *mp;
-       uint16_t refcnt; /* Atomically accessed refcnt. */
+       RTE_ATOMIC(uint16_t) refcnt; /* Atomically accessed refcnt. */
        struct rte_mbuf_ext_shared_info shinfos[];
        /*
         * Shared information per stride.
diff --git a/drivers/common/mlx5/mlx5_common_utils.c b/drivers/common/mlx5/mlx5_common_utils.c
index e69d068..4b95d35 100644
--- a/drivers/common/mlx5/mlx5_common_utils.c
+++ b/drivers/common/mlx5/mlx5_common_utils.c
@@ -81,14 +81,14 @@ struct mlx5_list *
        while (entry != NULL) {
                if (l_const->cb_match(l_const->ctx, entry, ctx) == 0) {
                        if (reuse) {
-                               ret = __atomic_fetch_add(&entry->ref_cnt, 1,
-                                                        __ATOMIC_RELAXED);
+                               ret = rte_atomic_fetch_add_explicit(&entry->ref_cnt, 1,
+                                                        rte_memory_order_relaxed);
                                DRV_LOG(DEBUG, "mlx5 list %s entry %p ref: %u.",
                                        l_const->name, (void *)entry,
                                        entry->ref_cnt);
                        } else if (lcore_index < MLX5_LIST_GLOBAL) {
-                               ret = __atomic_load_n(&entry->ref_cnt,
-                                                     __ATOMIC_RELAXED);
+                               ret = rte_atomic_load_explicit(&entry->ref_cnt,
+                                                     rte_memory_order_relaxed);
                        }
                        if (likely(ret != 0 || lcore_index == MLX5_LIST_GLOBAL))
                                return entry;
@@ -151,13 +151,13 @@ struct mlx5_list_entry *
 {
        struct mlx5_list_cache *c = l_inconst->cache[lcore_index];
        struct mlx5_list_entry *entry = LIST_FIRST(&c->h);
-       uint32_t inv_cnt = __atomic_exchange_n(&c->inv_cnt, 0,
-                                              __ATOMIC_RELAXED);
+       uint32_t inv_cnt = rte_atomic_exchange_explicit(&c->inv_cnt, 0,
+                                              rte_memory_order_relaxed);
 
        while (inv_cnt != 0 && entry != NULL) {
                struct mlx5_list_entry *nentry = LIST_NEXT(entry, next);
 
-               if (__atomic_load_n(&entry->ref_cnt, __ATOMIC_RELAXED) == 0) {
+               if (rte_atomic_load_explicit(&entry->ref_cnt, rte_memory_order_relaxed) == 0) {
                        LIST_REMOVE(entry, next);
                        if (l_const->lcores_share)
                                l_const->cb_clone_free(l_const->ctx, entry);
@@ -217,7 +217,7 @@ struct mlx5_list_entry *
                entry->lcore_idx = (uint32_t)lcore_index;
                LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h,
                                 entry, next);
-               __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
                DRV_LOG(DEBUG, "MLX5 list %s c%d entry %p new: %u.",
                        l_const->name, lcore_index,
                        (void *)entry, entry->ref_cnt);
@@ -254,7 +254,7 @@ struct mlx5_list_entry *
        l_inconst->gen_cnt++;
        rte_rwlock_write_unlock(&l_inconst->lock);
        LIST_INSERT_HEAD(&l_inconst->cache[lcore_index]->h, local_entry, next);
-       __atomic_fetch_add(&l_inconst->count, 1, __ATOMIC_RELAXED);
+       rte_atomic_fetch_add_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
        DRV_LOG(DEBUG, "mlx5 list %s entry %p new: %u.", l_const->name,
                (void *)entry, entry->ref_cnt);
        return local_entry;
@@ -285,7 +285,7 @@ struct mlx5_list_entry *
 {
        struct mlx5_list_entry *gentry = entry->gentry;
 
-       if (__atomic_fetch_sub(&entry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+       if (rte_atomic_fetch_sub_explicit(&entry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
                return 1;
        if (entry->lcore_idx == (uint32_t)lcore_idx) {
                LIST_REMOVE(entry, next);
@@ -294,23 +294,23 @@ struct mlx5_list_entry *
                else
                        l_const->cb_remove(l_const->ctx, entry);
        } else {
-               __atomic_fetch_add(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
-                                  1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&l_inconst->cache[entry->lcore_idx]->inv_cnt,
+                                  1, rte_memory_order_relaxed);
        }
        if (!l_const->lcores_share) {
-               __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
                DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
                        l_const->name, (void *)entry);
                return 0;
        }
-       if (__atomic_fetch_sub(&gentry->ref_cnt, 1, __ATOMIC_RELAXED) - 1 != 0)
+       if (rte_atomic_fetch_sub_explicit(&gentry->ref_cnt, 1, rte_memory_order_relaxed) - 1 != 0)
                return 1;
        rte_rwlock_write_lock(&l_inconst->lock);
        if (likely(gentry->ref_cnt == 0)) {
                LIST_REMOVE(gentry, next);
                rte_rwlock_write_unlock(&l_inconst->lock);
                l_const->cb_remove(l_const->ctx, gentry);
-               __atomic_fetch_sub(&l_inconst->count, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_sub_explicit(&l_inconst->count, 1, rte_memory_order_relaxed);
                DRV_LOG(DEBUG, "mlx5 list %s entry %p removed.",
                        l_const->name, (void *)gentry);
                return 0;
@@ -377,7 +377,7 @@ struct mlx5_list_entry *
 mlx5_list_get_entry_num(struct mlx5_list *list)
 {
        MLX5_ASSERT(list);
-       return __atomic_load_n(&list->l_inconst.count, __ATOMIC_RELAXED);
+       return rte_atomic_load_explicit(&list->l_inconst.count, rte_memory_order_relaxed);
 }
 
 /********************* Hash List **********************/
diff --git a/drivers/common/mlx5/mlx5_common_utils.h b/drivers/common/mlx5/mlx5_common_utils.h
index ae15119..cb4d104 100644
--- a/drivers/common/mlx5/mlx5_common_utils.h
+++ b/drivers/common/mlx5/mlx5_common_utils.h
@@ -29,7 +29,7 @@
  */
 struct mlx5_list_entry {
        LIST_ENTRY(mlx5_list_entry) next; /* Entry pointers in the list. */
-       uint32_t ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
+       RTE_ATOMIC(uint32_t) ref_cnt __rte_aligned(8); /* 0 means, entry is invalid. */
        uint32_t lcore_idx;
        union {
                struct mlx5_list_entry *gentry;
@@ -39,7 +39,7 @@ struct mlx5_list_entry {
 
 struct mlx5_list_cache {
        LIST_HEAD(mlx5_list_head, mlx5_list_entry) h;
-       uint32_t inv_cnt; /* Invalid entries counter. */
+       RTE_ATOMIC(uint32_t) inv_cnt; /* Invalid entries counter. */
 } __rte_cache_aligned;
 
 /**
@@ -111,7 +111,7 @@ struct mlx5_list_const {
 struct mlx5_list_inconst {
        rte_rwlock_t lock; /* read/write lock. */
        volatile uint32_t gen_cnt; /* List modification may update it. */
-       volatile uint32_t count; /* number of entries in list. */
+       volatile RTE_ATOMIC(uint32_t) count; /* number of entries in list. */
        struct mlx5_list_cache *cache[MLX5_LIST_MAX];
        /* Lcore cache, last index is the global cache. */
 };
diff --git a/drivers/common/mlx5/mlx5_malloc.c b/drivers/common/mlx5/mlx5_malloc.c
index c58c41d..ef6dabe 100644
--- a/drivers/common/mlx5/mlx5_malloc.c
+++ b/drivers/common/mlx5/mlx5_malloc.c
@@ -16,7 +16,7 @@ struct mlx5_sys_mem {
        uint32_t init:1; /* Memory allocator initialized. */
        uint32_t enable:1; /* System memory select. */
        uint32_t reserve:30; /* Reserve. */
-       struct rte_memseg_list *last_msl;
+       RTE_ATOMIC(struct rte_memseg_list *) last_msl;
        /* last allocated rte memory memseg list. */
 #ifdef RTE_LIBRTE_MLX5_DEBUG
        uint64_t malloc_sys;
@@ -93,14 +93,14 @@ struct mlx5_sys_mem {
         * different with the cached msl.
         */
        if (addr && !mlx5_mem_check_msl(addr,
-           (struct rte_memseg_list *)__atomic_load_n
-           (&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
-               __atomic_store_n(&mlx5_sys_mem.last_msl,
+           (struct rte_memseg_list *)rte_atomic_load_explicit
+           (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
+               rte_atomic_store_explicit(&mlx5_sys_mem.last_msl,
                        rte_mem_virt2memseg_list(addr),
-                       __ATOMIC_RELAXED);
+                       rte_memory_order_relaxed);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-               __atomic_fetch_add(&mlx5_sys_mem.msl_update, 1,
-                                  __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,
+                                  rte_memory_order_relaxed);
 #endif
        }
 }
@@ -122,11 +122,11 @@ struct mlx5_sys_mem {
         * to check if the memory belongs to rte memory.
         */
        if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
-           __atomic_load_n(&mlx5_sys_mem.last_msl, __ATOMIC_RELAXED))) {
+           rte_atomic_load_explicit(&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
                if (!rte_mem_virt2memseg_list(addr))
                        return false;
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-               __atomic_fetch_add(&mlx5_sys_mem.msl_miss, 1, __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1, rte_memory_order_relaxed);
 #endif
        }
        return true;
@@ -185,8 +185,8 @@ struct mlx5_sys_mem {
                mlx5_mem_update_msl(addr);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
                if (addr)
-                       __atomic_fetch_add(&mlx5_sys_mem.malloc_rte, 1,
-                                          __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,
+                                          rte_memory_order_relaxed);
 #endif
                return addr;
        }
@@ -199,8 +199,8 @@ struct mlx5_sys_mem {
                addr = malloc(size);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
        if (addr)
-               __atomic_fetch_add(&mlx5_sys_mem.malloc_sys, 1,
-                                  __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,
+                                  rte_memory_order_relaxed);
 #endif
        return addr;
 }
@@ -233,8 +233,8 @@ struct mlx5_sys_mem {
                mlx5_mem_update_msl(new_addr);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
                if (new_addr)
-                       __atomic_fetch_add(&mlx5_sys_mem.realloc_rte, 1,
-                                          __ATOMIC_RELAXED);
+                       rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,
+                                          rte_memory_order_relaxed);
 #endif
                return new_addr;
        }
@@ -246,8 +246,8 @@ struct mlx5_sys_mem {
        new_addr = realloc(addr, size);
 #ifdef RTE_LIBRTE_MLX5_DEBUG
        if (new_addr)
-               __atomic_fetch_add(&mlx5_sys_mem.realloc_sys, 1,
-                                  __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,
+                                  rte_memory_order_relaxed);
 #endif
        return new_addr;
 }
@@ -259,14 +259,14 @@ struct mlx5_sys_mem {
                return;
        if (!mlx5_mem_is_rte(addr)) {
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-               __atomic_fetch_add(&mlx5_sys_mem.free_sys, 1,
-                                  __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,
+                                  rte_memory_order_relaxed);
 #endif
                mlx5_os_free(addr);
        } else {
 #ifdef RTE_LIBRTE_MLX5_DEBUG
-               __atomic_fetch_add(&mlx5_sys_mem.free_rte, 1,
-                                  __ATOMIC_RELAXED);
+               rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,
+                                  rte_memory_order_relaxed);
 #endif
                rte_free(addr);
        }
@@ -280,14 +280,14 @@ struct mlx5_sys_mem {
                " free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
                " realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
                " update:%"PRIi64"",
-               __atomic_load_n(&mlx5_sys_mem.malloc_sys, __ATOMIC_RELAXED),
-               __atomic_load_n(&mlx5_sys_mem.realloc_sys, __ATOMIC_RELAXED),
-               __atomic_load_n(&mlx5_sys_mem.free_sys, __ATOMIC_RELAXED),
-               __atomic_load_n(&mlx5_sys_mem.malloc_rte, __ATOMIC_RELAXED),
-               __atomic_load_n(&mlx5_sys_mem.realloc_rte, __ATOMIC_RELAXED),
-               __atomic_load_n(&mlx5_sys_mem.free_rte, __ATOMIC_RELAXED),
-               __atomic_load_n(&mlx5_sys_mem.msl_miss, __ATOMIC_RELAXED),
-               __atomic_load_n(&mlx5_sys_mem.msl_update, __ATOMIC_RELAXED));
+               rte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),
+               rte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),
+               rte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),
+               rte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),
+               rte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),
+               rte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),
+               rte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),
+               rte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));
 #endif
 }
 
-- 
1.8.3.1
