Replace the use of the gcc builtin __atomic_xxx intrinsics with the
corresponding rte_atomic_xxx optional RTE stdatomic API.

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 drivers/dma/idxd/idxd_internal.h | 2 +-
 drivers/dma/idxd/idxd_pci.c      | 9 +++++----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index cd41777..537cf9b 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -33,7 +33,7 @@ struct idxd_pci_common {
        rte_spinlock_t lk;
 
        uint8_t wq_cfg_sz;
-       uint16_t ref_count;
+       RTE_ATOMIC(uint16_t) ref_count;
        volatile struct rte_idxd_bar0 *regs;
        volatile uint32_t *wq_regs_base;
        volatile struct rte_idxd_grpcfg *grp_regs;
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index a78889a..06fa115 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -136,7 +136,8 @@
         * the PCI struct
         */
        /* NOTE: review for potential ordering optimization */
-       is_last_wq = (__atomic_fetch_sub(&idxd->u.pci->ref_count, 1, __ATOMIC_SEQ_CST) == 1);
+       is_last_wq = (rte_atomic_fetch_sub_explicit(&idxd->u.pci->ref_count, 1,
+           rte_memory_order_seq_cst) == 1);
        if (is_last_wq) {
                /* disable the device */
                err_code = idxd_pci_dev_command(idxd, idxd_disable_dev);
@@ -330,9 +331,9 @@
                        return ret;
                }
                qid = rte_dma_get_dev_id_by_name(qname);
-               max_qid = __atomic_load_n(
+               max_qid = rte_atomic_load_explicit(
                        &((struct idxd_dmadev *)rte_dma_fp_objs[qid].dev_private)->u.pci->ref_count,
-                       __ATOMIC_SEQ_CST);
+                       rte_memory_order_seq_cst);
 
                /* we have queue 0 done, now configure the rest of the queues */
                for (qid = 1; qid < max_qid; qid++) {
@@ -389,7 +390,7 @@
                                free(idxd.u.pci);
                        return ret;
                }
-               __atomic_fetch_add(&idxd.u.pci->ref_count, 1, __ATOMIC_SEQ_CST);
+               rte_atomic_fetch_add_explicit(&idxd.u.pci->ref_count, 1, rte_memory_order_seq_cst);
        }
 
        return 0;
-- 
1.8.3.1

Reply via email to