> -----Original Message----- > From: Tyler Retzlaff <[email protected]> > Sent: Wednesday, October 18, 2023 4:31 AM > To: [email protected] > Cc: Akhil Goyal <[email protected]>; Anatoly Burakov > <[email protected]>; Andrew > Rybchenko <[email protected]>; Bruce Richardson > <[email protected]>; > Chenbo Xia <[email protected]>; Ciara Power <[email protected]>; David > Christensen > <[email protected]>; David Hunt <[email protected]>; Dmitry Kozlyuk > <[email protected]>; Dmitry Malloy <[email protected]>; Elena > Agostini > <[email protected]>; Erik Gabriel Carrillo <[email protected]>; > Fan Zhang > <[email protected]>; Ferruh Yigit <[email protected]>; Harman Kalra > <[email protected]>; Harry van Haaren <[email protected]>; Honnappa > Nagarahalli > <[email protected]>; [email protected]; Konstantin Ananyev > <[email protected]>; Matan Azrad <[email protected]>; Maxime > Coquelin > <[email protected]>; Narcisa Ana Maria Vasile > <[email protected]>; > Nicolas Chautru <[email protected]>; Olivier Matz > <[email protected]>; Ori > Kam <[email protected]>; Pallavi Kadam <[email protected]>; Pavan > Nikhilesh > <[email protected]>; Reshma Pattan <[email protected]>; Sameh > Gobriel > <[email protected]>; Shijith Thotton <[email protected]>; Sivaprasad > Tummala > <[email protected]>; Stephen Hemminger <[email protected]>; > Suanming Mou > <[email protected]>; Sunil Kumar Kori <[email protected]>; > [email protected]; > Viacheslav Ovsiienko <[email protected]>; Vladimir Medvedkin > <[email protected]>; Yipeng Wang <[email protected]>; Tyler > Retzlaff > <[email protected]> > Subject: [PATCH v2 09/19] rcu: use rte optional stdatomic API > > Replace the use of gcc builtin __atomic_xxx intrinsics with corresponding > rte_atomic_xxx > optional stdatomic API > > Signed-off-by: Tyler Retzlaff <[email protected]> > --- > lib/rcu/rte_rcu_qsbr.c | 48 +++++++++++++++++------------------ > lib/rcu/rte_rcu_qsbr.h | 68 > +++++++++++++++++++++++++------------------------- > 2 
files changed, 58 insertions(+), 58 deletions(-) > > diff --git a/lib/rcu/rte_rcu_qsbr.c b/lib/rcu/rte_rcu_qsbr.c index > 17be93e..4dc7714 100644 > --- a/lib/rcu/rte_rcu_qsbr.c > +++ b/lib/rcu/rte_rcu_qsbr.c > @@ -102,21 +102,21 @@ > * go out of sync. Hence, additional checks are required. > */ > /* Check if the thread is already registered */ > - old_bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i), > - __ATOMIC_RELAXED); > + old_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i), > + rte_memory_order_relaxed); > if (old_bmap & 1UL << id) > return 0; > > do { > new_bmap = old_bmap | (1UL << id); > - success = __atomic_compare_exchange( > + success = rte_atomic_compare_exchange_strong_explicit( > __RTE_QSBR_THRID_ARRAY_ELM(v, i), > - &old_bmap, &new_bmap, 0, > - __ATOMIC_RELEASE, __ATOMIC_RELAXED); > + &old_bmap, new_bmap, > + rte_memory_order_release, > rte_memory_order_relaxed); > > if (success) > - __atomic_fetch_add(&v->num_threads, > - 1, __ATOMIC_RELAXED); > + rte_atomic_fetch_add_explicit(&v->num_threads, > + 1, rte_memory_order_relaxed); > else if (old_bmap & (1UL << id)) > /* Someone else registered this thread. > * Counter should not be incremented. > @@ -154,8 +154,8 @@ > * go out of sync. Hence, additional checks are required. > */ > /* Check if the thread is already unregistered */ > - old_bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i), > - __ATOMIC_RELAXED); > + old_bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, i), > + rte_memory_order_relaxed); > if (!(old_bmap & (1UL << id))) > return 0; > > @@ -165,14 +165,14 @@ > * completed before removal of the thread from the list of > * reporting threads. 
> */ > - success = __atomic_compare_exchange( > + success = rte_atomic_compare_exchange_strong_explicit( > __RTE_QSBR_THRID_ARRAY_ELM(v, i), > - &old_bmap, &new_bmap, 0, > - __ATOMIC_RELEASE, __ATOMIC_RELAXED); > + &old_bmap, new_bmap, > + rte_memory_order_release, > rte_memory_order_relaxed); > > if (success) > - __atomic_fetch_sub(&v->num_threads, > - 1, __ATOMIC_RELAXED); > + rte_atomic_fetch_sub_explicit(&v->num_threads, > + 1, rte_memory_order_relaxed); > else if (!(old_bmap & (1UL << id))) > /* Someone else unregistered this thread. > * Counter should not be incremented. > @@ -227,8 +227,8 @@ > > fprintf(f, " Registered thread IDs = "); > for (i = 0; i < v->num_elems; i++) { > - bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i), > - __ATOMIC_ACQUIRE); > + bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, > i), > + rte_memory_order_acquire); > id = i << __RTE_QSBR_THRID_INDEX_SHIFT; > while (bmap) { > t = __builtin_ctzl(bmap); > @@ -241,26 +241,26 @@ > fprintf(f, "\n"); > > fprintf(f, " Token = %" PRIu64 "\n", > - __atomic_load_n(&v->token, __ATOMIC_ACQUIRE)); > + rte_atomic_load_explicit(&v->token, > rte_memory_order_acquire)); > > fprintf(f, " Least Acknowledged Token = %" PRIu64 "\n", > - __atomic_load_n(&v->acked_token, __ATOMIC_ACQUIRE)); > + rte_atomic_load_explicit(&v->acked_token, > +rte_memory_order_acquire)); > > fprintf(f, "Quiescent State Counts for readers:\n"); > for (i = 0; i < v->num_elems; i++) { > - bmap = __atomic_load_n(__RTE_QSBR_THRID_ARRAY_ELM(v, i), > - __ATOMIC_ACQUIRE); > + bmap = rte_atomic_load_explicit(__RTE_QSBR_THRID_ARRAY_ELM(v, > i), > + rte_memory_order_acquire); > id = i << __RTE_QSBR_THRID_INDEX_SHIFT; > while (bmap) { > t = __builtin_ctzl(bmap); > fprintf(f, "thread ID = %u, count = %" PRIu64 ", lock > count = %u\n", > id + t, > - __atomic_load_n( > + rte_atomic_load_explicit( > &v->qsbr_cnt[id + t].cnt, > - __ATOMIC_RELAXED), > - __atomic_load_n( > + rte_memory_order_relaxed), > + 
rte_atomic_load_explicit( > &v->qsbr_cnt[id + t].lock_cnt, > - __ATOMIC_RELAXED)); > + rte_memory_order_relaxed)); > bmap &= ~(1UL << t); > } > } > diff --git a/lib/rcu/rte_rcu_qsbr.h b/lib/rcu/rte_rcu_qsbr.h index > 87e1b55..9f4aed2 100644 > --- a/lib/rcu/rte_rcu_qsbr.h > +++ b/lib/rcu/rte_rcu_qsbr.h > @@ -63,11 +63,11 @@ > * Given thread id needs to be converted to index into the array and > * the id within the array element. > */ > -#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(uint64_t) * 8) > +#define __RTE_QSBR_THRID_ARRAY_ELM_SIZE (sizeof(RTE_ATOMIC(uint64_t)) * > +8) > #define __RTE_QSBR_THRID_ARRAY_SIZE(max_threads) \ > RTE_ALIGN(RTE_ALIGN_MUL_CEIL(max_threads, \ > __RTE_QSBR_THRID_ARRAY_ELM_SIZE) >> 3, RTE_CACHE_LINE_SIZE) > -#define > __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t *) \ > +#define __RTE_QSBR_THRID_ARRAY_ELM(v, i) ((uint64_t __rte_atomic *) \
Is this cast to `(uint64_t __rte_atomic *)` equivalent to `(RTE_ATOMIC(uint64_t) *)`? > ((struct rte_rcu_qsbr_cnt *)(v + 1) + v->max_threads) + i) #define > __RTE_QSBR_THRID_INDEX_SHIFT 6 #define __RTE_QSBR_THRID_MASK 0x3f @@ -75,13 > +75,13 @@ > <snip>

