Instead of using CMM_ACCESS_ONCE() with memory barriers, use __atomic
builtins with relaxed memory ordering to implement CMM_LOAD_SHARED() and
CMM_STORE_SHARED().

Signed-off-by: Ondřej Surý <ond...@sury.org>
---
 include/urcu/system.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
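
Note (not part of the patch): a minimal standalone sketch of the new
semantics, with hypothetical stand-in macro names. The relaxed builtins
guarantee a single atomic access but impose no ordering, so callers must
still pair them with cmm_smp_rmc()/cmm_smp_wmc() as the comments in
system.h require; the store form also keeps returning the stored value:

#include <stdio.h>

/* Illustration-only stand-ins for _CMM_LOAD_SHARED()/_CMM_STORE_SHARED(). */
#define LOAD_SHARED(p)		__atomic_load_n(&(p), __ATOMIC_RELAXED)
#define STORE_SHARED(x, v)	\
	__extension__ ({ __atomic_store_n(&(x), v, __ATOMIC_RELAXED); v; })

static int shared_counter;

int main(void)
{
	/* Exactly one atomic store; no ordering implied. */
	int ret = STORE_SHARED(shared_counter, 42);
	/* Exactly one atomic load; add a barrier where ordering matters. */
	printf("%d %d\n", ret, LOAD_SHARED(shared_counter));
	return 0;
}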

diff --git a/include/urcu/system.h b/include/urcu/system.h
index faae390..4302253 100644
--- a/include/urcu/system.h
+++ b/include/urcu/system.h
@@ -26,7 +26,7 @@
  * Identify a shared load. A cmm_smp_rmc() or cmm_smp_mc() should come
  * before the load.
  */
-#define _CMM_LOAD_SHARED(p)           CMM_ACCESS_ONCE(p)
+#define _CMM_LOAD_SHARED(p)           __atomic_load_n(&(p), __ATOMIC_RELAXED)
 
 /*
  * Load a data from shared memory, doing a cache flush if required.
@@ -42,7 +42,7 @@
  * Identify a shared store. A cmm_smp_wmc() or cmm_smp_mc() should
  * follow the store.
  */
-#define _CMM_STORE_SHARED(x, v)        __extension__ ({ CMM_ACCESS_ONCE(x) = (v); })
+#define _CMM_STORE_SHARED(x, v)        __extension__ ({ __atomic_store_n(&(x), v, __ATOMIC_RELAXED); v; })
 
 /*
  * Store v into x, where x is located in shared memory. Performs the
-- 
2.39.2
