Signed-off-by: Ola Liljedahl <ola.liljed...@arm.com>
Reviewed-by: Brian Brooks <brian.bro...@arm.com>
---
 platform/linux-generic/include/odp_atomic16.h | 214 ++++++++++++++++++++++++++
 1 file changed, 214 insertions(+)
 create mode 100644 platform/linux-generic/include/odp_atomic16.h

diff --git a/platform/linux-generic/include/odp_atomic16.h b/platform/linux-generic/include/odp_atomic16.h
new file mode 100644
index 00000000..bbebb803
--- /dev/null
+++ b/platform/linux-generic/include/odp_atomic16.h
@@ -0,0 +1,214 @@
+/* Copyright (c) 2017, ARM Limited.
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#ifndef _ODP_ATOMIC16_H_
+#define _ODP_ATOMIC16_H_
+
+#include "odp_llsc.h"
+
+#if defined __ARM_ARCH && __ARM_ARCH == 8
+
+static inline __int128 __atomic_load_16(__int128 *var, int mo)
+{
+       __int128 old = *var; /* Possibly torn read */
+
+       /* Do a CAS to ensure atomicity of the read:
+        * either the CAS succeeds (writing back the same value),
+        * or it fails and updates 'old' with the atomically read
+        * current value.
+        */
+       (void)__atomic_compare_exchange_n(var,
+                       &old,
+                       old,
+                       false, /* weak: no, use a strong CAS */
+                       mo,
+                       mo);
+       return old;
+}
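+
+/* Usage sketch (illustrative, not part of the original code): read a
+ * 128-bit variable atomically, e.g. with acquire ordering:
+ *
+ *   __int128 snap = __atomic_load_16(&var128, __ATOMIC_ACQUIRE);
+ *
+ * The CAS either succeeds (rewriting the value it just read) or fails and
+ * returns the current value, so 'snap' can never be a torn read. Note that
+ * this "load" performs a store when the CAS succeeds, so it needs a
+ * writable mapping and may contend for exclusive cache line ownership.
+ */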
+
+/* __ARM_FEATURE_QRDMX is only defined for v8.1a and beyond; it is used
+ * here as a proxy for the availability of the CASP instructions
+ */
+#ifndef __ARM_FEATURE_QRDMX
+static inline bool
+__atomic_compare_exchange_16(register __int128 *var, __int128 *exp,
+                            register __int128 neu, bool weak, int mo_success,
+                            int mo_failure)
+{
+       (void)weak; /* Always do strong CAS or we can't perform atomic read */
+       /* Ignore the failure memory ordering; the success ordering must be
+        * at least as strong
+        */
+       (void)mo_failure;
+       int ll_mo;
+       int sc_mo;
+
+       register __int128 old;
+       register __int128 expected;
+
+       ll_mo = mo_success == __ATOMIC_ACQUIRE ||
+               mo_success == __ATOMIC_ACQ_REL ?
+               __ATOMIC_ACQUIRE : __ATOMIC_RELAXED;
+
+       sc_mo = mo_success == __ATOMIC_RELEASE ||
+               mo_success == __ATOMIC_ACQ_REL ?
+               __ATOMIC_RELEASE : __ATOMIC_RELAXED;
+
+       expected = *exp;
+       /* Compiler barrier: keep the read of *exp out of the LL/SC sequence
+        * below; extra memory accesses between lld() and scd() could make
+        * the store-conditional fail
+        */
+       __asm__ volatile("" ::: "memory");
+       do {
+               /* Atomicity of LLD is not guaranteed */
+               old = lld(var, ll_mo);
+               /* Must write back neu or old to verify atomicity of LLD */
+       } while (odp_unlikely(scd(var, old == expected ? neu : old, sc_mo)));
+       *exp = old; /* Always update, atomically read value */
+       return old == expected;
+}
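+
+/* Typical retry-loop usage (illustrative only; 'transform' is a
+ * hypothetical helper):
+ *
+ *   __int128 old = __atomic_load_16(&var128, __ATOMIC_RELAXED);
+ *   __int128 neu;
+ *
+ *   do {
+ *           neu = transform(old);
+ *   } while (!__atomic_compare_exchange_16(&var128, &old, neu, false,
+ *                                          __ATOMIC_RELEASE,
+ *                                          __ATOMIC_RELAXED));
+ *
+ * On failure 'old' is refreshed with the atomically read current value,
+ * so the loop always retries against fresh data.
+ */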
+
+static inline __int128 __atomic_exchange_16(__int128 *var,
+                                           __int128 neu, int mo)
+{
+       int ll_mo;
+       int sc_mo;
+       register __int128 old;
+
+       ll_mo = mo == __ATOMIC_ACQUIRE || mo == __ATOMIC_ACQ_REL ?
+               __ATOMIC_ACQUIRE : __ATOMIC_RELAXED;
+       sc_mo = mo == __ATOMIC_RELEASE || mo == __ATOMIC_ACQ_REL ?
+               __ATOMIC_RELEASE : __ATOMIC_RELAXED;
+       do {
+               /* Atomicity of LLD is not guaranteed */
+               old = lld(var, ll_mo);
+               /* Must successfully write back to verify atomicity of LLD */
+       } while (odp_unlikely(scd(var, neu, sc_mo)));
+       return old;
+}
+
+static inline __int128 __atomic_fetch_and_16(__int128 *var,
+                                            __int128 mask, int mo)
+{
+       int ll_mo;
+       int sc_mo;
+       register __int128 old;
+
+       ll_mo = mo == __ATOMIC_ACQUIRE || mo == __ATOMIC_ACQ_REL ?
+                       __ATOMIC_ACQUIRE : __ATOMIC_RELAXED;
+       sc_mo = mo == __ATOMIC_RELEASE || mo == __ATOMIC_ACQ_REL ?
+                       __ATOMIC_RELEASE : __ATOMIC_RELAXED;
+       do {
+               /* Atomicity of LLD is not guaranteed */
+               old = lld(var, ll_mo);
+               /* Must successfully write back to verify atomicity of LLD */
+       } while (odp_unlikely(scd(var, old & mask, sc_mo)));
+       return old;
+}
+
+static inline __int128 __atomic_fetch_or_16(__int128 *var,
+                                           __int128 mask,
+                                           int mo)
+{
+       int ll_mo;
+       int sc_mo;
+       register __int128 old;
+
+       ll_mo = mo == __ATOMIC_ACQUIRE || mo == __ATOMIC_ACQ_REL ?
+                       __ATOMIC_ACQUIRE : __ATOMIC_RELAXED;
+       sc_mo = mo == __ATOMIC_RELEASE || mo == __ATOMIC_ACQ_REL ?
+                       __ATOMIC_RELEASE : __ATOMIC_RELAXED;
+       do {
+               /* Atomicity of LLD is not guaranteed */
+               old = lld(var, ll_mo);
+               /* Must successfully write back to verify atomicity of LLD */
+       } while (odp_unlikely(scd(var, old | mask, sc_mo)));
+       return old;
+}
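+
+/* The fetch-and/fetch-or pair can be used, for example, to atomically
+ * clear or set individual flag bits in a 128-bit bitmap (illustrative
+ * only):
+ *
+ *   (void)__atomic_fetch_or_16(&bitmap, (__int128)1 << bit,
+ *                              __ATOMIC_RELAXED);
+ *   (void)__atomic_fetch_and_16(&bitmap, ~((__int128)1 << bit),
+ *                               __ATOMIC_RELAXED);
+ */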
+
+#else
+
+static inline __int128 casp(__int128 *var, __int128 old, __int128 neu, int mo)
+{
+       if (mo == __ATOMIC_RELAXED) {
+               __asm__ volatile("casp %0, %H0, %1, %H1, [%2]"
+                                : "+r" (old)
+                                : "r" (neu), "r" (var)
+                                : "memory");
+       } else if (mo == __ATOMIC_ACQUIRE) {
+               __asm__ volatile("caspa %0, %H0, %1, %H1, [%2]"
+                                : "+r" (old)
+                                : "r" (neu), "r" (var)
+                                : "memory");
+       } else if (mo == __ATOMIC_ACQ_REL) {
+               __asm__ volatile("caspal %0, %H0, %1, %H1, [%2]"
+                                : "+r" (old)
+                                : "r" (neu), "r" (var)
+                                : "memory");
+       } else if (mo == __ATOMIC_RELEASE) {
+               __asm__ volatile("caspl %0, %H0, %1, %H1, [%2]"
+                                : "+r" (old)
+                                : "r" (neu), "r" (var)
+                                : "memory");
+       } else {
+               abort();
+       }
+       return old;
+}
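+
+/* Note: the memory order maps directly onto the instruction variant:
+ * __ATOMIC_RELAXED -> CASP, __ATOMIC_ACQUIRE -> CASPA,
+ * __ATOMIC_RELEASE -> CASPL, __ATOMIC_ACQ_REL -> CASPAL.
+ * Any other order (e.g. __ATOMIC_SEQ_CST) is not handled and aborts.
+ */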
+
+static inline bool
+__atomic_compare_exchange_16(register __int128 *var, __int128 *exp,
+                            register __int128 neu, bool weak, int mo_success,
+                            int mo_failure)
+{
+       (void)weak; /* CASP never fails spuriously, so always do strong CAS */
+       (void)mo_failure; /* Use the success ordering for both outcomes */
+       __int128 old;
+       __int128 expected;
+
+       expected = *exp;
+       old = casp(var, expected, neu, mo_success);
+       *exp = old; /* Always update, atomically read value */
+       return old == expected;
+}
+
+static inline __int128 __atomic_exchange_16(__int128 *var,
+                                           __int128 neu, int mo)
+{
+       __int128 old;
+       __int128 expected;
+
+       do {
+               expected = *var; /* Possibly torn read, CASP detects it */
+               old = casp(var, expected, neu, mo);
+       } while (old != expected);
+       return old;
+}
+
+static inline __int128 __atomic_fetch_and_16(__int128 *var,
+                                            __int128 mask, int mo)
+{
+       __int128 old;
+       __int128 expected;
+
+       do {
+               expected = *var;
+               old = casp(var, expected, expected & mask, mo);
+       } while (old != expected);
+       return old;
+}
+
+static inline __int128 __atomic_fetch_or_16(__int128 *var,
+                                           __int128 mask,
+                                           int mo)
+{
+       __int128 old;
+       __int128 expected;
+
+       do {
+               expected = *var;
+               old = casp(var, expected, expected | mask, mo);
+       } while (old != expected);
+       return old;
+}
+
+#endif
+
+#endif
+#endif
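
A minimal caller-side sketch for context (illustrative only, not part of
the patch; the struct layout and the ref16_update() name are assumptions,
and the target variable is assumed to be 16-byte aligned):

#include <stdint.h>
#include <string.h>
#include "odp_atomic16.h"

/* Illustrative 16-byte reference: a pointer plus an ABA-protection tag */
struct ref16 {
        void *ptr;
        uint64_t tag;
};

/* Atomically install a new pointer and bump the tag */
static void ref16_update(__int128 *loc, void *new_ptr)
{
        __int128 old, neu;
        struct ref16 r;

        old = __atomic_load_16(loc, __ATOMIC_RELAXED);
        do {
                memcpy(&r, &old, sizeof(r));
                r.ptr = new_ptr;
                r.tag++;
                memcpy(&neu, &r, sizeof(neu));
                /* On failure 'old' is updated with the current value */
        } while (!__atomic_compare_exchange_16(loc, &old, neu, false,
                                               __ATOMIC_RELEASE,
                                               __ATOMIC_RELAXED));
}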
-- 
2.12.2
