The header was taken from Linux kernel 6.4.0-rc1.

Additionally, the following changes were made:
* add emulation of {cmp}xchg for 1- and 2-byte types (see the usage sketch
  below)
* replace tabs with spaces
* rename __*-prefixed local variables to the *__ form
* introduce generic versions of xchg_* and cmpxchg_*
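
For illustration, a minimal usage sketch of the new 1/2-byte handling (the
variable names are hypothetical and not part of the patch; such calls are
expected to be routed through emulate_xchg_1_2()/emulate_cmpxchg_1_2()):

    uint8_t flag = 0;
    uint8_t prev;

    prev = xchg(&flag, 1);       /* atomically set flag, return old value */
    prev = cmpxchg(&flag, 1, 0); /* clear flag only if it is still 1 */
    if ( prev == 1 )
        /* the store of 0 took effect */;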

Signed-off-by: Oleksii Kurochko <oleksii.kuroc...@gmail.com>
---
Changes in V4:
 - Code style fixes.
 - enforce that new and *ptr have the same type in __xchg_*(); also the
   trailing "\n" was removed from the end of the asm instructions.
 - add a dependency on
   https://lore.kernel.org/xen-devel/cover.1706259490.git.federico.seraf...@bugseng.com/
 - switch from ASSERT_UNREACHABLE to STATIC_ASSERT_UNREACHABLE().
 - drop xchg32(ptr, x) and xchg64(ptr, x) as they aren't used.
 - drop cmpxchg{32,64}() and cmpxchg{32,64}_local() as they aren't used.
 - introduce generic version of xchg_* and cmpxchg_*.
 - update the commit message.
---
Changes in V3:
 - update the commit message
 - add emulation of {cmp}xchg_... for 1- and 2-byte types
---
Changes in V2:
 - update the comment at the top of the header.
 - change xen/lib.h to xen/bug.h.
 - sort inclusion of headers properly.
---
 xen/arch/riscv/include/asm/cmpxchg.h | 237 +++++++++++++++++++++++++++
 1 file changed, 237 insertions(+)
 create mode 100644 xen/arch/riscv/include/asm/cmpxchg.h

diff --git a/xen/arch/riscv/include/asm/cmpxchg.h b/xen/arch/riscv/include/asm/cmpxchg.h
new file mode 100644
index 0000000000..b751a50cbf
--- /dev/null
+++ b/xen/arch/riscv/include/asm/cmpxchg.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2014 Regents of the University of California */
+
+#ifndef _ASM_RISCV_CMPXCHG_H
+#define _ASM_RISCV_CMPXCHG_H
+
+#include <xen/compiler.h>
+#include <xen/lib.h>
+
+#include <asm/fence.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#define ALIGN_DOWN(addr, size)  ((addr) & (~((size) - 1)))
+
+#define __amoswap_generic(ptr, new, ret, sfx, release_barrier, acquire_barrier) \
+({ \
+    asm volatile( \
+        release_barrier \
+        " amoswap" sfx " %0, %2, %1\n" \
+        acquire_barrier \
+        : "=r" (ret), "+A" (*ptr) \
+        : "r" (new) \
+        : "memory" ); \
+})
+
+#define emulate_xchg_1_2(ptr, new, ret, release_barrier, acquire_barrier) \
+({ \
+    uint32_t *ptr_32b_aligned = (uint32_t *)ALIGN_DOWN((unsigned long)ptr, 4); \
+    uint8_t mask_l = ((unsigned long)(ptr) & (0x4 - sizeof(*ptr))) * BITS_PER_BYTE; \
+    uint8_t mask_size = sizeof(*ptr) * BITS_PER_BYTE; \
+    uint8_t mask_h = mask_l + mask_size - 1; \
+    unsigned long mask = GENMASK(mask_h, mask_l); \
+    unsigned long new_ = (unsigned long)(new) << mask_l; \
+    unsigned long ret_; \
+    unsigned long rc; \
+    \
+    asm volatile( \
+        release_barrier \
+        "0: lr.d %0, %2\n" \
+        "   and  %1, %0, %z4\n" \
+        "   or   %1, %1, %z3\n" \
+        "   sc.d %1, %1, %2\n" \
+        "   bnez %1, 0b\n" \
+        acquire_barrier \
+        : "=&r" (ret_), "=&r" (rc), "+A" (*ptr_32b_aligned) \
+        : "rJ" (new_), "rJ" (~mask) \
+        : "memory"); \
+    \
+    ret = (__typeof__(*(ptr)))((ret_ & mask) >> mask_l); \
+})
+
+#define __xchg_generic(ptr, new, size, sfx, release_barrier, acquire_barrier) \
+({ \
+    __typeof__(ptr) ptr__ = (ptr); \
+    __typeof__(*(ptr)) new__ = (new); \
+    __typeof__(*(ptr)) ret__; \
+    switch (size) \
+    { \
+    case 1: \
+    case 2: \
+        emulate_xchg_1_2(ptr__, new__, ret__, release_barrier, acquire_barrier); \
+        break; \
+    case 4: \
+        __amoswap_generic(ptr__, new__, ret__,\
+                          ".w" sfx,  release_barrier, acquire_barrier); \
+        break; \
+    case 8: \
+        __amoswap_generic(ptr__, new__, ret__,\
+                          ".d" sfx,  release_barrier, acquire_barrier); \
+        break; \
+    default: \
+        STATIC_ASSERT_UNREACHABLE(); \
+    } \
+    ret__; \
+})
+
+#define xchg_relaxed(ptr, x) \
+({ \
+    __typeof__(*(ptr)) x_ = (x); \
+    (__typeof__(*(ptr)))__xchg_generic(ptr, x_, sizeof(*(ptr)), "", "", ""); \
+})
+
+#define xchg_acquire(ptr, x) \
+({ \
+    __typeof__(*(ptr)) x_ = (x); \
+    (__typeof__(*(ptr)))__xchg_generic(ptr, x_, sizeof(*(ptr)), \
+                                       "", "", RISCV_ACQUIRE_BARRIER); \
+})
+
+#define xchg_release(ptr, x) \
+({ \
+    __typeof__(*(ptr)) x_ = (x); \
+    (__typeof__(*(ptr)))__xchg_generic(ptr, x_, sizeof(*(ptr)),\
+                                       "", RISCV_RELEASE_BARRIER, ""); \
+})
+
+#define xchg(ptr, x) \
+({ \
+    __typeof__(*(ptr)) ret__; \
+    ret__ = (__typeof__(*(ptr))) \
+            __xchg_generic(ptr, (unsigned long)(x), sizeof(*(ptr)), \
+                           ".aqrl", "", ""); \
+    ret__; \
+})
+
+#define __generic_cmpxchg(ptr, old, new, ret, lr_sfx, sc_sfx, release_barrier, acquire_barrier) \
+({ \
+    register unsigned int rc; \
+    asm volatile( \
+        release_barrier \
+        "0: lr" lr_sfx " %0, %2\n" \
+        "   bne  %0, %z3, 1f\n" \
+        "   sc" sc_sfx " %1, %z4, %2\n" \
+        "   bnez %1, 0b\n" \
+        acquire_barrier \
+        "1:\n" \
+        : "=&r" (ret), "=&r" (rc), "+A" (*ptr) \
+        : "rJ" (old), "rJ" (new) \
+        : "memory"); \
+})
+
+#define emulate_cmpxchg_1_2(ptr, old, new, ret, sc_sfx, release_barrier, acquire_barrier) \
+({ \
+    uint32_t *ptr_32b_aligned = (uint32_t *)ALIGN_DOWN((unsigned long)ptr, 4); \
+    uint8_t mask_l = ((unsigned long)(ptr) & (0x4 - sizeof(*ptr))) * BITS_PER_BYTE; \
+    uint8_t mask_size = sizeof(*ptr) * BITS_PER_BYTE; \
+    uint8_t mask_h = mask_l + mask_size - 1; \
+    unsigned long mask = GENMASK(mask_h, mask_l); \
+    unsigned long old_ = (unsigned long)(old) << mask_l; \
+    unsigned long new_ = (unsigned long)(new) << mask_l; \
+    unsigned long ret_; \
+    unsigned long rc; \
+    \
+    __asm__ __volatile__ ( \
+        release_barrier \
+        "0: lr.d %0, %2\n" \
+        "   and  %1, %0, %z5\n" \
+        "   bne  %1, %z3, 1f\n" \
+        "   and  %1, %0, %z6\n" \
+        "   or   %1, %1, %z4\n" \
+        "   sc.d" sc_sfx " %1, %1, %2\n" \
+        "   bnez %1, 0b\n" \
+        acquire_barrier \
+        "1:\n" \
+        : "=&r" (ret_), "=&r" (rc), "+A" (*ptr_32b_aligned) \
+        : "rJ" (old_), "rJ" (new_), \
+          "rJ" (mask), "rJ" (~mask) \
+        : "memory"); \
+    \
+    ret = (__typeof__(*(ptr)))((ret_ & mask) >> mask_l); \
+})
+
+/*
+ * Atomic compare and exchange.  Compare OLD with MEM, if identical,
+ * store NEW in MEM.  Return the initial value in MEM.  Success is
+ * indicated by comparing RETURN with OLD.
+ */
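+/*
+ * Illustrative use (the names below are hypothetical, not part of this
+ * header):
+ *     old = cmpxchg(&val, expected, desired);
+ *     success = (old == expected);
+ */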
+#define __cmpxchg_generic(ptr, old, new, size, sc_sfx, release_barrier, acquire_barrier) \
+({ \
+    __typeof__(ptr) ptr__ = (ptr); \
+    __typeof__(*(ptr)) old__ = (__typeof__(*(ptr)))(old); \
+    __typeof__(*(ptr)) new__ = (__typeof__(*(ptr)))(new); \
+    __typeof__(*(ptr)) ret__; \
+    switch (size) \
+    { \
+    case 1: \
+    case 2: \
+        emulate_cmpxchg_1_2(ptr__, old__, new__, ret__,\
+                            sc_sfx, release_barrier, acquire_barrier); \
+        break; \
+    case 4: \
+        __generic_cmpxchg(ptr__, old__, new__, ret__, \
+                          ".w", ".w"sc_sfx, release_barrier, acquire_barrier); \
+        break; \
+    case 8: \
+        __generic_cmpxchg(ptr__, old__, new__, ret__, \
+                          ".d", ".d"sc_sfx, release_barrier, acquire_barrier); \
+        break; \
+    default: \
+        STATIC_ASSERT_UNREACHABLE(); \
+    } \
+    ret__; \
+})
+
+#define cmpxchg_relaxed(ptr, o, n) \
+({ \
+    __typeof__(*(ptr)) o_ = (o); \
+    __typeof__(*(ptr)) n_ = (n); \
+    (__typeof__(*(ptr)))__cmpxchg_generic(ptr, \
+                    o_, n_, sizeof(*(ptr)), "", "", ""); \
+})
+
+#define cmpxchg_acquire(ptr, o, n) \
+({ \
+    __typeof__(*(ptr)) o_ = (o); \
+    __typeof__(*(ptr)) n_ = (n); \
+    (__typeof__(*(ptr)))__cmpxchg_generic(ptr, o_, n_, sizeof(*(ptr)), \
+                                          "", "", RISCV_ACQUIRE_BARRIER); \
+})
+
+#define cmpxchg_release(ptr, o, n) \
+({ \
+    __typeof__(*(ptr)) o_ = (o); \
+    __typeof__(*(ptr)) n_ = (n); \
+    (__typeof__(*(ptr)))__cmpxchg_generic(ptr, o_, n_, sizeof(*(ptr)), \
+                                          "", RISCV_RELEASE_BARRIER, ""); \
+})
+
+#define cmpxchg(ptr, o, n) \
+({ \
+    __typeof__(*(ptr)) ret__; \
+    ret__ = (__typeof__(*(ptr))) \
+            __cmpxchg_generic(ptr, (unsigned long)(o), (unsigned long)(n), \
+                              sizeof(*(ptr)), ".rl", "", " fence rw, rw\n"); \
+    ret__; \
+})
+
+#define __cmpxchg(ptr, o, n, s) \
+({ \
+    __typeof__(*(ptr)) ret__; \
+    ret__ = (__typeof__(*(ptr))) \
+            __cmpxchg_generic(ptr, (unsigned long)(o), (unsigned long)(n), \
+                              s, ".rl", "", " fence rw, rw\n"); \
+    ret__; \
+})
+
+#endif /* _ASM_RISCV_CMPXCHG_H */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
-- 
2.43.0

