This is an automated email from the ASF dual-hosted git repository.

gustavonihei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nuttx.git

commit 3da0bb7829639f2b0d1e153c04a308e9b5001352
Author: zhuyanlin <zhuyanl...@xiaomi.com>
AuthorDate: Wed Aug 25 17:52:56 2021 +0800

    libc:machine: add common atomic operations.
    
    Add common atomic operations: spinlock-based implementations of the
    __atomic_compare_exchange and __atomic_fetch_add/sub/and/or/xor helpers
    for 1, 2, 4 and 8-byte types, enabled via CONFIG_LIBC_ARCH_ATOMIC.
    
    Change-Id: I9f6b891f1406d54871a3f50c217a1029b434d2e8
---
 libs/libc/machine/Kconfig       |   4 +
 libs/libc/machine/Make.defs     |   8 ++
 libs/libc/machine/arch_atomic.c | 283 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 295 insertions(+)

diff --git a/libs/libc/machine/Kconfig b/libs/libc/machine/Kconfig
index 8623e46..046afc6 100644
--- a/libs/libc/machine/Kconfig
+++ b/libs/libc/machine/Kconfig
@@ -44,6 +44,10 @@ config ARCH_ROMGETC
 # Default settings for C library functions that may be replaced with
 # architecture-specific versions.
 
+config LIBC_ARCH_ATOMIC
+       bool
+       default n
+
 config LIBC_ARCH_MEMCPY
        bool
        default n
diff --git a/libs/libc/machine/Make.defs b/libs/libc/machine/Make.defs
index 818dffb..6f654a3 100644
--- a/libs/libc/machine/Make.defs
+++ b/libs/libc/machine/Make.defs
@@ -18,6 +18,10 @@
 #
 ############################################################################
 
+ifeq ($(CONFIG_LIBC_ARCH_ATOMIC),y)
+  CSRCS += arch_atomic.c
+endif
+
 ifeq ($(CONFIG_ARCH_ARM),y)
 include $(TOPDIR)/libs/libc/machine/arm/Make.defs
 endif
@@ -36,3 +40,7 @@ endif
 ifeq ($(CONFIG_ARCH_RENESAS),y)
 include $(TOPDIR)/libs/libc/machine/renesas/Make.defs
 endif
+
+DEPPATH += --dep-path machine
+VPATH += :machine
+
diff --git a/libs/libc/machine/arch_atomic.c b/libs/libc/machine/arch_atomic.c
new file mode 100644
index 0000000..16e7495
--- /dev/null
+++ b/libs/libc/machine/arch_atomic.c
@@ -0,0 +1,283 @@
+/****************************************************************************
+ * libs/libc/machine/arch_atomic.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <nuttx/spinlock.h>
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define CMP_EXCHANGE(n, type)                             \
+                                                          \
+  bool __atomic_compare_exchange_ ## n (type *mem,        \
+                                        type *expect,     \
+                                        type desired,     \
+                                        int success,      \
+                                        int failure)      \
+  {                                                       \
+    bool ret = false;                                     \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);        \
+                                                          \
+    if (*mem == *expect)                                  \
+      {                                                   \
+        ret = true;                                       \
+        *mem = desired;                                   \
+      }                                                   \
+    else                                                  \
+      {                                                   \
+        *expect = *mem;                                   \
+      }                                                   \
+                                                          \
+    spin_unlock_irqrestore(NULL, irqstate);               \
+    return ret;                                           \
+  }
+
+#define FETCH_ADD(n, type)                                \
+                                                          \
+  type __atomic_fetch_add_ ## n (type *ptr,               \
+                                 type value,              \
+                                 int memorder)            \
+  {                                                       \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);        \
+    type ret = *ptr;                                      \
+                                                          \
+    *ptr = *ptr + value;                                  \
+                                                          \
+    spin_unlock_irqrestore(NULL, irqstate);               \
+    return ret;                                           \
+  }
+
+#define FETCH_SUB(n, type)                                \
+                                                          \
+  type __atomic_fetch_sub_ ## n (type *ptr,               \
+                                 type value,              \
+                                 int memorder)            \
+  {                                                       \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);        \
+    type ret = *ptr;                                      \
+                                                          \
+    *ptr = *ptr - value;                                  \
+                                                          \
+    spin_unlock_irqrestore(NULL, irqstate);               \
+    return ret;                                           \
+  }
+
+#define FETCH_AND(n, type)                                \
+                                                          \
+  type __atomic_fetch_and_ ## n (type *ptr,               \
+                                 type value,              \
+                                 int memorder)            \
+  {                                                       \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);        \
+    type ret = *ptr;                                      \
+                                                          \
+    *ptr = *ptr & value;                                  \
+                                                          \
+    spin_unlock_irqrestore(NULL, irqstate);               \
+    return ret;                                           \
+  }
+
+#define FETCH_OR(n, type)                                 \
+                                                          \
+  type __atomic_fetch_or_ ## n (type *ptr,                \
+                                type value,               \
+                                int memorder)             \
+  {                                                       \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);        \
+    type ret = *ptr;                                      \
+                                                          \
+    *ptr = *ptr | value;                                  \
+                                                          \
+    spin_unlock_irqrestore(NULL, irqstate);               \
+    return ret;                                           \
+  }
+
+#define FETCH_XOR(n, type)                                \
+                                                          \
+  type __atomic_fetch_xor_ ## n (type *ptr,               \
+                                 type value,              \
+                                 int memorder)            \
+  {                                                       \
+    irqstate_t irqstate = spin_lock_irqsave(NULL);        \
+    type ret = *ptr;                                      \
+                                                          \
+    *ptr = *ptr ^ value;                                  \
+                                                          \
+    spin_unlock_irqrestore(NULL, irqstate);               \
+    return ret;                                           \
+  }
+
+#pragma GCC diagnostic ignored "-Wbuiltin-declaration-mismatch"
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: __atomic_compare_exchange_1
+ ****************************************************************************/
+
+CMP_EXCHANGE(1, uint8_t)
+
+/****************************************************************************
+ * Name: __atomic_compare_exchange_2
+ ****************************************************************************/
+
+CMP_EXCHANGE(2, uint16_t)
+
+/****************************************************************************
+ * Name: __atomic_compare_exchange_4
+ ****************************************************************************/
+
+CMP_EXCHANGE(4, uint32_t)
+
+/****************************************************************************
+ * Name: __atomic_compare_exchange_8
+ ****************************************************************************/
+
+CMP_EXCHANGE(8, uint64_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_add_1
+ ****************************************************************************/
+
+FETCH_ADD(1, uint8_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_add_2
+ ****************************************************************************/
+
+FETCH_ADD(2, uint16_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_add_4
+ ****************************************************************************/
+
+FETCH_ADD(4, uint32_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_add_8
+ ****************************************************************************/
+
+FETCH_ADD(8, uint64_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_sub_1
+ ****************************************************************************/
+
+FETCH_SUB(1, uint8_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_sub_2
+ ****************************************************************************/
+
+FETCH_SUB(2, uint16_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_sub_4
+ ****************************************************************************/
+
+FETCH_SUB(4, uint32_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_sub_8
+ ****************************************************************************/
+
+FETCH_SUB(8, uint64_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_and_1
+ ****************************************************************************/
+
+FETCH_AND(1, uint8_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_and_2
+ ****************************************************************************/
+
+FETCH_AND(2, uint16_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_and_4
+ ****************************************************************************/
+
+FETCH_AND(4, uint32_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_and_8
+ ****************************************************************************/
+
+FETCH_AND(8, uint64_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_or_1
+ ****************************************************************************/
+
+FETCH_OR(1, uint8_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_or_2
+ ****************************************************************************/
+
+FETCH_OR(2, uint16_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_or_4
+ ****************************************************************************/
+
+FETCH_OR(4, uint32_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_or_8
+ ****************************************************************************/
+
+FETCH_OR(8, uint64_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_xor_1
+ ****************************************************************************/
+
+FETCH_XOR(1, uint8_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_xor_2
+ ****************************************************************************/
+
+FETCH_XOR(2, uint16_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_xor_4
+ ****************************************************************************/
+
+FETCH_XOR(4, uint32_t)
+
+/****************************************************************************
+ * Name: __atomic_fetch_xor_8
+ ****************************************************************************/
+
+FETCH_XOR(8, uint64_t)
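
For context, a minimal usage sketch (illustrative only, not part of the commit):
with CONFIG_LIBC_ARCH_ATOMIC=y on a target whose compiler cannot inline atomic
instructions, GCC lowers the __atomic_* builtins to out-of-line calls such as
__atomic_fetch_add_4() and __atomic_compare_exchange_4(), which the new
arch_atomic.c supplies. The names below (g_counter, bump_counter, try_claim)
are hypothetical.

#include <stdbool.h>
#include <stdint.h>

static uint32_t g_counter;

uint32_t bump_counter(void)
{
  /* May compile to a call to __atomic_fetch_add_4(&g_counter, 1, ...) */

  return __atomic_fetch_add(&g_counter, 1, __ATOMIC_SEQ_CST);
}

bool try_claim(uint32_t *slot, uint32_t expected, uint32_t desired)
{
  /* May compile to a call to __atomic_compare_exchange_4() */

  return __atomic_compare_exchange_n(slot, &expected, desired, false,
                                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}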
