This is an automated email from the ASF dual-hosted git repository.

pkarashchenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit b2f75c2f3dc6b307d15f7bf715a1bbca48f9e1b2
Author: Xiang Xiao <xiaoxi...@xiaomi.com>
AuthorDate: Sat Nov 18 20:13:27 2023 +0800

    spinlock: Move the inclusion of stdatomic.h to source file
    
    to make rwlock work with C++ source code
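
    Before this change the whole rwlock API was compiled out for C++
    translation units, because the header pulled in <stdatomic.h> (a C11
    header) and defined rwlock_t as atomic_int.  With rwlock_t now a plain
    int and the atomics confined to spinlock.c, a C++ file can include the
    header directly.  A minimal, hypothetical sketch of such a consumer
    (file and function names are illustrative only, assuming a NuttX build
    with CONFIG_RW_SPINLOCK=y):

        /* my_driver.cxx -- hypothetical C++ consumer, not part of this commit */

        #include <nuttx/config.h>
        #include <nuttx/spinlock.h>

        static rwlock_t g_lock = RW_SP_UNLOCKED;  /* plain int, no <stdatomic.h> needed */
        static int g_shared;

        extern "C" int my_driver_read(void)
        {
          read_lock(&g_lock);       /* shared: several readers may hold the lock */
          int value = g_shared;
          read_unlock(&g_lock);
          return value;
        }

        extern "C" void my_driver_write(int value)
        {
          write_lock(&g_lock);      /* exclusive: waits for all readers and writers */
          g_shared = value;
          write_unlock(&g_lock);
        }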
    
    Signed-off-by: Xiang Xiao <xiaoxi...@xiaomi.com>
---
 include/nuttx/spinlock.h   | 15 ++++++-------
 sched/semaphore/spinlock.c | 53 +++++++++++++++++++++++++++-------------------
 2 files changed, 38 insertions(+), 30 deletions(-)

diff --git a/include/nuttx/spinlock.h b/include/nuttx/spinlock.h
index 943932f241..380d550c3a 100644
--- a/include/nuttx/spinlock.h
+++ b/include/nuttx/spinlock.h
@@ -41,9 +41,8 @@ extern "C"
 #define EXTERN extern
 #endif
 
-#if defined(CONFIG_RW_SPINLOCK) && !defined(__cplusplus)
-#  include <stdatomic.h>
-typedef atomic_int rwlock_t;
+#if defined(CONFIG_RW_SPINLOCK)
+typedef int rwlock_t;
 #  define RW_SP_UNLOCKED      0
 #  define RW_SP_READ_LOCKED   1
 #  define RW_SP_WRITE_LOCKED -1
@@ -60,10 +59,10 @@ union spinlock_u
 {
   struct
   {
-    uint16_t owner;
-    uint16_t next;
+    unsigned short owner;
+    unsigned short next;
   } tickets;
-  uint32_t value;
+  unsigned int value;
 };
 typedef union spinlock_u spinlock_t;
 
@@ -505,7 +504,7 @@ void spin_unlock_irqrestore_wo_note(FAR spinlock_t *lock, irqstate_t flags);
 #  define spin_unlock_irqrestore_wo_note(l, f) up_irq_restore(f)
 #endif
 
-#if defined(CONFIG_RW_SPINLOCK) && !defined(__cplusplus)
+#if defined(CONFIG_RW_SPINLOCK)
 
 /****************************************************************************
  * Name: rwlock_init
@@ -813,7 +812,7 @@ void write_unlock_irqrestore(FAR rwlock_t *lock, irqstate_t flags);
 #  define write_unlock_irqrestore(l, f) up_irq_restore(f)
 #endif
 
-#endif /* CONFIG_RW_SPINLOCK && !__cplusplus */
+#endif /* CONFIG_RW_SPINLOCK */
 
 #undef EXTERN
 #if defined(__cplusplus)
diff --git a/sched/semaphore/spinlock.c b/sched/semaphore/spinlock.c
index ba92917f65..07c34c7790 100644
--- a/sched/semaphore/spinlock.c
+++ b/sched/semaphore/spinlock.c
@@ -25,6 +25,7 @@
 #include <nuttx/config.h>
 
 #include <sys/types.h>
+#include <stdatomic.h>
 #include <sched.h>
 #include <assert.h>
 
@@ -76,8 +77,9 @@ void spin_lock(FAR volatile spinlock_t *lock)
 #endif
 
 #ifdef CONFIG_TICKET_SPINLOCK
-  uint16_t ticket = atomic_fetch_add(&lock->tickets.next, 1);
-  while (atomic_load(&lock->tickets.owner) != ticket)
+  unsigned short ticket =
+    atomic_fetch_add((FAR atomic_ushort *)&lock->tickets.next, 1);
+  while (atomic_load((FAR atomic_ushort *)&lock->tickets.owner) != ticket)
 #else /* CONFIG_SPINLOCK */
   while (up_testset(lock) == SP_LOCKED)
 #endif
@@ -119,8 +121,9 @@ void spin_lock(FAR volatile spinlock_t *lock)
 void spin_lock_wo_note(FAR volatile spinlock_t *lock)
 {
 #ifdef CONFIG_TICKET_SPINLOCK
-  uint16_t ticket = atomic_fetch_add(&lock->tickets.next, 1);
-  while (atomic_load(&lock->tickets.owner) != ticket)
+  unsigned short ticket =
+    atomic_fetch_add((FAR atomic_ushort *)&lock->tickets.next, 1);
+  while (atomic_load((FAR atomic_ushort *)&lock->tickets.owner) != ticket)
 #else /* CONFIG_TICKET_SPINLOCK */
   while (up_testset(lock) == SP_LOCKED)
 #endif
@@ -160,7 +163,8 @@ bool spin_trylock(FAR volatile spinlock_t *lock)
 #endif
 
 #ifdef CONFIG_TICKET_SPINLOCK
-  uint16_t ticket = atomic_load(&lock->tickets.next);
+  unsigned short ticket =
+    atomic_load((FAR atomic_ushort *)&lock->tickets.next);
 
   spinlock_t old =
     {
@@ -176,7 +180,8 @@ bool spin_trylock(FAR volatile spinlock_t *lock)
         }
     };
 
-  if (!atomic_compare_exchange_strong(&lock->value, &old.value, new.value))
+  if (!atomic_compare_exchange_strong((FAR atomic_uint *)&lock->value,
+                                      &old.value, new.value))
 #else /* CONFIG_TICKET_SPINLOCK */
   if (up_testset(lock) == SP_LOCKED)
 #endif /* CONFIG_TICKET_SPINLOCK */
@@ -224,7 +229,8 @@ bool spin_trylock(FAR volatile spinlock_t *lock)
 bool spin_trylock_wo_note(FAR volatile spinlock_t *lock)
 {
 #ifdef CONFIG_TICKET_SPINLOCK
-  uint16_t ticket = atomic_load(&lock->tickets.next);
+  unsigned short ticket =
+    atomic_load((FAR atomic_ushort *)&lock->tickets.next);
 
   spinlock_t old =
     {
@@ -240,7 +246,8 @@ bool spin_trylock_wo_note(FAR volatile spinlock_t *lock)
         }
     };
 
-  if (!atomic_compare_exchange_strong(&lock->value, &old.value, new.value))
+  if (!atomic_compare_exchange_strong((FAR atomic_uint *)&lock->value,
+                                      &old.value, new.value))
 #else /* CONFIG_TICKET_SPINLOCK */
   if (up_testset(lock) == SP_LOCKED)
 #endif /* CONFIG_TICKET_SPINLOCK */
@@ -281,7 +288,7 @@ void spin_unlock(FAR volatile spinlock_t *lock)
 
   SP_DMB();
 #ifdef CONFIG_TICKET_SPINLOCK
-  atomic_fetch_add(&lock->tickets.owner, 1);
+  atomic_fetch_add((FAR atomic_ushort *)&lock->tickets.owner, 1);
 #else
   *lock = SP_UNLOCKED;
 #endif
@@ -314,7 +321,7 @@ void spin_unlock_wo_note(FAR volatile spinlock_t *lock)
 {
   SP_DMB();
 #ifdef CONFIG_TICKET_SPINLOCK
-  atomic_fetch_add(&lock->tickets.owner, 1);
+  atomic_fetch_add((FAR atomic_ushort *)&lock->tickets.owner, 1);
 #else
   *lock = SP_UNLOCKED;
 #endif
@@ -476,15 +483,15 @@ void read_lock(FAR volatile rwlock_t *lock)
 {
   while (true)
     {
-      int old = atomic_load(lock);
-
+      int old = atomic_load((FAR atomic_int *)lock);
       if (old <= RW_SP_WRITE_LOCKED)
         {
           DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
           SP_DSB();
           SP_WFE();
         }
-      else if(atomic_compare_exchange_strong(lock, &old, old + 1))
+      else if(atomic_compare_exchange_strong((FAR atomic_int *)lock,
+                                             &old, old + 1))
         {
           break;
         }
@@ -521,14 +528,14 @@ bool read_trylock(FAR volatile rwlock_t *lock)
 {
   while (true)
     {
-      int old = atomic_load(lock);
-
+      int old = atomic_load((FAR atomic_int *)lock);
       if (old <= RW_SP_WRITE_LOCKED)
         {
           DEBUGASSERT(old == RW_SP_WRITE_LOCKED);
           return false;
         }
-      else if (atomic_compare_exchange_strong(lock, &old, old + 1))
+      else if (atomic_compare_exchange_strong((FAR atomic_int *)lock,
+                                              &old, old + 1))
         {
           break;
         }
@@ -557,10 +564,10 @@ bool read_trylock(FAR volatile rwlock_t *lock)
 
 void read_unlock(FAR volatile rwlock_t *lock)
 {
-  DEBUGASSERT(atomic_load(lock) >= RW_SP_READ_LOCKED);
+  DEBUGASSERT(atomic_load((FAR atomic_int *)lock) >= RW_SP_READ_LOCKED);
 
   SP_DMB();
-  atomic_fetch_add(lock, -1);
+  atomic_fetch_sub((FAR atomic_int *)lock, 1);
   SP_DSB();
   SP_SEV();
 }
@@ -594,7 +601,8 @@ void write_lock(FAR volatile rwlock_t *lock)
 {
   int zero = RW_SP_UNLOCKED;
 
-  while (!atomic_compare_exchange_strong(lock, &zero, RW_SP_WRITE_LOCKED))
+  while (!atomic_compare_exchange_strong((FAR atomic_int *)lock,
+                                         &zero, RW_SP_WRITE_LOCKED))
     {
       SP_DSB();
       SP_WFE();
@@ -632,7 +640,8 @@ bool write_trylock(FAR volatile rwlock_t *lock)
 {
   int zero = RW_SP_UNLOCKED;
 
-  if (atomic_compare_exchange_strong(lock, &zero, RW_SP_WRITE_LOCKED))
+  if (atomic_compare_exchange_strong((FAR atomic_int *)lock,
+                                     &zero, RW_SP_WRITE_LOCKED))
     {
       SP_DMB();
       return true;
@@ -663,10 +672,10 @@ void write_unlock(FAR volatile rwlock_t *lock)
 {
   /* Ensure this cpu already get write lock */
 
-  DEBUGASSERT(atomic_load(lock) == RW_SP_WRITE_LOCKED);
+  DEBUGASSERT(atomic_load((FAR atomic_int *)lock) == RW_SP_WRITE_LOCKED);
 
   SP_DMB();
-  atomic_store(lock, RW_SP_UNLOCKED);
+  atomic_store((FAR atomic_int *)lock, RW_SP_UNLOCKED);
   SP_DSB();
   SP_SEV();
 }
