This is an automated email from the ASF dual-hosted git repository.

ligd pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git
commit b2fadb8529e48526dcec134bbd1ac0a267650f2b
Author: ouyangxiangzhen <[email protected]>
AuthorDate: Wed Jan 7 20:34:14 2026 +0800

    sched/hrtimer: Fix functional correctness issue in synchronization.

    On a 32-bit platform with 64-bit pointers, reads and writes of
    `g_hrtimer_running` entries are not atomic, so we protect them with
    the seqcount to ensure that the value we read is correct.

    Signed-off-by: ouyangxiangzhen <[email protected]>
---
 sched/hrtimer/hrtimer.h            | 76 +++++++++++++++++++++++++++++++++++---
 sched/hrtimer/hrtimer_cancel.c     |  4 +-
 sched/hrtimer/hrtimer_initialize.c |  4 +-
 sched/hrtimer/hrtimer_process.c    | 10 +++--
 sched/hrtimer/hrtimer_start.c      |  4 +-
 5 files changed, 82 insertions(+), 16 deletions(-)

diff --git a/sched/hrtimer/hrtimer.h b/sched/hrtimer/hrtimer.h
index ff1d40a947a..f3d6e80a2aa 100644
--- a/sched/hrtimer/hrtimer.h
+++ b/sched/hrtimer/hrtimer.h
@@ -31,6 +31,7 @@
 #include <nuttx/arch.h>
 #include <nuttx/clock.h>
 #include <nuttx/hrtimer.h>
+#include <nuttx/seqlock.h>
 
 #ifdef CONFIG_HRTIMER
 
@@ -52,9 +53,9 @@ RB_HEAD(hrtimer_tree_s, hrtimer_node_s);
  * Public Data
  ****************************************************************************/
 
-/* Spinlock protecting access to the hrtimer container and timer state */
+/* Seqcount protecting access to the hrtimer queue and timer state */
 
-extern spinlock_t g_hrtimer_spinlock;
+extern seqcount_t g_hrtimer_lock;
 
 #ifdef CONFIG_HRTIMER_TREE
 /* Red-Black tree containing all active high-resolution timers */
@@ -310,6 +311,72 @@ static inline_function bool hrtimer_is_first(FAR hrtimer_t *hrtimer)
 #endif
 }
 
+/****************************************************************************
+ * Name: hrtimer_read_64/32
+ *
+ * Description:
+ *   Internal function to read the value in the queue atomically.
+ *   Do not use this function if you are not sure about the thread
+ *   safety of the value you are reading.
+ *
+ * Input Parameters:
+ *   ptr - The pointer to be read.
+ *
+ * Returned Value:
+ *   The value in the queue.
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_ARCH_64BIT
+/* On 64-bit architectures, a uint64_t read/write is atomic. */
+#  define hrtimer_read_64(ptr) (*(FAR volatile uint64_t *)(ptr))
+#else
+static inline_function
+uint64_t hrtimer_read_64(FAR const uint64_t *ptr)
+{
+  uint64_t val;
+  uint32_t seq;
+
+  do
+    {
+      seq = read_seqbegin(&g_hrtimer_lock);
+      val = *ptr;
+    }
+  while (read_seqretry(&g_hrtimer_lock, seq));
+
+  return val;
+}
+#endif
+
+#if UINT_MAX >= UINT32_MAX
+/* On 32- and 64-bit architectures, a uint32_t read/write is atomic. */
+#  define hrtimer_read_32(ptr) (*(FAR volatile uint32_t *)(ptr))
+#else
+static inline_function
+uint32_t hrtimer_read_32(FAR const uint32_t *ptr)
+{
+  uint32_t val;
+  uint32_t seq;
+
+  do
+    {
+      seq = read_seqbegin(&g_hrtimer_lock);
+      val = *ptr;
+    }
+  while (read_seqretry(&g_hrtimer_lock, seq));
+
+  return val;
+}
+#endif
+
+/* Generic macro to read a value in the queue atomically. */
+
+#define hrtimer_read(ptr) \
+  (sizeof(*(ptr)) == 8u ? \
+   hrtimer_read_64((FAR const uint64_t *)(ptr)) : \
+   (sizeof(*(ptr)) == 4u ? \
+    hrtimer_read_32((FAR const uint32_t *)(ptr)) : 0u))
+
 /****************************************************************************
  * Name: hrtimer_mark_running
  *
@@ -349,14 +416,11 @@ static inline_function bool hrtimer_is_first(FAR hrtimer_t *hrtimer)
  * Returned Value:
  *   true if the CPU core is running the timer, false otherwise.
  *
- * Assumption:
- *   The caller must hold the lock.
- *
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
 #  define hrtimer_is_running(timer, cpu) \
-     (g_hrtimer_running[cpu] == (uintptr_t)(timer))
+     (hrtimer_read(&g_hrtimer_running[cpu]) == (uintptr_t)(timer))
 #else
 #  define hrtimer_is_running(timer, cpu) (true)
 #endif
diff --git a/sched/hrtimer/hrtimer_cancel.c b/sched/hrtimer/hrtimer_cancel.c
index 43a6d2f371e..8cf5a322606 100644
--- a/sched/hrtimer/hrtimer_cancel.c
+++ b/sched/hrtimer/hrtimer_cancel.c
@@ -130,7 +130,7 @@ int hrtimer_cancel(FAR hrtimer_t *hrtimer)
 
   /* Enter critical section to protect the hrtimer container */
 
-  flags = spin_lock_irqsave(&g_hrtimer_spinlock);
+  flags = write_seqlock_irqsave(&g_hrtimer_lock);
 
   /* Ensure no core can write the hrtimer. */
 
@@ -154,7 +154,7 @@ int hrtimer_cancel(FAR hrtimer_t *hrtimer)
 
   /* Leave critical section */
 
-  spin_unlock_irqrestore(&g_hrtimer_spinlock, flags);
+  write_sequnlock_irqrestore(&g_hrtimer_lock, flags);
 
   return ret;
 }
diff --git a/sched/hrtimer/hrtimer_initialize.c b/sched/hrtimer/hrtimer_initialize.c
index 43010f074d2..c097f9f99ae 100644
--- a/sched/hrtimer/hrtimer_initialize.c
+++ b/sched/hrtimer/hrtimer_initialize.c
@@ -47,7 +47,7 @@ uintptr_t g_hrtimer_running[CONFIG_SMP_NCPUS];
  * timer container is modified.
  */
 
-spinlock_t g_hrtimer_spinlock = SP_UNLOCKED;
+seqcount_t g_hrtimer_lock = SEQLOCK_INITIALIZER;
 
 /* Container for all active high-resolution timers.
  *
@@ -89,7 +89,7 @@ struct list_node g_hrtimer_list = LIST_INITIAL_VALUE(g_hrtimer_list);
  *   - The tree key is the absolute expiration time stored in
  *     hrtimer_node_s and compared via hrtimer_compare().
  *   - All accesses to the tree must be serialized using
- *     g_hrtimer_spinlock.
+ *     g_hrtimer_lock.
  *   - These generated functions are used internally by the hrtimer
  *     core (e.g., hrtimer_start(), hrtimer_cancel(), and expire paths).
  ****************************************************************************/
diff --git a/sched/hrtimer/hrtimer_process.c b/sched/hrtimer/hrtimer_process.c
index 97266cacf8c..9111d3991e8 100644
--- a/sched/hrtimer/hrtimer_process.c
+++ b/sched/hrtimer/hrtimer_process.c
@@ -79,7 +79,7 @@ void hrtimer_process(uint64_t now)
 
   /* Lock the hrtimer container to protect access */
 
-  flags = spin_lock_irqsave(&g_hrtimer_spinlock);
+  flags = write_seqlock_irqsave(&g_hrtimer_lock);
 
   /* Fetch the earliest active timer */
 
@@ -110,7 +110,7 @@ void hrtimer_process(uint64_t now)
 
       /* Leave critical section before invoking the callback */
 
-      spin_unlock_irqrestore(&g_hrtimer_spinlock, flags);
+      write_sequnlock_irqrestore(&g_hrtimer_lock, flags);
 
       /* Invoke the timer callback */
 
@@ -118,7 +118,9 @@
 
       /* Re-enter critical section to update timer state */
 
-      flags = spin_lock_irqsave(&g_hrtimer_spinlock);
+      flags = write_seqlock_irqsave(&g_hrtimer_lock);
+
+      hrtimer_mark_running(NULL, cpu);
 
       /* If the timer is periodic and has not been rearmed or
        * cancelled concurrently,
@@ -155,5 +157,5 @@ void hrtimer_process(uint64_t now)
 
   /* Leave critical section */
 
-  spin_unlock_irqrestore(&g_hrtimer_spinlock, flags);
+  write_sequnlock_irqrestore(&g_hrtimer_lock, flags);
 }
diff --git a/sched/hrtimer/hrtimer_start.c b/sched/hrtimer/hrtimer_start.c
index 61ed886dd39..86b37f3cce2 100644
--- a/sched/hrtimer/hrtimer_start.c
+++ b/sched/hrtimer/hrtimer_start.c
@@ -71,7 +71,7 @@ int hrtimer_start(FAR hrtimer_t *hrtimer, hrtimer_entry_t func,
 
   /* Protect container manipulation with spinlock and disable interrupts */
 
-  flags = spin_lock_irqsave(&g_hrtimer_spinlock);
+  flags = write_seqlock_irqsave(&g_hrtimer_lock);
 
   /* Ensure no core can write the hrtimer. */
 
@@ -112,7 +112,7 @@ int hrtimer_start(FAR hrtimer_t *hrtimer, hrtimer_entry_t func,
 
   /* Release spinlock and restore interrupts */
 
-  spin_unlock_irqrestore(&g_hrtimer_spinlock, flags);
+  write_sequnlock_irqrestore(&g_hrtimer_lock, flags);
 
   return ret;
 }
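
For readers who want to see the locking pattern in isolation, below is a minimal
sketch of how the writer and reader sides introduced by this patch pair up. It
uses only API names that already appear in the diff (seqcount_t,
SEQLOCK_INITIALIZER, write_seqlock_irqsave, write_sequnlock_irqrestore,
read_seqbegin, read_seqretry), assumes a NuttX build environment providing
<nuttx/seqlock.h> and <nuttx/irq.h>, and assumes write_seqlock_irqsave() hands
back an irqstate_t as the existing flags usage in the diff implies. The
g_demo_*/demo_* names are hypothetical stand-ins for the real hrtimer state, so
treat this as an illustration of the retry loop, not the actual hrtimer code
paths.

#include <stdint.h>

#include <nuttx/irq.h>
#include <nuttx/seqlock.h>

static seqcount_t g_demo_lock = SEQLOCK_INITIALIZER;
static uint64_t g_demo_value;        /* 64-bit state shared across CPUs */

/* Writer side: as in hrtimer_start()/hrtimer_process(), the shared state is
 * modified only between write_seqlock_irqsave() and
 * write_sequnlock_irqrestore(), which bump the sequence count around the
 * (possibly torn) 64-bit store.
 */

static void demo_update(uint64_t newval)
{
  irqstate_t flags;

  flags = write_seqlock_irqsave(&g_demo_lock);
  g_demo_value = newval;             /* Not atomic on a 32-bit CPU */
  write_sequnlock_irqrestore(&g_demo_lock, flags);
}

/* Reader side: as in hrtimer_read_64(), sample the sequence count, copy the
 * value, and retry if the count changed or a write was in progress, so a
 * torn read is never returned.
 */

static uint64_t demo_read(void)
{
  uint64_t val;
  uint32_t seq;

  do
    {
      seq = read_seqbegin(&g_demo_lock);
      val = g_demo_value;
    }
  while (read_seqretry(&g_demo_lock, seq));

  return val;
}

The reader never blocks the writer; it simply loops until it observes a stable
sequence count, which is why hrtimer_is_running() can now sample
g_hrtimer_running[cpu] through hrtimer_read() without holding the lock.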
