This is an automated email from the ASF dual-hosted git repository. jiuzhudong pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/nuttx.git
commit 30493d558a335710a98ac498e61d5c5f0f3285d6 Author: wangzhi16 <[email protected]> AuthorDate: Fri Dec 20 10:35:50 2024 +0800 sched/spinlock: Add time statistics in func enter_critical_section(). Because enter_critical_section() changes from calling spin_lock() to calling spin_lock_wo_note(), it is necessary to count the busywait time in enter_critical_section() additionally. Signed-off-by: wangzhi16 <[email protected]> --- include/nuttx/irq.h | 1 + include/nuttx/spinlock.h | 8 ++++++++ sched/irq/irq_csection.c | 12 ++++++++++++ sched/sched/sched_process_delivered.c | 11 +++++++++++ 4 files changed, 32 insertions(+) diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h index b1af8827e98..fb5906fa502 100644 --- a/include/nuttx/irq.h +++ b/include/nuttx/irq.h @@ -335,6 +335,7 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg); #ifdef CONFIG_IRQCOUNT # if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 || \ + CONFIG_SCHED_CRITMONITOR_MAXTIME_BUSYWAIT >= 0 || \ defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION) irqstate_t enter_critical_section(void) noinstrument_function; # else diff --git a/include/nuttx/spinlock.h b/include/nuttx/spinlock.h index fdb066d9f69..208686b5b55 100644 --- a/include/nuttx/spinlock.h +++ b/include/nuttx/spinlock.h @@ -491,10 +491,18 @@ irqstate_t spin_lock_irqsave(FAR volatile spinlock_t *lock) sched_note_spinlock_lock(lock); + /* If CONFIG_SCHED_CRITMONITOR_MAXTIME_BUSYWAIT >= 0, count busy-waiting. */ + + nxsched_critmon_busywait(true, return_address(0)); + /* Lock without trace note */ flags = spin_lock_irqsave_notrace(lock); + /* Get the lock, end counting busy-waiting */ + + nxsched_critmon_busywait(false, return_address(0)); + /* Notify that we have the spinlock */ sched_note_spinlock_locked(lock); diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c index 563107438a2..cee5da70840 100644 --- a/sched/irq/irq_csection.c +++ b/sched/irq/irq_csection.c @@ -290,8 +290,19 @@ irqstate_t enter_critical_section(void) { FAR struct tcb_s *rtcb; irqstate_t flags; + + /* If CONFIG_SCHED_CRITMONITOR_MAXTIME_BUSYWAIT >= 0, + * start counting time of busy-waiting. + */ + + nxsched_critmon_busywait(true, return_address(0)); + flags = enter_critical_section_notrace(); + /* Get the lock, end counting busy-waiting */ + + nxsched_critmon_busywait(false, return_address(0)); + if (!up_interrupt_context()) { rtcb = this_task(); @@ -446,6 +457,7 @@ void leave_critical_section_notrace(irqstate_t flags) #endif #if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 || \ + CONFIG_SCHED_CRITMONITOR_MAXTIME_BUSYWAIT >= 0 || \ defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION) void leave_critical_section(irqstate_t flags) { diff --git a/sched/sched/sched_process_delivered.c b/sched/sched/sched_process_delivered.c index c7b242b550b..663161de754 100644 --- a/sched/sched/sched_process_delivered.c +++ b/sched/sched/sched_process_delivered.c @@ -69,7 +69,18 @@ void nxsched_process_delivered(int cpu) if ((g_cpu_irqset & (1 << cpu)) == 0) { + /* If CONFIG_SCHED_CRITMONITOR_MAXTIME_BUSYWAIT >= 0, + * start counting time of busy-waiting. + */ + + nxsched_critmon_busywait(true, return_address(0)); + spin_lock_notrace(&g_cpu_irqlock); + + /* Get the lock, end counting busy-waiting */ + + nxsched_critmon_busywait(false, return_address(0)); + g_cpu_irqset |= (1 << cpu); }
