This is an automated email from the ASF dual-hosted git repository.
xiaoxiang pushed a commit to branch releases/12.7
in repository https://gitbox.apache.org/repos/asf/nuttx.git
The following commit(s) were added to refs/heads/releases/12.7 by this push:
new 5f6eb292a8 sched: add nxsched_remove_self
5f6eb292a8 is described below
commit 5f6eb292a8040a4fa9b0c631142a3eb3f4d5f8c5
Author: hujun5 <[email protected]>
AuthorDate: Fri Jan 19 20:57:04 2024 +0800
sched: add nxsched_remove_self
reason:
In the scenario of active waiting, a context switch is inevitable, and we
can eliminate the redundant switch_needed judgment.
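For illustration, this is the pattern changed at each blocking call site
(condensed from the sem_wait.c hunk below; the other call sites are
analogous):

before:
    switch_needed = nxsched_remove_readytorun(rtcb, true);
    rtcb->task_state = TSTATE_WAIT_SEM;
    nxsched_add_prioritized(rtcb, SEM_WAITLIST(sem));
    if (switch_needed)
      {
        up_switch_context(this_task(), rtcb);
      }

after:
    nxsched_remove_self(rtcb);
    rtcb->task_state = TSTATE_WAIT_SEM;
    nxsched_add_prioritized(rtcb, SEM_WAITLIST(sem));
    up_switch_context(this_task(), rtcb);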
code size:

before:
hujun5@hujun5-OptiPlex-7070:~/downloads1/vela_sim/nuttx$ size nuttx
   text    data     bss     dec     hex filename
 262848   49985   63893  376726   5bf96 nuttx

after:
hujun5@hujun5-OptiPlex-7070:~/downloads1/vela_sim/nuttx$ size nuttx
   text    data     bss     dec     hex filename
 263324   49985   63893  377202   5c172 nuttx

Code size grows by 476 bytes (dec: 376726 -> 377202).
Configure NuttX and compile:
$ ./tools/configure.sh -l qemu-armv8a:nsh_smp
$ make
Run with qemu:
$ qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic \
-machine virt,virtualization=on,gic-version=3 \
-net none -chardev stdio,id=con,mux=on -serial chardev:con \
-mon chardev=con,mode=readline -kernel ./nuttx
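For reference, the new helper per configuration (condensed from the
sched.h and sched_removereadytorun.c hunks below):

    void nxsched_remove_self(FAR struct tcb_s *rtrtcb);

    /* !CONFIG_SMP: a thin wrapper around the existing removal logic */

    void nxsched_remove_self(FAR struct tcb_s *tcb)
    {
      nxsched_remove_readytorun(tcb, true);
    }

    /* CONFIG_SMP: the caller is known to be the running task, so the
     * switch_needed bookkeeping is dropped and pending tasks are merged.
     */

    void nxsched_remove_self(FAR struct tcb_s *tcb)
    {
      nxsched_remove_running(tcb);
      if (g_pendingtasks.head)
        {
          nxsched_merge_pending();
        }
    }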
Signed-off-by: hujun5 <[email protected]>
---
sched/mqueue/mq_rcvinternal.c | 12 +-
sched/mqueue/mq_sndinternal.c | 12 +-
sched/mqueue/msgrcv.c | 12 +-
sched/mqueue/msgsnd.c | 12 +-
sched/paging/pg_miss.c | 12 +-
sched/sched/sched.h | 1 +
sched/sched/sched_removereadytorun.c | 265 +++++++++++++++++++----------------
sched/semaphore/sem_wait.c | 12 +-
sched/signal/sig_suspend.c | 12 +-
sched/signal/sig_timedwait.c | 19 +--
sched/task/task_exit.c | 2 +-
11 files changed, 179 insertions(+), 192 deletions(-)
diff --git a/sched/mqueue/mq_rcvinternal.c b/sched/mqueue/mq_rcvinternal.c
index 58f138f5fd..f3861c1ca0 100644
--- a/sched/mqueue/mq_rcvinternal.c
+++ b/sched/mqueue/mq_rcvinternal.c
@@ -138,7 +138,6 @@ int nxmq_wait_receive(FAR struct mqueue_inode_s *msgq,
{
FAR struct mqueue_msg_s *newmsg;
FAR struct tcb_s *rtcb;
- bool switch_needed;
DEBUGASSERT(rcvmsg != NULL);
@@ -186,21 +185,18 @@ int nxmq_wait_receive(FAR struct mqueue_inode_s *msgq,
DEBUGASSERT(!is_idle_task(rtcb));
- /* Remove the tcb task from the ready-to-run list. */
+ /* Remove the tcb task from the running list. */
- switch_needed = nxsched_remove_readytorun(rtcb, true);
+ nxsched_remove_self(rtcb);
/* Add the task to the specified blocked task list */
rtcb->task_state = TSTATE_WAIT_MQNOTEMPTY;
nxsched_add_prioritized(rtcb, MQ_WNELIST(msgq->cmn));
- /* Now, perform the context switch if one is needed */
+ /* Now, perform the context switch */
- if (switch_needed)
- {
- up_switch_context(this_task(), rtcb);
- }
+ up_switch_context(this_task(), rtcb);
/* When we resume at this point, either (1) the message queue
* is no longer empty, or (2) the wait has been interrupted by
diff --git a/sched/mqueue/mq_sndinternal.c b/sched/mqueue/mq_sndinternal.c
index e034fc44d4..1e1214cedf 100644
--- a/sched/mqueue/mq_sndinternal.c
+++ b/sched/mqueue/mq_sndinternal.c
@@ -215,7 +215,6 @@ FAR struct mqueue_msg_s *nxmq_alloc_msg(void)
int nxmq_wait_send(FAR struct mqueue_inode_s *msgq, int oflags)
{
FAR struct tcb_s *rtcb;
- bool switch_needed;
#ifdef CONFIG_CANCELLATION_POINTS
/* nxmq_wait_send() is not a cancellation point, but may be called via
@@ -271,21 +270,18 @@ int nxmq_wait_send(FAR struct mqueue_inode_s *msgq, int oflags)
DEBUGASSERT(!is_idle_task(rtcb));
- /* Remove the tcb task from the ready-to-run list. */
+ /* Remove the tcb task from the running list. */
- switch_needed = nxsched_remove_readytorun(rtcb, true);
+ nxsched_remove_self(rtcb);
/* Add the task to the specified blocked task list */
rtcb->task_state = TSTATE_WAIT_MQNOTFULL;
nxsched_add_prioritized(rtcb, MQ_WNFLIST(msgq->cmn));
- /* Now, perform the context switch if one is needed */
+ /* Now, perform the context switch */
- if (switch_needed)
- {
- up_switch_context(this_task(), rtcb);
- }
+ up_switch_context(this_task(), rtcb);
/* When we resume at this point, either (1) the message queue
* is no longer empty, or (2) the wait has been interrupted by
diff --git a/sched/mqueue/msgrcv.c b/sched/mqueue/msgrcv.c
index 1a5cb4fec4..9085ee43fe 100644
--- a/sched/mqueue/msgrcv.c
+++ b/sched/mqueue/msgrcv.c
@@ -45,7 +45,6 @@ static int msgrcv_wait(FAR struct msgq_s *msgq, FAR struct msgbuf_s **rcvmsg,
FAR struct msgbuf_s *newmsg = NULL;
FAR struct msgbuf_s *tmp;
FAR struct tcb_s *rtcb;
- bool switch_needed;
#ifdef CONFIG_CANCELLATION_POINTS
/* msgrcv_wait() is not a cancellation point, but it may be called
@@ -129,21 +128,18 @@ static int msgrcv_wait(FAR struct msgq_s *msgq, FAR struct msgbuf_s **rcvmsg,
DEBUGASSERT(NULL != rtcb->flink);
- /* Remove the tcb task from the ready-to-run list. */
+ /* Remove the tcb task from the running list. */
- switch_needed = nxsched_remove_readytorun(rtcb, true);
+ nxsched_remove_self(rtcb);
/* Add the task to the specified blocked task list */
rtcb->task_state = TSTATE_WAIT_MQNOTEMPTY;
nxsched_add_prioritized(rtcb, MQ_WNELIST(msgq->cmn));
- /* Now, perform the context switch if one is needed */
+ /* Now, perform the context switch */
- if (switch_needed)
- {
- up_switch_context(this_task(), rtcb);
- }
+ up_switch_context(this_task(), rtcb);
/* When we resume at this point, either (1) the message queue
* is no longer empty, or (2) the wait has been interrupted by
diff --git a/sched/mqueue/msgsnd.c b/sched/mqueue/msgsnd.c
index ce783d03c6..9bbafeefe3 100644
--- a/sched/mqueue/msgsnd.c
+++ b/sched/mqueue/msgsnd.c
@@ -42,7 +42,6 @@
static int msgsnd_wait(FAR struct msgq_s *msgq, int msgflg)
{
FAR struct tcb_s *rtcb;
- bool switch_needed;
#ifdef CONFIG_CANCELLATION_POINTS
/* msgsnd_wait() is not a cancellation point, but may be called via
@@ -95,21 +94,18 @@ static int msgsnd_wait(FAR struct msgq_s *msgq, int msgflg)
DEBUGASSERT(NULL != rtcb->flink);
- /* Remove the tcb task from the ready-to-run list. */
+ /* Remove the tcb task from the running list. */
- switch_needed = nxsched_remove_readytorun(rtcb, true);
+ nxsched_remove_self(rtcb);
/* Add the task to the specified blocked task list */
rtcb->task_state = TSTATE_WAIT_MQNOTFULL;
nxsched_add_prioritized(rtcb, MQ_WNFLIST(msgq->cmn));
- /* Now, perform the context switch if one is needed */
+ /* Now, perform the context switch */
- if (switch_needed)
- {
- up_switch_context(this_task(), rtcb);
- }
+ up_switch_context(this_task(), rtcb);
/* When we resume at this point, either (1) the message queue
* is no longer empty, or (2) the wait has been interrupted by
diff --git a/sched/paging/pg_miss.c b/sched/paging/pg_miss.c
index be00f3b630..bf3f3767f0 100644
--- a/sched/paging/pg_miss.c
+++ b/sched/paging/pg_miss.c
@@ -112,7 +112,6 @@ void pg_miss(void)
{
FAR struct tcb_s *ftcb = this_task();
FAR struct tcb_s *wtcb;
- bool switch_needed;
/* Sanity checking
*
@@ -138,21 +137,18 @@ void pg_miss(void)
DEBUGASSERT(!is_idle_task(ftcb));
- /* Remove the tcb task from the ready-to-run list. */
+ /* Remove the tcb task from the running list. */
- switch_needed = nxsched_remove_readytorun(ftcb, true);
+ nxsched_remove_self(ftcb);
/* Add the task to the specified blocked task list */
ftcb->task_state = TSTATE_WAIT_PAGEFILL;
nxsched_add_prioritized(ftcb, list_waitingforfill());
- /* Now, perform the context switch if one is needed */
+ /* Now, perform the context switch */
- if (switch_needed)
- {
- up_switch_context(this_task(), ftcb);
- }
+ up_switch_context(this_task(), ftcb);
/* Boost the page fill worker thread priority.
* - Check the priority of the task at the head of the g_waitingforfill
diff --git a/sched/sched/sched.h b/sched/sched/sched.h
index a09f55a88c..a68353a9b0 100644
--- a/sched/sched/sched.h
+++ b/sched/sched/sched.h
@@ -321,6 +321,7 @@ int nxthread_create(FAR const char *name, uint8_t ttype, int priority,
bool nxsched_add_readytorun(FAR struct tcb_s *rtrtcb);
bool nxsched_remove_readytorun(FAR struct tcb_s *rtrtcb, bool merge);
+void nxsched_remove_self(FAR struct tcb_s *rtrtcb);
bool nxsched_add_prioritized(FAR struct tcb_s *tcb, DSEG dq_queue_t *list);
void nxsched_merge_prioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2,
uint8_t task_state);
diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c
index ef547447cc..923ec59d87 100644
--- a/sched/sched/sched_removereadytorun.c
+++ b/sched/sched/sched_removereadytorun.c
@@ -106,6 +106,11 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
return doswitch;
}
+
+void nxsched_remove_self(FAR struct tcb_s *tcb)
+{
+ nxsched_remove_readytorun(tcb, true);
+}
#endif /* !CONFIG_SMP */
/****************************************************************************
@@ -132,20 +137,22 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
****************************************************************************/
#ifdef CONFIG_SMP
-bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
+void nxsched_remove_running(FAR struct tcb_s *tcb)
{
FAR dq_queue_t *tasklist;
- bool doswitch = false;
+ FAR struct tcb_s *nxttcb;
+ FAR struct tcb_s *rtrtcb = NULL;
int cpu;
/* Which CPU (if any) is the task running on? Which task list holds the
* TCB?
*/
- cpu = rtcb->cpu;
- tasklist = TLIST_HEAD(rtcb, cpu);
+ DEBUGASSERT(tcb->task_state == TSTATE_TASK_RUNNING);
+ cpu = tcb->cpu;
+ tasklist = &g_assignedtasks[cpu];
- /* Check if the TCB to be removed is at the head of a ready-to-run list.
+ /* Check if the TCB to be removed is at the head of a running list.
* For the case of SMP, there are two lists involved: (1) the
* g_readytorun list that holds non-running tasks that have not been
* assigned to a CPU, and (2) the g_assignedtasks[] lists which hold
@@ -153,172 +160,186 @@ bool nxsched_remove_readytorun(FAR struct tcb_s *rtcb, bool merge)
* that CPU. Only this latter list contains the currently active task, and
* only removing the head of that list can result in a context switch.
*
- * rtcb->blink == NULL will tell us if the TCB is at the head of the
- * ready-to-run list and, hence, a candidate for the new running task.
+ * tcb->blink == NULL will tell us if the TCB is at the head of the
+ * running list and, hence, a candidate for the new running task.
*
* If so, then the tasklist RUNNABLE attribute will inform us if the list
* holds the currently executing task and, hence, if a context switch
* should occur.
*/
- if (rtcb->blink == NULL && TLIST_ISRUNNABLE(rtcb->task_state))
- {
- FAR struct tcb_s *nxttcb;
- FAR struct tcb_s *rtrtcb = NULL;
- int me;
+ DEBUGASSERT(tcb->blink == NULL);
+ DEBUGASSERT(TLIST_ISRUNNABLE(tcb->task_state));
- /* There must always be at least one task in the list (the IDLE task)
- * after the TCB being removed.
- */
+ /* There must always be at least one task in the list (the IDLE task)
+ * after the TCB being removed.
+ */
- nxttcb = rtcb->flink;
- DEBUGASSERT(nxttcb != NULL);
+ nxttcb = tcb->flink;
+ DEBUGASSERT(nxttcb != NULL);
- /* If we are modifying the head of some assigned task list other than
- * our own, we will need to stop that CPU.
- */
+ /* The task is running but the CPU that it was running on has been
+ * paused. We can now safely remove its TCB from the running
+ * task list. In the SMP case this may be either the g_readytorun()
+ * or the g_assignedtasks[cpu] list.
+ */
- me = this_cpu();
- if (cpu != me)
- {
- DEBUGVERIFY(up_cpu_pause(cpu));
- }
+ dq_rem_head((FAR dq_entry_t *)tcb, tasklist);
- /* The task is running but the CPU that it was running on has been
- * paused. We can now safely remove its TCB from the ready-to-run
- * task list. In the SMP case this may be either the g_readytorun()
- * or the g_assignedtasks[cpu] list.
- */
+ /* Find the highest priority non-running, non-idle tasks in the
+ * g_assignedtasks lists of other CPUs and place them in the
+ * g_readytorun list, so as to find the task with the highest
+ * priority globally.
+ */
- dq_rem_head((FAR dq_entry_t *)rtcb, tasklist);
+ for (int i = 0; i < CONFIG_SMP_NCPUS; i++)
+ {
+ if (i == cpu)
+ {
+ /* The highest priority task of the current
+ * CPU has been found, which is nxttcb.
+ */
- /* Find the highest priority non-running tasks in the g_assignedtasks
- * list of other CPUs, and also non-idle tasks, place them in the
- * g_readytorun list. so as to find the task with the highest priority,
- * globally
- */
+ continue;
+ }
- for (int i = 0; i < CONFIG_SMP_NCPUS; i++)
+ for (rtrtcb = (FAR struct tcb_s *)g_assignedtasks[i].head;
+ !is_idle_task(rtrtcb); rtrtcb = rtrtcb->flink)
{
- if (i == cpu)
+ if (rtrtcb->task_state != TSTATE_TASK_RUNNING &&
+ CPU_ISSET(cpu, &rtrtcb->affinity))
{
- /* The highest priority task of the current
- * CPU has been found, which is nxttcb.
+ /* We have found the task with the highest priority whose
+ * CPU index is i. Since this task must be between the two
+ * tasks, we can use the dq_rem_mid macro to delete it.
*/
- continue;
- }
+ dq_rem_mid(rtrtcb);
+ rtrtcb->task_state = TSTATE_TASK_READYTORUN;
- for (rtrtcb = (FAR struct tcb_s *)g_assignedtasks[i].head;
- !is_idle_task(rtrtcb); rtrtcb = rtrtcb->flink)
- {
- if (rtrtcb->task_state != TSTATE_TASK_RUNNING &&
- CPU_ISSET(cpu, &rtrtcb->affinity))
- {
- /* We have found the task with the highest priority whose
- * CPU index is i. Since this task must be between the two
- * tasks, we can use the dq_rem_mid macro to delete it.
- */
-
- dq_rem_mid(rtrtcb);
- rtrtcb->task_state = TSTATE_TASK_READYTORUN;
-
- /* Add rtrtcb to g_readytorun to find
- * the task with the highest global priority
- */
-
- nxsched_add_prioritized(rtrtcb, &g_readytorun);
- break;
- }
+ /* Add rtrtcb to g_readytorun to find
+ * the task with the highest global priority
+ */
+
+ nxsched_add_prioritized(rtrtcb, &g_readytorun);
+ break;
}
}
+ }
- /* Which task will go at the head of the list? It will be either the
- * next tcb in the assigned task list (nxttcb) or a TCB in the
- * g_readytorun list. We can only select a task from that list if
- * the affinity mask includes the current CPU.
- */
+ /* Which task will go at the head of the list? It will be either the
+ * next tcb in the assigned task list (nxttcb) or a TCB in the
+ * g_readytorun list. We can only select a task from that list if
+ * the affinity mask includes the current CPU.
+ */
- /* Search for the highest priority task that can run on this
- * CPU.
- */
+ /* Search for the highest priority task that can run on this
+ * CPU.
+ */
- for (rtrtcb = (FAR struct tcb_s *)list_readytorun()->head;
- rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity);
- rtrtcb = rtrtcb->flink);
+ for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head;
+ rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity);
+ rtrtcb = rtrtcb->flink);
+
+ /* Did we find a task in the g_readytorun list? Which task should
+ * we use? We decide strictly by the priority of the two tasks:
+ * Either (1) the task currently at the head of the
+ * g_assignedtasks[cpu] list (nxttcb) or (2) the highest priority
+ * task from the g_readytorun list with matching affinity (rtrtcb).
+ */
- /* Did we find a task in the g_readytorun list? Which task should
- * we use? We decide strictly by the priority of the two tasks:
- * Either (1) the task currently at the head of the
- * g_assignedtasks[cpu] list (nexttcb) or (2) the highest priority
- * task from the g_readytorun list with matching affinity (rtrtcb).
+ if (rtrtcb != NULL && rtrtcb->sched_priority >= nxttcb->sched_priority)
+ {
+ /* The TCB rtrtcb has the higher priority and it can be run on
+ * target CPU. Remove that task (rtrtcb) from the g_readytorun
+ * list and add to the head of the g_assignedtasks[cpu] list.
*/
- if (rtrtcb != NULL && rtrtcb->sched_priority >= nxttcb->sched_priority)
- {
- /* The TCB rtrtcb has the higher priority and it can be run on
- * target CPU. Remove that task (rtrtcb) from the g_readytorun
- * list and add to the head of the g_assignedtasks[cpu] list.
- */
+ dq_rem((FAR dq_entry_t *)rtrtcb, &g_readytorun);
+ dq_addfirst_nonempty((FAR dq_entry_t *)rtrtcb, tasklist);
- dq_rem((FAR dq_entry_t *)rtrtcb, list_readytorun());
- dq_addfirst_nonempty((FAR dq_entry_t *)rtrtcb, tasklist);
+ rtrtcb->cpu = cpu;
+ nxttcb = rtrtcb;
+ }
- rtrtcb->cpu = cpu;
- nxttcb = rtrtcb;
- }
+ /* Will pre-emption be disabled after the switch? If the lockcount is
+ * greater than zero, then this task/this CPU holds the scheduler lock.
+ */
- /* Will pre-emption be disabled after the switch? If the lockcount is
- * greater than zero, then this task/this CPU holds the scheduler lock.
- */
+ if (nxttcb->lockcount > 0)
+ {
+ /* Yes... make sure that scheduling logic knows about this */
- if (nxttcb->lockcount > 0)
- {
- /* Yes... make sure that scheduling logic knows about this */
+ g_cpu_lockset |= (1 << cpu);
+ }
+ else
+ {
+ /* No.. we may need to release our hold on the lock. */
- g_cpu_lockset |= (1 << cpu);
- }
- else
- {
- /* No.. we may need to perform release our hold on the lock. */
+ g_cpu_lockset &= ~(1 << cpu);
+ }
- g_cpu_lockset &= ~(1 << cpu);
- }
+ /* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ
+ * controls will be done in the pause handler on the new CPU(cpu).
+ * If the task is scheduled on this CPU(me), do nothing because
+ * this CPU already has a critical section
+ */
- /* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ
- * controls will be done in the pause handler on the new CPU(cpu).
- * If the task is scheduled on this CPU(me), do nothing because
- * this CPU already has a critical section
- */
+ nxttcb->task_state = TSTATE_TASK_RUNNING;
- nxttcb->task_state = TSTATE_TASK_RUNNING;
+ /* Since the TCB is no longer in any list, it is now invalid */
- /* All done, restart the other CPU (if it was paused). */
+ tcb->task_state = TSTATE_TASK_INVALID;
+}
- doswitch = true;
+void nxsched_remove_self(FAR struct tcb_s *tcb)
+{
+ nxsched_remove_running(tcb);
+ if (g_pendingtasks.head)
+ {
+ nxsched_merge_pending();
+ }
+}
+
+bool nxsched_remove_readytorun(FAR struct tcb_s *tcb, bool merge)
+{
+ bool doswitch = false;
+
+ if (tcb->task_state == TSTATE_TASK_RUNNING)
+ {
+ int me = this_cpu();
+ int cpu = tcb->cpu;
if (cpu != me)
{
- /* In this we will not want to report a context switch to this
- * CPU. Only the other CPU is affected.
- */
-
- DEBUGVERIFY(up_cpu_resume(cpu));
- doswitch = false;
+ up_cpu_pause(tcb->cpu);
+ nxsched_remove_running(tcb);
+ up_cpu_resume(tcb->cpu);
+ }
+ else
+ {
+ nxsched_remove_running(tcb);
+ doswitch = true;
}
}
else
{
- /* The task is not running. Just remove its TCB from the ready-to-run
+ FAR dq_queue_t *tasklist;
+
+ tasklist = TLIST_HEAD(tcb, tcb->cpu);
+
+ DEBUGASSERT(tcb->task_state != TSTATE_TASK_RUNNING);
+
+ /* The task is not running. Just remove its TCB from the task
* list. In the SMP case this may be either the g_readytorun() or the
* g_assignedtasks[cpu] list.
*/
- dq_rem((FAR dq_entry_t *)rtcb, tasklist);
- }
+ dq_rem((FAR dq_entry_t *)tcb, tasklist);
- /* Since the TCB is no longer in any list, it is now invalid */
+ /* Since the TCB is no longer in any list, it is now invalid */
- rtcb->task_state = TSTATE_TASK_INVALID;
+ tcb->task_state = TSTATE_TASK_INVALID;
+ }
if (list_pendingtasks()->head && merge)
{
diff --git a/sched/semaphore/sem_wait.c b/sched/semaphore/sem_wait.c
index d515ce6ddd..7ea4d36480 100644
--- a/sched/semaphore/sem_wait.c
+++ b/sched/semaphore/sem_wait.c
@@ -73,7 +73,6 @@ int nxsem_wait(FAR sem_t *sem)
{
FAR struct tcb_s *rtcb = this_task();
irqstate_t flags;
- bool switch_needed;
int ret;
/* This API should not be called from interrupt handlers & idleloop */
@@ -168,21 +167,18 @@ int nxsem_wait(FAR sem_t *sem)
DEBUGASSERT(!is_idle_task(rtcb));
- /* Remove the tcb task from the ready-to-run list. */
+ /* Remove the tcb task from the running list. */
- switch_needed = nxsched_remove_readytorun(rtcb, true);
+ nxsched_remove_self(rtcb);
/* Add the task to the specified blocked task list */
rtcb->task_state = TSTATE_WAIT_SEM;
nxsched_add_prioritized(rtcb, SEM_WAITLIST(sem));
- /* Now, perform the context switch if one is needed */
+ /* Now, perform the context switch */
- if (switch_needed)
- {
- up_switch_context(this_task(), rtcb);
- }
+ up_switch_context(this_task(), rtcb);
/* When we resume at this point, either (1) the semaphore has been
* assigned to this thread of execution, or (2) the semaphore wait
diff --git a/sched/signal/sig_suspend.c b/sched/signal/sig_suspend.c
index 206906cce9..397daa8519 100644
--- a/sched/signal/sig_suspend.c
+++ b/sched/signal/sig_suspend.c
@@ -82,7 +82,6 @@ int sigsuspend(FAR const sigset_t *set)
FAR struct tcb_s *rtcb = this_task();
sigset_t saved_sigprocmask;
irqstate_t flags;
- bool switch_needed;
/* sigsuspend() is a cancellation point */
@@ -127,21 +126,18 @@ int sigsuspend(FAR const sigset_t *set)
DEBUGASSERT(!is_idle_task(rtcb));
- /* Remove the tcb task from the ready-to-run list. */
+ /* Remove the tcb task from the running list. */
- switch_needed = nxsched_remove_readytorun(rtcb, true);
+ nxsched_remove_self(rtcb);
/* Add the task to the specified blocked task list */
rtcb->task_state = TSTATE_WAIT_SIG;
dq_addlast((FAR dq_entry_t *)rtcb, list_waitingforsignal());
- /* Now, perform the context switch if one is needed */
+ /* Now, perform the context switch */
- if (switch_needed)
- {
- up_switch_context(this_task(), rtcb);
- }
+ up_switch_context(this_task(), rtcb);
/* We are running again, restore the original sigprocmask */
diff --git a/sched/signal/sig_timedwait.c b/sched/signal/sig_timedwait.c
index aef4a6d367..3d5873a1e1 100644
--- a/sched/signal/sig_timedwait.c
+++ b/sched/signal/sig_timedwait.c
@@ -251,7 +251,6 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
FAR sigpendq_t *sigpend;
irqstate_t flags;
sclock_t waitticks;
- bool switch_needed;
siginfo_t unbinfo;
int ret;
@@ -364,7 +363,7 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
/* Remove the tcb task from the ready-to-run list. */
- switch_needed = nxsched_remove_readytorun(rtcb, true);
+ nxsched_remove_self(rtcb);
/* Add the task to the specified blocked task list */
@@ -373,10 +372,7 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
/* Now, perform the context switch if one is needed */
- if (switch_needed)
- {
- up_switch_context(this_task(), rtcb);
- }
+ up_switch_context(this_task(), rtcb);
/* We no longer need the watchdog */
@@ -406,21 +402,18 @@ int nxsig_timedwait(FAR const sigset_t *set, FAR struct siginfo *info,
DEBUGASSERT(!is_idle_task(rtcb));
- /* Remove the tcb task from the ready-to-run list. */
+ /* Remove the tcb task from the running list. */
- switch_needed = nxsched_remove_readytorun(rtcb, true);
+ nxsched_remove_self(rtcb);
/* Add the task to the specified blocked task list */
rtcb->task_state = TSTATE_WAIT_SIG;
dq_addlast((FAR dq_entry_t *)rtcb, list_waitingforsignal());
- /* Now, perform the context switch if one is needed */
+ /* Now, perform the context switch */
- if (switch_needed)
- {
- up_switch_context(this_task(), rtcb);
- }
+ up_switch_context(this_task(), rtcb);
}
/* We are running again, clear the sigwaitmask */
diff --git a/sched/task/task_exit.c b/sched/task/task_exit.c
index a01bafab34..3dc03fa41e 100644
--- a/sched/task/task_exit.c
+++ b/sched/task/task_exit.c
@@ -110,7 +110,7 @@ int nxtask_exit(void)
* ready-to-run with state == TSTATE_TASK_RUNNING
*/
- nxsched_remove_readytorun(dtcb, true);
+ nxsched_remove_self(dtcb);
/* Get the new task at the head of the ready to run list */