xiaoxiang781216 commented on code in PR #16231: URL: https://github.com/apache/nuttx/pull/16231#discussion_r2072976590
########## sched/wqueue/wqueue.h: ########## @@ -66,14 +66,15 @@ struct kworker_s struct kwork_wqueue_s { - struct list_node q; /* The queue of pending work */ - sem_t sem; /* The counting semaphore of the wqueue */ - sem_t exsem; /* Sync waiting for thread exit */ - spinlock_t lock; /* Spinlock */ - uint8_t nthreads; /* Number of worker threads */ - bool exit; /* A flag to request the thread to exit */ - int16_t wait_count; - struct kworker_s worker[0]; /* Describes a worker thread */ + struct list_node expired; /* The queue of expired work. */ Review Comment: remove the extra spaces ########## sched/wqueue/wqueue.h: ########## @@ -159,6 +148,65 @@ static inline_function FAR struct kwork_wqueue_s *work_qid2wq(int qid) } } +/**************************************************************************** + * Name: work_insert_pending + * + * Description: + * Internal public function to insert the work to the workqueue. + * Require wqueue != NULL and work != NULL. + * + * Input Parameters: + * wqueue - The work queue. + * work - The work to be inserted. + * + * Returned Value: + * Return whether the work is inserted at the head of the pending queue. + * + ****************************************************************************/ + +static inline_function +bool work_insert_pending(FAR struct kwork_wqueue_s *wqueue, + FAR struct work_s *work) +{ + struct work_s *curr; + + DEBUGASSERT(wqueue != NULL && work != NULL); + + /* Insert the work into the wait queue sorted by the expired time. */ + + list_for_every_entry(&wqueue->pending, curr, struct work_s, node) + { + if (!clock_compare(curr->qtime, work->qtime)) + { + break; + } + } + + /* After the insertion, we do not violate the invariant that + * the wait queue is sorted by the expired time. Because + * curr->qtime > work_qtime. 
Review Comment: work->qtime ########## sched/wqueue/kwork_cancel.c: ########## @@ -89,6 +109,7 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync, } spin_unlock_irqrestore(&wqueue->lock, flags); + Review Comment: revert the change ########## sched/wqueue/kwork_queue.c: ########## @@ -141,65 +80,72 @@ int work_queue_period_wq(FAR struct kwork_wqueue_s *wqueue, FAR void *arg, clock_t delay, clock_t period) { irqstate_t flags; - int ret = OK; + clock_t expected; + bool wake = false; + int ret = OK; if (wqueue == NULL || work == NULL || worker == NULL) { return -EINVAL; } + /* delay+1 is to prevent the insufficient sleep time if we are + * currently near the boundary to the next tick. + * | current_tick | current_tick + 1 | current_tick + 2 | .... | + * | ^ Here we get the current tick + * In this case we delay 1 tick, timer will be triggered at + * current_tick + 1, which is not enough for at least 1 tick. + */ + + expected = clock_systime_ticks() + delay + 1; + /* Interrupts are disabled so that this logic can be called from with * task logic or from interrupt handling logic. */ flags = spin_lock_irqsave(&wqueue->lock); - sched_lock(); - /* Remove the entry from the timer and work queue. */ + /* Check whether we own the work structure. */ - if (work->worker != NULL) + if (!work_available(work)) { - /* Remove the entry from the work queue and make sure that it is - * marked as available (i.e., the worker field is nullified). - */ - - work->worker = NULL; - wd_cancel(&work->u.timer); - - list_delete(&work->u.s.node); - } + /* Seize the ownership from the work thread. 
*/ - if (work_is_canceling(wqueue->worker, wqueue->nthreads, work)) Review Comment: why remove this check ########## sched/wqueue/kwork_thread.c: ########## @@ -148,33 +193,54 @@ static int work_thread(int argc, FAR char *argv[]) kworker = (FAR struct kworker_s *) ((uintptr_t)strtoul(argv[2], NULL, 16)); - flags = spin_lock_irqsave(&wqueue->lock); - sched_lock(); - - /* Loop forever */ + /* Loop until wqueue->exit != 0. + * Since the only way to set wqueue->exit is to call work_queue_free(), + * there is no need for entering the critical section. + */ while (!wqueue->exit) { - /* And check each entry in the work queue. Since we have disabled + /* And check first entry in the work queue. Since we have disabled * interrupts we know: (1) we will not be suspended unless we do * so ourselves, and (2) there will be no changes to the work queue */ - /* Remove the ready-to-execute work from the list */ + flags = spin_lock_irqsave(&wqueue->lock); + sched_lock(); + + /* If the wqueue timer is expired and non-active, it indicates that + * there might be expired work in the pending queue. + */ - while (!list_is_empty(&wqueue->q)) + if (!WDOG_ISACTIVE(&wqueue->timer)) { - work = list_first_entry(&wqueue->q, struct work_s, u.s.node); + unsigned int wake_count = work_dispatch(wqueue); - list_delete(&work->u.s.node); + spin_unlock_irqrestore(&wqueue->lock, flags); + sched_unlock(); + + /* Note that the thread execution this function is also + * a worker thread, which has already been woken up by the timer. + * So only `count - 1` semaphore will be posted. + */ - if (work->worker == NULL) + while (wake_count-- > 1) { - continue; + nxsem_post(&wqueue->sem); Review Comment: why not move the nxsem_post to work_dispatch? That could avoid the scheduler trying to reschedule on each post. 
########## sched/wqueue/kwork_cancel.c: ########## @@ -31,7 +31,7 @@ #include <nuttx/irq.h> #include <nuttx/arch.h> -#include <nuttx/queue.h> +#include <nuttx/list.h> Review Comment: move to the first patch ########## include/nuttx/wqueue.h: ########## @@ -249,19 +249,11 @@ typedef CODE void (*worker_t)(FAR void *arg); struct work_s { - union - { - struct - { - struct list_node node; /* Implements a double linked list */ - clock_t qtime; /* Time work queued */ - } s; - struct wdog_s timer; /* Delay expiry timer */ - struct wdog_period_s ptimer; /* Period expiry timer */ - } u; - worker_t worker; /* Work callback */ - FAR void *arg; /* Callback argument */ - FAR struct kwork_wqueue_s *wq; /* Work queue */ + struct list_node node; /* Implements a double linked list */ Review Comment: remove the extra space ########## sched/wqueue/kwork_cancel.c: ########## @@ -59,17 +59,37 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync, */ flags = spin_lock_irqsave(&wqueue->lock); - if (work->worker != NULL) + + /* Check whether we own the work structure. */ + + if (!work_available(work)) { - /* Remove the entry from the work queue and make sure that it is - * marked as available (i.e., the worker field is nullified). - */ + bool is_head = list_is_head(&wqueue->pending, &work->node); + + /* Seize the ownership from the work thread. */ work->worker = NULL; - wd_cancel(&work->u.timer); - list_delete(&work->u.s.node); - ret = OK; + list_delete(&work->node); + + /* If the head of the pending queue has changed, we should reset + * the wqueue timer. + */ + + if (is_head) + { + if (!list_is_empty(&wqueue->expired)) Review Comment: should we use pending list? 
########## sched/wqueue/wqueue.h: ########## @@ -159,6 +148,65 @@ static inline_function FAR struct kwork_wqueue_s *work_qid2wq(int qid) } } +/**************************************************************************** + * Name: work_insert_pending + * + * Description: + * Internal public function to insert the work to the workqueue. + * Require wqueue != NULL and work != NULL. + * + * Input Parameters: + * wqueue - The work queue. + * work - The work to be inserted. + * + * Returned Value: + * Return whether the work is inserted at the head of the pending queue. + * + ****************************************************************************/ + +static inline_function +bool work_insert_pending(FAR struct kwork_wqueue_s *wqueue, + FAR struct work_s *work) +{ + struct work_s *curr; + + DEBUGASSERT(wqueue != NULL && work != NULL); + + /* Insert the work into the wait queue sorted by the expired time. */ + + list_for_every_entry(&wqueue->pending, curr, struct work_s, node) + { + if (!clock_compare(curr->qtime, work->qtime)) + { + break; + } + } + + /* After the insertion, we do not violate the invariant that + * the wait queue is sorted by the expired time. Because + * curr->qtime > work_qtime. + * In the case of the wqueue is empty, we insert + * the work at the head of the wait queue. + */ + + list_add_before(&curr->node, &work->node); + + return &curr->node == &wqueue->pending; Review Comment: call list_is_head ########## sched/wqueue/kwork_thread.c: ########## @@ -148,33 +193,54 @@ static int work_thread(int argc, FAR char *argv[]) kworker = (FAR struct kworker_s *) ((uintptr_t)strtoul(argv[2], NULL, 16)); - flags = spin_lock_irqsave(&wqueue->lock); - sched_lock(); - - /* Loop forever */ + /* Loop until wqueue->exit != 0. + * Since the only way to set wqueue->exit is to call work_queue_free(), + * there is no need for entering the critical section. + */ while (!wqueue->exit) { - /* And check each entry in the work queue. 
Since we have disabled + /* And check first entry in the work queue. Since we have disabled * interrupts we know: (1) we will not be suspended unless we do * so ourselves, and (2) there will be no changes to the work queue */ - /* Remove the ready-to-execute work from the list */ + flags = spin_lock_irqsave(&wqueue->lock); + sched_lock(); + + /* If the wqueue timer is expired and non-active, it indicates that + * there might be expired work in the pending queue. + */ - while (!list_is_empty(&wqueue->q)) + if (!WDOG_ISACTIVE(&wqueue->timer)) { - work = list_first_entry(&wqueue->q, struct work_s, u.s.node); + unsigned int wake_count = work_dispatch(wqueue); - list_delete(&work->u.s.node); + spin_unlock_irqrestore(&wqueue->lock, flags); + sched_unlock(); + + /* Note that the thread execution this function is also + * a worker thread, which has already been woken up by the timer. + * So only `count - 1` semaphore will be posted. + */ - if (work->worker == NULL) + while (wake_count-- > 1) { - continue; + nxsem_post(&wqueue->sem); } - /* Extract the work description from the entry (in case the work - * instance will be re-used after it has been de-queued). + flags = spin_lock_irqsave(&wqueue->lock); + sched_lock(); + } + + if (!list_is_empty(&wqueue->expired)) + { + work = list_first_entry(&wqueue->expired, struct work_s, node); Review Comment: remove extra space before = ########## sched/wqueue/kwork_queue.c: ########## @@ -141,65 +80,72 @@ int work_queue_period_wq(FAR struct kwork_wqueue_s *wqueue, FAR void *arg, clock_t delay, clock_t period) { irqstate_t flags; - int ret = OK; + clock_t expected; + bool wake = false; + int ret = OK; if (wqueue == NULL || work == NULL || worker == NULL) { return -EINVAL; } + /* delay+1 is to prevent the insufficient sleep time if we are + * currently near the boundary to the next tick. + * | current_tick | current_tick + 1 | current_tick + 2 | .... 
| + * | ^ Here we get the current tick + * In this case we delay 1 tick, timer will be triggered at + * current_tick + 1, which is not enough for at least 1 tick. + */ + + expected = clock_systime_ticks() + delay + 1; + /* Interrupts are disabled so that this logic can be called from with * task logic or from interrupt handling logic. */ flags = spin_lock_irqsave(&wqueue->lock); - sched_lock(); - /* Remove the entry from the timer and work queue. */ + /* Check whether we own the work structure. */ - if (work->worker != NULL) + if (!work_available(work)) { - /* Remove the entry from the work queue and make sure that it is - * marked as available (i.e., the worker field is nullified). - */ - - work->worker = NULL; - wd_cancel(&work->u.timer); - - list_delete(&work->u.s.node); - } + /* Seize the ownership from the work thread. */ - if (work_is_canceling(wqueue->worker, wqueue->nthreads, work)) - { - goto out; + list_delete(&work->node); Review Comment: may change the head of pending list, need restart wdog in this case -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: commits-unsubscr...@nuttx.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org