anchao commented on code in PR #16231:
URL: https://github.com/apache/nuttx/pull/16231#discussion_r2051672835


##########
sched/wqueue/kwork_thread.c:
##########
@@ -216,18 +288,49 @@ static int work_thread(int argc, FAR char *argv[])
        * posted.
        */
 
+      /* Check the waiting queue */
+
+      has_next = false;
+
+      if (!list_is_empty(&wqueue->q))
+        {
+          work  = list_first_entry(&wqueue->q, struct work_s, node);
+
+      /* The work thread will sleep until work->qtime */
+
+          ticks = work->qtime;
+          has_next = true;
+        }
+
       wqueue->wait_count++;
       spin_unlock_irqrestore(&wqueue->lock, flags);
       sched_unlock();
 
+      /* Set the earliest expired work delay */
+
+      if (has_next)
+        {
+          /* If the earliest work has already expired. */
+
+          if (clock_compare(ticks, clock_systime_ticks()))
+            {
+              /* Continue the work thread loop. */
+
+              continue;
+            }
+
+          /* Else we start a timer to wake up the work thread. */
+
+          wd_start_abstick(&wqueue->timer, ticks, work_timer_expired,

Review Comment:
   We should optimize the current wait strategy and wait only when there is no 
enqueued work or the timer has already been started; with that change, the 
`wait_count` and `sem_post` handling in this code can be removed.



##########
sched/wqueue/kwork_queue.c:
##########
@@ -155,51 +94,46 @@ int work_queue_period_wq(FAR struct kwork_wqueue_s *wqueue,
   flags = spin_lock_irqsave(&wqueue->lock);
   sched_lock();
 
-  /* Remove the entry from the timer and work queue. */
+  /* Check whether we own the work structure. */
 
-  if (work->worker != NULL)
+  if (!work_available(work))
     {
-      /* Remove the entry from the work queue and make sure that it is
-       * marked as available (i.e., the worker field is nullified).
-       */
+      /* Seize the ownership from the work thread. */
 
       work->worker = NULL;
-      wd_cancel(&work->u.timer);
-      if (dq_inqueue((FAR dq_entry_t *)work, &wqueue->q))
-        {
-          dq_rem((FAR dq_entry_t *)work, &wqueue->q);
-        }
-    }
 
-  if (work_is_canceling(wqueue->worker, wqueue->nthreads, work))
-    {
-      goto out;
+      list_delete(&work->node);
     }
 
   /* Initialize the work structure. */
 
   work->worker = worker;           /* Work callback. non-NULL means queued */
   work->arg    = arg;              /* Callback argument */
-  work->wq     = wqueue;           /* Work queue */
+  work->period = period;           /* Periodical delay */
 
   /* Queue the new work */
 
   if (!delay)
     {
-      queue_work(wqueue, work);
+      work->qtime = clock_systime_ticks();
     }
-  else if (period == 0)
+  else
     {
-      ret = wd_start(&work->u.timer, delay,
-                     work_timer_expiry, (wdparm_t)work);
+      work->qtime = clock_systime_ticks() + delay + 1;
     }
-  else
+
+  /* Insert to the workqueue's waiting list. */
+
+  work_insert_queue(wqueue, work);
+
+  /* Wakeup the wqueue thread. */
+
+  if (wqueue->wait_count > 0) /* There are threads waiting for sem. */

Review Comment:
   No need to wake up the worker thread if the work has not expired yet.
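
   Continuing the hypothetical POSIX sketch from the comment above, one way to realize this is to signal the worker only when the new work becomes the head of the sorted queue, i.e. its `qtime` is earlier than the deadline the worker is already sleeping on:

```c
static bool earlier(const struct timespec *a, const struct timespec *b)
{
  return a->tv_sec < b->tv_sec ||
         (a->tv_sec == b->tv_sec && a->tv_nsec < b->tv_nsec);
}

static void enqueue_work(struct work_item *work)
{
  pthread_mutex_lock(&g_lock);

  /* Keep the list sorted by qtime, earliest first. */

  struct work_item **prev = &g_head;
  while (*prev != NULL && !earlier(&work->qtime, &(*prev)->qtime))
    {
      prev = &(*prev)->next;
    }

  work->next = *prev;
  *prev      = work;

  /* Signal the worker only if this work became the new head.  Otherwise
   * the deadline the worker is already sleeping on is still the earliest
   * one, and waking it would just be a useless context switch.
   */

  if (g_head == work)
    {
      pthread_cond_signal(&g_cond);
    }

  pthread_mutex_unlock(&g_lock);
}
```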



##########
sched/wqueue/kwork_cancel.c:
##########
@@ -59,40 +61,36 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, bool sync,
    */
 
   flags = spin_lock_irqsave(&wqueue->lock);
-  if (work->worker != NULL)
+
+  /* Check whether we own the work structure. */
+
+  if (!work_available(work))
     {
-      /* Remove the entry from the work queue and make sure that it is
-       * marked as available (i.e., the worker field is nullified).
-       */
+      /* Seize the ownership from the work thread. */
 
+      worker = work->worker;
+      arg    = work->arg;
+
+      run_myself   = sync;
       work->worker = NULL;
-      wd_cancel(&work->u.timer);
-      if (dq_inqueue((FAR dq_entry_t *)work, &wqueue->q))
-        {
-          dq_rem((FAR dq_entry_t *)work, &wqueue->q);
-        }
 
-      ret = OK;
+      list_delete(&work->node);
     }
-  else if (!up_interrupt_context() && !sched_idletask() && sync)
+
+  spin_unlock_irqrestore(&wqueue->lock, flags);
+
+  if (run_myself)

Review Comment:
   This is wrong. **cancel_sync** means waiting for the work that is currently 
being executed to finish, rather than invoking the callback in the current 
task context. Such an implementation could easily cause a deadlock.
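
   For comparison, a sketch of those cancel-sync semantics in the same hypothetical POSIX model used above: the canceling context unlinks the work if it is still queued, and if a worker is executing its callback right now, it waits for that execution to finish instead of calling the callback itself. It assumes the worker brackets the callback with `g_running = work; ... g_running = NULL; pthread_cond_broadcast(&g_done);` under `g_lock`:

```c
static struct work_item *g_running;  /* Work whose callback is executing */
static pthread_cond_t    g_done = PTHREAD_COND_INITIALIZER;

static void cancel_work_sync(struct work_item *work)
{
  pthread_mutex_lock(&g_lock);

  /* Unlink the work if it is still sitting in the queue. */

  struct work_item **prev;
  for (prev = &g_head; *prev != NULL; prev = &(*prev)->next)
    {
      if (*prev == work)
        {
          *prev = work->next;
          break;
        }
    }

  /* If a worker is executing this work right now, wait for it to end
   * rather than calling the callback ourselves; running it in the
   * canceling context can deadlock on locks this context already holds.
   */

  while (g_running == work)
    {
      pthread_cond_wait(&g_done, &g_lock);
    }

  pthread_mutex_unlock(&g_lock);
}
```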



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@nuttx.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
