anchao commented on code in PR #16231:
URL: https://github.com/apache/nuttx/pull/16231#discussion_r2049150464


##########
sched/wqueue/kwork_thread.c:
##########
@@ -110,6 +118,44 @@ struct lp_wqueue_s g_lpwork =
  * Private Functions
  ****************************************************************************/
 
+static inline void work_timer_expired(wdparm_t arg)

Review Comment:
   ```suggestion
   static inline_function void work_timer_expired(wdparm_t arg)
   ```



##########
sched/wqueue/wqueue.h:
##########
@@ -159,6 +149,24 @@ static inline_function FAR struct kwork_wqueue_s 
*work_qid2wq(int qid)
     }
 }
 
+static inline_function
+void work_insert_waitq(FAR struct kwork_wqueue_s *wqueue,
+                       struct work_s *work)
+{
+  struct list_node *curr;
+
+  list_for_every(&wqueue->wait_q, curr)

Review Comment:
   list_for_every_entry



##########
sched/wqueue/kwork_thread.c:
##########
@@ -160,14 +208,19 @@ static int work_thread(int argc, FAR char *argv[])
        * so ourselves, and (2) there will be no changes to the work queue
        */
 
+      ticks = clock_systime_ticks();
+
+      /* Check if there is expired delayed work */
+
+      work_expiration(ticks, wqueue);
+
       /* Remove the ready-to-execute work from the list */
 
-      while ((work = (FAR struct work_s *)dq_remfirst(&wqueue->q)) != NULL)
+      while (!list_is_empty(&wqueue->q))

Review Comment:
   move the ready-to-run queue onto the stack



##########
sched/wqueue/kwork_queue.c:
##########
@@ -155,51 +94,48 @@ int work_queue_period_wq(FAR struct kwork_wqueue_s *wqueue,
   flags = spin_lock_irqsave(&wqueue->lock);
   sched_lock();
 
-  /* Remove the entry from the timer and work queue. */
+  /* Check whether we own the work structure. */
 
   if (work->worker != NULL)
     {
-      /* Remove the entry from the work queue and make sure that it is
-       * marked as available (i.e., the worker field is nullified).
-       */
+      /* Seize the ownership from the work thread. */
 
       work->worker = NULL;
-      wd_cancel(&work->u.timer);
-      if (dq_inqueue((FAR dq_entry_t *)work, &wqueue->q))
-        {
-          dq_rem((FAR dq_entry_t *)work, &wqueue->q);
-        }
-    }
 
-  if (work_is_canceling(wqueue->worker, wqueue->nthreads, work))
-    {
-      goto out;
+      list_delete(&work->node);
     }
 
   /* Initialize the work structure. */
 
   work->worker = worker;           /* Work callback. non-NULL means queued */
   work->arg    = arg;              /* Callback argument */
-  work->wq     = wqueue;           /* Work queue */
+  work->period = period;           /* Periodical delay */
 
   /* Queue the new work */
 
   if (!delay)
     {
-      queue_work(wqueue, work);
+      work->qtime = clock_systime_ticks();
+
+      list_add_tail(&wqueue->q, &work->node);
     }
-  else if (period == 0)
+  else
     {
-      ret = wd_start(&work->u.timer, delay,
-                     work_timer_expiry, (wdparm_t)work);
+      /* Insert the work to the workqueue's waiting list. */
+
+      work->qtime = clock_systime_ticks() + delay + 1;
+
+      work_insert_waitq(wqueue, work);
     }
-  else
+
+  /* Wakeup the wqueue thread. */
+
+  if (wqueue->wait_count > 0) /* There are threads waiting for sem. */
     {
-      ret = wd_start_period(&work->u.ptimer, delay, period,
-                            work_timer_expiry, (wdparm_t)work);
+      wqueue->wait_count--;
+      nxsem_post(&wqueue->sem);
     }
 
-out:
   spin_unlock_irqrestore(&wqueue->lock, flags);
   sched_unlock();
   return ret;

Review Comment:
   ```suggestion
     return 0;
   ```



##########
sched/wqueue/kwork_thread.c:
##########
@@ -110,6 +118,44 @@ struct lp_wqueue_s g_lpwork =
  * Private Functions
  ****************************************************************************/
 
+static inline void work_timer_expired(wdparm_t arg)
+{
+  FAR struct kwork_wqueue_s *wq = (FAR struct kwork_wqueue_s *)arg;
+  irqstate_t flags = spin_lock_irqsave(&wq->lock);
+  sched_lock();
+
+  if (wq->wait_count > 0)
+    {
+      wq->wait_count--;
+      nxsem_post(&wq->sem);
+    }
+
+  spin_unlock_irqrestore(&wq->lock, flags);
+  sched_unlock();
+}
+
+static inline
+void work_expiration(clock_t ticks, FAR struct kwork_wqueue_s *wq)
+{
+  struct work_s *work;
+
+  while (!list_is_empty(&wq->wait_q))

Review Comment:
   list_for_every_entry



##########
sched/wqueue/kwork_thread.c:
##########
@@ -181,7 +234,31 @@ static int work_thread(int argc, FAR char *argv[])
 
           /* Mark the work as no longer being queued */
 
-          work->worker = NULL;
+          if (work->period != 0)
+            {
+              /* Insert qtime */
+
+              work->qtime += work->period;
+
+              if (clock_compare(work->qtime, ticks))

Review Comment:
   ticks should be updated again after each polling iteration



##########
sched/wqueue/wqueue.h:
##########
@@ -159,6 +149,24 @@ static inline_function FAR struct kwork_wqueue_s 
*work_qid2wq(int qid)
     }
 }
 
+static inline_function
+void work_insert_waitq(FAR struct kwork_wqueue_s *wqueue,
+                       struct work_s *work)
+{
+  struct list_node *curr;
+
+  list_for_every(&wqueue->wait_q, curr)
+    {
+      struct work_s *curr_work = list_entry(curr, struct work_s, node);

Review Comment:
   ```suggestion
         FAR struct work_s *curr_work = list_entry(curr, struct work_s, node);
   ```



##########
sched/wqueue/kwork_queue.c:
##########
@@ -155,51 +94,48 @@ int work_queue_period_wq(FAR struct kwork_wqueue_s *wqueue,
   flags = spin_lock_irqsave(&wqueue->lock);
   sched_lock();
 
-  /* Remove the entry from the timer and work queue. */
+  /* Check whether we own the work structure. */
 
   if (work->worker != NULL)

Review Comment:
   work_available



##########
sched/wqueue/kwork_cancel.c:
##########
@@ -59,38 +59,20 @@ static int work_qcancel(FAR struct kwork_wqueue_s *wqueue, 
bool sync,
    */
 
   flags = spin_lock_irqsave(&wqueue->lock);
+
+  /* Check whether we own the work structure. */
+
   if (work->worker != NULL)
     {
-      /* Remove the entry from the work queue and make sure that it is
-       * marked as available (i.e., the worker field is nullified).
-       */
+      /* Seize the ownership from the work thread. */
 
       work->worker = NULL;
-      wd_cancel(&work->u.timer);
-      if (dq_inqueue((FAR dq_entry_t *)work, &wqueue->q))
-        {
-          dq_rem((FAR dq_entry_t *)work, &wqueue->q);
-        }
 
-      ret = OK;
-    }
-  else if (!up_interrupt_context() && !sched_idletask() && sync)
-    {
-      int wndx;
-
-      for (wndx = 0; wndx < wqueue->nthreads; wndx++)
-        {
-          if (wqueue->worker[wndx].work == work &&
-              wqueue->worker[wndx].pid != nxsched_gettid())
-            {
-              wqueue->worker[wndx].wait_count++;
-              spin_unlock_irqrestore(&wqueue->lock, flags);
-              nxsem_wait_uninterruptible(&wqueue->worker[wndx].wait);
-              return 1;
-            }
-        }
+      list_delete(&work->node);
     }
 
+  ret = OK;

Review Comment:
   ```suggestion
   ```
   remove ret
   
   and `work_cancel_sync()` should also be removed



##########
sched/wqueue/kwork_thread.c:
##########
@@ -211,6 +288,17 @@ static int work_thread(int argc, FAR char *argv[])
             }
         }
 
+      if (!list_is_empty(&wqueue->wait_q))

Review Comment:
   maybe wait_q has a ready worker now



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@nuttx.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to