From: Vivek Venkatraman <[email protected]>

- Allow work queues to specify the yield duration for the corresponding
  background thread
- Use the specified yield duration when deciding whether the thread should yield
- During work queue processing, if using a single list element with a meta-queue
  (like done in Zebra), do not exit after each element is processed, instead
  update the next-node upon a WQ_REQUEUE so that the WQ processing continues
  and is terminated by the yield logic.
- Enhance work queue debug output

Signed-off-by: Vivek Venkatraman <[email protected]>
Signed-off-by: Daniel Walton <[email protected]>
---
 lib/thread.c    | 12 ++++++++++--
 lib/thread.h    |  3 +++
 lib/workqueue.c | 37 ++++++++++++++++++++++++++-----------
 lib/workqueue.h |  3 +++
 4 files changed, 42 insertions(+), 13 deletions(-)

diff --git a/lib/thread.c b/lib/thread.c
index 5e40261..688d291 100644
--- a/lib/thread.c
+++ b/lib/thread.c
@@ -691,6 +691,7 @@ thread_get (struct thread_master *m, u_char type,
   thread->func = func;
   thread->arg = arg;
   thread->index = -1;
+  thread->yield = THREAD_YIELD_TIME_SLOT; /* default */
 
   thread->funcname = funcname;
   thread->schedfrom = schedfrom;
@@ -1190,7 +1191,8 @@ thread_consumed_time (RUSAGE_T *now, RUSAGE_T *start, 
unsigned long *cputime)
   return timeval_elapsed (now->real, start->real);
 }
 
-/* We should aim to yield after THREAD_YIELD_TIME_SLOT milliseconds. 
+/* We should aim to yield after 'yield' microseconds, which defaults
+   to THREAD_YIELD_TIME_SLOT.
    Note: we are using real (wall clock) time for this calculation.
    It could be argued that CPU time may make more sense in certain
    contexts.  The things to consider are whether the thread may have
@@ -1204,7 +1206,13 @@ thread_should_yield (struct thread *thread)
 {
   quagga_get_relative (NULL);
   return (timeval_elapsed(relative_time, thread->real) >
-         THREAD_YIELD_TIME_SLOT);
+          thread->yield);
+}
+
+void
+thread_set_yield_time (struct thread *thread, unsigned long yield_time)
+{
+  thread->yield = yield_time;
 }
 
 void
diff --git a/lib/thread.h b/lib/thread.h
index 5bc756c..d8c5389 100644
--- a/lib/thread.h
+++ b/lib/thread.h
@@ -84,6 +84,7 @@ struct thread
   const char *funcname;
   const char *schedfrom;
   int schedfrom_line;
+  unsigned long yield;          /* yield time in us */
 };
 
 struct cpu_thread_history 
@@ -212,6 +213,8 @@ extern unsigned long thread_timer_remain_second (struct 
thread *);
 extern struct timeval thread_timer_remain(struct thread*);
 extern int thread_should_yield (struct thread *);
 extern unsigned long timeval_elapsed (struct timeval a, struct timeval b);
+/* set yield time for thread */
+extern void thread_set_yield_time (struct thread *, unsigned long);
 
 /* Internal libzebra exports */
 extern void thread_getrusage (RUSAGE_T *);
diff --git a/lib/workqueue.c b/lib/workqueue.c
index b1a5d5b..d2d002f 100644
--- a/lib/workqueue.c
+++ b/lib/workqueue.c
@@ -88,6 +88,7 @@ work_queue_new (struct thread_master *m, const char 
*queue_name)
 
   /* Default values, can be overriden by caller */
   new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;
+  new->spec.yield = THREAD_YIELD_TIME_SLOT;
     
   return new;
 }
@@ -123,6 +124,9 @@ work_queue_schedule (struct work_queue *wq, unsigned int 
delay)
     {
       wq->thread = thread_add_background (wq->master, work_queue_run, 
                                           wq, delay);
+      /* set thread yield time, if needed */
+      if (wq->thread && wq->spec.yield != THREAD_YIELD_TIME_SLOT)
+        thread_set_yield_time (wq->thread, wq->spec.yield);
       return 1;
     }
   else
@@ -184,27 +188,27 @@ DEFUN(show_work_queues,
   struct work_queue *wq;
   
   vty_out (vty, 
-           "%c %8s %5s %8s %21s%s",
-           ' ', "List","(ms) ","Q. Runs","Cycle Counts   ",
+           "%c %8s %5s %8s %8s %21s%s",
+           ' ', "List","(ms) ","Q. Runs","Yields","Cycle Counts   ",
            VTY_NEWLINE);
   vty_out (vty,
-           "%c %8s %5s %8s %7s %6s %6s %s%s",
+           "%c %8s %5s %8s %8s %7s %6s %8s %6s %s%s",
            'P',
            "Items",
            "Hold",
-           "Total",
-           "Best","Gran.","Avg.", 
+           "Total","Total",
+           "Best","Gran.","Total","Avg.",
            "Name", 
            VTY_NEWLINE);
  
   for (ALL_LIST_ELEMENTS_RO (work_queues, node, wq))
     {
-      vty_out (vty,"%c %8d %5d %8ld %7d %6d %6u %s%s",
+      vty_out (vty,"%c %8d %5d %8ld %8ld %7d %6d %8ld %6u %s%s",
                (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
                listcount (wq->items),
                wq->spec.hold,
-               wq->runs,
-               wq->cycles.best, wq->cycles.granularity,
+               wq->runs, wq->yields,
+               wq->cycles.best, wq->cycles.granularity, wq->cycles.total,
                  (wq->runs) ? 
                    (unsigned int) (wq->cycles.total / wq->runs) : 0,
                wq->name,
@@ -260,7 +264,8 @@ work_queue_run (struct thread *thread)
   assert (wq && wq->items);
 
   /* calculate cycle granularity:
-   * list iteration == 1 cycle
+   * list iteration == 1 run
+   * listnode processing == 1 cycle
    * granularity == # cycles between checks whether we should yield.
    *
    * granularity should be > 0, and can increase slowly after each run to
@@ -319,6 +324,14 @@ work_queue_run (struct thread *thread)
        {
          item->ran--;
          work_queue_item_requeue (wq, node);
+      /* If a single node is being used with a meta-queue (e.g., zebra),
+       * update the next node as we don't want to exit the thread and
+       * reschedule it after every node. By definition, WQ_REQUEUE is
+       * meant to continue the processing; the yield logic will kick in
+       * to terminate the thread when time has exceeded.
+       */
+      if (nnode == NULL)
+        nnode = node;
          break;
        }
       case WQ_RETRY_NOW:
@@ -356,7 +369,7 @@ stats:
   /* we yielded, check whether granularity should be reduced */
   if (yielded && (cycles < wq->cycles.granularity))
     {
-      wq->cycles.granularity = ((cycles > 0) ? cycles 
+      wq->cycles.granularity = ((cycles > 0) ? cycles
                                              : WORK_QUEUE_MIN_GRANULARITY);
     }
   /* otherwise, should granularity increase? */
@@ -364,7 +377,7 @@ stats:
     {
       if (cycles > wq->cycles.best)
         wq->cycles.best = cycles;
-      
+
       /* along with yielded check, provides hysteresis for granularity */
       if (cycles > (wq->cycles.granularity * WQ_HYSTERESIS_FACTOR
                                            * WQ_HYSTERESIS_FACTOR))
@@ -376,6 +389,8 @@ stats:
   
   wq->runs++;
   wq->cycles.total += cycles;
+  if (yielded)
+    wq->yields++;
 
 #if 0
   printf ("%s: cycles %d, new: best %d, worst %d\n",
diff --git a/lib/workqueue.h b/lib/workqueue.h
index 5ad2589..19b4404 100644
--- a/lib/workqueue.h
+++ b/lib/workqueue.h
@@ -84,11 +84,14 @@ struct work_queue
     unsigned int max_retries;  
 
     unsigned int hold; /* hold time for first run, in ms */
+
+    unsigned long yield; /* yield time in us for associated thread */
   } spec;
   
   /* remaining fields should be opaque to users */
   struct list *items;                 /* queue item list */
   unsigned long runs;                 /* runs count */
+  unsigned long yields;               /* yields count */
   
   struct {
     unsigned int best;
-- 
1.9.1


_______________________________________________
Quagga-dev mailing list
[email protected]
https://lists.quagga.net/mailman/listinfo/quagga-dev

Reply via email to