The cpuuse top command now supports sorting by current load, where the list
of tasks is ordered by the current load rather than the total CPU usage.
This lets you see what is using the processor at any given instant. You can
toggle between the two modes.

Added memory usage statistics for unified or separate workspace and C heaps,
as well as the total allocated stack space.

Added a few more command keys: refresh the display, show all tasks in the
system, control the number of lines displayed, and a scrolling mode that
does not clear the display on each refresh.

Removed support for tick kernel builds; tick support in the kernel is slated
for removal.
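
For reference, a minimal sketch of driving the new display from an
application (rtems_cpu_usage_top() and <rtems/cpuuse.h> are the existing
cpuuse API; the wrapper function is illustrative only):

  #include <rtems/cpuuse.h>

  /*
   * Start the interactive load monitor on the console. The monitor reads
   * single keys: <ENTER>/<Q> exit, <C> toggles total/current sorting,
   * <SPACE> refreshes, <S> toggles scrolling, <A> shows all tasks, and
   * <+>/<-> adjust the number of lines displayed.
   */
  void app_show_top(void)
  {
    rtems_cpu_usage_top();
  }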
---
 cpukit/libmisc/cpuuse/cpuusagetop.c | 691 ++++++++++++++++++++++++++----------
 1 file changed, 496 insertions(+), 195 deletions(-)

diff --git a/cpukit/libmisc/cpuuse/cpuusagetop.c b/cpukit/libmisc/cpuuse/cpuusagetop.c
index e47ba59..13b659e 100644
--- a/cpukit/libmisc/cpuuse/cpuusagetop.c
+++ b/cpukit/libmisc/cpuuse/cpuusagetop.c
@@ -18,6 +18,7 @@
 #include "config.h"
 #endif
 
+#include <stdbool.h>
 #include <string.h>
 #include <stdlib.h>
 #include <stdio.h>
@@ -25,29 +26,97 @@
 #include <inttypes.h>
 
 #include <rtems/cpuuse.h>
+#include <rtems/malloc.h>
 #include <rtems/score/objectimpl.h>
+#include <rtems/score/protectedheap.h>
 #include <rtems/score/threadimpl.h>
 #include <rtems/score/todimpl.h>
 #include <rtems/score/watchdogimpl.h>
+#include <rtems/score/wkspace.h>
 
 /*
  * Common variable to sync the load monitor task.
  */
-static volatile int rtems_cpuusage_top_thread_active;
-
-typedef struct {
-  void                  *context;
+typedef struct
+{
+  void*                  context;
   rtems_printk_plugin_t  print;
-}rtems_cpu_usage_plugin_t;
+} rtems_cpu_usage_plugin;
+
+/*
+ * Use a struct for all data to allow more than one top and to support the
+ * thread iterator.
+ */
+typedef struct
+{
+  volatile bool          thread_run;
+  volatile bool          thread_active;
+  volatile bool          single_page;
+  volatile bool          sort_current;
+  volatile uint32_t      poll_rate_usecs;
+  volatile uint32_t      show;
+  rtems_cpu_usage_plugin plugin;
+  Thread_CPU_usage_t     zero;
+  Timestamp_Control      uptime;
+  Timestamp_Control      last_uptime;
+  Timestamp_Control      period;
+  int                    task_count;        /* Number of tasks. */
+  int                    last_task_count;   /* Number of tasks in the previous sample. */
+  int                    task_size;         /* The size of the arrays. */
+  Thread_Control**       tasks;             /* List of tasks in this sample. */
+  Thread_Control**       last_tasks;        /* List of tasks in the last sample. */
+  Thread_CPU_usage_t*    usage;             /* Usage of tasks in this sample. */
+  Thread_CPU_usage_t*    last_usage;        /* Usage of tasks in the last sample. */
+  Thread_CPU_usage_t*    current_usage;     /* Current usage for this sample. */
+  Timestamp_Control      total;             /* Total run time; should equal the uptime. */
+  Timestamp_Control      idle;              /* Time spent in idle. */
+  Timestamp_Control      current;           /* Run time in the current period. */
+  Timestamp_Control      current_idle;      /* Idle time in the current period. */
+  uint32_t               stack_size;        /* Size of stack allocated. */
+} rtems_cpu_usage_data;
+
+/*
+ * Private version of the iterator with an argument. This will be moved
+ * to the public API in 5.0.
+ */
+
+typedef void (*rtems_per_thread_routine_2)( Thread_Control *, void* );
 
-#define RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS (20)
+void rtems_iterate_over_all_threads_2(rtems_per_thread_routine_2 routine,
+                                      void*                      arg);
 
+void rtems_iterate_over_all_threads_2(rtems_per_thread_routine_2 routine,
+                                      void*                      arg)
+{
+  uint32_t             i;
+  uint32_t             api_index;
+  Thread_Control      *the_thread;
+  Objects_Information *information;
+
+  if ( !routine )
+    return;
+
+  for ( api_index = 1 ; api_index <= OBJECTS_APIS_LAST ; api_index++ ) {
+    #if !defined(RTEMS_POSIX_API) || defined(RTEMS_DEBUG)
+      if ( !_Objects_Information_table[ api_index ] )
+        continue;
+    #endif
+    information = _Objects_Information_table[ api_index ][ 1 ];
+    if ( information ) {
+      for ( i=1 ; i <= information->maximum ; i++ ) {
+        the_thread = (Thread_Control *)information->local_table[ i ];
+        if ( the_thread )
+          (*routine)(the_thread, arg);
+      }
+    }
+  }
+}
 
 static inline bool equal_to_uint32_t( uint32_t * lhs, uint32_t * rhs )
 {
    if ( *lhs == *rhs )
      return true;
-   else 
+   else
      return false;
 }
 
@@ -60,31 +129,163 @@ static inline bool less_than_uint32_t( uint32_t * lhs, uint32_t * rhs )
 }
 
 #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-  #define _Thread_CPU_usage_Equal_to( _lhs, _rhs ) \
+  #define CPU_usage_Equal_to( _lhs, _rhs ) \
           _Timestamp_Equal_to( _lhs, _rhs )
 #else
-  #define _Thread_CPU_usage_Equal_to( _lhs, _rhs ) \
+  #define CPU_usage_Equal_to( _lhs, _rhs ) \
           equal_to_uint32_t( _lhs, _rhs )
 #endif
 
 #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-#define  _Thread_CPU_usage_Set_to_zero( _time ) \
+  #define CPU_usage_Set_to_zero( _time ) \
          _Timestamp_Set_to_zero( _time )
 #else
-#define  _Thread_CPU_usage_Set_to_zero( _time ) \
+  #define CPU_usage_Set_to_zero( _time ) \
        do { \
          *_time = 0; \
        } while (0)
 #endif
 
 #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-#define _Thread_CPU_usage_Less_than( _lhs, _rhs ) \
+  #define CPU_usage_Less_than( _lhs, _rhs ) \
         _Timestamp_Less_than( _lhs, _rhs )
 #else
-#define _Thread_CPU_usage_Less_than( _lhs, _rhs ) \
+  #define CPU_usage_Less_than( _lhs, _rhs ) \
          less_than_uint32_t( _lhs, _rhs )
 #endif
 
+static void
+print_memsize(rtems_cpu_usage_data* data, const uint32_t size, const char* label)
+{
+  if (size > (1024 * 1024))
+    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 "M %s",
+                          size / (1024 * 1024), label);
+  else if (size > 1024)
+    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 "K %s",
+                          size / 1024, label);
+  else
+    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 " %s",
+                          size, label);
+}
+
+static int
+print_time(rtems_cpu_usage_data*    data,
+           const Timestamp_Control* time,
+           const int                length)
+{
+  uint32_t secs = _Timestamp_Get_seconds( time );
+  uint32_t usecs = _Timestamp_Get_nanoseconds( time ) / TOD_NANOSECONDS_PER_MICROSECOND;
+  int      len = 0;
+
+  if (secs > 60)
+  {
+    uint32_t mins = secs / 60;
+    if (mins > 60)
+    {
+      uint32_t hours = mins / 60;
+      if (hours > 24)
+      {
+        len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "d", hours / 24);
+        hours %= 24;
+      }
+      len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "hr", hours);
+      mins %= 60;
+    }
+    len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "m", mins);
+    secs %= 60;
+  }
+  len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 ".%06" PRIu32, secs, usecs);
+
+  if (len < length)
+    (*data->plugin.print)(data->plugin.context, "%*c", length - len, ' ');
+
+  return len;
+}
+
+/*
+ * Count the number of tasks.
+ */
+static void
+task_counter(Thread_Control *thread, void* arg)
+{
+  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
+  ++data->task_count;
+}
+
+/*
+ * Create the usage and current usage table for display.
+ */
+static void
+task_usage(Thread_Control* thread, void* arg)
+{
+  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
+  Thread_CPU_usage_t    usage = thread->cpu_time_used;
+  Thread_CPU_usage_t    current = data->zero;
+  int                   j;
+
+  data->stack_size += thread->Start.Initial_stack.size;
+
+  for (j = 0; j < data->last_task_count; j++)
+  {
+    if (thread == data->last_tasks[j])
+    {
+      _Timestamp_Subtract(&data->last_usage[j], &usage, &current);
+      break;
+    }
+  }
+
+  /*
+   * When not using nanosecond CPU usage resolution, we have to count the
+   * number of "ticks" we gave credit for to give the user a rough guideline as
+   * to what each number means proportionally.
+   */
+  _Timestamp_Add_to(&data->total, &usage);
+  _Timestamp_Add_to(&data->current, &current);
+
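+  /*
+   * 0x09010001 is assumed to be the object id of the IDLE thread
+   * (internal API, threads class, node 1, index 1).
+   */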
+  if (thread->Object.id == 0x09010001)
+  {
+    data->idle = usage;
+    data->current_idle = current;
+  }
+
+  /*
+   * Create the task list to display, sorting as we insert.
+   */
+  for (j = 0; j < data->task_count; j++)
+  {
+    if (data->tasks[j])
+    {
+      int k;
+
+      /*
+       * Sort on the current load.
+       */
+      if (data->sort_current)
+      {
+        if (CPU_usage_Equal_to(&current, &data->zero) ||
+             CPU_usage_Less_than(&current, &data->current_usage[j]))
+          continue;
+      }
+      else
+      {
+        if (CPU_usage_Equal_to(&usage, &data->zero) ||
+            CPU_usage_Less_than(&usage, &data->usage[j]))
+          continue;
+      }
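+      /* Shift the lower-ranked entries down one slot to make room at j. */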
+      for (k = (data->task_count - 1); k >= j; k--)
+      {
+        data->tasks[k + 1] = data->tasks[k];
+        data->usage[k + 1]  = data->usage[k];
+        data->current_usage[k + 1]  = data->current_usage[k];
+      }
+    }
+    data->tasks[j] = thread;
+    data->usage[j] = usage;
+    data->current_usage[j] = current;
+    break;
+  }
+}
+
 /*
  * rtems_cpuusage_top_thread
  *
@@ -94,202 +295,251 @@ static inline bool less_than_uint32_t( uint32_t * lhs, uint32_t * rhs )
 static void
 rtems_cpuusage_top_thread (rtems_task_argument arg)
 {
-  uint32_t                  api_index;
-  Thread_Control*           the_thread;
-  int                       i;
-  int                       j;
-  int                       k;
-  Objects_Information*      information;
-  char                      name[13];
-  int                       task_count = 0;
-  uint32_t                  seconds, nanoseconds;
-  rtems_cpu_usage_plugin_t* plugin = (rtems_cpu_usage_plugin_t*)arg;
-  Thread_Control*           load_tasks[RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS + 1];
-  Thread_CPU_usage_t        load[RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS + 1];
-  Thread_CPU_usage_t        zero;
-  Timestamp_Control         uptime;
-  uint32_t                  ival, fval;
-
-  while (true) {
-    #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-      Timestamp_Control  total, ran, uptime_at_last_reset;
-    #else
-      uint32_t           total_units = 0;
-    #endif
+  rtems_cpu_usage_data*  data = (rtems_cpu_usage_data*) arg;
+  char                   name[13];
+  int                    i;
+  Heap_Information_block wksp;
+  uint32_t               ival, fval;
+  int                    task_count;
+  rtems_event_set        out;
+  rtems_status_code      sc;
+  bool                   first_time = true;
 
-    rtems_cpuusage_top_thread_active = 1;
+  data->thread_active = true;
 
-    _Thread_CPU_usage_Set_to_zero( &zero);
-    memset (load_tasks, 0, sizeof (load_tasks));
-    for (i=0; i< (RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS + 1); i++)
-      _Thread_CPU_usage_Set_to_zero( &load[i] );
+  _TOD_Get_uptime(&data->last_uptime);
 
-   /*
-     * Iterate over the tasks and sort the highest load tasks
-     * into our local arrays. We only handle a limited number of
-     * tasks.
-     */
-    for ( api_index = 1 ; api_index <= OBJECTS_APIS_LAST ; api_index++ ) {
-      #if !defined(RTEMS_POSIX_API) || defined(RTEMS_DEBUG)
-        if ( !_Objects_Information_table[ api_index ] )
-          continue;
-      #endif
-
-      information = _Objects_Information_table[ api_index ][ 1 ];
-      if ( information ) {
-        for ( i=1 ; i <= information->maximum ; i++ ) {
-          the_thread = (Thread_Control *)information->local_table[ i ];
-          if ( the_thread ) {
-            Thread_CPU_usage_t usage = the_thread->cpu_time_used;
-
-            /*
-             *  When not using nanosecond CPU usage resolution, we have to count
-             *  the number of "ticks" we gave credit for to give the user a rough
-             *  guideline as to what each number means proportionally.
-             */
-            #ifdef __RTEMS_USE_TICKS_FOR_STATISTICS__
-              total_units += usage;
-            #endif
-
-            /* Count the number of tasks and sort this load value */
-            task_count++;
-            for (j = 0; j < RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS; j++) {
-              if (load_tasks[j]) {
-                if ( _Thread_CPU_usage_Equal_to( &usage, &zero) || 
-                     _Thread_CPU_usage_Less_than( &usage, &load[j]))
-                  continue;
-                for (k = (RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS - 1); k >= j; k--){
-                  load_tasks[k + 1] = load_tasks[k];
-                  load[k + 1]  = load[k];
-                }
-              }
-              load_tasks[j] = the_thread;
-              load[j]  = usage;
-              break;
-            }
-          }
-        }
+  CPU_usage_Set_to_zero(&data->zero);
+
+  while (data->thread_run)
+  {
+    Timestamp_Control uptime_at_last_reset = CPU_usage_Uptime_at_last_reset;
+    size_t            tasks_size;
+    size_t            usage_size;
+    Timestamp_Control load;
+
+    data->task_count = 0;
+    rtems_iterate_over_all_threads_2(task_counter, data);
+
+    tasks_size = sizeof(Thread_Control*) * (data->task_count + 1);
+    usage_size = sizeof(Thread_CPU_usage_t) * (data->task_count + 1);
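+    /*
+     * The extra slot allows the insertion sort in task_usage() to shift
+     * an entry into index task_count.
+     */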
+
+    if (data->task_count > data->task_size)
+    {
+      data->tasks = realloc(data->tasks, tasks_size);
+      data->usage = realloc(data->usage, usage_size);
+      data->current_usage = realloc(data->current_usage, usage_size);
+      if ((data->tasks == NULL) || (data->usage == NULL) || (data->current_usage == NULL))
+      {
+        (*data->plugin.print)(data->plugin.context, "top worker: error: no memory\n");
+        data->thread_run = false;
+        break;
       }
     }
 
-    #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-      _Timestamp_Set_to_zero( &total );
-      uptime_at_last_reset = CPU_usage_Uptime_at_last_reset;
-    #endif
+    memset(data->tasks, 0, tasks_size);
+    memset(data->usage, 0, usage_size);
+    memset(data->current_usage, 0, usage_size);
 
-    _TOD_Get_uptime( &uptime );
-    seconds = _Timestamp_Get_seconds( &uptime );
-    nanoseconds = _Timestamp_Get_nanoseconds( &uptime ) /
-                  TOD_NANOSECONDS_PER_MICROSECOND;
-    (*plugin->print)(plugin->context, "\x1b[H\x1b[J Press ENTER to exit.\n\n");
-    (*plugin->print)(plugin->context, "uptime: ");
-    (*plugin->print)(plugin->context,
-      "%7" PRIu32 ".%06" PRIu32 "\n",  seconds, nanoseconds
-    );
+    _Timestamp_Set_to_zero(&data->total);
+    _Timestamp_Set_to_zero(&data->current);
+    data->stack_size = 0;
+
+    _TOD_Get_uptime(&data->uptime);
+    _Timestamp_Subtract(&uptime_at_last_reset, &data->uptime, &data->uptime);
+    _Timestamp_Subtract(&data->last_uptime, &data->uptime, &data->period);
+    data->last_uptime = data->uptime;
 
-    (*plugin->print)(
-       plugin->context,
-       "-------------------------------------------------------------------------------\n"
-       "                              CPU USAGE BY THREAD\n"
-       "------------+---------------------+---------------+---------------+------------\n"
-       #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-        " ID         | NAME                | RPRI | CPRI   | SECONDS       | PERCENT\n"
-       #else
-        " ID         | NAME                | RPRI | CPRI   | TICKS         | PERCENT\n"
-       #endif
-       "------------+---------------------+---------------+---------------+------------\n"
+    rtems_iterate_over_all_threads_2(task_usage, data);
+
+    if (data->task_count > data->task_size)
+    {
+      data->last_tasks = realloc(data->last_tasks, tasks_size);
+      data->last_usage = realloc(data->last_usage, usage_size);
+      if ((data->last_tasks == NULL) || (data->last_usage == NULL))
+      {
+        (*data->plugin.print)(data->plugin.context, "top worker: error: no memory\n");
+        data->thread_run = false;
+        break;
+      }
+      data->task_size = data->task_count;
+    }
+
+    memcpy(data->last_tasks, data->tasks, tasks_size);
+    memcpy(data->last_usage, data->usage, usage_size);
+    data->last_task_count = data->task_count;
+
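+    /* Sorting by current load needs two samples; skip the first pass. */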
+    if (data->sort_current && first_time)
+    {
+      rtems_task_wake_after(RTEMS_MILLISECONDS_TO_TICKS(500));
+      first_time = false;
+      continue;
+    }
+
+    _Protected_heap_Get_information(&_Workspace_Area, &wksp);
+
+    if (data->single_page)
+      (*data->plugin.print)(data->plugin.context,
+                            "\x1b[H\x1b[J"
+                            " <ENTER>:Exit  <C>:%s  <SPACE>:Refresh"
+                            "  <S>:Scroll  <A>:All  <+/->:Lines\n",
+                            data->sort_current ? "Total  " : "Current");
+    (*data->plugin.print)(data->plugin.context,"\n");
+
+    /*
+     * Uptime and period of this sample.
+     */
+    (*data->plugin.print)(data->plugin.context, "Uptime: ");
+    print_time(data, &data->uptime, 20);
+    (*data->plugin.print)(data->plugin.context, " Period: ");
+    print_time(data, &data->period, 20);
+
+    /*
+     * Task count, load and idle levels.
+     */
+    (*data->plugin.print)(data->plugin.context, "\nTasks: %4i  ", data->task_count);
+
+    _Timestamp_Subtract(&data->idle, &data->total, &load);
+    _Timestamp_Divide(&load, &data->uptime, &ival, &fval);
+    (*data->plugin.print)(data->plugin.context,
+                          "Load Average: %4" PRIu32 ".%03" PRIu32 "%%", ival, 
fval);
+    _Timestamp_Subtract(&data->current_idle, &data->current, &load);
+    _Timestamp_Divide(&load, &data->period, &ival, &fval);
+    (*data->plugin.print)(data->plugin.context,
+                          "  Load: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
+    _Timestamp_Divide(&data->current_idle, &data->period, &ival, &fval);
+    (*data->plugin.print)(data->plugin.context,
+                          "  Idle: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
+
+    /*
+     * Memory usage.
+     */
+    if (rtems_configuration_get_unified_work_area())
+    {
+      (*data->plugin.print)(data->plugin.context, "\nMem: ");
+      print_memsize(data, wksp.Free.total, "free");
+      print_memsize(data, wksp.Used.total, "used");
+    }
+    else
+    {
+      region_information_block libc_heap;
+      malloc_info(&libc_heap);
+      (*data->plugin.print)(data->plugin.context, "\nMem: Wksp: ");
+      print_memsize(data, wksp.Free.total, "free");
+      print_memsize(data, wksp.Used.total, "used  Heap: ");
+      print_memsize(data, libc_heap.Free.total, "free");
+      print_memsize(data, libc_heap.Used.total, "used");
+    }
+
+    print_memsize(data, data->stack_size, "stack\n");
+
+    (*data->plugin.print)(data->plugin.context,
+       "\n"
+        " ID         | NAME                | RPRI | CPRI   | TIME              
  | TOTAL   | CURRENT\n"
+       
"------------+---------------------+---------------+---------------------+-----%s-+-----%s-\n",
+       data->sort_current ? "---" : "***",
+       data->sort_current ? "***" : "---"
     );
 
-    for (i = 0; i < RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS; i++) {
+    task_count = 0;
+
+    for (i = 0; i < data->task_count; i++)
+    {
+      Thread_Control*   thread = data->tasks[i];
+      Timestamp_Control last;
+      Timestamp_Control usage;
+      Timestamp_Control current_usage;
 
-      if (!load_tasks[i])
+      if (thread == NULL)
+        break;
+
+      if (data->single_page && (data->show != 0) && (i >= data->show))
         break;
 
       /*
-       * If this is the currently executing thread, account for time
-       * since the last context switch.
+       * We need to count the number displayed to clear the remainder of
+       * the display.
        */
-      the_thread = load_tasks[i];
-
-      rtems_object_get_name( the_thread->Object.id, sizeof(name), name );
-      (*plugin->print)(
-        plugin->context,
-        " 0x%08" PRIx32 " | %-19s |  %3" PRId32 " |  %3" PRId32 "   |",
-        the_thread->Object.id,
-        name,
-        the_thread->real_priority,
-        the_thread->current_priority
-      );
-
-      #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-      {
-        Timestamp_Control last;
+      ++task_count;
 
-        /*
-         * If this is the currently executing thread, account for time
-         * since the last context switch.
-         */
-        ran = load[i];
-        if ( _Thread_Get_time_of_last_context_switch( the_thread, &last ) ) {
-          Timestamp_Control used;
-          _TOD_Get_uptime( &uptime );
-          _Timestamp_Subtract( &last, &uptime, &used );
-          _Timestamp_Add_to( &ran, &used );
-        } else {
-          _TOD_Get_uptime( &uptime );
-        }
-        _Timestamp_Subtract( &uptime_at_last_reset, &uptime, &total );
-        _Timestamp_Divide( &ran, &total, &ival, &fval );
+      /*
+       * If the API is POSIX, print the entry point.
+       */
+      rtems_object_get_name(thread->Object.id, sizeof(name), name);
+      if (name[0] == '\0')
+        snprintf(name, sizeof(name) - 1, "(%p)", thread->Start.entry_point);
+
+      (*data->plugin.print)(data->plugin.context,
+                            " 0x%08" PRIx32 " | %-19s |  %3" PRId32 " |  %3" 
PRId32 "   | ",
+                            thread->Object.id,
+                            name,
+                            thread->real_priority,
+                            thread->current_priority);
+
+      usage = data->usage[i];
+      current_usage = data->current_usage[i];
+
+      /*
+       * If this is the currently executing thread, account for time since
+       * the last context switch.
+       */
+      if (_Thread_Get_time_of_last_context_switch(thread, &last))
+      {
+        Timestamp_Control used;
+        Timestamp_Control now;
 
         /*
-         * Print the information
+         * Get the current uptime and, assuming we are not pre-empted,
+         * measure the time between this thread's last context switch and now.
          */
-
-        seconds = _Timestamp_Get_seconds( &ran );
-        nanoseconds = _Timestamp_Get_nanoseconds( &ran ) /
-          TOD_NANOSECONDS_PER_MICROSECOND;
-       (*plugin->print)( plugin->context,
-          "%7" PRIu32 ".%06" PRIu32 " |%4" PRIu32 ".%03" PRIu32 "\n",
-          seconds, nanoseconds,
-            ival, fval
-        );
+        _TOD_Get_uptime(&now);
+        _Timestamp_Subtract(&last, &now, &used);
+        _Timestamp_Add_to(&usage, &used);
+        _Timestamp_Add_to(&current_usage, &used);
       }
-      #else
-        if (total_units) {
-          uint64_t ival_64;
-
-          ival_64 = load[i];
-          ival_64 *= 100000;
-          ival = ival_64 / total_units;
-        } else {
-          ival = 0;
-        }
-
-        fval = ival % 1000;
-        ival /= 1000;
-       (*plugin->print)( plugin->context,
-          "%14" PRIu32 " |%4" PRIu32 ".%03" PRIu32 "\n",
-          load[i],
-          ival,
-          fval
-        );
-      #endif
+
+      /*
+       * Print the information
+       */
+      print_time(data, &usage, 19);
+      _Timestamp_Divide(&usage, &data->total, &ival, &fval);
+      (*data->plugin.print)(data->plugin.context,
+                            " |%4" PRIu32 ".%03" PRIu32, ival, fval);
+      _Timestamp_Divide(&current_usage, &data->period, &ival, &fval);
+      (*data->plugin.print)(data->plugin.context,
+                            " |%4" PRIu32 ".%03" PRIu32 "\n", ival, fval);
     }
 
-    if (task_count < RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS)
+    if (data->single_page && (data->show != 0) && (task_count < data->show))
     {
-      j = RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS - task_count;
-      while (j > 0)
+      i = data->show - task_count;
+      while (i > 0)
       {
-       (*plugin->print)( plugin->context, "\x1b[K\n");
-        j--;
+        (*data->plugin.print)(data->plugin.context, "\x1b[K\n");
+        i--;
       }
     }
 
-    rtems_cpuusage_top_thread_active = 0;
-
-    rtems_task_wake_after (RTEMS_MICROSECONDS_TO_TICKS (5000000));
+    sc = rtems_event_receive(RTEMS_EVENT_1,
+                             RTEMS_EVENT_ANY,
+                             RTEMS_MILLISECONDS_TO_TICKS (data->poll_rate_usecs),
+                             &out);
+    if ((sc != RTEMS_SUCCESSFUL) && (sc != RTEMS_TIMEOUT))
+    {
+      (*data->plugin.print)(data->plugin.context,
+                            "error: event receive: %s\n", 
rtems_status_text(sc));
+      break;
+    }
   }
+
+  free(data->tasks);
+  free(data->last_tasks);
+  free(data->usage);
+  free(data->last_usage);
+  free(data->current_usage);
+
+  data->thread_active = false;
+
+  rtems_task_delete (RTEMS_SELF);
 }
 
 void rtems_cpu_usage_top_with_plugin(
@@ -297,17 +547,30 @@ void rtems_cpu_usage_top_with_plugin(
   rtems_printk_plugin_t  print
 )
 {
-  rtems_status_code   sc;
-  rtems_task_priority priority;
-  rtems_name          name;
-  rtems_id            id;
-  rtems_cpu_usage_plugin_t  plugin;
+#ifdef __RTEMS_USE_TICKS_FOR_STATISTICS__
+  if ( !print )
+    return;
+  (*print)(context, "error: tick kernels not supported\n");
+#else
+  rtems_status_code      sc;
+  rtems_task_priority    priority;
+  rtems_name             name;
+  rtems_id               id;
+  rtems_cpu_usage_data   data;
+  int                    show_lines = 25;
 
   if ( !print )
     return;
 
-  plugin.context = context;
-  plugin.print   = print;
+  memset(&data, 0, sizeof(data));
+
+  data.thread_run = true;
+  data.single_page = true;
+  data.sort_current = true;
+  data.poll_rate_usecs = 3000;
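+  /* Note: poll_rate_usecs is passed to RTEMS_MILLISECONDS_TO_TICKS(),
+     so the value is interpreted as milliseconds. */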
+  data.show = show_lines;
+  data.plugin.context = context;
+  data.plugin.print = print;
 
   sc = rtems_task_set_priority (RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &priority);
 
@@ -339,7 +602,7 @@ void rtems_cpu_usage_top_with_plugin(
   }
 
   sc = rtems_task_start (
-    id, rtems_cpuusage_top_thread, (rtems_task_argument)&plugin
+    id, rtems_cpuusage_top_thread, (rtems_task_argument) &data
   );
   if (sc != RTEMS_SUCCESSFUL)
   {
@@ -352,23 +615,61 @@ void rtems_cpu_usage_top_with_plugin(
     return;
   }
 
-  for (;;)
+  while (true)
   {
     int c = getchar ();
 
-    if ((c == '\r') || (c == '\n'))
+    if ((c == '\r') || (c == '\n') || (c == 'q') || (c == 'Q'))
     {
-      int loops = 20;
+      int loops = 50;
 
-      while (loops && rtems_cpuusage_top_thread_active)
-        rtems_task_wake_after (RTEMS_MICROSECONDS_TO_TICKS (100000));
+      data.thread_run = false;
+
+      rtems_event_send(id, RTEMS_EVENT_1);
 
-      rtems_task_delete (id);
+      while (loops && data.thread_active)
+        rtems_task_wake_after (RTEMS_MICROSECONDS_TO_TICKS (100000));
 
       (*print)(context, "load monitoring stopped.\n");
       return;
     }
+    else if ((c == 'c') || (c == 'C'))
+    {
+      data.sort_current = !data.sort_current;
+      rtems_event_send(id, RTEMS_EVENT_1);
+    }
+    else if ((c == 's') || (c == 'S'))
+    {
+      data.single_page = !data.single_page;
+      rtems_event_send(id, RTEMS_EVENT_1);
+    }
+    else if ((c == 'a') || (c == 'A'))
+    {
+      if (data.show == 0)
+        data.show = show_lines;
+      else
+        data.show = 0;
+      rtems_event_send(id, RTEMS_EVENT_1);
+    }
+    else if (c == '+')
+    {
+      ++show_lines;
+      if (data.show != 0)
+        data.show = show_lines;
+    }
+    else if (c == '-')
+    {
+      if (show_lines > 5)
+        --show_lines;
+      if (data.show != 0)
+        data.show = show_lines;
+    }
+    else if (c == ' ')
+    {
+      rtems_event_send(id, RTEMS_EVENT_1);
+    }
   }
+#endif
 }
 
 void rtems_cpu_usage_top( void )
-- 
2.2.2
