Signed-off-by: Ciprian Barbu <ciprian.ba...@linaro.org>
---
I started from scratch this time, following the list Taras proposed. This
patch covers the first four bullets on his list.

One thing worth mentioning: schedule_mq_mt_prio_atomic does not work (at
least on linux-generic) unless odp_schedule_release_atomic() is called after
each dequeued buffer. That might make bullet 6 on Taras' list unnecessary;
I would appreciate some clarification on this.
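
To make that dependency explicit, here is a minimal sketch of the dequeue
loop the multi-threaded atomic case relies on (illustrative only, not the
exact code from the patch; expected_bufs stands in for the per-thread buffer
count, and I am assuming odp_schedule_release_atomic() is harmless when no
atomic context is held, which is how I read linux-generic):

    int i;

    for (i = 0; i < expected_bufs; i++) {
        odp_queue_t from;
        odp_buffer_t buf;

        /* Block until the scheduler hands us a buffer from any queue */
        buf = odp_schedule(&from, ODP_SCHED_WAIT);
        if (buf == ODP_BUFFER_INVALID)
            break;

        odp_buffer_free(buf);

        /* ODP_SCHED_SYNC_ATOMIC queues are served to one thread at a
         * time; releasing the atomic context lets the other workers
         * receive from the same queue. Without this call the other
         * threads stay blocked in odp_schedule(). */
        odp_schedule_release_atomic();
    }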

 test/validation/.gitignore                         |   1 +
 test/validation/Makefile.am                        |   4 +-
 test/validation/schedule/odp_schedule.c            |  36 ++
 test/validation/schedule/odp_schedule_test.c       | 450 +++++++++++++++++++++
 test/validation/schedule/odp_schedule_testsuites.h |  21 +
 5 files changed, 511 insertions(+), 1 deletion(-)
 create mode 100644 test/validation/schedule/odp_schedule.c
 create mode 100644 test/validation/schedule/odp_schedule_test.c
 create mode 100644 test/validation/schedule/odp_schedule_testsuites.h

diff --git a/test/validation/.gitignore b/test/validation/.gitignore
index 696cf0a..c0556e6 100644
--- a/test/validation/.gitignore
+++ b/test/validation/.gitignore
@@ -3,3 +3,4 @@
 odp_init
 odp_queue
 odp_crypto
+odp_schedule
diff --git a/test/validation/Makefile.am b/test/validation/Makefile.am
index 0b831d0..9e9f84f 100644
--- a/test/validation/Makefile.am
+++ b/test/validation/Makefile.am
@@ -6,7 +6,7 @@ AM_LDFLAGS += -L$(CUNIT_PATH)/lib -static -lcunit
 if ODP_CUNIT_ENABLED
 TESTS = ${bin_PROGRAMS}
 check_PROGRAMS = ${bin_PROGRAMS}
-bin_PROGRAMS = odp_init odp_queue odp_crypto
+bin_PROGRAMS = odp_init odp_queue odp_crypto odp_schedule
 odp_init_LDFLAGS = $(AM_LDFLAGS)
 odp_queue_LDFLAGS = $(AM_LDFLAGS)
 odp_crypto_CFLAGS = $(AM_CFLAGS) -I$(srcdir)/crypto
@@ -18,3 +18,5 @@ dist_odp_queue_SOURCES = odp_queue.c
 dist_odp_crypto_SOURCES = crypto/odp_crypto_test_async_inp.c \
                          crypto/odp_crypto_test_sync_inp.c \
                          odp_crypto.c
+dist_odp_schedule_SOURCES = schedule/odp_schedule_test.c \
+                           schedule/odp_schedule.c
diff --git a/test/validation/schedule/odp_schedule.c b/test/validation/schedule/odp_schedule.c
new file mode 100644
index 0000000..9ce1281
--- /dev/null
+++ b/test/validation/schedule/odp_schedule.c
@@ -0,0 +1,36 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include "odp_schedule_testsuites.h"
+
+static CU_SuiteInfo suites[] = {
+       {
+               "Scheduler tests",
+               schedule_test_init,
+               schedule_test_finalize,
+               NULL,
+               NULL,
+               schedule_tests
+       },
+       CU_SUITE_INFO_NULL,
+};
+
+int main(void)
+{
+       CU_set_error_action(CUEA_ABORT);
+       /* initialize the CUnit test registry */
+       if (CUE_SUCCESS != CU_initialize_registry())
+               return CU_get_error();
+
+       /* register suites */
+       CU_register_suites(suites);
+       /* Run all tests using the CUnit Basic interface */
+       CU_basic_set_mode(CU_BRM_VERBOSE);
+       CU_basic_run_tests();
+       CU_cleanup_registry();
+
+       return CU_get_error();
+}
diff --git a/test/validation/schedule/odp_schedule_test.c b/test/validation/schedule/odp_schedule_test.c
new file mode 100644
index 0000000..5a469a6
--- /dev/null
+++ b/test/validation/schedule/odp_schedule_test.c
@@ -0,0 +1,450 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include "odp_schedule_testsuites.h"
+#include <odph_linux.h>
+
+#define MAX_WORKERS            32            /**< Max worker threads */
+#define MSG_POOL_SIZE           (4*1024*1024)
+#define QUEUES_PER_PRIO                16            /**< Queues per priority */
+#define BUF_SIZE               64
+#define TEST_NUM_BUFS          10
+
+#define GLOBALS_SHM_NAME       "test_globals"
+#define MSG_POOL_NAME          "msg_pool"
+#define SHM_MSG_POOL_NAME      "shm_msg_pool"
+#define SHM_THR_ARGS_NAME      "shm_thr_args"
+
+
+/** Test global variables */
+typedef struct {
+       int core_count; /**< Core count */
+       int proc_mode;  /**< Process mode */
+       odp_barrier_t barrier; /**< @private Barrier for test synchronisation */
+       odp_schedule_prio_t prio;
+       int prio_buf_count; /**< Number of bufs received at current prio */
+       odp_spinlock_t count_lock; /**< Used for accessing prio counters */
+} test_globals_t;
+
+typedef struct {
+       odp_schedule_sync_t sync;
+       int num_queues;
+       int num_prio;
+       int use_barrier;
+} thread_args_t;
+
+odp_buffer_pool_t pool;
+
+/**
+ * @internal CUnit test case for verifying functionality of
+ *           schedule_wait_time
+ */
+static void schedule_wait_time(void)
+{
+       uint64_t wait_time;
+
+       wait_time = odp_schedule_wait_time(0);
+       CU_ASSERT(wait_time > 0);
+
+       wait_time = odp_schedule_wait_time(1);
+       CU_ASSERT(wait_time > 0);
+
+       wait_time = odp_schedule_wait_time((uint64_t)-1LL);
+       CU_ASSERT(wait_time > 0);
+}
+
+/* Get next priority in the direction from highest to lowest */
+static int get_next_prio(int prio)
+{
+       CU_ASSERT(ODP_SCHED_PRIO_LOWEST != ODP_SCHED_PRIO_HIGHEST);
+
+       if (prio == ODP_SCHED_PRIO_LOWEST)
+               return prio;
+
+       if (ODP_SCHED_PRIO_HIGHEST < ODP_SCHED_PRIO_LOWEST)
+               return prio + 1;
+
+       return prio - 1;
+}
+
+static void *schedule_common_(void *arg)
+{
+       thread_args_t *args = (thread_args_t *)arg;
+       odp_schedule_sync_t sync;
+       int num_queues, num_prio, use_barrier;
+       int i, j, k;
+       odp_buffer_pool_t pool;
+       char name[] = "sched_XX_YY_Z";
+       odp_shm_t shm;
+       test_globals_t *globals;
+
+       sync = args->sync;
+       num_queues = args->num_queues;
+       num_prio = args->num_prio;
+       use_barrier = args->use_barrier;
+
+       pool = odp_buffer_pool_lookup(MSG_POOL_NAME);
+       CU_ASSERT_FATAL(pool != ODP_BUFFER_POOL_INVALID);
+
+       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+       globals = odp_shm_addr(shm);
+       CU_ASSERT_FATAL(globals != NULL);
+
+       switch (sync) {
+       case ODP_SCHED_SYNC_NONE:
+               name[12] = 'n';
+               break;
+       case ODP_SCHED_SYNC_ATOMIC:
+               name[12] = 'a';
+               break;
+       case ODP_SCHED_SYNC_ORDERED:
+               name[12] = 'o';
+               break;
+       default:
+               CU_ASSERT(0);
+               break;
+       }
+
+       for (i = 0; i < num_prio; i++) {
+               name[6] = '0' + i/10;
+               name[7] = '0' + i - 10*(i/10);
+
+               for (j = 0; j < num_queues; j++) {
+                       odp_queue_t queue;
+
+                       name[9]  = '0' + j/10;
+                       name[10]  = '0' + j - 10*(j/10);
+                       queue = odp_queue_lookup(name);
+                       CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+                       for (k = 0; k < TEST_NUM_BUFS; k++) {
+                               odp_buffer_t buf;
+                               buf = odp_buffer_alloc(pool);
+                               CU_ASSERT(buf != ODP_BUFFER_INVALID);
+                               CU_ASSERT(odp_queue_enq(queue, buf) == 0);
+                       }
+               }
+       }
+
+       if (use_barrier)
+               odp_barrier_wait(&globals->barrier);
+
+       for (i = 0; i < TEST_NUM_BUFS * num_queues * num_prio; i++) {
+               odp_buffer_t buf;
+               odp_queue_t from;
+
+               buf = odp_schedule(&from, ODP_SCHED_WAIT);
+               CU_ASSERT(buf != ODP_BUFFER_INVALID);
+               CU_ASSERT(from != ODP_QUEUE_INVALID);
+
+               odp_buffer_free(buf);
+
+               /* TODO: odp_queue_sched_prio() is not in yet, so we can't
+                * check that the priority is the one we expected */
+               odp_spinlock_lock(&globals->count_lock);
+               globals->prio_buf_count++;
+               if (globals->prio_buf_count == TEST_NUM_BUFS * QUEUES_PER_PRIO)
+                       /* Small artificial function to get next prio */
+                       globals->prio = get_next_prio(globals->prio);
+               odp_spinlock_unlock(&globals->count_lock);
+
+               odp_schedule_release_atomic();
+       }
+
+       return NULL;
+}
+
+static void schedule_common(odp_schedule_sync_t sync, int num_queues,
+                           int num_prio, int use_barrier)
+{
+       thread_args_t args;
+
+       args.sync = sync;
+       args.num_queues = num_queues;
+       args.num_prio = num_prio;
+       args.use_barrier = use_barrier;
+
+       schedule_common_(&args);
+}
+
+static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
+                            int num_prio)
+{
+       odp_shm_t shm;
+       test_globals_t *globals;
+       thread_args_t *thr_args;
+       int first_core;
+       odph_linux_pthread_t thread_tbl[MAX_WORKERS];
+
+       shm = odp_shm_lookup(GLOBALS_SHM_NAME);
+       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+       globals = odp_shm_addr(shm);
+       CU_ASSERT_FATAL(globals != NULL);
+
+       shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
+       CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+       thr_args = odp_shm_addr(shm);
+       CU_ASSERT_FATAL(thr_args != NULL);
+
+       thr_args->sync = sync;
+       thr_args->num_queues = num_queues;
+       thr_args->num_prio = num_prio;
+       thr_args->use_barrier = 1;
+
+       /*
+        * By default core #0 runs Linux kernel background tasks.
+        * Start mapping thread from core #1
+        */
+       first_core = 1;
+
+       if (odp_sys_core_count() == 1)
+               first_core = 0;
+
+       memset(thread_tbl, 0, sizeof(thread_tbl));
+
+       /* Reset buffer counters from the main thread */
+       globals->prio = ODP_SCHED_PRIO_HIGHEST;
+       globals->prio_buf_count = 0;
+
+       /* Create and launch worker threads */
+       odph_linux_pthread_create(thread_tbl, globals->core_count, first_core,
+                                 schedule_common_, thr_args);
+
+       /* Wait for worker threads to terminate */
+       odph_linux_pthread_join(thread_tbl, globals->core_count);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_NONE */
+static void schedule_1q_1t_none(void)
+{
+       schedule_common(ODP_SCHED_SYNC_NONE, 1, 1, 0);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
+static void schedule_1q_1t_atomic(void)
+{
+       schedule_common(ODP_SCHED_SYNC_ATOMIC, 1, 1, 0);
+}
+
+/* 1 queue 1 thread ODP_SCHED_SYNC_ORDERED */
+static void schedule_1q_1t_ordered(void)
+{
+       schedule_common(ODP_SCHED_SYNC_ORDERED, 1, 1, 0);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_NONE */
+static void schedule_mq_1t_none(void)
+{
+       /* Only one priority is involved in these tests, but use
+          the same number of queues as the more general case uses */
+       schedule_common(ODP_SCHED_SYNC_NONE, QUEUES_PER_PRIO, 1, 0);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
+static void schedule_mq_1t_atomic(void)
+{
+       schedule_common(ODP_SCHED_SYNC_ATOMIC, QUEUES_PER_PRIO, 1, 0);
+}
+
+/* Many queues 1 thread ODP_SCHED_SYNC_ORDERED */
+static void schedule_mq_1t_ordered(void)
+{
+       schedule_common(ODP_SCHED_SYNC_ORDERED, QUEUES_PER_PRIO, 1, 0);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE */
+static void schedule_mq_1t_prio_none(void)
+{
+       int num_prio = odp_schedule_num_prio();
+       schedule_common(ODP_SCHED_SYNC_NONE, QUEUES_PER_PRIO, num_prio, 0);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
+static void schedule_mq_1t_prio_atomic(void)
+{
+       int num_prio = odp_schedule_num_prio();
+       schedule_common(ODP_SCHED_SYNC_ATOMIC, QUEUES_PER_PRIO, num_prio, 0);
+}
+
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_ORDERED */
+static void schedule_mq_1t_prio_ordered(void)
+{
+       int num_prio = odp_schedule_num_prio();
+       schedule_common(ODP_SCHED_SYNC_ORDERED, QUEUES_PER_PRIO, num_prio, 0);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_NONE */
+static void schedule_mq_mt_prio_none(void)
+{
+       int num_prio = odp_schedule_num_prio();
+       parallel_execute(ODP_SCHED_SYNC_NONE, QUEUES_PER_PRIO, num_prio);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC */
+static void schedule_mq_mt_prio_atomic(void)
+{
+       int num_prio = odp_schedule_num_prio();
+       parallel_execute(ODP_SCHED_SYNC_ATOMIC, QUEUES_PER_PRIO, num_prio);
+}
+
+/* Many queues many threads check priority ODP_SCHED_SYNC_ORDERED */
+static void schedule_mq_mt_prio_ordered(void)
+{
+       int num_prio = odp_schedule_num_prio();
+       parallel_execute(ODP_SCHED_SYNC_ORDERED, QUEUES_PER_PRIO, num_prio);
+}
+
+int schedule_test_init(void)
+{
+       odp_shm_t shm;
+       void *pool_base;
+       odp_buffer_pool_t pool;
+       test_globals_t *globals;
+       thread_args_t *thr_args;
+       int i, j;
+       int prios;
+
+       if (0 != odp_init_global(NULL, NULL)) {
+               printf("odp_init_global fail.\n");
+               return -1;
+       }
+       if (0 != odp_init_local()) {
+               printf("odp_init_local fail.\n");
+               return -1;
+       }
+
+       shm = odp_shm_reserve(SHM_MSG_POOL_NAME, MSG_POOL_SIZE,
+                             ODP_CACHE_LINE_SIZE, 0);
+       pool_base = odp_shm_addr(shm);
+       if (pool_base == NULL) {
+               printf("Shared memory reserve failed.\n");
+               return -1;
+       }
+
+       pool = odp_buffer_pool_create(MSG_POOL_NAME, pool_base, MSG_POOL_SIZE,
+                                     BUF_SIZE, ODP_CACHE_LINE_SIZE,
+                                     ODP_BUFFER_TYPE_RAW);
+       if (pool == ODP_BUFFER_POOL_INVALID) {
+               printf("Pool creation failed (msg).\n");
+               return -1;
+       }
+
+       shm = odp_shm_reserve(GLOBALS_SHM_NAME,
+                             sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+
+       globals = odp_shm_addr(shm);
+
+       if (globals == NULL) {
+               printf("Shared memory reserve failed (globals).\n");
+               return -1;
+       }
+
+       memset(globals, 0, sizeof(test_globals_t));
+
+       globals->core_count = odp_sys_core_count();
+       if (globals->core_count > MAX_WORKERS)
+               globals->core_count = MAX_WORKERS;
+
+       shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t),
+                             ODP_CACHE_LINE_SIZE, 0);
+       thr_args = odp_shm_addr(shm);
+
+       if (thr_args == NULL) {
+               printf("Shared memory reserve failed (thr_args).\n");
+               return -1;
+       }
+
+       memset(thr_args, 0, sizeof(thread_args_t));
+
+       /* Barrier to sync test case execution */
+       odp_barrier_init(&globals->barrier, globals->core_count);
+       odp_spinlock_init(&globals->count_lock);
+
+       prios = odp_schedule_num_prio();
+
+       for (i = 0; i < prios; i++) {
+               odp_queue_param_t p;
+               /* Per sched sync type */
+               char name[] = "sched_XX_YY_n";
+
+               name[6] = '0' + i/10;
+               name[7] = '0' + i - 10*(i/10);
+
+               p.sched.prio  = i;
+               p.sched.group = ODP_SCHED_GROUP_DEFAULT;
+
+               for (j = 0; j < QUEUES_PER_PRIO; j++) {
+                       odp_queue_t q;
+                       name[9]  = '0' + j/10;
+                       name[10] = '0' + j - 10*(j/10);
+
+                       name[12] = 'n';
+                       p.sched.sync = ODP_SCHED_SYNC_NONE;
+                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+                       if (q == ODP_QUEUE_INVALID) {
+                               printf("Schedule queue create failed.\n");
+                               return -1;
+                       }
+
+                       name[12] = 'a';
+                       p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+                       if (q == ODP_QUEUE_INVALID) {
+                               printf("Schedule queue create failed.\n");
+                               return -1;
+                       }
+
+                       name[12] = 'o';
+                       p.sched.sync = ODP_SCHED_SYNC_ORDERED;
+                       q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+
+                       if (q == ODP_QUEUE_INVALID) {
+                               printf("Schedule queue create failed.\n");
+                               return -1;
+                       }
+               }
+       }
+       return 0;
+}
+
+static void schedule_num_prio(void)
+{
+       int num_prio;
+
+       num_prio = odp_schedule_num_prio();
+
+       CU_ASSERT(num_prio > 0);
+       /* The number of priorities should be stable across calls */
+       CU_ASSERT(num_prio == odp_schedule_num_prio());
+}
+
+int schedule_test_finalize(void)
+{
+       odp_term_local();
+       odp_term_global();
+       return 0;
+}
+
+struct CU_TestInfo schedule_tests[] = {
+       _CU_TEST_INFO(schedule_wait_time),
+       _CU_TEST_INFO(schedule_num_prio),
+       _CU_TEST_INFO(schedule_1q_1t_none),
+       _CU_TEST_INFO(schedule_1q_1t_atomic),
+       _CU_TEST_INFO(schedule_1q_1t_ordered),
+       _CU_TEST_INFO(schedule_mq_1t_none),
+       _CU_TEST_INFO(schedule_mq_1t_atomic),
+       _CU_TEST_INFO(schedule_mq_1t_ordered),
+       _CU_TEST_INFO(schedule_mq_1t_prio_none),
+       _CU_TEST_INFO(schedule_mq_1t_prio_atomic),
+       _CU_TEST_INFO(schedule_mq_1t_prio_ordered),
+       _CU_TEST_INFO(schedule_mq_mt_prio_none),
+       _CU_TEST_INFO(schedule_mq_mt_prio_atomic),
+       _CU_TEST_INFO(schedule_mq_mt_prio_ordered),
+       CU_TEST_INFO_NULL,
+};
diff --git a/test/validation/schedule/odp_schedule_testsuites.h b/test/validation/schedule/odp_schedule_testsuites.h
new file mode 100644
index 0000000..67a2a69
--- /dev/null
+++ b/test/validation/schedule/odp_schedule_testsuites.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#ifndef ODP_SCHEDULE_TESTSUITES_H_
+#define ODP_SCHEDULE_TESTSUITES_H_
+
+#include "odp.h"
+#include <CUnit/Basic.h>
+
+/* Helper macro for CU_TestInfo initialization */
+#define _CU_TEST_INFO(test_func) {#test_func, test_func}
+
+extern struct CU_TestInfo schedule_tests[];
+
+extern int schedule_test_init(void);
+extern int schedule_test_finalize(void);
+
+#endif /* ODP_SCHEDULE_TESTSUITES_H_ */
-- 
1.8.3.2

