Hi Maxim,

In the v4 patch I did the following:
1. Fixed all checkpatch.pl warnings.
2. Updated all copyright years to 2016.
3. Spent most of the time revising the while (1) loops in the producer/consumer
threads: added a consume count to bound the loop in the consumer thread (a short
sketch of the resulting loop follows this list).
4. Regarding your last comment: there is actually no need to call odp_term_local()
and odp_term_global(), since the cunit common library takes care of that.
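
For reference, here is a minimal sketch of the bounded consumer loop from point 3.
It reuses names from ring_stress.c below (PIECE_BULK, consume_count,
_ring_mc_dequeue_bulk); the signature and error handling are simplified (the real
do_consumer() looks the count up via odp_shm_lookup()), so please read it as an
illustration rather than the exact code in the patch:

    /* assumes <unistd.h>, <odp_api.h>, odp_packet_io_ring_internal.h and
     * ring_suites.h, as in ring_stress.c
     */
    static int do_consumer(_ring_t *r, odp_atomic_u32_t *consume_count)
    {
            void *deq[PIECE_BULK];

            /* bounded by the shared atomic count instead of while (1) */
            while (odp_atomic_load_u32(consume_count) > 0) {
                    /* 0 means one whole bulk was dequeued successfully */
                    if (_ring_mc_dequeue_bulk(r, deq, PIECE_BULK) == 0) {
                            odp_atomic_dec_u32(consume_count);
                            continue;
                    }
                    usleep(10); /* ring still empty, wait for the producers */
            }
            return 0;
    }

Each consumer now exits once the expected number of bulks has been consumed, so
the stress cases cannot spin forever if a producer finishes early.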

Thanks and best regards,
Yi

On 24 May 2016 at 15:22, Yi He <yi...@linaro.org> wrote:

> Fixes: https://bugs.linaro.org/show_bug.cgi?id=2228
>
> Convert ring test program into cunit framework
> Improve LCOV coverage for linux-generic/pktio/ring.c
>
> Signed-off-by: Yi He <yi...@linaro.org>
> ---
>  platform/linux-generic/test/Makefile.am        |   2 +-
>  platform/linux-generic/test/ring/Makefile.am   |  20 +-
>  platform/linux-generic/test/ring/ring_basic.c  | 358 ++++++++++++++++++
>  platform/linux-generic/test/ring/ring_main.c   |  12 +
>  platform/linux-generic/test/ring/ring_stress.c | 309 +++++++++++++++
>  platform/linux-generic/test/ring/ring_suites.c |  74 ++++
>  platform/linux-generic/test/ring/ring_suites.h |  34 ++
>  platform/linux-generic/test/ring/ringtest.c    | 495 -------------------------
>  8 files changed, 797 insertions(+), 507 deletions(-)
>  create mode 100644 platform/linux-generic/test/ring/ring_basic.c
>  create mode 100644 platform/linux-generic/test/ring/ring_main.c
>  create mode 100644 platform/linux-generic/test/ring/ring_stress.c
>  create mode 100644 platform/linux-generic/test/ring/ring_suites.c
>  create mode 100644 platform/linux-generic/test/ring/ring_suites.h
>  delete mode 100644 platform/linux-generic/test/ring/ringtest.c
>
> diff --git a/platform/linux-generic/test/Makefile.am b/platform/linux-generic/test/Makefile.am
> index f74185d..2d58c57 100644
> --- a/platform/linux-generic/test/Makefile.am
> +++ b/platform/linux-generic/test/Makefile.am
> @@ -8,7 +8,7 @@ ODP_MODULES = pktio \
>  if test_vald
>  TESTS = pktio/pktio_run.sh \
>         pktio/pktio_run_tap.sh \
> -       ring/ringtest$(EXEEXT) \
> +       ring/ring_main$(EXEEXT) \
>         shmem/shmem_linux \
>         ${top_builddir}/test/validation/atomic/atomic_main$(EXEEXT) \
>         ${top_builddir}/test/validation/barrier/barrier_main$(EXEEXT) \
> diff --git a/platform/linux-generic/test/ring/Makefile.am b/platform/linux-generic/test/ring/Makefile.am
> index 5a949d0..c086584 100644
> --- a/platform/linux-generic/test/ring/Makefile.am
> +++ b/platform/linux-generic/test/ring/Makefile.am
> @@ -1,16 +1,14 @@
> -include $(top_srcdir)/test/validation/Makefile.inc
> +include ../Makefile.inc
>
> -AM_CFLAGS += -I$(srcdir)/common
> -AM_CFLAGS += -I$(top_srcdir)/test/validation/common
> -AM_LDFLAGS += -static
> +noinst_LTLIBRARIES = libtestring.la
> +libtestring_la_SOURCES = ring_suites.c ring_basic.c ring_stress.c
> +libtestring_la_CFLAGS = $(AM_CFLAGS) $(INCCUNIT_COMMON) $(INCODP)
>
> -COMPILE_ONLY =
> +test_PROGRAMS = ring_main$(EXEEXT)
> +dist_ring_main_SOURCES = ring_main.c
>
> -TESTSCRIPTS =
> +ring_main_LDFLAGS = $(AM_LDFLAGS)
> +ring_main_LDADD = libtestring.la $(LIBCUNIT_COMMON) $(LIBODP)
>
> -EXECUTABLES = ringtest$(EXEEXT)
> +noinst_HEADERS = ring_suites.h
>
> -test_PROGRAMS = $(EXECUTABLES) $(COMPILE_ONLY)
> -
> -ringtest_SOURCES = ringtest.c
> -ringtest_LDADD = $(LIBCUNIT_COMMON) $(LIBODP)
> diff --git a/platform/linux-generic/test/ring/ring_basic.c b/platform/linux-generic/test/ring/ring_basic.c
> new file mode 100644
> index 0000000..19e24f1
> --- /dev/null
> +++ b/platform/linux-generic/test/ring/ring_basic.c
> @@ -0,0 +1,358 @@
> +/* Copyright (c) 2016, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +/**
> + * @file
> + *
> + * ODP ring basic test
> + */
> +
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <string.h>
> +
> +#include <test_debug.h>
> +#include <odp_cunit_common.h>
> +#include <odp_packet_io_ring_internal.h>
> +
> +#include "ring_suites.h"
> +
> +/* helper function declarations */
> +static void __do_basic_burst(_ring_t *r);
> +static void __do_basic_bulk(_ring_t *r);
> +static void __do_basic_watermark(_ring_t *r);
> +
> +/* dummy object pointers for enqueue and dequeue testing */
> +static void **test_enq_data;
> +static void **test_deq_data;
> +
> +/* create two rings: one for single thread usage scenario
> + * and another for multiple thread usage scenario.
> + * st - single thread usage scenario
> + * mt - multiple thread usage scenario
> + */
> +static const char *st_ring_name = "ST basic ring";
> +static const char *mt_ring_name = "MT basic ring";
> +static _ring_t *st_ring, *mt_ring;
> +
> +int ring_test_basic_start(void)
> +{
> +       int i = 0;
> +
> +       /* alloc dummy object pointers for enqueue testing */
> +       test_enq_data = malloc(RING_SIZE * 2 * sizeof(void *));
> +       if (NULL == test_enq_data) {
> +               LOG_ERR("failed to allocate basic test enqueue data\n");
> +               return -1;
> +       }
> +
> +       for (i = 0; i < RING_SIZE * 2; i++)
> +               test_enq_data[i] = (void *)(unsigned long)i;
> +
> +       /* alloc dummy object pointers for dequeue testing */
> +       test_deq_data = malloc(RING_SIZE * 2 * sizeof(void *));
> +       if (NULL == test_deq_data) {
> +               LOG_ERR("failed to allocate basic test dequeue data\n");
> +               free(test_enq_data); test_enq_data = NULL;
> +               return -1;
> +       }
> +
> +       memset(test_deq_data, 0, RING_SIZE * 2 * sizeof(void *));
> +       return 0;
> +}
> +
> +int ring_test_basic_end(void)
> +{
> +       free(test_enq_data);
> +       free(test_deq_data);
> +       return 0;
> +}
> +
> +/* basic test cases */
> +void ring_test_basic_create(void)
> +{
> +       /* prove illegal size shall fail */
> +       st_ring = _ring_create(st_ring_name, ILLEGAL_SIZE, 0);
> +       CU_ASSERT(NULL == st_ring);
> +       CU_ASSERT(EINVAL == __odp_errno);
> +
> +       /* create ring for single thread usage scenario */
> +       st_ring = _ring_create(st_ring_name, RING_SIZE,
> +                              _RING_F_SP_ENQ | _RING_F_SC_DEQ);
> +
> +       CU_ASSERT(NULL != st_ring);
> +       CU_ASSERT(_ring_lookup(st_ring_name) == st_ring);
> +
> +       /* create ring for multiple thread usage scenario */
> +       mt_ring = _ring_create(mt_ring_name, RING_SIZE,
> +                              _RING_SHM_PROC);
> +
> +       CU_ASSERT(NULL != mt_ring);
> +       CU_ASSERT(_ring_lookup(mt_ring_name) == mt_ring);
> +}
> +
> +void ring_test_basic_burst(void)
> +{
> +       /* two rounds to cover both single
> +        * thread and multiple thread APIs
> +        */
> +       __do_basic_burst(st_ring);
> +       __do_basic_burst(mt_ring);
> +}
> +
> +void ring_test_basic_bulk(void)
> +{
> +       __do_basic_bulk(st_ring);
> +       __do_basic_bulk(mt_ring);
> +}
> +
> +void ring_test_basic_watermark(void)
> +{
> +       __do_basic_watermark(st_ring);
> +       __do_basic_watermark(mt_ring);
> +}
> +
> +/* helper function definitions */
> +static void __do_basic_burst(_ring_t *r)
> +{
> +       int result = 0;
> +       unsigned int count = 0;
> +       void * const *source = test_enq_data;
> +       void * const *dest = test_deq_data;
> +       void **enq = NULL, **deq = NULL;
> +
> +       enq = test_enq_data; deq = test_deq_data;
> +
> +       /* ring is empty */
> +       CU_ASSERT(1 == _ring_empty(r));
> +
> +       /* enqueue 1 object */
> +       result = _ring_enqueue_burst(r, enq, 1);
> +       enq += 1;
> +       CU_ASSERT(1 == (result & _RING_SZ_MASK));
> +
> +       /* enqueue 2 objects */
> +       result = _ring_enqueue_burst(r, enq, 2);
> +       enq += 2;
> +       CU_ASSERT(2 == (result & _RING_SZ_MASK));
> +
> +       /* enqueue HALF_BULK objects */
> +       result = _ring_enqueue_burst(r, enq, HALF_BULK);
> +       enq += HALF_BULK;
> +       CU_ASSERT(HALF_BULK == (result & _RING_SZ_MASK));
> +
> +       /* ring is neither empty nor full */
> +       CU_ASSERT(0 == _ring_full(r));
> +       CU_ASSERT(0 == _ring_empty(r));
> +
> +       /* _ring_count() equals enqueued */
> +       count = (1 + 2 + HALF_BULK);
> +       CU_ASSERT(count == _ring_count(r));
> +       /* _ring_free_count() equals the room left */
> +       count = (RING_SIZE - 1) - count;
> +       CU_ASSERT(count == _ring_free_count(r));
> +
> +       /* exceed the size, enqueue as many as possible */
> +       result = _ring_enqueue_burst(r, enq, HALF_BULK);
> +       enq += count;
> +       CU_ASSERT(count == (result & _RING_SZ_MASK));
> +       CU_ASSERT(1 == _ring_full(r));
> +
> +       /* dequeue 1 object */
> +       result = _ring_dequeue_burst(r, deq, 1);
> +       deq += 1;
> +       CU_ASSERT(1 == (result & _RING_SZ_MASK));
> +
> +       /* dequeue 2 objects */
> +       result = _ring_dequeue_burst(r, deq, 2);
> +       deq += 2;
> +       CU_ASSERT(2 == (result & _RING_SZ_MASK));
> +
> +       /* dequeue HALF_BULK objects */
> +       result = _ring_dequeue_burst(r, deq, HALF_BULK);
> +       deq += HALF_BULK;
> +       CU_ASSERT(HALF_BULK == (result & _RING_SZ_MASK));
> +
> +       /* _ring_free_count() equals dequeued */
> +       count = (1 + 2 + HALF_BULK);
> +       CU_ASSERT(count == _ring_free_count(r));
> +       /* _ring_count() equals the objects remaining */
> +       count = (RING_SIZE - 1) - count;
> +       CU_ASSERT(count == _ring_count(r));
> +
> +       /* underrun the size, dequeue as many as possible */
> +       result = _ring_dequeue_burst(r, deq, HALF_BULK);
> +       deq += count;
> +       CU_ASSERT(count == (result & _RING_SZ_MASK));
> +       CU_ASSERT(1 == _ring_empty(r));
> +
> +       /* check data */
> +       CU_ASSERT(0 == memcmp(source, dest, deq - dest));
> +
> +       /* reset dequeue data */
> +       memset(test_deq_data, 0, RING_SIZE * 2 * sizeof(void *));
> +}
> +
> +/* the ring API set is incomplete: add local __ring_enqueue/dequeue_bulk
> + * wrappers to improve coverage
> + */
> +static inline int __ring_enqueue_bulk(
> +       _ring_t *r, void * const *objects, unsigned bulk)
> +{
> +       if (r->prod.sp_enqueue)
> +               return _ring_sp_enqueue_bulk(r, objects, bulk);
> +       else
> +               return _ring_mp_enqueue_bulk(r, objects, bulk);
> +}
> +
> +static inline int __ring_dequeue_bulk(
> +       _ring_t *r, void **objects, unsigned bulk)
> +{
> +       if (r->cons.sc_dequeue)
> +               return _ring_sc_dequeue_bulk(r, objects, bulk);
> +       else
> +               return _ring_mc_dequeue_bulk(r, objects, bulk);
> +}
> +
> +static void __do_basic_bulk(_ring_t *r)
> +{
> +       int result = 0;
> +       unsigned int count = 0;
> +       void * const *source = test_enq_data;
> +       void * const *dest = test_deq_data;
> +       void **enq = NULL, **deq = NULL;
> +
> +       enq = test_enq_data; deq = test_deq_data;
> +
> +       /* ring is empty */
> +       CU_ASSERT(1 == _ring_empty(r));
> +
> +       /* enqueue 1 object */
> +       result = __ring_enqueue_bulk(r, enq, 1);
> +       enq += 1;
> +       CU_ASSERT(0 == result);
> +
> +       /* enqueue 2 objects */
> +       result = __ring_enqueue_bulk(r, enq, 2);
> +       enq += 2;
> +       CU_ASSERT(0 == result);
> +
> +       /* enqueue HALF_BULK objects */
> +       result = __ring_enqueue_bulk(r, enq, HALF_BULK);
> +       enq += HALF_BULK;
> +       CU_ASSERT(0 == result);
> +
> +       /* ring is neither empty nor full */
> +       CU_ASSERT(0 == _ring_full(r));
> +       CU_ASSERT(0 == _ring_empty(r));
> +
> +       /* _ring_count() equals enqueued */
> +       count = (1 + 2 + HALF_BULK);
> +       CU_ASSERT(count == _ring_count(r));
> +       /* _ring_free_count() equals the room left */
> +       count = (RING_SIZE - 1) - count;
> +       CU_ASSERT(count == _ring_free_count(r));
> +
> +       /* exceed the size, enqueue shall fail with -ENOBUFS */
> +       result = __ring_enqueue_bulk(r, enq, HALF_BULK);
> +       CU_ASSERT(-ENOBUFS == result);
> +
> +       /* fill the ring until it is full */
> +       result = __ring_enqueue_bulk(r, enq, count);
> +       enq += count;
> +       CU_ASSERT(0 == result);
> +       CU_ASSERT(1 == _ring_full(r));
> +
> +       /* dequeue 1 object */
> +       result = __ring_dequeue_bulk(r, deq, 1);
> +       deq += 1;
> +       CU_ASSERT(0 == result);
> +
> +       /* dequeue 2 objects */
> +       result = __ring_dequeue_bulk(r, deq, 2);
> +       deq += 2;
> +       CU_ASSERT(0 == result);
> +
> +       /* dequeue HALF_BULK objects */
> +       result = __ring_dequeue_bulk(r, deq, HALF_BULK);
> +       deq += HALF_BULK;
> +       CU_ASSERT(0 == result);
> +
> +       /* _ring_free_count() equals dequeued */
> +       count = (1 + 2 + HALF_BULK);
> +       CU_ASSERT(count == _ring_free_count(r));
> +       /* _ring_count() equals the objects remaining */
> +       count = (RING_SIZE - 1) - count;
> +       CU_ASSERT(count == _ring_count(r));
> +
> +       /* underrun the size, dequeue shall fail with -ENOENT */
> +       result = __ring_dequeue_bulk(r, deq, HALF_BULK);
> +       CU_ASSERT(-ENOENT == result);
> +
> +       /* empty the queue */
> +       result = __ring_dequeue_bulk(r, deq, count);
> +       deq += count;
> +       CU_ASSERT(0 == result);
> +       CU_ASSERT(1 == _ring_empty(r));
> +
> +       /* check data */
> +       CU_ASSERT(0 == memcmp(source, dest, deq - dest));
> +
> +       /* reset dequeue data */
> +       memset(test_deq_data, 0, RING_SIZE * 2 * sizeof(void *));
> +}
> +
> +static void __do_basic_watermark(_ring_t *r)
> +{
> +       int result = 0;
> +       void * const *source = test_enq_data;
> +       void * const *dest = test_deq_data;
> +       void **enq = NULL, **deq = NULL;
> +
> +       enq = test_enq_data; deq = test_deq_data;
> +
> +       /* bulk = 3/4 watermark to trigger alarm on 2nd enqueue */
> +       const unsigned watermark = PIECE_BULK;
> +       const unsigned bulk = (watermark / 4) * 3;
> +
> +       /* watermark cannot exceed ring size */
> +       result = _ring_set_water_mark(r, ILLEGAL_SIZE);
> +       CU_ASSERT(-EINVAL == result);
> +
> +       /* set watermark */
> +       result = _ring_set_water_mark(r, watermark);
> +       CU_ASSERT(0 == result);
> +
> +       /* 1st enqueue shall succeed */
> +       result = __ring_enqueue_bulk(r, enq, bulk);
> +       enq += bulk;
> +       CU_ASSERT(0 == result);
> +
> +       /* 2nd enqueue shall succeed but return -EDQUOT */
> +       result = __ring_enqueue_bulk(r, enq, bulk);
> +       enq += bulk;
> +       CU_ASSERT(-EDQUOT == result);
> +
> +       /* dequeue 1st bulk */
> +       result = __ring_dequeue_bulk(r, deq, bulk);
> +       deq += bulk;
> +       CU_ASSERT(0 == result);
> +
> +       /* dequeue 2nd bulk */
> +       result = __ring_dequeue_bulk(r, deq, bulk);
> +       deq += bulk;
> +       CU_ASSERT(0 == result);
> +
> +       /* check data */
> +       CU_ASSERT(0 == memcmp(source, dest, deq - dest));
> +
> +       /* reset watermark */
> +       result = _ring_set_water_mark(r, 0);
> +       CU_ASSERT(0 == result);
> +
> +       /* reset dequeue data */
> +       memset(test_deq_data, 0, RING_SIZE * 2 * sizeof(void *));
> +}
> diff --git a/platform/linux-generic/test/ring/ring_main.c b/platform/linux-generic/test/ring/ring_main.c
> new file mode 100644
> index 0000000..7152688
> --- /dev/null
> +++ b/platform/linux-generic/test/ring/ring_main.c
> @@ -0,0 +1,12 @@
> +/* Copyright (c) 2016, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#include "ring_suites.h"
> +
> +int main(int argc, char *argv[])
> +{
> +       return ring_suites_main(argc, argv);
> +}
> diff --git a/platform/linux-generic/test/ring/ring_stress.c b/platform/linux-generic/test/ring/ring_stress.c
> new file mode 100644
> index 0000000..c68419f
> --- /dev/null
> +++ b/platform/linux-generic/test/ring/ring_stress.c
> @@ -0,0 +1,309 @@
> +/* Copyright (c) 2016, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +/**
> + * @file
> + *
> + * ODP ring stress test
> + */
> +
> +#define _GNU_SOURCE
> +
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <string.h>
> +#include <unistd.h>
> +
> +#include <odp_api.h>
> +#include <odp/helper/linux.h>
> +#include <odp_packet_io_ring_internal.h>
> +#include <test_debug.h>
> +#include <odp_cunit_common.h>
> +
> +#include "ring_suites.h"
> +
> +/*
> + * Since the CUnit framework cannot work with multi-threading, ask workers
> + * to save their results for a delayed assertion after thread collection.
> + */
> +static int worker_results[MAX_WORKERS];
> +
> +/*
> + * Note: make sure that the enqueue and dequeue operations start at the
> + * same time to avoid data corruption. The atomic lock protects only the
> + * indexes; if the order of the read or write operations is incorrect, a
> + * data mismatch will happen. It is therefore the application developer's
> + * responsibility to take care of the order of data reads and writes.
> + */
> +typedef enum {
> +       STRESS_1_1_PRODUCER_CONSUMER,
> +       STRESS_1_N_PRODUCER_CONSUMER,
> +       STRESS_N_1_PRODUCER_CONSUMER,
> +       STRESS_N_M_PRODUCER_CONSUMER
> +} stress_case_t;
> +
> +/* worker function declarations */
> +static int stress_worker(void *_data);
> +static odp_atomic_u32_t *retrieve_consume_count(void);
> +
> +/* global name for later look up in workers' context */
> +static const char *ring_name = "stress ring";
> +static const char *consume_count_name = "stress ring consume count";
> +
> +int ring_test_stress_start(void)
> +{
> +       odp_shm_t shared;
> +       _ring_t *r_stress = NULL;
> +
> +       /* multiple thread usage scenario, thread or process sharable */
> +       r_stress = _ring_create(ring_name, RING_SIZE, _RING_SHM_PROC);
> +       if (r_stress == NULL) {
> +               LOG_ERR("create ring failed for stress.\n");
> +               return -1;
> +       }
> +
> +       /* atomic count for expected data pieces to be consumed
> +        * by consumer threads.
> +        */
> +       shared = odp_shm_reserve(consume_count_name,
> +                                sizeof(odp_atomic_u32_t),
> +                                sizeof(odp_atomic_u32_t),
> +                                ODP_SHM_PROC);
> +       if (shared == ODP_SHM_INVALID) {
> +               LOG_ERR("create expected consume count failed for stress.\n");
> +               return -1;
> +       }
> +       return 0;
> +}
> +
> +int ring_test_stress_end(void)
> +{
> +       odp_shm_t shared;
> +
> +       /* release consume atomic count */
> +       shared = odp_shm_lookup(consume_count_name);
> +       if (shared != ODP_SHM_INVALID)
> +               odp_shm_free(shared);
> +       return 0;
> +}
> +
> +void ring_test_stress_1_1_producer_consumer(void)
> +{
> +       int i = 0;
> +       odp_cpumask_t cpus;
> +       pthrd_arg worker_param;
> +       odp_atomic_u32_t *consume_count = NULL;
> +
> +       /* reset results for delayed assertion */
> +       memset(worker_results, 0, sizeof(worker_results));
> +
> +       /* request 2 threads to run 1:1 stress */
> +       worker_param.numthrds = odp_cpumask_default_worker(&cpus, 2);
> +       worker_param.testcase = STRESS_1_1_PRODUCER_CONSUMER;
> +
> +       /* not a failure, just insufficient resources */
> +       if (worker_param.numthrds < 2) {
> +               LOG_ERR("insufficient cpu for 1:1 "
> +                       "producer/consumer stress.\n");
> +               return;
> +       }
> +
> +       consume_count = retrieve_consume_count();
> +       CU_ASSERT(consume_count != NULL);
> +
> +       /* in 1:1 test case, one producer thread produces one
> +        * data piece to be consumed by one consumer thread.
> +        */
> +       odp_atomic_init_u32(consume_count, 1);
> +
> +       /* kick the workers */
> +       odp_cunit_thread_create(stress_worker, &worker_param);
> +
> +       /* collect the results */
> +       odp_cunit_thread_exit(&worker_param);
> +
> +       /* delayed assertion due to cunit limitation */
> +       for (i = 0; i < worker_param.numthrds; i++)
> +               CU_ASSERT(0 == worker_results[i]);
> +}
> +
> +void ring_test_stress_N_M_producer_consumer(void)
> +{
> +       int i = 0;
> +       odp_cpumask_t cpus;
> +       pthrd_arg worker_param;
> +       odp_atomic_u32_t *consume_count = NULL;
> +
> +       /* reset results for delayed assertion */
> +       memset(worker_results, 0, sizeof(worker_results));
> +
> +       /* request MAX_WORKERS threads to run N:M stress */
> +       worker_param.numthrds =
> +               odp_cpumask_default_worker(&cpus, MAX_WORKERS);
> +       worker_param.testcase = STRESS_N_M_PRODUCER_CONSUMER;
> +
> +       /* not a failure, just insufficient resources */
> +       if (worker_param.numthrds < 3) {
> +               LOG_ERR("insufficient cpu for N:M "
> +                       "producer/consumer stress.\n");
> +               return;
> +       }
> +
> +       consume_count = retrieve_consume_count();
> +       CU_ASSERT(consume_count != NULL);
> +
> +       /* in the N:M test case, the number of producer threads is always
> +        * greater than or equal to the number of consumer threads, so they
> +        * produce enough "goods" for the consumer threads to consume.
> +        */
> +       odp_atomic_init_u32(consume_count,
> +                           (worker_param.numthrds) / 2);
> +
> +       /* kick the workers */
> +       odp_cunit_thread_create(stress_worker, &worker_param);
> +
> +       /* collect the results */
> +       odp_cunit_thread_exit(&worker_param);
> +
> +       /* delayed assertion due to cunit limitation */
> +       for (i = 0; i < worker_param.numthrds; i++)
> +               CU_ASSERT(0 == worker_results[i]);
> +}
> +
> +void ring_test_stress_1_N_producer_consumer(void)
> +{
> +}
> +
> +void ring_test_stress_N_1_producer_consumer(void)
> +{
> +}
> +
> +void ring_test_stress_ring_list_dump(void)
> +{
> +       /* improve code coverage */
> +       _ring_list_dump();
> +}
> +
> +static odp_atomic_u32_t *retrieve_consume_count(void)
> +{
> +       odp_shm_t shared;
> +
> +       shared = odp_shm_lookup(consume_count_name);
> +       if (shared == ODP_SHM_INVALID)
> +               return NULL;
> +
> +       return (odp_atomic_u32_t *)odp_shm_addr(shared);
> +}
> +
> +/* worker function for multiple producer instances */
> +static int do_producer(_ring_t *r)
> +{
> +       int i, result = 0;
> +       void **enq = NULL;
> +
> +       /* allocate dummy object pointers for enqueue */
> +       enq = malloc(PIECE_BULK * 2 * sizeof(void *));
> +       if (NULL == enq) {
> +               LOG_ERR("insufficient memory for producer enqueue.\n");
> +               return 0; /* not a failure, skip due to insufficient memory */
> +       }
> +
> +       /* data pattern to be evaluated later in consumer */
> +       for (i = 0; i < PIECE_BULK; i++)
> +               enq[i] = (void *)(unsigned long)i;
> +
> +       do {
> +               result = _ring_mp_enqueue_bulk(r, enq, PIECE_BULK);
> +               if (0 == result) {
> +                       free(enq);
> +                       return 0;
> +               }
> +               usleep(10); /* wait for consumer threads */
> +       } while (!_ring_full(r));
> +
> +       return 0;
> +}
> +
> +/* worker function for multiple consumer instances */
> +static int do_consumer(_ring_t *r)
> +{
> +       int i, result = 0;
> +       void **deq = NULL;
> +       odp_atomic_u32_t *consume_count = NULL;
> +       const char *message = "test OK!";
> +       const char *mismatch = "data mismatch..lockless enq/deq failed.";
> +
> +       /* allocate dummy object pointers for dequeue */
> +       deq = malloc(PIECE_BULK * 2 * sizeof(void *));
> +       if (NULL == deq) {
> +               LOG_ERR("insufficient memory for consumer dequeue.\n");
> +               return 0; /* not a failure, skip due to insufficient memory */
> +       }
> +
> +       consume_count = retrieve_consume_count();
> +       if (consume_count == NULL) {
> +               LOG_ERR("cannot retrieve expected consume count.\n");
> +               return -1;
> +       }
> +
> +       while (odp_atomic_load_u32(consume_count) > 0) {
> +               result = _ring_mc_dequeue_bulk(r, deq, PIECE_BULK);
> +               if (0 == result) {
> +                       /* evaluate the data pattern */
> +                       for (i = 0; i < PIECE_BULK; i++) {
> +                               if (deq[i] != (void *)(unsigned long)i) {
> +                                       result = -1;
> +                                       message = mismatch;
> +                                       break;
> +                               }
> +                       }
> +
> +                       free(deq);
> +                       LOG_ERR("%s\n", message);
> +                       odp_atomic_dec_u32(consume_count);
> +                       return result;
> +               }
> +               usleep(10); /* wait for producer threads */
> +       }
> +       return 0;
> +}
> +
> +static int stress_worker(void *_data)
> +{
> +       pthrd_arg *worker_param = (pthrd_arg *)_data;
> +       _ring_t *r_stress = NULL;
> +       int *result = NULL;
> +       int worker_id = odp_thread_id();
> +
> +       /* save the worker result for delayed assertion */
> +       result = &worker_results[(worker_id % worker_param->numthrds)];
> +
> +       /* verify ring lookup in worker context */
> +       r_stress = _ring_lookup(ring_name);
> +       if (NULL == r_stress) {
> +               LOG_ERR("ring lookup %s not found\n", ring_name);
> +               return (*result = -1);
> +       }
> +
> +       switch (worker_param->testcase) {
> +       case STRESS_1_1_PRODUCER_CONSUMER:
> +       case STRESS_N_M_PRODUCER_CONSUMER:
> +               /* interleaved producer/consumer */
> +               if (0 == (worker_id % 2))
> +                       *result = do_producer(r_stress);
> +               else if (1 == (worker_id % 2))
> +                       *result = do_consumer(r_stress);
> +               break;
> +       case STRESS_1_N_PRODUCER_CONSUMER:
> +       case STRESS_N_1_PRODUCER_CONSUMER:
> +       default:
> +               LOG_ERR("invalid or not-implemented stress type (%d)\n",
> +                       worker_param->testcase);
> +               break;
> +       }
> +       return 0;
> +}
> diff --git a/platform/linux-generic/test/ring/ring_suites.c b/platform/linux-generic/test/ring/ring_suites.c
> new file mode 100644
> index 0000000..f321a76
> --- /dev/null
> +++ b/platform/linux-generic/test/ring/ring_suites.c
> @@ -0,0 +1,74 @@
> +/* Copyright (c) 2016, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <string.h>
> +
> +#include <odp_api.h>
> +#include <test_debug.h>
> +#include <odp_cunit_common.h>
> +#include <odp_packet_io_ring_internal.h>
> +
> +#include "ring_suites.h"
> +
> +static int ring_suites_init(odp_instance_t *inst)
> +{
> +       if (0 != odp_init_global(inst, NULL, NULL)) {
> +               LOG_ERR("error: odp_init_global() failed.\n");
> +               return -1;
> +       }
> +       if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
> +               LOG_ERR("error: odp_init_local() failed.\n");
> +               return -1;
> +       }
> +
> +       _ring_tailq_init();
> +       return 0;
> +}
> +
> +static odp_testinfo_t ring_suite_basic[] = {
> +       ODP_TEST_INFO(ring_test_basic_create),
> +       ODP_TEST_INFO(ring_test_basic_burst),
> +       ODP_TEST_INFO(ring_test_basic_bulk),
> +       ODP_TEST_INFO(ring_test_basic_watermark),
> +       ODP_TEST_INFO_NULL,
> +};
> +
> +static odp_testinfo_t ring_suite_stress[] = {
> +       ODP_TEST_INFO(ring_test_stress_1_1_producer_consumer),
> +       ODP_TEST_INFO(ring_test_stress_1_N_producer_consumer),
> +       ODP_TEST_INFO(ring_test_stress_N_1_producer_consumer),
> +       ODP_TEST_INFO(ring_test_stress_N_M_producer_consumer),
> +       ODP_TEST_INFO(ring_test_stress_ring_list_dump),
> +       ODP_TEST_INFO_NULL,
> +};
> +
> +static odp_suiteinfo_t ring_suites[] = {
> +       {"ring basic", ring_test_basic_start,
> +               ring_test_basic_end, ring_suite_basic},
> +       {"ring stress", ring_test_stress_start,
> +               ring_test_stress_end, ring_suite_stress},
> +       ODP_SUITE_INFO_NULL
> +};
> +
> +int ring_suites_main(int argc, char *argv[])
> +{
> +       int ret;
> +
> +       /* let helper collect its own arguments (e.g. --odph_proc) */
> +       if (odp_cunit_parse_options(argc, argv))
> +               return -1;
> +
> +       odp_cunit_register_global_init(ring_suites_init);
> +
> +       ret = odp_cunit_register(ring_suites);
> +
> +       if (ret == 0)
> +               ret = odp_cunit_run();
> +
> +       return ret;
> +}
> diff --git a/platform/linux-generic/test/ring/ring_suites.h b/platform/linux-generic/test/ring/ring_suites.h
> new file mode 100644
> index 0000000..5fa5b9c
> --- /dev/null
> +++ b/platform/linux-generic/test/ring/ring_suites.h
> @@ -0,0 +1,34 @@
> +/* Copyright (c) 2016, Linaro Limited
> + * All rights reserved.
> + *
> + * SPDX-License-Identifier:     BSD-3-Clause
> + */
> +
> +#define RING_SIZE 4096
> +#define PIECE_BULK 32
> +
> +#define HALF_BULK (RING_SIZE >> 1)
> +#define ILLEGAL_SIZE (RING_SIZE | 0x3)
> +
> +/* test suite start and stop */
> +int ring_test_basic_start(void);
> +int ring_test_basic_end(void);
> +
> +/* basic test cases */
> +void ring_test_basic_create(void);
> +void ring_test_basic_burst(void);
> +void ring_test_basic_bulk(void);
> +void ring_test_basic_watermark(void);
> +
> +/* test suite start and stop */
> +int ring_test_stress_start(void);
> +int ring_test_stress_end(void);
> +
> +/* stress test cases */
> +void ring_test_stress_1_1_producer_consumer(void);
> +void ring_test_stress_1_N_producer_consumer(void);
> +void ring_test_stress_N_1_producer_consumer(void);
> +void ring_test_stress_N_M_producer_consumer(void);
> +void ring_test_stress_ring_list_dump(void);
> +
> +int ring_suites_main(int argc, char *argv[]);
> diff --git a/platform/linux-generic/test/ring/ringtest.c b/platform/linux-generic/test/ring/ringtest.c
> deleted file mode 100644
> index 2ebef8a..0000000
> --- a/platform/linux-generic/test/ring/ringtest.c
> +++ /dev/null
> @@ -1,495 +0,0 @@
> -/* Copyright (c) 2014, Linaro Limited
> - * All rights reserved.
> - *
> - * SPDX-License-Identifier:     BSD-3-Clause
> - */
> -
> -/*-
> - *   BSD LICENSE
> - *
> - *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
> - *   All rights reserved.
> - *
> - *   Redistribution and use in source and binary forms, with or without
> - *   modification, are permitted provided that the following conditions
> - *   are met:
> - *
> - *     * Redistributions of source code must retain the above copyright
> - *       notice, this list of conditions and the following disclaimer.
> - *     * Redistributions in binary form must reproduce the above copyright
> - *       notice, this list of conditions and the following disclaimer in
> - *       the documentation and/or other materials provided with the
> - *       distribution.
> - *     * Neither the name of Intel Corporation nor the names of its
> - *       contributors may be used to endorse or promote products derived
> - *       from this software without specific prior written permission.
> - *
> - *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> - *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> - *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
> - *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
> - *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
> - *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
> - *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
> - *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
> - *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
> - *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
> - *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
> - */
> -
> -/**
> - * @file
> - *
> - * ODP test ring
> - */
> -
> -#include <stdlib.h>
> -#include <stdio.h>
> -#include <string.h>
> -
> -#include <odp_api.h>
> -#include <odp/helper/linux.h>
> -#include <odp_packet_io_ring_internal.h>
> -#include <test_debug.h>
> -#include <odp_cunit_common.h>
> -
> -#define RING_SIZE 4096
> -#define MAX_BULK 32
> -
> -enum {
> -       ODP_RING_TEST_BASIC,
> -       ODP_RING_TEST_STRESS,
> -};
> -
> -/* local struct for ring_thread argument */
> -typedef struct {
> -       pthrd_arg thrdarg;
> -       int stress_type;
> -} ring_arg_t;
> -
> -static int test_ring_basic(_ring_t *r)
> -{
> -       void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
> -       int ret;
> -       unsigned i, num_elems;
> -
> -       /* alloc dummy object pointers */
> -       src = malloc(RING_SIZE * 2 * sizeof(void *));
> -       if (src == NULL) {
> -               LOG_ERR("failed to allocate test ring src memory\n");
> -               goto fail;
> -       }
> -       for (i = 0; i < RING_SIZE * 2; i++)
> -               src[i] = (void *)(unsigned long)i;
> -
> -       cur_src = src;
> -
> -       /* alloc some room for copied objects */
> -       dst = malloc(RING_SIZE * 2 * sizeof(void *));
> -       if (dst == NULL) {
> -               LOG_ERR("failed to allocate test ring dst memory\n");
> -               goto fail;
> -       }
> -
> -       memset(dst, 0, RING_SIZE * 2 * sizeof(void *));
> -       cur_dst = dst;
> -
> -       printf("Test SP & SC basic functions\n");
> -       printf("enqueue 1 obj\n");
> -       ret = _ring_sp_enqueue_burst(r, cur_src, 1);
> -       cur_src += 1;
> -       if ((ret & _RING_SZ_MASK) != 1) {
> -               LOG_ERR("sp_enq for 1 obj failed\n");
> -               goto fail;
> -       }
> -
> -       printf("enqueue 2 objs\n");
> -       ret = _ring_sp_enqueue_burst(r, cur_src, 2);
> -       cur_src += 2;
> -       if ((ret & _RING_SZ_MASK) != 2) {
> -               LOG_ERR("sp_enq for 2 obj failed\n");
> -               goto fail;
> -       }
> -
> -       printf("enqueue MAX_BULK objs\n");
> -       ret = _ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
> -       if ((ret & _RING_SZ_MASK) != MAX_BULK) {
> -               LOG_ERR("sp_enq for %d obj failed\n", MAX_BULK);
> -               goto fail;
> -       }
> -
> -       printf("dequeue 1 obj\n");
> -       ret = _ring_sc_dequeue_burst(r, cur_dst, 1);
> -       cur_dst += 1;
> -       if ((ret & _RING_SZ_MASK) != 1) {
> -               LOG_ERR("sc_deq for 1 obj failed\n");
> -               goto fail;
> -       }
> -
> -       printf("dequeue 2 objs\n");
> -       ret = _ring_sc_dequeue_burst(r, cur_dst, 2);
> -       cur_dst += 2;
> -       if ((ret & _RING_SZ_MASK) != 2) {
> -               LOG_ERR("sc_deq for 2 obj failed\n");
> -               goto fail;
> -       }
> -
> -       printf("dequeue MAX_BULK objs\n");
> -       ret = _ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
> -       cur_dst += MAX_BULK;
> -       if ((ret & _RING_SZ_MASK) != MAX_BULK) {
> -               LOG_ERR("sc_deq for %d obj failed\n", MAX_BULK);
> -               goto fail;
> -       }
> -
> -       /* check data */
> -       if (memcmp(src, dst, cur_dst - dst)) {
> -               LOG_ERR("data after dequeue is not the same\n");
> -               goto fail;
> -       }
> -
> -       cur_src = src;
> -       cur_dst = dst;
> -
> -       printf("Test MP & MC basic functions\n");
> -
> -       printf("enqueue 1 obj\n");
> -       ret = _ring_mp_enqueue_bulk(r, cur_src, 1);
> -       cur_src += 1;
> -       if (ret != 0) {
> -               LOG_ERR("mp_enq for 1 obj failed\n");
> -               goto fail;
> -       }
> -       printf("enqueue 2 objs\n");
> -       ret = _ring_mp_enqueue_bulk(r, cur_src, 2);
> -       cur_src += 2;
> -       if (ret != 0) {
> -               LOG_ERR("mp_enq for 2 obj failed\n");
> -               goto fail;
> -       }
> -       printf("enqueue MAX_BULK objs\n");
> -       ret = _ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
> -       if (ret != 0) {
> -               LOG_ERR("mp_enq for %d obj failed\n", MAX_BULK);
> -               goto fail;
> -       }
> -       printf("dequeue 1 obj\n");
> -       ret = _ring_mc_dequeue_bulk(r, cur_dst, 1);
> -       cur_dst += 1;
> -       if (ret != 0) {
> -               LOG_ERR("mc_deq for 1 obj failed\n");
> -               goto fail;
> -       }
> -       printf("dequeue 2 objs\n");
> -       ret = _ring_mc_dequeue_bulk(r, cur_dst, 2);
> -       cur_dst += 2;
> -       if (ret != 0) {
> -               LOG_ERR("mc_deq for 2 obj failed\n");
> -               goto fail;
> -       }
> -       printf("dequeue MAX_BULK objs\n");
> -       ret = _ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
> -       cur_dst += MAX_BULK;
> -       if (ret != 0) {
> -               LOG_ERR("mc_deq for %d obj failed\n", MAX_BULK);
> -               goto fail;
> -       }
> -       /* check data */
> -       if (memcmp(src, dst, cur_dst - dst)) {
> -               LOG_ERR("data after dequeue is not the same\n");
> -               goto fail;
> -       }
> -
> -       printf("test watermark and default bulk enqueue / dequeue\n");
> -       _ring_set_water_mark(r, 20);
> -       num_elems = 16;
> -
> -       cur_src = src;
> -       cur_dst = dst;
> -
> -       ret = _ring_mp_enqueue_bulk(r, cur_src, num_elems);
> -       cur_src += num_elems;
> -       if (ret != 0) {
> -               LOG_ERR("Cannot enqueue\n");
> -               goto fail;
> -       }
> -       ret = _ring_mp_enqueue_bulk(r, cur_src, num_elems);
> -       if (ret != -EDQUOT) {
> -               LOG_ERR("Watermark not exceeded\n");
> -               goto fail;
> -       }
> -       ret = _ring_mc_dequeue_bulk(r, cur_dst, num_elems);
> -       cur_dst += num_elems;
> -       if (ret != 0) {
> -               LOG_ERR("Cannot dequeue\n");
> -               goto fail;
> -       }
> -       ret = _ring_mc_dequeue_bulk(r, cur_dst, num_elems);
> -       cur_dst += num_elems;
> -       if (ret != 0) {
> -               LOG_ERR("Cannot dequeue2\n");
> -               goto fail;
> -       }
> -
> -       /* check data */
> -       if (memcmp(src, dst, cur_dst - dst)) {
> -               LOG_ERR("data after dequeue is not the same\n");
> -               goto fail;
> -       }
> -
> -       printf("basic enqueu, dequeue test for ring <%s>@%p passed\n",
> -              r->name, r);
> -
> -       free(src);
> -       free(dst);
> -       return 0;
> -
> -fail:
> -       free(src);
> -       free(dst);
> -       return -1;
> -}
> -
> -/* global shared ring used for stress testing */
> -static _ring_t *r_stress;
> -
> -/* Stress func for Multi producer only */
> -static int producer_fn(void)
> -{
> -       unsigned i;
> -
> -       void **src = NULL;
> -
> -       /* alloc dummy object pointers */
> -       src = malloc(MAX_BULK * 2 * sizeof(void *));
> -       if (src == NULL) {
> -               LOG_ERR("failed to allocate producer memory.\n");
> -               return -1;
> -       }
> -       for (i = 0; i < MAX_BULK; i++)
> -               src[i] = (void *)(unsigned long)i;
> -
> -       do {
> -               i = _ring_mp_enqueue_bulk(r_stress, src, MAX_BULK);
> -               if (i == 0) {
> -                       free(src);
> -                       return 0;
> -               }
> -       } while (1);
> -}
> -
> -/* Stress func for Multi consumer only */
> -static int consumer_fn(void)
> -{
> -       unsigned i;
> -       void **src = NULL;
> -
> -       /* alloc dummy object pointers */
> -       src = malloc(MAX_BULK * 2 * sizeof(void *));
> -       if (src == NULL) {
> -               LOG_ERR("failed to allocate consumer memory.\n");
> -               return -1;
> -       }
> -
> -       do {
> -               i = _ring_mc_dequeue_bulk(r_stress, src, MAX_BULK);
> -               if (i == 0) {
> -                       for (i = 0; i < MAX_BULK; i++) {
> -                               if (src[i] != (void *)(unsigned long)i) {
> -                                       free(src);
> -                                       printf("data mismatch.. lockless ops fail\n");
> -                                       return -1;
> -                               }
> -                       }
> -                       free(src);
> -                       printf("\n Test OK !\n");
> -                       return 0;
> -               }
> -       } while (1);
> -}
> -
> -/*
> - * Note : make sure that both enqueue and dequeue
> - * operation starts at same time so to avoid data corruption
> - * Its because atomic lock will protect only indexes, but if order of
> - * read or write operation incorrect then data mismatch will happen
> - * So its resposibility of application develop to take care of order of
> - * data read or write.
> -*/
> -typedef enum {
> -       one_enq_one_deq,        /* One thread to enqueue one to
> -                                  dequeu at same time */
> -       one_enq_rest_deq,       /* one thread to enq rest to
> -                                  dequeue at same time */
> -       one_deq_rest_enq,       /* one to deq and rest enq at very same
> time */
> -       multi_enq_multi_deq     /* multiple enq,deq */
> -} stress_type_t;
> -
> -static void test_ring_stress(stress_type_t type)
> -{
> -       int thr;
> -
> -       thr = odp_thread_id();
> -
> -       switch (type) {
> -       case one_enq_one_deq:
> -               if (thr == 1)
> -                       producer_fn();
> -               if (thr == 2)
> -                       consumer_fn();
> -               break;
> -       case multi_enq_multi_deq:
> -               if (thr % 2 == 0)
> -                       producer_fn();
> -               else
> -                       consumer_fn();
> -               break;
> -       case one_deq_rest_enq:
> -       case one_enq_rest_deq:/*TBD*/
> -       default:
> -               LOG_ERR("Invalid stress type or test case yet not supported\n");
> -       }
> -}
> -
> -static int test_ring(void *arg)
> -{
> -       ring_arg_t *parg = (ring_arg_t *)arg;
> -       int thr;
> -       char ring_name[_RING_NAMESIZE];
> -       _ring_t *r;
> -       int result = 0;
> -
> -       thr = odp_thread_id();
> -
> -       printf("Thread %i starts\n", thr);
> -
> -       switch (parg->thrdarg.testcase) {
> -       case ODP_RING_TEST_BASIC:
> -               snprintf(ring_name, sizeof(ring_name), "test_ring_%i", thr);
> -
> -               r = _ring_create(ring_name, RING_SIZE,
> -                                0 /* not used, alignement
> -                                     taken care inside func : todo */);
> -               if (r == NULL) {
> -                       LOG_ERR("ring create failed\n");
> -                       result = -1;
> -                       break;
> -               }
> -               /* lookup ring from its name */
> -               if (_ring_lookup(ring_name) != r) {
> -                       LOG_ERR("ring lookup failed\n");
> -                       result = -1;
> -                       break;
> -               }
> -
> -               /* basic operations */
> -               if (test_ring_basic(r) < 0) {
> -                       LOG_ERR("ring basic enqueue/dequeu ops failed\n");
> -                       result = -1;
> -               }
> -
> -               if (result)
> -                       _ring_list_dump();
> -
> -               break;
> -
> -       case ODP_RING_TEST_STRESS:
> -               test_ring_stress(parg->stress_type);
> -
> -               if (result)
> -                       _ring_list_dump();
> -               break;
> -
> -       default:
> -               LOG_ERR("Invalid test case [%d]\n", parg->thrdarg.testcase);
> -               result = -1;
> -               break;
> -       }
> -
> -       LOG_DBG("result = %d\n", result);
> -       if (result == 0)
> -               printf("test_ring Result:pass\n");
> -       else
> -               printf("test_ring Result:fail\n");
> -
> -       fflush(stdout);
> -
> -       return 0;
> -}
> -
> -int main(int argc, char *argv[])
> -{
> -       ring_arg_t rarg;
> -       odph_odpthread_t thread_tbl[MAX_WORKERS];
> -       odp_cpumask_t cpu_mask;
> -       char ring_name[_RING_NAMESIZE];
> -       odp_instance_t instance;
> -       odph_odpthread_params_t thr_params;
> -
> -       if (odp_init_global(&instance, NULL, NULL)) {
> -               LOG_ERR("Error: ODP global init failed.\n");
> -               exit(EXIT_FAILURE);
> -       }
> -
> -       if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
> -               LOG_ERR("Error: ODP local init failed.\n");
> -               exit(EXIT_FAILURE);
> -       }
> -
> -       /* let helper collect its own arguments (e.g. --odph_proc) */
> -       odph_parse_options(argc, argv, NULL, NULL);
> -
> -       _ring_tailq_init();
> -
> -       odp_cpumask_default_worker(&cpu_mask, MAX_WORKERS);
> -       rarg.thrdarg.numthrds = rarg.thrdarg.numthrds;
> -
> -       rarg.thrdarg.testcase = ODP_RING_TEST_BASIC;
> -
> -       memset(&thr_params, 0, sizeof(thr_params));
> -       thr_params.start    = test_ring;
> -       thr_params.arg      = &rarg;
> -       thr_params.thr_type = ODP_THREAD_WORKER;
> -       thr_params.instance = instance;
> -
> -       printf("starting stess test type : %d..\n", rarg.stress_type);
> -       odph_odpthreads_create(&thread_tbl[0], &cpu_mask, &thr_params);
> -       odph_odpthreads_join(thread_tbl);
> -
> -       rarg.thrdarg.testcase = ODP_RING_TEST_STRESS;
> -       rarg.stress_type = one_enq_one_deq;
> -
> -       printf("starting stess test type : %d..\n", rarg.stress_type);
> -       snprintf(ring_name, sizeof(ring_name), "test_ring_stress");
> -       r_stress = _ring_create(ring_name, RING_SIZE,
> -                               0/* not used, alignement
> -                                   taken care inside func : todo */);
> -       if (r_stress == NULL) {
> -               LOG_ERR("ring create failed\n");
> -               goto fail;
> -       }
> -       /* lookup ring from its name */
> -       if (_ring_lookup(ring_name) != r_stress) {
> -               LOG_ERR("ring lookup failed\n");
> -               goto fail;
> -       }
> -
> -       thr_params.start = test_ring;
> -       thr_params.arg   = &rarg;
> -
> -       odph_odpthreads_create(&thread_tbl[0], &cpu_mask, &thr_params);
> -       odph_odpthreads_join(thread_tbl);
> -
> -fail:
> -       if (odp_term_local()) {
> -               LOG_ERR("Error: ODP local term failed.\n");
> -               exit(EXIT_FAILURE);
> -       }
> -
> -       if (odp_term_global(instance)) {
> -               LOG_ERR("Error: ODP global term failed.\n");
> -               exit(EXIT_FAILURE);
> -       }
> -
> -       return 0;
> -}
> --
> 2.7.4
>
>