[lng-odp] [PATCH v1 1/1] test: sched_perf: add num queues option
From: Petri Savolainen Added option to set number of queues per worker thread. Number of active queues usually affects scheduler performance. Signed-off-by: Petri Savolainen --- /** Email created from pull request 666 (psavol:master-sched-perf-numqueue) ** https://github.com/Linaro/odp/pull/666 ** Patch: https://github.com/Linaro/odp/pull/666.patch ** Base sha: 7c87b66edc84e8c713fefc68d46464660adaf71e ** Merge commit sha: d8a76e7a44b96d574b4e8cc1741af827a1717475 **/ test/performance/odp_sched_perf.c | 60 ++- 1 file changed, 43 insertions(+), 17 deletions(-) diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c index e76725cc0..eb27a3139 100644 --- a/test/performance/odp_sched_perf.c +++ b/test/performance/odp_sched_perf.c @@ -14,12 +14,18 @@ #include #include +#define MAX_QUEUES_PER_CPU 1024 +#define MAX_QUEUES (ODP_THREAD_COUNT_MAX * MAX_QUEUES_PER_CPU) + typedef struct test_options_t { uint32_t num_cpu; + uint32_t num_queue; uint32_t num_event; uint32_t num_round; uint32_t max_burst; int queue_type; + uint32_t tot_queue; + uint32_t tot_event; } test_options_t; @@ -38,7 +44,7 @@ typedef struct test_global_t { odp_barrier_t barrier; odp_pool_t pool; odp_cpumask_t cpumask; - odp_queue_t queue[ODP_THREAD_COUNT_MAX]; + odp_queue_t queue[MAX_QUEUES]; odph_odpthread_t thread_tbl[ODP_THREAD_COUNT_MAX]; test_stat_t stat[ODP_THREAD_COUNT_MAX]; @@ -53,11 +59,12 @@ static void print_usage(void) "\n" "Usage: odp_sched_perf [options]\n" "\n" - " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default 1.\n" + " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default: 1.\n" + " -q, --num_queue Number of queues per CPU. Default: 1.\n" " -e, --num_event Number of events per queue\n" " -r, --num_round Number of rounds\n" " -b, --burst Maximum number of events per operation\n" - " -t, --type Queue type. 0: parallel, 1: atomic, 2: ordered. Default 0.\n" + " -t, --type Queue type. 0: parallel, 1: atomic, 2: ordered. Default: 0.\n" " -h, --help This help\n" "\n"); } @@ -70,6 +77,7 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) static const struct option longopts[] = { {"num_cpu", required_argument, NULL, 'c'}, + {"num_queue", required_argument, NULL, 'q'}, {"num_event", required_argument, NULL, 'e'}, {"num_round", required_argument, NULL, 'r'}, {"burst", required_argument, NULL, 'b'}, @@ -78,9 +86,10 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) {NULL, 0, NULL, 0} }; - static const char *shortopts = "+c:e:r:b:t:h"; + static const char *shortopts = "+c:q:e:r:b:t:h"; test_options->num_cpu = 1; + test_options->num_queue = 1; test_options->num_event = 100; test_options->num_round = 10; test_options->max_burst = 100; @@ -96,6 +105,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) case 'c': test_options->num_cpu = atoi(optarg); break; + case 'q': + test_options->num_queue = atoi(optarg); + break; case 'e': test_options->num_event = atoi(optarg); break; @@ -117,6 +129,17 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options) } } + if (test_options->num_queue > MAX_QUEUES_PER_CPU) { + printf("Error: Too many queues per worker. Max supported %i\n.", + MAX_QUEUES_PER_CPU); + ret = -1; + } + + test_options->tot_queue = test_options->num_queue * + test_options->num_cpu; + test_options->tot_event = test_options->tot_queue * + test_options->num_event; + return ret; } @@ -157,18 +180,22 @@ static int create_pool(test_global_t *global) odp_pool_param_t pool_param; odp_pool_t pool; test_options_t *test_options = &global->test_options; + uint32_t num_cpu = test_options->num_cpu; + uint32_t num_queue = test_options->num_queue; uint32_t num_event = test_options->num_event; uint32_t num_round = test_options->num_round; uint32_t max_burst = test_options->max_burst; - int num_cpu =
[lng-odp] [PATCH v1 0/1] test: sched_perf: add num queues option
Added option to set number of queues per worker thread. Number of active queues usually affects scheduler performance. Signed-off-by: Petri Savolainen petri.savolai...@linaro.org github /** Email created from pull request 666 (psavol:master-sched-perf-numqueue) ** https://github.com/Linaro/odp/pull/666 ** Patch: https://github.com/Linaro/odp/pull/666.patch ** Base sha: 7c87b66edc84e8c713fefc68d46464660adaf71e ** Merge commit sha: d8a76e7a44b96d574b4e8cc1741af827a1717475 **/ /github checkpatch.pl total: 0 errors, 0 warnings, 0 checks, 167 lines checked to_send-p-000.patch has no obvious style problems and is ready for submission. /checkpatch.pl
[lng-odp] [PATCH API-NEXT v2 2/3] linux-gen: timer: implement new timer pool capabilities
From: Matias Elo Signed-off-by: Matias Elo --- /** Email created from pull request 664 (matiaselo:dev/timer_pool_capability) ** https://github.com/Linaro/odp/pull/664 ** Patch: https://github.com/Linaro/odp/pull/664.patch ** Base sha: 9b2b5a9695ad66977c964c83691cd2fef4c45b85 ** Merge commit sha: 621d82de4e80bb59b8ff0e838b7d006155bf7c48 **/ platform/linux-generic/odp_timer.c | 4 1 file changed, 4 insertions(+) diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index 5fef5af2e..ae0e6e140 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -1072,6 +1072,10 @@ int odp_timer_capability(odp_timer_clk_src_t clk_src, int ret = 0; if (clk_src == ODP_CLOCK_CPU) { + capa->common.max_pools = MAX_TIMER_POOLS; + + capa->max_pools = MAX_TIMER_POOLS; + capa->max_num = 0; capa->highest_res_ns = highest_res_ns; } else { ODP_ERR("ODP timer system doesn't support external clock source currently\n");
[lng-odp] [PATCH API-NEXT v2 3/3] validation: timer: use new timer pool capabilities
From: Matias Elo Signed-off-by: Matias Elo --- /** Email created from pull request 664 (matiaselo:dev/timer_pool_capability) ** https://github.com/Linaro/odp/pull/664 ** Patch: https://github.com/Linaro/odp/pull/664.patch ** Base sha: 9b2b5a9695ad66977c964c83691cd2fef4c45b85 ** Merge commit sha: 621d82de4e80bb59b8ff0e838b7d006155bf7c48 **/ test/validation/api/timer/timer.c | 23 +++ 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c index e0f068823..8913b0166 100644 --- a/test/validation/api/timer/timer.c +++ b/test/validation/api/timer/timer.c @@ -773,6 +773,7 @@ static void timer_test_odp_timer_all(void) uint64_t ns, tick, ns2; pthrd_arg thrdarg; odp_timer_capability_t timer_capa; + uint32_t num_timers; /* Reserve at least one core for running other processes so the timer * test hopefully can run undisturbed and thus get better timing @@ -787,24 +788,26 @@ static void timer_test_odp_timer_all(void) if (num_workers < 1) num_workers = 1; + num_timers = num_workers * NTIMERS; + CU_ASSERT_FATAL(!odp_timer_capability(ODP_CLOCK_CPU, &timer_capa)); + if (timer_capa.max_num && timer_capa.max_num < num_timers) + num_timers = timer_capa.max_num; + /* Create timeout pools */ odp_pool_param_init(&params); params.type = ODP_POOL_TIMEOUT; - params.tmo.num = (NTIMERS + 1) * num_workers; + params.tmo.num = num_timers + num_workers; tbp = odp_pool_create("tmo_pool", &params); if (tbp == ODP_POOL_INVALID) CU_FAIL_FATAL("Timeout pool create failed"); /* Create a timer pool */ - if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa)) - CU_FAIL("Error: get timer capacity failed.\n"); - resolution_ns = MAX(RES, timer_capa.highest_res_ns); tparam.res_ns = resolution_ns; tparam.min_tmo = MIN_TMO; tparam.max_tmo = MAX_TMO; - tparam.num_timers = num_workers * NTIMERS; + tparam.num_timers = num_timers; tparam.priv = 0; tparam.clk_src = ODP_CLOCK_CPU; tp = odp_timer_pool_create(NAME, &tparam); @@ -827,9 +830,13 @@ static void timer_test_odp_timer_all(void) LOG_DBG("Resolution: %" PRIu64 "\n", tparam.res_ns); LOG_DBG("Min timeout: %" PRIu64 "\n", tparam.min_tmo); LOG_DBG("Max timeout: %" PRIu64 "\n", tparam.max_tmo); - LOG_DBG("Num timers..: %u\n", tparam.num_timers); - LOG_DBG("Tmo range: %u ms (%" PRIu64 " ticks)\n", RANGE_MS, + LOG_DBG("Num timers: %u\n", tparam.num_timers); + LOG_DBG("Tmo range: %u ms (%" PRIu64 " ticks)\n", RANGE_MS, odp_timer_ns_to_tick(tp, 100ULL * RANGE_MS)); + LOG_DBG("Max timers: %" PRIu32 "\n", timer_capa.max_num); + LOG_DBG("Max timer pools: %" PRIu32 "\n", timer_capa.max_pools); + LOG_DBG("Total max timer pools: %" PRIu32 "\n", + timer_capa.common.max_pools); tick = odp_timer_ns_to_tick(tp, 0); CU_ASSERT(tick == 0); @@ -875,7 +882,7 @@ static void timer_test_odp_timer_all(void) /* Check some statistics after the test */ if (odp_timer_pool_info(tp, &tpinfo) != 0) CU_FAIL("odp_timer_pool_info"); - CU_ASSERT(tpinfo.param.num_timers == (unsigned)num_workers * NTIMERS); + CU_ASSERT(tpinfo.param.num_timers == num_timers); CU_ASSERT(tpinfo.cur_timers == 0); CU_ASSERT(tpinfo.hwm_timers == odp_atomic_load_u32(&timers_allocated));
[lng-odp] [PATCH API-NEXT v2 1/3] api: timer: add timer pool capabilities
From: Matias Elo Signed-off-by: Matias Elo --- /** Email created from pull request 664 (matiaselo:dev/timer_pool_capability) ** https://github.com/Linaro/odp/pull/664 ** Patch: https://github.com/Linaro/odp/pull/664.patch ** Base sha: 9b2b5a9695ad66977c964c83691cd2fef4c45b85 ** Merge commit sha: 621d82de4e80bb59b8ff0e838b7d006155bf7c48 **/ include/odp/api/spec/timer.h | 15 +++ 1 file changed, 15 insertions(+) diff --git a/include/odp/api/spec/timer.h b/include/odp/api/spec/timer.h index d88faaaea..c6eca1433 100644 --- a/include/odp/api/spec/timer.h +++ b/include/odp/api/spec/timer.h @@ -135,6 +135,21 @@ typedef struct { * Timer capability */ typedef struct { + /** Common for all clock sources */ + struct { + /** Maximum number of timer pools of any clock source */ + uint32_t max_pools; + } common; + + /** Maximum number of timer pools */ + uint32_t max_pools; + + /** Maximum number of timers in a pool +* +* The value of zero means that limited only by the available +* memory size for the pool. */ + uint32_t max_num; + /** Highest timer resolution in nanoseconds. * * This defines the highest resolution supported by a timer.