Also, when running ./configure --enable-schedule-iquery I noticed this
message in the ./configure output:

...
checking for pcap/bpf.h... yes
checking for pcap_open_offline in -lpcap... yes
checking for library containing dlopen... -ldl
./configure: line 20700: schedule-iquery=yes: command not found
checking for doxygen... doxygen
checking for asciidoctor... asciidoctor
...

This isn't related to this patch, but something to look into?
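
That "command not found" message looks like the usual autoconf pitfall
of keeping the hyphen from the option name in a shell assignment: shell
variable names can't contain '-', so a line like schedule-iquery=yes in
configure.ac is parsed as a command rather than an assignment. Just a
guess without having looked at the m4, but renaming the variable to
something like schedule_iquery should fix it.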

On Mon, Feb 6, 2017 at 7:04 PM, Bill Fischofer
<bill.fischo...@linaro.org> wrote:
> This looks reasonable, but I'm having trouble trying to test it. I've
> specified --enable-test-perf-proc on ./configure, but the various
> perf tests still seem to be using pthreads. Did I miss some
> configuration flag?
>
> On Mon, Feb 6, 2017 at 1:37 AM, Yi He <yi...@linaro.org> wrote:
>> Ping, this patch still applies to the most recent api-next branch.
>>
>> Best Regards, Yi
>>
>> On 23 December 2016 at 10:32, Yi He <yi...@linaro.org> wrote:
>>
>>> The SP scheduler hangs in the process mode performance test
>>> because its global data structures were not created in a
>>> shared memory region.
>>>
>>> Signed-off-by: Yi He <yi...@linaro.org>
>>> ---
>>> since v1: rebased upon Petri's "linux-gen: schedule_sp: use ring as priority queue"
>>>
>>>  platform/linux-generic/odp_schedule_sp.c | 100 ++++++++++++++++++-------------
>>>  1 file changed, 60 insertions(+), 40 deletions(-)
>>>
>>> diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
>>> index 5150d28..bb7416a 100644
>>> --- a/platform/linux-generic/odp_schedule_sp.c
>>> +++ b/platform/linux-generic/odp_schedule_sp.c
>>> @@ -9,6 +9,7 @@
>>>  #include <odp/api/thread.h>
>>>  #include <odp/api/time.h>
>>>  #include <odp/api/schedule.h>
>>> +#include <odp/api/shared_memory.h>
>>>  #include <odp_schedule_if.h>
>>>  #include <odp_debug_internal.h>
>>>  #include <odp_align_internal.h>
>>> @@ -108,6 +109,7 @@ typedef struct {
>>>         sched_cmd_t   pktio_cmd[NUM_PKTIO];
>>>         prio_queue_t  prio_queue[NUM_GROUP][NUM_PRIO];
>>>         sched_group_t sched_group;
>>> +       odp_shm_t     shm;
>>>  } sched_global_t;
>>>
>>>  typedef struct {
>>> @@ -119,7 +121,7 @@ typedef struct {
>>>         int          group[NUM_GROUP];
>>>  } sched_local_t;
>>>
>>> -static sched_global_t sched_global;
>>> +static sched_global_t *sched_global;
>>>  static __thread sched_local_t sched_local;
>>>
>>>  static inline uint32_t index_to_ring_idx(int pktio, uint32_t index)
>>> @@ -145,30 +147,44 @@ static inline uint32_t index_from_ring_idx(uint32_t *index, uint32_t ring_idx)
>>>  static int init_global(void)
>>>  {
>>>         int i, j;
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       odp_shm_t shm;
>>> +       sched_group_t *sched_group = NULL;
>>>
>>>         ODP_DBG("Using SP scheduler\n");
>>>
>>> -       memset(&sched_global, 0, sizeof(sched_global_t));
>>> +       shm = odp_shm_reserve("sp_scheduler",
>>> +                             sizeof(sched_global_t),
>>> +                             ODP_CACHE_LINE_SIZE, 0);
>>> +
>>> +       sched_global = odp_shm_addr(shm);
>>> +
>>> +       if (sched_global == NULL) {
>>> +               ODP_ERR("Schedule init: Shm reserve failed.\n");
>>> +               return -1;
>>> +       }
>>> +
>>> +       memset(sched_global, 0, sizeof(sched_global_t));
>>> +       sched_global->shm = shm;
>>>
>>>         for (i = 0; i < NUM_QUEUE; i++) {
>>> -               sched_global.queue_cmd[i].s.type     = CMD_QUEUE;
>>> -               sched_global.queue_cmd[i].s.index    = i;
>>> -               sched_global.queue_cmd[i].s.ring_idx = index_to_ring_idx(0, i);
>>> +               sched_global->queue_cmd[i].s.type     = CMD_QUEUE;
>>> +               sched_global->queue_cmd[i].s.index    = i;
>>> +               sched_global->queue_cmd[i].s.ring_idx = index_to_ring_idx(0, i);
>>>         }
>>>
>>>         for (i = 0; i < NUM_PKTIO; i++) {
>>> -               sched_global.pktio_cmd[i].s.type     = CMD_PKTIO;
>>> -               sched_global.pktio_cmd[i].s.index    = i;
>>> -               sched_global.pktio_cmd[i].s.ring_idx = index_to_ring_idx(1, i);
>>> -               sched_global.pktio_cmd[i].s.prio     = PKTIN_PRIO;
>>> -               sched_global.pktio_cmd[i].s.group    = GROUP_PKTIN;
>>> +               sched_global->pktio_cmd[i].s.type     = CMD_PKTIO;
>>> +               sched_global->pktio_cmd[i].s.index    = i;
>>> +               sched_global->pktio_cmd[i].s.ring_idx = index_to_ring_idx(1, i);
>>> +               sched_global->pktio_cmd[i].s.prio     = PKTIN_PRIO;
>>> +               sched_global->pktio_cmd[i].s.group    = GROUP_PKTIN;
>>>         }
>>>
>>>         for (i = 0; i < NUM_GROUP; i++)
>>>                 for (j = 0; j < NUM_PRIO; j++)
>>> -                       ring_init(&sched_global.prio_queue[i][j].ring);
>>> +                       ring_init(&sched_global->prio_queue[i][j].ring);
>>>
>>> +       sched_group = &sched_global->sched_group;
>>>         odp_ticketlock_init(&sched_group->s.lock);
>>>
>>>         for (i = 0; i < NUM_THREAD; i++)
>>> @@ -202,16 +218,22 @@ static int init_local(void)
>>>
>>>  static int term_global(void)
>>>  {
>>> -       int qi;
>>> +       int qi, ret = 0;
>>>
>>>         for (qi = 0; qi < NUM_QUEUE; qi++) {
>>> -               if (sched_global.queue_cmd[qi].s.init) {
>>> +               if (sched_global->queue_cmd[qi].s.init) {
>>>                         /* todo: dequeue until empty ? */
>>>                         sched_cb_queue_destroy_finalize(qi);
>>>                 }
>>>         }
>>>
>>> -       return 0;
>>> +       ret = odp_shm_free(sched_global->shm);
>>> +       if (ret < 0) {
>>> +               ODP_ERR("Shm free failed for sp_scheduler");
>>> +               ret = -1;
>>> +       }
>>> +
>>> +       return ret;
>>>  }
>>>
>>>  static int term_local(void)
>>> @@ -267,7 +289,7 @@ static void remove_group(sched_group_t *sched_group, int thr, int group)
>>>
>>>  static int thr_add(odp_schedule_group_t group, int thr)
>>>  {
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>
>>>         if (group < 0 || group >= NUM_GROUP)
>>>                 return -1;
>>> @@ -292,7 +314,7 @@ static int thr_add(odp_schedule_group_t group, int thr)
>>>
>>>  static int thr_rem(odp_schedule_group_t group, int thr)
>>>  {
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>
>>>         if (group < 0 || group >= NUM_GROUP)
>>>                 return -1;
>>> @@ -320,7 +342,7 @@ static int num_grps(void)
>>>
>>>  static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
>>>  {
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>         odp_schedule_group_t group = sched_param->group;
>>>         int prio = 0;
>>>
>>> @@ -333,18 +355,18 @@ static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
>>>         if (sched_param->prio > 0)
>>>                 prio = LOWEST_QUEUE_PRIO;
>>>
>>> -       sched_global.queue_cmd[qi].s.prio  = prio;
>>> -       sched_global.queue_cmd[qi].s.group = group;
>>> -       sched_global.queue_cmd[qi].s.init  = 1;
>>> +       sched_global->queue_cmd[qi].s.prio  = prio;
>>> +       sched_global->queue_cmd[qi].s.group = group;
>>> +       sched_global->queue_cmd[qi].s.init  = 1;
>>>
>>>         return 0;
>>>  }
>>>
>>>  static void destroy_queue(uint32_t qi)
>>>  {
>>> -       sched_global.queue_cmd[qi].s.prio  = 0;
>>> -       sched_global.queue_cmd[qi].s.group = 0;
>>> -       sched_global.queue_cmd[qi].s.init  = 0;
>>> +       sched_global->queue_cmd[qi].s.prio  = 0;
>>> +       sched_global->queue_cmd[qi].s.group = 0;
>>> +       sched_global->queue_cmd[qi].s.init  = 0;
>>>  }
>>>
>>>  static inline void add_tail(sched_cmd_t *cmd)
>>> @@ -354,8 +376,7 @@ static inline void add_tail(sched_cmd_t *cmd)
>>>         int prio     = cmd->s.prio;
>>>         uint32_t idx = cmd->s.ring_idx;
>>>
>>> -       prio_queue = &sched_global.prio_queue[group][prio];
>>> -
>>> +       prio_queue = &sched_global->prio_queue[group][prio];
>>>         ring_enq(&prio_queue->ring, RING_MASK, idx);
>>>  }
>>>
>>> @@ -365,8 +386,7 @@ static inline sched_cmd_t *rem_head(int group, int prio)
>>>         uint32_t ring_idx, index;
>>>         int pktio;
>>>
>>> -       prio_queue = &sched_global.prio_queue[group][prio];
>>> -
>>> +       prio_queue = &sched_global->prio_queue[group][prio];
>>>         ring_idx = ring_deq(&prio_queue->ring, RING_MASK);
>>>
>>>         if (ring_idx == RING_EMPTY)
>>> @@ -375,16 +395,16 @@ static inline sched_cmd_t *rem_head(int group, int prio)
>>>         pktio = index_from_ring_idx(&index, ring_idx);
>>>
>>>         if (pktio)
>>> -               return &sched_global.pktio_cmd[index];
>>> +               return &sched_global->pktio_cmd[index];
>>>
>>> -       return &sched_global.queue_cmd[index];
>>> +       return &sched_global->queue_cmd[index];
>>>  }
>>>
>>>  static int sched_queue(uint32_t qi)
>>>  {
>>>         sched_cmd_t *cmd;
>>>
>>> -       cmd = &sched_global.queue_cmd[qi];
>>> +       cmd = &sched_global->queue_cmd[qi];
>>>         add_tail(cmd);
>>>
>>>         return 0;
>>> @@ -410,7 +430,7 @@ static void pktio_start(int pktio_index, int num, int pktin_idx[])
>>>         ODP_DBG("pktio index: %i, %i pktin queues %i\n",
>>>                 pktio_index, num, pktin_idx[0]);
>>>
>>> -       cmd = &sched_global.pktio_cmd[pktio_index];
>>> +       cmd = &sched_global->pktio_cmd[pktio_index];
>>>
>>>         if (num > NUM_PKTIN)
>>>                 ODP_ABORT("Supports only %i pktin queues per interface\n",
>>> @@ -428,7 +448,7 @@ static inline sched_cmd_t *sched_cmd(void)
>>>  {
>>>         int prio, i;
>>>         int thr = sched_local.thr_id;
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>         thr_group_t *thr_group = &sched_group->s.thr[thr];
>>>         uint32_t gen_cnt;
>>>
>>> @@ -602,7 +622,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
>>>                                                   const odp_thrmask_t *thrmask)
>>>  {
>>>         odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>         int i;
>>>
>>>         odp_ticketlock_lock(&sched_group->s.lock);
>>> @@ -633,7 +653,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
>>>
>>>  static int schedule_group_destroy(odp_schedule_group_t group)
>>>  {
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>
>>>         if (group < NUM_STATIC_GROUP || group >= NUM_GROUP)
>>>                 return -1;
>>> @@ -656,7 +676,7 @@ static int schedule_group_destroy(odp_schedule_group_t group)
>>>  static odp_schedule_group_t schedule_group_lookup(const char *name)
>>>  {
>>>         odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>         int i;
>>>
>>>         odp_ticketlock_lock(&sched_group->s.lock);
>>> @@ -677,7 +697,7 @@ static int schedule_group_join(odp_schedule_group_t group,
>>>                                const odp_thrmask_t *thrmask)
>>>  {
>>>         int thr;
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>
>>>         if (group < 0 || group >= NUM_GROUP)
>>>                 return -1;
>>> @@ -709,7 +729,7 @@ static int schedule_group_leave(odp_schedule_group_t group,
>>>                                 const odp_thrmask_t *thrmask)
>>>  {
>>>         int thr;
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>         odp_thrmask_t *all = &sched_group->s.group[GROUP_ALL].mask;
>>>         odp_thrmask_t not;
>>>
>>> @@ -743,7 +763,7 @@ static int schedule_group_leave(odp_schedule_group_t group,
>>>  static int schedule_group_thrmask(odp_schedule_group_t group,
>>>                                   odp_thrmask_t *thrmask)
>>>  {
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>
>>>         if (group < 0 || group >= NUM_GROUP)
>>>                 return -1;
>>> @@ -765,7 +785,7 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
>>>  static int schedule_group_info(odp_schedule_group_t group,
>>>                                odp_schedule_group_info_t *info)
>>>  {
>>> -       sched_group_t *sched_group = &sched_global.sched_group;
>>> +       sched_group_t *sched_group = &sched_global->sched_group;
>>>
>>>         if (group < 0 || group >= NUM_GROUP)
>>>                 return -1;
>>> --
>>> 2.7.4
>>>
>>>
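
P.S. The fix itself follows the standard ODP pattern for process mode:
after fork() every worker process gets its own copy of file-scope
statics, so per-module global state has to live in an odp_shm region
instead. A minimal (untested) sketch of that pattern, using a
hypothetical "my_module" region name:

    #include <string.h>
    #include <odp/api/align.h>
    #include <odp/api/shared_memory.h>

    typedef struct {
            odp_shm_t shm;  /* keep the handle so term_global can free it */
            /* ... rest of the module's global state ... */
    } my_global_t;

    /* Pointer into shared memory, not a per-process static object */
    static my_global_t *my_global;

    static int my_init_global(void)
    {
            odp_shm_t shm = odp_shm_reserve("my_module",
                                            sizeof(my_global_t),
                                            ODP_CACHE_LINE_SIZE, 0);

            /* In linux-generic, odp_shm_addr() returns NULL for an
             * invalid handle, so one check covers a failed reserve too */
            my_global = odp_shm_addr(shm);
            if (my_global == NULL)
                    return -1;

            memset(my_global, 0, sizeof(my_global_t));
            my_global->shm = shm;
            return 0;
    }

    static int my_term_global(void)
    {
            return odp_shm_free(my_global->shm) < 0 ? -1 : 0;
    }

The NULL check after odp_shm_addr() is the same one the patch adds in
init_global(), so the sketch should match its behavior.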
