I've fixed these in v2.  I've also added a new API,
odp_schedule_group_count(), which returns the number of threads belonging
to a schedule group.
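
For reference, here's a rough sketch of how the count could be derived from
the sched_grp[] bookkeeping in this patch (not the final v2 code; the exact
signature and error handling may still change):

    int odp_schedule_group_count(odp_schedule_group_t group)
    {
            int count;

            odp_spinlock_lock(&sched->grp_lock);

            /* Any valid group index has a thrmask that can be counted */
            if (group >= 0 && group < ODP_CONFIG_SCHED_GRPS)
                    count = odp_thrmask_count(sched->sched_grp[group].mask);
            else
                    count = -1;

            odp_spinlock_unlock(&sched->grp_lock);
            return count;
    }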

On Thu, Jul 30, 2015 at 1:12 AM, Mario Torrecillas Rodriguez <
mario.torrecillasrodrig...@arm.com> wrote:

> The problem with using count to determine whether the slot is unallocated
> is that count holds the current number of threads in the group. If a group
> is created with no threads in it (which is allowed by the implementation
> as far as I can see), then no thread will be able to join it at a later
> point. You may well want to do exactly that: create a few empty groups at
> initialization time, and then populate them as threads get spawned.
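>
> For example, something like this (just a sketch against the API names in
> this patch; "workers" is a placeholder group name):
>
>     odp_thrmask_t zero, self;
>     odp_schedule_group_t grp;
>
>     /* At init time: create the group with no member threads */
>     odp_thrmask_zero(&zero);
>     grp = odp_schedule_group_create("workers", &zero);
>
>     /* Later, from a newly spawned thread: add just this thread */
>     odp_thrmask_zero(&self);
>     odp_thrmask_set(&self, odp_thread_id());
>     odp_schedule_group_join(grp, &self);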
>
> I will be trying this out today; we have an internal project that requires
> schedule groups support.
>
> Cheers,
> Mario.
>
> From: lng-odp <lng-odp-boun...@lists.linaro.org> on behalf of Bill
> Fischofer <bill.fischo...@linaro.org>
> Date: Wednesday, 29 July 2015 13:09
> To: Stuart Haslam <stuart.has...@linaro.org>
> Cc: LNG ODP Mailman List <lng-odp@lists.linaro.org>
> Subject: Re: [lng-odp] [API-NEXT PATCH 2/5] linux-generic: schedule:
> implement scheduler groups
>
> Thanks for the comments.
>
> On Wed, Jul 29, 2015 at 6:53 AM, Stuart Haslam <stuart.has...@linaro.org>
> wrote:
>
>> On Tue, Jul 28, 2015 at 08:56:53PM -0500, Bill Fischofer wrote:
>> > Signed-off-by: Bill Fischofer <bill.fischo...@linaro.org>
>> > ---
>> >  include/odp/api/config.h                           |   5 +
>> >  .../include/odp/plat/schedule_types.h              |   4 +
>> >  platform/linux-generic/odp_schedule.c              | 149 +++++++++++++++++++++
>> >  platform/linux-generic/odp_thread.c                |  25 +++-
>> >  4 files changed, 177 insertions(+), 6 deletions(-)
>> >
>> > diff --git a/include/odp/api/config.h b/include/odp/api/config.h
>> > index b5c8fdd..302eaf5 100644
>> > --- a/include/odp/api/config.h
>> > +++ b/include/odp/api/config.h
>> > @@ -44,6 +44,11 @@ extern "C" {
>> >  #define ODP_CONFIG_SCHED_PRIOS  8
>> >
>> >  /**
>> > + * Number of scheduling groups
>> > + */
>> > +#define ODP_CONFIG_SCHED_GRPS  16
>> > +
>> > +/**
>> >   * Maximum number of packet IO resources
>> >   */
>> >  #define ODP_CONFIG_PKTIO_ENTRIES 64
>> > diff --git a/platform/linux-generic/include/odp/plat/schedule_types.h b/platform/linux-generic/include/odp/plat/schedule_types.h
>> > index 91e62e7..f13bfab 100644
>> > --- a/platform/linux-generic/include/odp/plat/schedule_types.h
>> > +++ b/platform/linux-generic/include/odp/plat/schedule_types.h
>> > @@ -43,8 +43,12 @@ typedef int odp_schedule_sync_t;
>> >
>> >  typedef int odp_schedule_group_t;
>> >
>> > +/* These must be kept in sync with thread_globals_t in odp_thread.c */
>> > +#define ODP_SCHED_GROUP_INVALID -1
>> >  #define ODP_SCHED_GROUP_ALL     0
>> >  #define ODP_SCHED_GROUP_WORKER  1
>> > +#define ODP_SCHED_GROUP_CONTROL 2
>> > +#define ODP_SCHED_GROUP_NAMED   3
>> >
>> >  #define ODP_SCHED_GROUP_NAME_LEN 32
>> >
>> > diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
>> > index 5d32c81..316aaaf 100644
>> > --- a/platform/linux-generic/odp_schedule.c
>> > +++ b/platform/linux-generic/odp_schedule.c
>> > @@ -23,6 +23,8 @@
>> >  #include <odp_queue_internal.h>
>> >  #include <odp_packet_io_internal.h>
>> >
>> > +odp_thrmask_t ODP_SCHED_MASK_ALL;
>>
>> This seems odd being upper case and ODP_ prefixed, how about just
>> sched_mask_all.
>>
>
> Good point.  I can fix that in v2.
>
>>
>> > +
>> >  /* Number of schedule commands.
>> >   * One per scheduled queue and packet interface */
>> >  #define NUM_SCHED_CMD (ODP_CONFIG_QUEUES + ODP_CONFIG_PKTIO_ENTRIES)
>> > @@ -48,6 +50,12 @@ typedef struct {
>> >       odp_pool_t     pool;
>> >       odp_shm_t      shm;
>> >       uint32_t       pri_count[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
>> > +     odp_spinlock_t grp_lock;
>> > +     struct {
>> > +             char           name[ODP_SCHED_GROUP_NAME_LEN];
>> > +             uint32_t       count;
>> > +             odp_thrmask_t *mask;
>> > +     } sched_grp[ODP_CONFIG_SCHED_GRPS];
>> >  } sched_t;
>> >
>> >  /* Schedule command */
>> > @@ -87,6 +95,9 @@ static sched_t *sched;
>> >  /* Thread local scheduler context */
>> >  static __thread sched_local_t sched_local;
>> >
>> > +/* Internal routine to get scheduler thread mask addrs */
>> > +odp_thrmask_t *thread_sched_grp_mask(int index);
>> > +
>> >  static void sched_local_init(void)
>> >  {
>> >       int i;
>> > @@ -163,6 +174,16 @@ int odp_schedule_init_global(void)
>> >               }
>> >       }
>> >
>> > +     odp_spinlock_init(&sched->grp_lock);
>> > +
>> > +     for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++) {
>> > +             memset(&sched->sched_grp[i].name, 0, ODP_SCHED_GROUP_NAME_LEN);
>> > +             sched->sched_grp[i].count = 0;
>> > +             sched->sched_grp[i].mask = thread_sched_grp_mask(i);
>> > +     }
>> > +
>> > +     odp_thrmask_setall(&ODP_SCHED_MASK_ALL);
>> > +
>> >       ODP_DBG("done\n");
>> >
>> >       return 0;
>> > @@ -466,6 +487,18 @@ static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
>> >                       }
>> >
>> >                       qe  = sched_cmd->qe;
>> > +                     if (qe->s.param.sched.group > ODP_SCHED_GROUP_ALL &&
>> > +                         !odp_thrmask_isset(sched->sched_grp
>> > +                                            [qe->s.param.sched.group].mask,
>> > +                                            thr)) {
>> > +                             /* This thread is not eligible for work from
>> > +                              * this queue, so continue scheduling it.
>> > +                              */
>> > +                             if (odp_queue_enq(pri_q, ev))
>> > +                                     ODP_ABORT("schedule failed\n");
>> > +                             continue;
>> > +                     }
>>
>> This doesn't seem like the most efficient way to do it, but I assume
>> it's like this because pri_mask/pri_queue are globals, so this is the
>> best that can be done without a larger rewrite.
>>
>>
> These are used to control the priorities (I assume "pri" is an
> abbreviation for "priority"?) and not thread groups.  This seemed the
> simplest and most straightforward way to filter things.  I'm open to other
> suggestions.
>
>
>> >                       num = queue_deq_multi(qe, sched_local.buf_hdr, max_deq);
>> >
>> >                       if (num < 0) {
>> > @@ -587,3 +620,119 @@ int odp_schedule_num_prio(void)
>> >  {
>> >       return ODP_CONFIG_SCHED_PRIOS;
>> >  }
>> > +
>> > +odp_schedule_group_t odp_schedule_group_create(const char *name,
>> > +                                            const odp_thrmask_t *mask)
>> > +{
>> > +     odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
>> > +     int i;
>> > +
>> > +     odp_spinlock_lock(&sched->grp_lock);
>> > +
>> > +     for (i = ODP_SCHED_GROUP_NAMED; i < ODP_CONFIG_SCHED_GRPS; i++) {
>> > +             if (sched->sched_grp[i].name[0] == 0 &&
>> > +                 sched->sched_grp[i].count == 0) {
>> > +                     strncpy(sched->sched_grp[i].name, name,
>> > +                             ODP_SCHED_GROUP_NAME_LEN - 1);
>> > +                     sched->sched_grp[i].name[ODP_SCHED_GROUP_NAME_LEN - 1]
>> > +                             = 0;
>> > +                     memcpy(sched->sched_grp[i].mask, mask, sizeof(*mask));
>>
>> odp_thrmask_copy()?
>>
>
> Good catch.  I missed that one in odp_thrmask.c.  Will correct in v2.
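> (I.e., replacing the memcpy() with something like
> odp_thrmask_copy(sched->sched_grp[i].mask, mask), assuming the dest, src
> argument order.)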
>
>>
>> > +                     sched->sched_grp[i].count = odp_thrmask_count(mask);
>> > +                     group = (odp_schedule_group_t)i;
>> > +                     break;
>> > +             }
>> > +     }
>> > +
>> > +     odp_spinlock_unlock(&sched->grp_lock);
>> > +     return group;
>> > +}
>> > +
>> > +int odp_schedule_group_destroy(odp_schedule_group_t group)
>> > +{
>> > +     int ret;
>> > +
>> > +     odp_spinlock_lock(&sched->grp_lock);
>> > +
>> > +     if (group < ODP_CONFIG_SCHED_GRPS &&
>> > +         group > ODP_SCHED_GROUP_NAMED) {
>> > +             odp_thrmask_zero(sched->sched_grp[group].mask);
>> > +             sched->sched_grp[group].count = 0;
>> > +             memset(&sched->sched_grp[group].name, 0,
>> > +                    ODP_SCHED_GROUP_NAME_LEN);
>> > +             ret = 0;
>> > +     } else {
>> > +             ret = -1;
>> > +     }
>> > +
>> > +     odp_spinlock_unlock(&sched->grp_lock);
>> > +     return ret;
>> > +}
>> > +
>> > +odp_schedule_group_t odp_schedule_group_lookup(const char *name)
>> > +{
>> > +     odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
>> > +     int i;
>> > +
>> > +     odp_spinlock_lock(&sched->grp_lock);
>> > +
>> > +     for (i = ODP_SCHED_GROUP_NAMED; i < ODP_CONFIG_SCHED_GRPS; i++) {
>> > +             if (strcmp(name, sched->sched_grp[i].name) == 0) {
>> > +                     group = (odp_schedule_group_t)i;
>> > +                     break;
>> > +             }
>> > +     }
>> > +
>> > +     odp_spinlock_unlock(&sched->grp_lock);
>> > +     return group;
>> > +}
>> > +
>> > +int odp_schedule_group_join(odp_schedule_group_t group,
>> > +                         const odp_thrmask_t *mask)
>> > +{
>> > +     int ret;
>> > +
>> > +     odp_spinlock_lock(&sched->grp_lock);
>> > +
>> > +     if (group < ODP_CONFIG_SCHED_GRPS &&
>> > +         group >= ODP_SCHED_GROUP_NAMED &&
>> > +         sched->sched_grp[group].count > 0) {
>>
>> I can't think why it would be an error to join a group that's currently
>> empty.
>>
>
> A count of 0 indicates that the slot is unallocated.  You can't join a
> group that hasn't been created.
>
>>
>> > +             odp_thrmask_or(sched->sched_grp[group].mask,
>> > +                            sched->sched_grp[group].mask,
>> > +                            mask);
>> > +             sched->sched_grp[group].count =
>> > +                     odp_thrmask_count(sched->sched_grp[group].mask);
>> > +             ret = 0;
>> > +     } else {
>> > +             ret = -1;
>> > +     }
>> > +
>> > +     odp_spinlock_unlock(&sched->grp_lock);
>> > +     return ret;
>> > +}
>> > +
>> > +int odp_schedule_group_leave(odp_schedule_group_t group,
>> > +                          const odp_thrmask_t *mask)
>> > +{
>> > +     int ret;
>> > +
>> > +     odp_spinlock_lock(&sched->grp_lock);
>> > +
>> > +     if (group < ODP_CONFIG_SCHED_GRPS &&
>> > +         group >= ODP_SCHED_GROUP_NAMED &&
>> > +         sched->sched_grp[group].count > 0) {
>>
>> Similar to the above, I'm not sure why count==0 should be treated as an
>> error here. So long as the group is valid (name[0] is set), the end result
>> is that the provided mask isn't a member of that group, which is as
>> expected.
>>
>> Actually I'm now not sure why count is needed at all.
>>
>
> As noted above, I'm currently using count > 0 to indicate that the slot is
> allocated.  I agree name[0] could serve the same purpose, in which case
> count is probably not needed.  I'll rework this in v2.
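>
> Roughly along these lines (a sketch only, not necessarily what v2 ends up
> with):
>
>     /* A named slot is allocated iff it has a name; count then becomes
>      * purely informational. */
>     static int grp_is_allocated(odp_schedule_group_t group)
>     {
>             return group >= ODP_SCHED_GROUP_NAMED &&
>                    group < ODP_CONFIG_SCHED_GRPS &&
>                    sched->sched_grp[group].name[0] != 0;
>     }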
>
>
>>
>> > +             odp_thrmask_t leavemask;
>> > +
>> > +             odp_thrmask_xor(&leavemask, mask, &ODP_SCHED_MASK_ALL);
>> > +             odp_thrmask_and(sched->sched_grp[group].mask,
>> > +                             sched->sched_grp[group].mask,
>> > +                             &leavemask);
>> > +             sched->sched_grp[group].count =
>> > +                     odp_thrmask_count(sched->sched_grp[group].mask);
>> > +             ret = 0;
>> > +     } else {
>> > +             ret = -1;
>> > +     }
>> > +
>> > +     odp_spinlock_unlock(&sched->grp_lock);
>> > +     return ret;
>> > +}
>> > diff --git a/platform/linux-generic/odp_thread.c b/platform/linux-generic/odp_thread.c
>> > index 9905c78..770c64e 100644
>> > --- a/platform/linux-generic/odp_thread.c
>> > +++ b/platform/linux-generic/odp_thread.c
>> > @@ -32,9 +32,15 @@ typedef struct {
>> >
>> >  typedef struct {
>> >       thread_state_t thr[ODP_CONFIG_MAX_THREADS];
>> > -     odp_thrmask_t  all;
>> > -     odp_thrmask_t  worker;
>> > -     odp_thrmask_t  control;
>> > +     union {
>> > +             /* struct order must be kept in sync with schedule_types.h */
>> > +             struct {
>> > +                     odp_thrmask_t  all;
>> > +                     odp_thrmask_t  worker;
>> > +                     odp_thrmask_t  control;
>> > +             };
>> > +             odp_thrmask_t sched_grp_mask[ODP_CONFIG_SCHED_GRPS];
>> > +     };
>> >       uint32_t       num;
>> >       uint32_t       num_worker;
>> >       uint32_t       num_control;
>> > @@ -53,6 +59,7 @@ static __thread thread_state_t *this_thread;
>> >  int odp_thread_init_global(void)
>> >  {
>> >       odp_shm_t shm;
>> > +     int i;
>> >
>> >       shm = odp_shm_reserve("odp_thread_globals",
>> >                             sizeof(thread_globals_t),
>> > @@ -65,13 +72,19 @@ int odp_thread_init_global(void)
>> >
>> >       memset(thread_globals, 0, sizeof(thread_globals_t));
>> >       odp_spinlock_init(&thread_globals->lock);
>> > -     odp_thrmask_zero(&thread_globals->all);
>> > -     odp_thrmask_zero(&thread_globals->worker);
>> > -     odp_thrmask_zero(&thread_globals->control);
>> > +
>> > +     for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++)
>> > +             odp_thrmask_zero(&thread_globals->sched_grp_mask[i]);
>> >
>> >       return 0;
>> >  }
>> >
>> > +odp_thrmask_t *thread_sched_grp_mask(int index);
>> > +odp_thrmask_t *thread_sched_grp_mask(int index)
>> > +{
>> > +     return &thread_globals->sched_grp_mask[index];
>> > +}
>> > +
>> >  int odp_thread_term_global(void)
>> >  {
>> >       int ret;
>> > --
>> > 2.1.4
>>
>
>