Re: [lng-odp] [PATCH 1/6] linux-gen: queue: reuse enq_ and deq_multi

2016-09-19 Thread Bill Fischofer
For this series:

Reviewed-and-tested-by: Bill Fischofer 

On Thu, Sep 15, 2016 at 8:39 AM, Petri Savolainen <
petri.savolai...@nokia.com> wrote:

> Reuse the multi enqueue and dequeue implementations for single
> enq/deq operations. This lets the implementation concentrate on
> optimizing the multi operations. Single operations do not suffer a
> major performance decrease since the compiler will likely optimize
> the inlined code for the single-operation case (num is fixed to 1).
>
> Signed-off-by: Petri Savolainen 
> ---
>  platform/linux-generic/include/odp_schedule_if.h |   3 -
>  platform/linux-generic/odp_queue.c   | 134
> +++
>  platform/linux-generic/odp_schedule.c|   1 -
>  platform/linux-generic/odp_schedule_ordered.c|  20 
>  platform/linux-generic/odp_schedule_sp.c |  12 --
>  5 files changed, 41 insertions(+), 129 deletions(-)
>
> diff --git a/platform/linux-generic/include/odp_schedule_if.h
> b/platform/linux-generic/include/odp_schedule_if.h
> index 13cdfb3..df73e70 100644
> --- a/platform/linux-generic/include/odp_schedule_if.h
> +++ b/platform/linux-generic/include/odp_schedule_if.h
> @@ -30,8 +30,6 @@ typedef int (*schedule_init_queue_fn_t)(uint32_t
> queue_index,
>);
>  typedef void (*schedule_destroy_queue_fn_t)(uint32_t queue_index);
>  typedef int (*schedule_sched_queue_fn_t)(uint32_t queue_index);
> -typedef int (*schedule_ord_enq_fn_t)(uint32_t queue_index, void *buf_hdr,
> -int sustain, int *ret);
>  typedef int (*schedule_ord_enq_multi_fn_t)(uint32_t queue_index,
>void *buf_hdr[], int num,
>int sustain, int *ret);
> @@ -48,7 +46,6 @@ typedef struct schedule_fn_t {
> schedule_init_queue_fn_tinit_queue;
> schedule_destroy_queue_fn_t destroy_queue;
> schedule_sched_queue_fn_t   sched_queue;
> -   schedule_ord_enq_fn_t   ord_enq;
> schedule_ord_enq_multi_fn_t ord_enq_multi;
> schedule_init_global_fn_t   init_global;
> schedule_term_global_fn_t   term_global;
> diff --git a/platform/linux-generic/odp_queue.c
> b/platform/linux-generic/odp_queue.c
> index bec1e51..80d99e8 100644
> --- a/platform/linux-generic/odp_queue.c
> +++ b/platform/linux-generic/odp_queue.c
> @@ -65,19 +65,6 @@ static inline int queue_is_ordered(queue_entry_t *qe)
> return qe->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED;
>  }
>
> -static inline void queue_add(queue_entry_t *queue,
> -odp_buffer_hdr_t *buf_hdr)
> -{
> -   buf_hdr->next = NULL;
> -
> -   if (queue->s.head)
> -   queue->s.tail->next = buf_hdr;
> -   else
> -   queue->s.head = buf_hdr;
> -
> -   queue->s.tail = buf_hdr;
> -}
> -
>  queue_entry_t *get_qentry(uint32_t queue_id)
>  {
> return &queue_tbl->queue[queue_id];
> @@ -396,37 +383,8 @@ odp_queue_t odp_queue_lookup(const char *name)
> return ODP_QUEUE_INVALID;
>  }
>
> -int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr, int
> sustain)
> -{
> -   int ret;
> -
> -   if (sched_fn->ord_enq(queue->s.index, buf_hdr, sustain, &ret))
> -   return ret;
> -
> -   LOCK(&queue->s.lock);
> -
> -   if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
> -   UNLOCK(&queue->s.lock);
> -   ODP_ERR("Bad queue status\n");
> -   return -1;
> -   }
> -
> -   queue_add(queue, buf_hdr);
> -
> -   if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
> -   queue->s.status = QUEUE_STATUS_SCHED;
> -   UNLOCK(&queue->s.lock);
> -   if (sched_fn->sched_queue(queue->s.index))
> -   ODP_ABORT("schedule_queue failed\n");
> -   return 0;
> -   }
> -
> -   UNLOCK(&queue->s.lock);
> -   return 0;
> -}
> -
> -int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
> -   int num, int sustain)
> +static inline int enq_multi(queue_entry_t *queue, odp_buffer_hdr_t
> *buf_hdr[],
> +   int num, int sustain)
>  {
> int sched = 0;
> int i, ret;
> @@ -472,6 +430,24 @@ int queue_enq_multi(queue_entry_t *queue,
> odp_buffer_hdr_t *buf_hdr[],
> return num; /* All events enqueued */
>  }
>
> +int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
> int num,
> +   int sustain)
> +{
> +   return enq_multi(queue, buf_hdr, num, sustain);
> +}
> +
> +int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr, int
> sustain)
> +{
> +   int ret;
> +
> +   ret = enq_multi(queue, &buf_hdr, 1, sustain);
> +
> +   if (ret == 1)
> +   return 0;
> +   else
> +   return -1;
> +}
> +
>  int odp_queue_enq_multi(odp_queue_t handle, const odp_event_t ev[], int
> num)
>  {
> odp_buff
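
For context, a minimal caller-side sketch of the two public enqueue calls
whose internals the patch unifies; signatures are as shown in the diff, the
helper name is illustrative:

#include <odp_api.h>

/* Illustrative helper: enqueue 'num' events to 'queue' and return the
 * number of events actually enqueued. odp_queue_enq() returns 0 on
 * success; odp_queue_enq_multi() returns how many events it accepted,
 * which may be less than 'num'. */
static int send_events(odp_queue_t queue, odp_event_t ev[], int num)
{
	if (num == 1)
		return odp_queue_enq(queue, ev[0]) == 0 ? 1 : 0;

	return odp_queue_enq_multi(queue, ev, num);
}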

Re: [lng-odp] [PATCH] linux-gen: using ODP instantiation pid as odp instance

2016-09-19 Thread Bill Fischofer
On Mon, Sep 19, 2016 at 10:56 AM, Christophe Milard <
christophe.mil...@linaro.org> wrote:

> Rather than using INSTANCE_ID (constant 0xdeadbeef), the ODP main
> instantiation process ID is used as the instance ID in the linux-generic
> implementation. This is a simple way to guarantee instance uniqueness
> on Linux systems.
>
> Signed-off-by: Christophe Milard 
>

Reviewed-and-tested-by: Bill Fischofer 


> ---
>  platform/linux-generic/include/odp_internal.h | 1 -
>  platform/linux-generic/odp_init.c | 7 +++
>  platform/linux-generic/odp_traffic_mngr.c | 3 ++-
>  3 files changed, 5 insertions(+), 6 deletions(-)
>
> diff --git a/platform/linux-generic/include/odp_internal.h
> b/platform/linux-generic/include/odp_internal.h
> index 8bad450..3429781 100644
> --- a/platform/linux-generic/include/odp_internal.h
> +++ b/platform/linux-generic/include/odp_internal.h
> @@ -25,7 +25,6 @@ extern "C" {
>
>  extern __thread int __odp_errno;
>
> -#define INSTANCE_ID0xdeadbeef
>  #define MAX_CPU_NUMBER 128
>
>  typedef struct {
> diff --git a/platform/linux-generic/odp_init.c
> b/platform/linux-generic/odp_init.c
> index f534759..77f4f8a 100644
> --- a/platform/linux-generic/odp_init.c
> +++ b/platform/linux-generic/odp_init.c
> @@ -116,8 +116,7 @@ int odp_init_global(odp_instance_t *instance,
> goto init_failed;
> }
>
> -   /* Dummy support for single instance */
> -   *instance = INSTANCE_ID;
> +   *instance = (odp_instance_t)odp_global_data.main_pid;
>
> return 0;
>
> @@ -128,7 +127,7 @@ init_failed:
>
>  int odp_term_global(odp_instance_t instance)
>  {
> -   if (instance != INSTANCE_ID) {
> +   if (instance != (odp_instance_t)odp_global_data.main_pid) {
> ODP_ERR("Bad instance.\n");
> return -1;
> }
> @@ -250,7 +249,7 @@ int odp_init_local(odp_instance_t instance,
> odp_thread_type_t thr_type)
>  {
> enum init_stage stage = NO_INIT;
>
> -   if (instance != INSTANCE_ID) {
> +   if (instance != (odp_instance_t)odp_global_data.main_pid) {
> ODP_ERR("Bad instance.\n");
> goto init_fail;
> }
> diff --git a/platform/linux-generic/odp_traffic_mngr.c
> b/platform/linux-generic/odp_traffic_mngr.c
> index 4fe07ef..85228cd 100644
> --- a/platform/linux-generic/odp_traffic_mngr.c
> +++ b/platform/linux-generic/odp_traffic_mngr.c
> @@ -2317,7 +2317,8 @@ static void *tm_system_thread(void *arg)
> uint32_t destroying, work_queue_cnt, timer_cnt;
> int rc;
>
> -   rc = odp_init_local(INSTANCE_ID, ODP_THREAD_WORKER);
> +   rc = odp_init_local((odp_instance_t)odp_global_data.main_pid,
> +   ODP_THREAD_WORKER);
> ODP_ASSERT(rc == 0);
> tm_group = arg;
>
> --
> 2.7.4
>
>


Re: [lng-odp] [PATCH v2 1/2] test: perf: add new scheduling latency test

2016-09-19 Thread Bill Fischofer
On Mon, Sep 19, 2016 at 2:11 PM, Brian Brooks 
wrote:

> On 09/19 07:55:22, Elo, Matias (Nokia - FI/Espoo) wrote:
> > >
> > > On 09/14 11:53:06, Matias Elo wrote:
> > > > +
> > > > + /* Clear possible locally stored buffers */
> > > > + odp_schedule_pause();
> > > > +
> > > > + while (1) {
> > > > + ev = odp_schedule(&src_queue, ODP_SCHED_NO_WAIT);
> > > > +
> > > > + if (ev == ODP_EVENT_INVALID)
> > > > + break;
> > > > +
> > > > + if (odp_queue_enq(src_queue, ev)) {
> > > > + LOG_ERR("[%i] Queue enqueue failed.\n", thr);
> > > > + odp_event_free(ev);
> > > > + return -1;
> > > > + }
> > > > + }
> > > > +
> > > > + odp_schedule_resume();
> > >
> > > Is it possible to skip this and go straight to draining the queues?
> > >
> > > Locally pre-scheduled work is an implementation detail that should be
> > > hidden by the scheduling APIs.
> > >
> > > A hardware scheduler may not pre-schedule work to cores the way the
> > > current software implementation does.
> >
> > Also some HW schedulers may operate in push mode and do local caching.
> > Calling odp_schedule_pause() is the only ODP method to signal the
> > scheduler to stop this. So to keep the application platform agnostic
> > (and follow the API documentation), this step cannot be skipped.
> >
> > -Matias
>
> Thinking in the general sense..
>
> Should applications have to reason about _and_ code around pre-scheduled
> and non-scheduled events? If the event hasn't crossed the API boundary to be
> delivered to the application according to the scheduling group policies for
> that core, what is the difference to the application?
>
> If a scheduler implementation uses TLS to pre-schedule events it also seems
> like it should be able to support work-stealing of those pre-scheduled events
> by other threads in the runtime case where odp_schedule() is not called from
> that thread or the thread id is removed from scheduling group masks. From
> the application perspective these are all implementation details.
>

You're making an argument I made some time back. :)  As I recall, the
rationale for pause/resume was to make life easier for existing code that
is introducing ODP on a more gradual basis. Presumably Nokia has examples
of such code in house.

From a design standpoint, worker threads shouldn't "change their minds" and
go off to do something else for a while. Anything else they might need to do
would seem better served by a separate thread that wakes up periodically to
do it.


>
> This pause state may also cause some confusion for application writers
> because it is now possible to write two different event loops for the same
> core depending on how a particular scheduler implementation behaves. The
> semantics seem to blur a bit with scheduling groups. Level of abstraction
> can be raised by deprecating the scheduler pause state and APIs.
>

This is a worthwhile discussion to have. I'll add it to the agenda for
tomorrow's ODP call and we can include it in the wider scheduler
discussions scheduled for next week. The other rationale for not wanting
this behavior (another argument I advanced earlier) is that it greatly
complicates recovery processing. A robustly designed application should be
able to recover from the failure of an individual thread (this is
especially true if the ODP thread is in fact a separate process). If the
implementation has prescheduled events to a failed thread then how are they
recovered gracefully? Conversely, if the implementation can recover from
such a scenario then it would seem it could equally "unschedule" prestaged
events as needed due to thread termination (normal or abnormal) or for load
balancing purposes.

We may not be able to fully deprecate these APIs, but perhaps we can make
it clearer how they are intended to be used and classify them as
"discouraged" for new code.


>
> > > The ODP implementation for that environment would have to turn the
> > > scheduling call into a nop for that core if it is paused by use of these
> > > APIs. Another way to implement it would be to remove this core from all
> > > queue scheduling groups and leave the schedule call as-is. If implemented
> > > by the first method, the application writer could simply just not call
> > > the API to schedule work. If implemented by the second method, there are
> > > already scheduling group APIs to do this.
> >
> > The ODP implementation is free to choose how it implements these calls.
> > For example, adding a single 'if (odp_unlikely(x))' to odp_schedule() to
> > make it a NOP after odp_schedule_pause() has been called shouldn't cause
> > significant overhead.
> >
> > >
> > > Are odp_schedule_pause() and odp_schedule_resume() deprecated?
> >
> > Nope.
> >
> > >
> > > > + odp_barrier_wait(&globals->barrier);
> > > > +
> > > > + clear_sched_queues();

Re: [lng-odp] [PATCH v2 1/2] test: perf: add new scheduling latency test

2016-09-19 Thread Brian Brooks
On 09/19 07:55:22, Elo, Matias (Nokia - FI/Espoo) wrote:
> > 
> > On 09/14 11:53:06, Matias Elo wrote:
> > > +
> > > + /* Clear possible locally stored buffers */
> > > + odp_schedule_pause();
> > > +
> > > + while (1) {
> > > + ev = odp_schedule(&src_queue, ODP_SCHED_NO_WAIT);
> > > +
> > > + if (ev == ODP_EVENT_INVALID)
> > > + break;
> > > +
> > > + if (odp_queue_enq(src_queue, ev)) {
> > > + LOG_ERR("[%i] Queue enqueue failed.\n", thr);
> > > + odp_event_free(ev);
> > > + return -1;
> > > + }
> > > + }
> > > +
> > > + odp_schedule_resume();
> > 
> > Is it possible to skip this and go straight to draining the queues?
> > 
> > Locally pre-scheduled work is an implementation detail that should be hidden
> > by the scheduling APIs.
> > 
> > A hardware scheduler may not pre-schedule work to cores the way the current
> > software implementation does.
> 
> Also some HW schedulers may operate in push mode and do local caching.
> Calling odp_schedule_pause() is the only ODP method to signal the scheduler
> to stop this. So to keep the application platform agnostic (and follow the
> API documentation), this step cannot be skipped.
> 
> -Matias

Thinking in the general sense..

Should applications have to reason about _and_ code around pre-scheduled
and non-scheduled events? If the event hasn't crossed the API boundary to be
delivered to the application according to the scheduling group policies for
that core, what is the difference to the application?

If a scheduler implementation uses TLS to pre-schedule events it also seems
like it should be able to support work-stealing of those pre-scheduled events
by other threads in the runtime case where odp_schedule() is not called from
that thread or the thread id is removed from scheduling group masks. From
the application perspective these are all implementation details.

This pause state may also cause some confusion for application writers because
it is now possible to write two different event loops for the same core
depending on how a particular scheduler implementation behaves. The semantics
seem to blur a bit with scheduling groups. Level of abstraction can be raised
by deprecating the scheduler pause state and APIs.

> > The ODP implementation for that environment
> > would have to turn the scheduling call into a nop for that core if it is
> > paused by use of these APIs. Another way to implement it would be to remove
> > this core from all queue scheduling groups and leave the schedule call as-is.
> > If implemented by the first method, the application writer could simply just
> > not call the API to schedule work. If implemented by the second method, there
> > are already scheduling group APIs to do this.
> 
> The ODP implementation is free to choose how it implements these calls. For
> example, adding a single 'if (odp_unlikely(x))' to odp_schedule() to make it
> a NOP after odp_schedule_pause() has been called shouldn't cause significant
> overhead.
> 
> > 
> > Are odp_schedule_pause() and odp_schedule_resume() deprecated?
> 
> Nope.
> 
> > 
> > > + odp_barrier_wait(&globals->barrier);
> > > +
> > > + clear_sched_queues();


Re: [lng-odp] [PATCH 1/1] validation: classification: fix TCP/UDP checksum update

2016-09-19 Thread Bala Manoharan
Ping.

Regards,
Bala


On 9 September 2016 at 19:49, Balasubramanian Manoharan
 wrote:
> Fixes https://bugs.linaro.org/show_bug.cgi?id=2512
>
> Signed-off-by: Balasubramanian Manoharan 
> ---
>  .../validation/api/classification/odp_classification_common.c  | 10 
> +-
>  1 file changed, 9 insertions(+), 1 deletion(-)
>
> diff --git 
> a/test/common_plat/validation/api/classification/odp_classification_common.c 
> b/test/common_plat/validation/api/classification/odp_classification_common.c
> index 7a42ac7..93ac0c0 100644
> --- 
> a/test/common_plat/validation/api/classification/odp_classification_common.c
> +++ 
> b/test/common_plat/validation/api/classification/odp_classification_common.c
> @@ -11,6 +11,7 @@
>  #include 
>  #include 
>  #include 
> +#include "test_debug.h"
>
>  typedef struct cls_test_packet {
> odp_u32be_t magic;
> @@ -291,6 +292,8 @@ odp_packet_t create_packet_len(odp_pool_t pool, bool vlan,
> parse_ipv4_string(CLS_DEFAULT_SADDR, &addr, &mask);
> ip->src_addr = odp_cpu_to_be_32(addr);
> ip->ver_ihl = ODPH_IPV4 << 4 | ODPH_IPV4HDR_IHL_MIN;
> +   odp_packet_has_ipv4_set(pkt, 1);
> +
> if (flag_udp)
> ip->tot_len = odp_cpu_to_be_16(ODPH_UDPHDR_LEN + payload_len +
>ODPH_IPV4HDR_LEN);
> @@ -318,14 +321,19 @@ odp_packet_t create_packet_len(odp_pool_t pool, bool 
> vlan,
> udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
> udp->length = odp_cpu_to_be_16(payload_len + ODPH_UDPHDR_LEN);
> udp->chksum = 0;
> +   odp_packet_has_udp_set(pkt, 1);
> +   if (odph_udp_tcp_chksum(pkt, ODPH_CHKSUM_GENERATE, NULL) != 0)
> +   LOG_ERR("odph_udp_tcp_chksum failed\n");
> } else {
> odp_packet_l4_offset_set(pkt, offset);
> tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
> tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
> tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
> tcp->hl = ODPH_TCPHDR_LEN / 4;
> -   /* TODO: checksum field has to be updated */
> tcp->cksm = 0;
> +   odp_packet_has_tcp_set(pkt, 1);
> +   if (odph_udp_tcp_chksum(pkt, ODPH_CHKSUM_GENERATE, NULL) != 0)
> +   LOG_ERR("odph_udp_tcp_chksum failed\n");
> }
>
> /* set pkt sequence number */
> --
> 1.9.1
>


Re: [lng-odp] [API-NEXT] api: pktio: updates classification configuration documentation.

2016-09-19 Thread Bala Manoharan
Ping

Regards,
Bala


On 15 September 2016 at 11:50, Balasubramanian Manoharan
 wrote:
> From: Balasubramanian Manoharan 
>
> Updates documentation for default CoS and pktin queue param configuration.
>
> Signed-off-by: Balasubramanian Manoharan 
> ---
>  include/odp/api/spec/packet_io.h | 17 ++---
>  1 file changed, 10 insertions(+), 7 deletions(-)
>
> diff --git a/include/odp/api/spec/packet_io.h 
> b/include/odp/api/spec/packet_io.h
> index d46e405..9542c66 100644
> --- a/include/odp/api/spec/packet_io.h
> +++ b/include/odp/api/spec/packet_io.h
> @@ -189,12 +189,11 @@ typedef struct odp_pktin_queue_param_t {
>
> /** Number of input queues to be created
>   *
> - * When classifier is enabled the number of queues may be zero
> - * (in odp_pktin_queue_config() step), otherwise at least one
> - * queue is required. More than one input queues require either flow
> - * hashing or classifier enabled. The maximum value is defined by
> - * pktio capability 'max_input_queues'. Queue type is defined by the
> - * input mode. The default value is 1. */
> + * When classifier is enabled (in odp_pktin_queue_config() step) this
> + * value is ignored, otherwise at least one queue is required.
> + * More than one input queues require flow hashing configured.
> + * The maximum value is defined by pktio capability 
> 'max_input_queues'.
> + * Queue type is defined by the input mode. The default value is 1. 
> */
> unsigned num_queues;
>
> /** Queue parameters
> @@ -202,7 +201,9 @@ typedef struct odp_pktin_queue_param_t {
>   * These are used for input queue creation in ODP_PKTIN_MODE_QUEUE
>   * or ODP_PKTIN_MODE_SCHED modes. Scheduler parameters are considered
>   * only in ODP_PKTIN_MODE_SCHED mode. Default values are defined in
> - * odp_queue_param_t documentation. */
> + * odp_queue_param_t documentation.
> + * when classifier is enabled (in odp_pktin_queue_config() step) this
> + * value is ignored. */
> odp_queue_param_t queue_param;
>
>  } odp_pktin_queue_param_t;
> @@ -887,6 +888,8 @@ int odp_pktio_mac_addr(odp_pktio_t pktio, void *mac_addr, 
> int size);
>   *
>   * @retval 0 on success
>   * @retval <0 on failure
> + *
> + * @note The default_cos has to be unique per odp_pktio_t instance.
>   */
>  int odp_pktio_default_cos_set(odp_pktio_t pktio, odp_cos_t default_cos);
>
> --
> 1.9.1
>
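
For illustration, a rough sketch (not part of the patch) of the configuration
flow the updated documentation describes: input queues are configured first,
then a default CoS is created and attached to the pktio. The helper name is
illustrative and the pool and queue are assumed to be created elsewhere:

/* Illustrative only: attach a default CoS to a pktio after configuring
 * its input queues. Error handling elided. */
static odp_cos_t setup_default_cos(odp_pktio_t pktio, odp_pool_t pool,
				   odp_queue_t cos_queue)
{
	odp_pktin_queue_param_t pktin_param;
	odp_cls_cos_param_t cos_param;
	odp_cos_t default_cos;

	odp_pktin_queue_param_init(&pktin_param);
	pktin_param.num_queues = 1;	/* ignored once the classifier is enabled */
	odp_pktin_queue_config(pktio, &pktin_param);

	odp_cls_cos_param_init(&cos_param);
	cos_param.pool  = pool;		/* pool for packets matching this CoS */
	cos_param.queue = cos_queue;	/* destination queue for this CoS */
	default_cos = odp_cls_cos_create("default-cos", &cos_param);

	/* As the patch notes: one default CoS per pktio instance */
	odp_pktio_default_cos_set(pktio, default_cos);

	return default_cos;
}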


[lng-odp] [PATCH] linux-gen: using ODP instantiation pid as odp instance

2016-09-19 Thread Christophe Milard
Rather than using INSTANCE_ID (constant 0xdeadbeef), the ODP main
instantiation process ID is used as the instance ID in the linux-generic
implementation. This is a simple way to guarantee instance uniqueness
on Linux systems.

Signed-off-by: Christophe Milard 
---
 platform/linux-generic/include/odp_internal.h | 1 -
 platform/linux-generic/odp_init.c | 7 +++
 platform/linux-generic/odp_traffic_mngr.c | 3 ++-
 3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/platform/linux-generic/include/odp_internal.h 
b/platform/linux-generic/include/odp_internal.h
index 8bad450..3429781 100644
--- a/platform/linux-generic/include/odp_internal.h
+++ b/platform/linux-generic/include/odp_internal.h
@@ -25,7 +25,6 @@ extern "C" {
 
 extern __thread int __odp_errno;
 
-#define INSTANCE_ID0xdeadbeef
 #define MAX_CPU_NUMBER 128
 
 typedef struct {
diff --git a/platform/linux-generic/odp_init.c 
b/platform/linux-generic/odp_init.c
index f534759..77f4f8a 100644
--- a/platform/linux-generic/odp_init.c
+++ b/platform/linux-generic/odp_init.c
@@ -116,8 +116,7 @@ int odp_init_global(odp_instance_t *instance,
goto init_failed;
}
 
-   /* Dummy support for single instance */
-   *instance = INSTANCE_ID;
+   *instance = (odp_instance_t)odp_global_data.main_pid;
 
return 0;
 
@@ -128,7 +127,7 @@ init_failed:
 
 int odp_term_global(odp_instance_t instance)
 {
-   if (instance != INSTANCE_ID) {
+   if (instance != (odp_instance_t)odp_global_data.main_pid) {
ODP_ERR("Bad instance.\n");
return -1;
}
@@ -250,7 +249,7 @@ int odp_init_local(odp_instance_t instance, 
odp_thread_type_t thr_type)
 {
enum init_stage stage = NO_INIT;
 
-   if (instance != INSTANCE_ID) {
+   if (instance != (odp_instance_t)odp_global_data.main_pid) {
ODP_ERR("Bad instance.\n");
goto init_fail;
}
diff --git a/platform/linux-generic/odp_traffic_mngr.c 
b/platform/linux-generic/odp_traffic_mngr.c
index 4fe07ef..85228cd 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -2317,7 +2317,8 @@ static void *tm_system_thread(void *arg)
uint32_t destroying, work_queue_cnt, timer_cnt;
int rc;
 
-   rc = odp_init_local(INSTANCE_ID, ODP_THREAD_WORKER);
+   rc = odp_init_local((odp_instance_t)odp_global_data.main_pid,
+   ODP_THREAD_WORKER);
ODP_ASSERT(rc == 0);
tm_group = arg;
 
-- 
2.7.4
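
For context, a minimal application-side sketch (error handling elided) showing
how the instance handle written by odp_init_global(), now the instantiating
process ID, is passed on to odp_init_local() and odp_term_global():

#include <odp_api.h>

int main(void)
{
	odp_instance_t instance;

	/* With this patch, the handle is the PID of the instantiating process */
	if (odp_init_global(&instance, NULL, NULL))
		return -1;

	/* Every thread of the instance must pass the same handle back */
	if (odp_init_local(instance, ODP_THREAD_CONTROL))
		return -1;

	/* ... worker threads each call
	 *     odp_init_local(instance, ODP_THREAD_WORKER); ... */

	odp_term_local();
	odp_term_global(instance);
	return 0;
}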



Re: [lng-odp] [PATCH v2] linux-gen: build: de-couple abi compatibility from shared lib

2016-09-19 Thread Mike Holmes
On 19 September 2016 at 10:20, Bill Fischofer 
wrote:

> On Mon, Sep 19, 2016 at 2:50 AM, Savolainen, Petri (Nokia - FI/Espoo) <
> petri.savolai...@nokia-bell-labs.com> wrote:
>
> > Ping. V1 was reviewed 6 days ago. Changes from v1 to v2 are trivial.
> >
>
> I thought my review would be propagated to v2.
>

I think we should make it policy that we always add the Reviewed-by again
to avoid any ambiguity.

It is possible that a v2 breaks something new, and if Maxim carried the
review forward as a paperwork exercise only, the onus would be on him to
re-review it to be sure it still works.


>
>
> >
> > -Petri
> >
> >
> > > -Original Message-
> > > From: lng-odp [mailto:lng-odp-boun...@lists.linaro.org] On Behalf Of
> > Petri
> > > Savolainen
> > > Sent: Wednesday, September 14, 2016 3:11 PM
> > > To: lng-odp@lists.linaro.org
> > > Subject: Suspected SPAM - [lng-odp] [PATCH v2] linux-gen: build:
> > de-couple
> > > abi compatibility from shared lib
> > >
> > > Building an ABI compatible library and building a shared library are
> > > two different targets. A shared library may also be used without ABI
> > > compatibility. A new --enable-abi-compat configuration option
> > > is introduced. By default libraries are not built in ABI compat
> > > mode to enable function inlining. There is a noticeable
> > > performance difference when e.g. odp_atomic_xxx calls
> > > are not inlined.
> > >
> > > Signed-off-by: Petri Savolainen 
> >
>
> Reviewed-by: Bill Fischofer 
>
>
> > > ---
> > >
> > > v2:
> > >   * ABI compat enabled by default
> > >   * print static/shared/abi_compat selection in config results
> > >   * added missing header file include guards
> > >
> >
>



-- 
Mike Holmes
Program Manager - Linaro Networking Group
Linaro.org │ Open source software for ARM SoCs
"Work should be fun and collaborative, the rest follows"


Re: [lng-odp] [PATCH v2] linux-gen: build: de-couple abi compatibility from shared lib

2016-09-19 Thread Bill Fischofer
On Mon, Sep 19, 2016 at 2:50 AM, Savolainen, Petri (Nokia - FI/Espoo) <
petri.savolai...@nokia-bell-labs.com> wrote:

> Ping. V1 was reviewed 6 days ago. Changes from v1 to v2 are trivial.
>

I thought my review would be propagated to v2.


>
> -Petri
>
>
> > -Original Message-
> > From: lng-odp [mailto:lng-odp-boun...@lists.linaro.org] On Behalf Of
> Petri
> > Savolainen
> > Sent: Wednesday, September 14, 2016 3:11 PM
> > To: lng-odp@lists.linaro.org
> > Subject: Suspected SPAM - [lng-odp] [PATCH v2] linux-gen: build:
> de-couple
> > abi compatibility from shared lib
> >
> > Building an ABI compatible library and building a shared library are
> > two different targets. A shared library may also be used without ABI
> > compatibility. A new --enable-abi-compat configuration option
> > is introduced. By default libraries are not built in ABI compat
> > mode to enable function inlining. There is a noticeable
> > performance difference when e.g. odp_atomic_xxx calls
> > are not inlined.
> >
> > Signed-off-by: Petri Savolainen 
>

Reviewed-by: Bill Fischofer 


> > ---
> >
> > v2:
> >   * ABI compat enabled by default
> >   * print static/shared/abi_compat selection in config results
> >   * added missing header file include guards
> >
>


Re: [lng-odp] [PATCH v2 1/2] test: perf: add new scheduling latency test

2016-09-19 Thread Elo, Matias (Nokia - FI/Espoo)


> -Original Message-
> From: Brian Brooks [mailto:brian.bro...@linaro.org]
> Sent: Saturday, September 17, 2016 1:05 AM
> To: Elo, Matias (Nokia - FI/Espoo) 
> Cc: lng-odp@lists.linaro.org
> Subject: Re: [lng-odp] [PATCH v2 1/2] test: perf: add new scheduling latency 
> test
> 
> On 09/14 11:53:06, Matias Elo wrote:
> > +
> > +   /* Clear possible locally stored buffers */
> > +   odp_schedule_pause();
> > +
> > +   while (1) {
> > +   ev = odp_schedule(&src_queue, ODP_SCHED_NO_WAIT);
> > +
> > +   if (ev == ODP_EVENT_INVALID)
> > +   break;
> > +
> > +   if (odp_queue_enq(src_queue, ev)) {
> > +   LOG_ERR("[%i] Queue enqueue failed.\n", thr);
> > +   odp_event_free(ev);
> > +   return -1;
> > +   }
> > +   }
> > +
> > +   odp_schedule_resume();
> 
> Is it possible to skip this and go straight to draining the queues?
> 
> Locally pre-scheduled work is an implementation detail that should be hidden
> by the scheduling APIs.
> 
> A hardware scheduler may not pre-schedule work to cores the way the current
> software implementation does.

Also some HW schedulers may operate in push mode and do local caching. Calling
odp_schedule_pause() is the only ODP method to signal the scheduler to stop
this. So to keep the application platform agnostic (and follow the API
documentation), this step cannot be skipped.

-Matias

> The ODP implementation for that environment
> would have to turn the scheduling call into a nop for that core if it is
> paused by use of these APIs. Another way to implement it would be to remove
> this core from all queue scheduling groups and leave the schedule call as-is.
> If implemented by the first method, the application writer could simply just
> not call the API to schedule work. If implemented by the second method, there
> are already scheduling group APIs to do this.

The ODP implementation is free to choose how it implements these calls. For
example, adding a single 'if (odp_unlikely(x))' to odp_schedule() to make it
a NOP after odp_schedule_pause() has been called shouldn't cause significant
overhead.
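
A minimal illustration of that approach, assuming an implementation that does
no local event staging; sched_local and do_schedule() are illustrative names,
not the actual linux-generic internals:

/* Illustrative only: not the actual linux-generic scheduler code. */
static __thread struct {
	int paused;		/* set by odp_schedule_pause() */
} sched_local;

static odp_event_t do_schedule(odp_queue_t *from, uint64_t wait);

odp_event_t odp_schedule(odp_queue_t *from, uint64_t wait)
{
	/* With no locally staged events to drain, scheduling can simply be
	 * short-circuited for a paused thread. */
	if (odp_unlikely(sched_local.paused))
		return ODP_EVENT_INVALID;

	return do_schedule(from, wait);	/* normal scheduling path */
}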

> 
> Are odp_schedule_pause() and odp_schedule_resume() deprecated?

Nope.

> 
> > +   odp_barrier_wait(&globals->barrier);
> > +
> > +   clear_sched_queues();


Re: [lng-odp] [PATCH v2] linux-gen: build: de-couple abi compatibility from shared lib

2016-09-19 Thread Savolainen, Petri (Nokia - FI/Espoo)
Ping. V1 was reviewed 6 days ago. Changes from v1 to v2 are trivial.

-Petri


> -Original Message-
> From: lng-odp [mailto:lng-odp-boun...@lists.linaro.org] On Behalf Of Petri
> Savolainen
> Sent: Wednesday, September 14, 2016 3:11 PM
> To: lng-odp@lists.linaro.org
> Subject: Suspected SPAM - [lng-odp] [PATCH v2] linux-gen: build: de-couple
> abi compatibility from shared lib
> 
> Building an ABI compatible library and building a shared library are
> two different targets. A shared library may also be used without ABI
> compatibility. A new --enable-abi-compat configuration option
> is introduced. By default libraries are not built in ABI compat
> mode to enable function inlining. There is a noticeable
> performance difference when e.g. odp_atomic_xxx calls
> are not inlined.
> 
> Signed-off-by: Petri Savolainen 
> ---
> 
> v2:
>   * ABI compat enabled by default
>   * print static/shared/abi_compat selection in config results
>   * added missing header file include guards
>
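
A generic illustration (not the actual ODP headers) of the trade-off described
in the commit message: with ABI compatibility enabled, small hot functions are
opaque calls into the shared library; with it disabled, they can be inlined
from headers:

/* counter.h -- illustrative only, names are not from ODP */
#include <stdint.h>

typedef struct {
	uint32_t v;
} counter_t;

#ifdef LIB_ABI_COMPAT
/* ABI-compatible build: an opaque library call; the implementation can
 * change without recompiling applications. */
uint32_t counter_load(const counter_t *c);
#else
/* Inlined build: the compiler can fold the load into the caller, which
 * is the effect described for odp_atomic_xxx calls above. */
static inline uint32_t counter_load(const counter_t *c)
{
	return __atomic_load_n(&c->v, __ATOMIC_RELAXED);
}
#endif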