Re: [ovs-dev] [PATCH 2/4] dpif-netdev: Incremental addition/deletion of PMD threads.

2017-05-29 Thread Ferriter, Cian
> -----Original Message-----
> From: ovs-dev-boun...@openvswitch.org [mailto:ovs-dev-
> boun...@openvswitch.org] On Behalf Of Daniele Di Proietto
> Sent: 10 March 2017 04:13
> To: Ilya Maximets <i.maxim...@samsung.com>
> Cc: d...@openvswitch.org; Heetae Ahn <heetae82@samsung.com>
> Subject: Re: [ovs-dev] [PATCH 2/4] dpif-netdev: Incremental
> addition/deletion of PMD threads.
> 
> 2017-02-21 6:49 GMT-08:00 Ilya Maximets <i.maxim...@samsung.com>:
> > Currently, changing 'pmd-cpu-mask' is a very heavy operation.
> > It requires destroying all the PMD threads and creating them again.
> > After that, all the threads will sleep until ports' redistribution
> > is finished.
> >
> > This patch adds the ability to keep the datapath running while
> > adjusting the number/placement of PMD threads. All unaffected
> > threads will forward traffic without any additional latency.
> >
> > An id-pool is created for static tx queue ids to keep them sequential
> > in a flexible way. The non-PMD thread will always have static_tx_qid = 0,
> > as before.
> >
> > Signed-off-by: Ilya Maximets <i.maxim...@samsung.com>
> 
> Thanks for the patch
> 
> The idea looks good to me.
> 
> I'm still looking at the code, and I have one comment below
> 

Hi Ilya,

While reviewing the RFC Patch 4/4 in this series, I ran checkpatch.py over the 
entire series. The tool returned the warning below for this patch. I don't plan 
on reviewing this patch, but I thought I would send this as an FYI.

# ./checkpatch.py ovs-dev-2-4-dpif-netdev-Incremental-addition-deletion-of-PMD-threads..patch
WARNING: Line length is >79-characters long
#66 FILE: lib/dpif-netdev.c:1088:
/* We need 1 Tx queue for each possible cpu core + 1 for non-PMD threads. */

Lines checked: 272, Warnings: 1, Errors: 0
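
For reference, the flagged line is the new comment in create_dp_netdev().
Wrapping it is enough to satisfy the 79-character limit, e.g. (a trivial
illustration, not part of the posted patch):

    /* We need 1 Tx queue for each possible cpu core + 1 for
     * non-PMD threads. */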


> > ---
> >  lib/dpif-netdev.c | 119 +-
> >  tests/pmd.at  |   2 +-
> >  2 files changed, 91 insertions(+), 30 deletions(-)
> >
> > diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> > index 30907b7..6e575ab 100644
> > --- a/lib/dpif-netdev.c
> > +++ b/lib/dpif-netdev.c
> > @@ -48,6 +48,7 @@
> >  #include "fat-rwlock.h"
> >  #include "flow.h"
> >  #include "hmapx.h"
> > +#include "id-pool.h"
> >  #include "latch.h"
> >  #include "netdev.h"
> >  #include "netdev-vport.h"
> > @@ -241,6 +242,9 @@ struct dp_netdev {
> >
> >  /* Stores all 'struct dp_netdev_pmd_thread's. */
> >  struct cmap poll_threads;
> > +/* id pool for per thread static_tx_qid. */
> > +struct id_pool *tx_qid_pool;
> > +struct ovs_mutex tx_qid_pool_mutex;
> >
> >  /* Protects the access of the 'struct dp_netdev_pmd_thread'
> >   * instance for non-pmd thread. */
> > @@ -514,7 +518,7 @@ struct dp_netdev_pmd_thread {
> >  /* Queue id used by this pmd thread to send packets on all netdevs if
> >   * XPS disabled for this netdev. All static_tx_qid's are unique and less
> >   * than 'cmap_count(dp->poll_threads)'. */
> > -const int static_tx_qid;
> > +uint32_t static_tx_qid;
> >
> >  struct ovs_mutex port_mutex;/* Mutex for 'poll_list' and 'tx_ports'. */
> >  /* List of rx queues to poll. */
> > @@ -594,6 +598,8 @@ static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
> >    unsigned core_id);
> >  static struct dp_netdev_pmd_thread *
> >  dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
> > +static void dp_netdev_del_pmd(struct dp_netdev *dp,
> > +  struct dp_netdev_pmd_thread *pmd);
> >  static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
> >  static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
> >  static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
> > @@ -1077,10 +1083,17 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
> >  atomic_init(&dp->emc_insert_min, DEFAULT_EM_FLOW_INSERT_MIN);
> >
> >  cmap_init(&dp->poll_threads);
> > +
> > +ovs_mutex_init(&dp->tx_qid_pool_mutex);
> > +/* We need 1 Tx queue for each possible cpu core + 1 for non-PMD threads. */
> > +dp->tx_qid_pool = id_pool_create(0, ovs_numa_get_n_cores() + 1);
> > +
> >  ovs_mutex_init_recursive(&dp->non_pmd_mutex);
> >  ovsthread_key_create(&dp->per_pmd_key, NULL);
> >
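
To make the id-pool scheme described in the commit message concrete, here is a
minimal sketch of how a per-thread qid could be taken from and returned to the
shared pool. It assumes OVS's lib/id-pool.h and ovs-thread.h APIs; the helper
names alloc_static_tx_qid() and free_static_tx_qid() are illustrative, not
necessarily what the final patch uses:

    #include "id-pool.h"
    #include "ovs-thread.h"
    #include "util.h"

    /* Take the lowest free qid from the shared pool.  The pool holds
     * ovs_numa_get_n_cores() + 1 ids (one per possible core plus one
     * for the non-PMD thread), so allocation cannot fail. */
    static uint32_t
    alloc_static_tx_qid(struct dp_netdev *dp)
    {
        uint32_t qid;
        bool allocated;

        ovs_mutex_lock(&dp->tx_qid_pool_mutex);
        allocated = id_pool_alloc_id(dp->tx_qid_pool, &qid);
        ovs_mutex_unlock(&dp->tx_qid_pool_mutex);

        ovs_assert(allocated);
        /* The non-PMD thread is created first, so it always gets 0. */
        return qid;
    }

    /* Return a qid so a later thread can reuse it; freed ids are
     * handed out again first, which keeps the ids dense. */
    static void
    free_static_tx_qid(struct dp_netdev *dp, uint32_t qid)
    {
        ovs_mutex_lock(&dp->tx_qid_pool_mutex);
        id_pool_free_id(dp->tx_qid_pool, qid);
        ovs_mutex_unlock(&dp->tx_qid_pool_mutex);
    }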

Re: [ovs-dev] [PATCH 2/4] dpif-netdev: Incremental addition/deletion of PMD threads.

2017-03-09 Thread Daniele Di Proietto
2017-02-21 6:49 GMT-08:00 Ilya Maximets :
> Currently, changing 'pmd-cpu-mask' is a very heavy operation.
> It requires destroying all the PMD threads and creating
> them again. After that, all the threads will sleep until
> ports' redistribution is finished.
>
> This patch adds the ability to keep the datapath running
> while adjusting the number/placement of PMD threads. All
> unaffected threads will forward traffic without any
> additional latency.
>
> An id-pool is created for static tx queue ids to keep them
> sequential in a flexible way. The non-PMD thread will always
> have static_tx_qid = 0, as before.
>
> Signed-off-by: Ilya Maximets 

Thanks for the patch

The idea looks good to me.

I'm still looking at the code, and I have one comment below

> ---
>  lib/dpif-netdev.c | 119 +-
>  tests/pmd.at  |   2 +-
>  2 files changed, 91 insertions(+), 30 deletions(-)
>
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 30907b7..6e575ab 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -48,6 +48,7 @@
>  #include "fat-rwlock.h"
>  #include "flow.h"
>  #include "hmapx.h"
> +#include "id-pool.h"
>  #include "latch.h"
>  #include "netdev.h"
>  #include "netdev-vport.h"
> @@ -241,6 +242,9 @@ struct dp_netdev {
>
>  /* Stores all 'struct dp_netdev_pmd_thread's. */
>  struct cmap poll_threads;
> +/* id pool for per thread static_tx_qid. */
> +struct id_pool *tx_qid_pool;
> +struct ovs_mutex tx_qid_pool_mutex;
>
>  /* Protects the access of the 'struct dp_netdev_pmd_thread'
>   * instance for non-pmd thread. */
> @@ -514,7 +518,7 @@ struct dp_netdev_pmd_thread {
>  /* Queue id used by this pmd thread to send packets on all netdevs if
>   * XPS disabled for this netdev. All static_tx_qid's are unique and less
>   * than 'cmap_count(dp->poll_threads)'. */
> -const int static_tx_qid;
> +uint32_t static_tx_qid;
>
>  struct ovs_mutex port_mutex;/* Mutex for 'poll_list' and 'tx_ports'. */
>  /* List of rx queues to poll. */
> @@ -594,6 +598,8 @@ static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
>unsigned core_id);
>  static struct dp_netdev_pmd_thread *
>  dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
> +static void dp_netdev_del_pmd(struct dp_netdev *dp,
> +  struct dp_netdev_pmd_thread *pmd);
>  static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
>  static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
>  static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
> @@ -1077,10 +1083,17 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
>  atomic_init(&dp->emc_insert_min, DEFAULT_EM_FLOW_INSERT_MIN);
>
>  cmap_init(&dp->poll_threads);
> +
> +ovs_mutex_init(&dp->tx_qid_pool_mutex);
> +/* We need 1 Tx queue for each possible cpu core + 1 for non-PMD threads. */
> +dp->tx_qid_pool = id_pool_create(0, ovs_numa_get_n_cores() + 1);
> +
>  ovs_mutex_init_recursive(&dp->non_pmd_mutex);
>  ovsthread_key_create(&dp->per_pmd_key, NULL);
>
>  ovs_mutex_lock(&dp->port_mutex);
> +/* non-PMD will be created before all other threads and will
> + * allocate static_tx_qid = 0. */
>  dp_netdev_set_nonpmd(dp);
>
>  error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
> @@ -1164,6 +1177,9 @@ dp_netdev_free(struct dp_netdev *dp)
>  dp_netdev_destroy_all_pmds(dp, true);
>  cmap_destroy(&dp->poll_threads);
>
> +ovs_mutex_destroy(&dp->tx_qid_pool_mutex);
> +id_pool_destroy(dp->tx_qid_pool);
> +
>  ovs_mutex_destroy(&dp->non_pmd_mutex);
>  ovsthread_key_delete(dp->per_pmd_key);
>
> @@ -3175,7 +3191,10 @@ reconfigure_pmd_threads(struct dp_netdev *dp)
>  {
>  struct dp_netdev_pmd_thread *pmd;
>  struct ovs_numa_dump *pmd_cores;
> -bool changed = false;
> +struct ovs_numa_info_core *core;
> +struct hmapx to_delete = HMAPX_INITIALIZER(&to_delete);
> +struct hmapx_node *node;
> +int created = 0, deleted = 0;
>
>  /* The pmd threads should be started only if there's a pmd port in the
>   * datapath.  If the user didn't provide any "pmd-cpu-mask", we start
> @@ -3188,45 +3207,62 @@ reconfigure_pmd_threads(struct dp_netdev *dp)
>  pmd_cores = ovs_numa_dump_n_cores_per_numa(NR_PMD_THREADS);
>  }
>
> -/* Check for changed configuration */
> -if (ovs_numa_dump_count(pmd_cores) != cmap_count(&dp->poll_threads) - 1) {
> -changed = true;
> -} else {
> -CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
> -if (pmd->core_id != NON_PMD_CORE_ID
> -&& !ovs_numa_dump_contains_core(pmd_cores,
> -pmd->numa_id,
> -pmd->core_id)) {
> -
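
The quoted hunk is truncated here in the archive; the full change appears in
the original posting below. One detail worth calling out before it: the field
type changes from 'const int static_tx_qid' to 'uint32_t static_tx_qid'
because ids are now allocated and released over a thread's lifetime (and
lib/id-pool.h deals in uint32_t), so the old fixed-at-creation model no longer
fits. Schematically, using the illustrative helpers sketched earlier:

    /* At thread configure time: draw an id from the shared pool. */
    pmd->static_tx_qid = alloc_static_tx_qid(dp);

    /* At thread destroy time: return it for reuse by a new thread. */
    free_static_tx_qid(dp, pmd->static_tx_qid);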

[ovs-dev] [PATCH 2/4] dpif-netdev: Incremental addition/deletion of PMD threads.

2017-02-21 Thread Ilya Maximets
Currently, changing 'pmd-cpu-mask' is a very heavy operation.
It requires destroying all the PMD threads and creating
them again. After that, all the threads will sleep until
ports' redistribution is finished.

This patch adds the ability to keep the datapath running
while adjusting the number/placement of PMD threads. All
unaffected threads will forward traffic without any
additional latency.

An id-pool is created for static tx queue ids to keep them
sequential in a flexible way. The non-PMD thread will always
have static_tx_qid = 0, as before.

Signed-off-by: Ilya Maximets 
---
 lib/dpif-netdev.c | 119 +-
 tests/pmd.at  |   2 +-
 2 files changed, 91 insertions(+), 30 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 30907b7..6e575ab 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -48,6 +48,7 @@
 #include "fat-rwlock.h"
 #include "flow.h"
 #include "hmapx.h"
+#include "id-pool.h"
 #include "latch.h"
 #include "netdev.h"
 #include "netdev-vport.h"
@@ -241,6 +242,9 @@ struct dp_netdev {
 
 /* Stores all 'struct dp_netdev_pmd_thread's. */
 struct cmap poll_threads;
+/* id pool for per thread static_tx_qid. */
+struct id_pool *tx_qid_pool;
+struct ovs_mutex tx_qid_pool_mutex;
 
 /* Protects the access of the 'struct dp_netdev_pmd_thread'
  * instance for non-pmd thread. */
@@ -514,7 +518,7 @@ struct dp_netdev_pmd_thread {
 /* Queue id used by this pmd thread to send packets on all netdevs if
  * XPS disabled for this netdev. All static_tx_qid's are unique and less
  * than 'cmap_count(dp->poll_threads)'. */
-const int static_tx_qid;
+uint32_t static_tx_qid;
 
 struct ovs_mutex port_mutex;/* Mutex for 'poll_list' and 'tx_ports'. */
 /* List of rx queues to poll. */
@@ -594,6 +598,8 @@ static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
   unsigned core_id);
 static struct dp_netdev_pmd_thread *
 dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
+static void dp_netdev_del_pmd(struct dp_netdev *dp,
+  struct dp_netdev_pmd_thread *pmd);
 static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp, bool non_pmd);
 static void dp_netdev_pmd_clear_ports(struct dp_netdev_pmd_thread *pmd);
 static void dp_netdev_add_port_tx_to_pmd(struct dp_netdev_pmd_thread *pmd,
@@ -1077,10 +1083,17 @@ create_dp_netdev(const char *name, const struct dpif_class *class,
 atomic_init(&dp->emc_insert_min, DEFAULT_EM_FLOW_INSERT_MIN);
 
 cmap_init(&dp->poll_threads);
+
+ovs_mutex_init(&dp->tx_qid_pool_mutex);
+/* We need 1 Tx queue for each possible cpu core + 1 for non-PMD threads. */
+dp->tx_qid_pool = id_pool_create(0, ovs_numa_get_n_cores() + 1);
+
 ovs_mutex_init_recursive(&dp->non_pmd_mutex);
 ovsthread_key_create(&dp->per_pmd_key, NULL);
 
 ovs_mutex_lock(&dp->port_mutex);
+/* non-PMD will be created before all other threads and will
+ * allocate static_tx_qid = 0. */
 dp_netdev_set_nonpmd(dp);
 
 error = do_add_port(dp, name, dpif_netdev_port_open_type(dp->class,
@@ -1164,6 +1177,9 @@ dp_netdev_free(struct dp_netdev *dp)
 dp_netdev_destroy_all_pmds(dp, true);
 cmap_destroy(&dp->poll_threads);
 
+ovs_mutex_destroy(&dp->tx_qid_pool_mutex);
+id_pool_destroy(dp->tx_qid_pool);
+
 ovs_mutex_destroy(&dp->non_pmd_mutex);
 ovsthread_key_delete(dp->per_pmd_key);
 
@@ -3175,7 +3191,10 @@ reconfigure_pmd_threads(struct dp_netdev *dp)
 {
 struct dp_netdev_pmd_thread *pmd;
 struct ovs_numa_dump *pmd_cores;
-bool changed = false;
+struct ovs_numa_info_core *core;
+struct hmapx to_delete = HMAPX_INITIALIZER(&to_delete);
+struct hmapx_node *node;
+int created = 0, deleted = 0;
 
 /* The pmd threads should be started only if there's a pmd port in the
  * datapath.  If the user didn't provide any "pmd-cpu-mask", we start
@@ -3188,45 +3207,62 @@ reconfigure_pmd_threads(struct dp_netdev *dp)
 pmd_cores = ovs_numa_dump_n_cores_per_numa(NR_PMD_THREADS);
 }
 
-/* Check for changed configuration */
-if (ovs_numa_dump_count(pmd_cores) != cmap_count(&dp->poll_threads) - 1) {
-changed = true;
-} else {
-CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
-if (pmd->core_id != NON_PMD_CORE_ID
-&& !ovs_numa_dump_contains_core(pmd_cores,
-pmd->numa_id,
-pmd->core_id)) {
-changed = true;
-break;
-}
+/* Check for unwanted pmd threads */
+CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+if (pmd->core_id != NON_PMD_CORE_ID
+&& !ovs_numa_dump_contains_core(pmd_cores,
+pmd->numa_id,
+pmd->core_id)) {
+
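
The hunk above is cut short by the archive, but the shape of the new logic is
clear: rather than restarting every PMD thread whenever 'pmd-cpu-mask'
changes, reconfigure_pmd_threads() now reconciles the set of running threads
against the desired core dump. A condensed sketch of that pattern, assuming
OVS's cmap, hmapx, and ovs-numa APIs (reference counting and rx queue
redistribution elided):

    /* Pass 1: collect threads whose core left the mask.  Deleting
     * inside CMAP_FOR_EACH would disturb the iteration, hence the
     * intermediate hmapx. */
    CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
        if (pmd->core_id != NON_PMD_CORE_ID
            && !ovs_numa_dump_contains_core(pmd_cores, pmd->numa_id,
                                            pmd->core_id)) {
            hmapx_add(&to_delete, pmd);
        }
    }
    HMAPX_FOR_EACH (node, &to_delete) {
        pmd = (struct dp_netdev_pmd_thread *) node->data;
        dp_netdev_del_pmd(dp, pmd);
        deleted++;
    }

    /* Pass 2: start threads only for cores that do not have one yet;
     * threads that survive both passes keep forwarding throughout. */
    FOR_EACH_CORE_ON_DUMP (core, pmd_cores) {
        if (!dp_netdev_get_pmd(dp, core->core_id)) {
            pmd = xzalloc(sizeof *pmd);
            dp_netdev_configure_pmd(pmd, dp, core->core_id, core->numa_id);
            pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
            created++;
        }
    }
    hmapx_destroy(&to_delete);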