On 5/23/2019 3:23 PM, David Marchand wrote:
No need for a latch here since we don't have to wait.
A simple boolean flag is enough.

The memory ordering on the 'reload' flag is changed to release-acquire
so that it serves as a synchronisation point between the pmd threads
and the control thread that asks for termination.
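
For reference, the pairing follows the standard C11 release/acquire
idiom. Here is a minimal sketch in plain stdatomic terms (the OVS
atomic_store_explicit()/atomic_read_explicit() wrappers map onto these;
the names below are illustrative, not the actual dpif-netdev symbols):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool reload;          /* Mirrors pmd->reload. */
    static atomic_bool exit_requested;  /* Mirrors pmd->exit. */

    /* Control thread: the relaxed store to 'exit_requested' is
     * published to the pmd thread by the release store to 'reload'. */
    static void
    request_exit(void)
    {
        atomic_store_explicit(&exit_requested, true, memory_order_relaxed);
        atomic_store_explicit(&reload, true, memory_order_release);
    }

    /* PMD thread: once the acquire load observes 'reload' == true, it
     * synchronizes with the release store above, so the earlier relaxed
     * store to 'exit_requested' is guaranteed to be visible too. */
    static bool
    pmd_should_exit(void)
    {
        if (atomic_load_explicit(&reload, memory_order_acquire)) {
            return atomic_load_explicit(&exit_requested,
                                        memory_order_relaxed);
        }
        return false;
    }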

Fixes: e4cfed38b159 ("dpif-netdev: Add poll-mode-device thread.")
Signed-off-by: David Marchand <david.march...@redhat.com>
Acked-by: Eelco Chaudron <echau...@redhat.com>

Hi David, this change looks OK to me; I tested a number of scenarios and did not see any breakage. I'm happy to Ack, but I'll hold off applying it until Ilya has had a chance to look as well, since he's quite familiar with this area of the code base.

Ian

---
  lib/dpif-netdev.c | 14 ++++++--------
  1 file changed, 6 insertions(+), 8 deletions(-)

---
Changelog since RFC v2:
- removed now unused latch.h inclusion

Changelog since RFC v1:
- added memory ordering on 'reload' atomic to serve as a synchronisation
   point between control thread and pmd threads

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 5a6f2ab..19d7f7d 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -48,7 +48,6 @@
 #include "hmapx.h"
 #include "id-pool.h"
 #include "ipf.h"
-#include "latch.h"
 #include "netdev.h"
 #include "netdev-provider.h"
 #include "netdev-vport.h"
@@ -681,10 +680,10 @@ struct dp_netdev_pmd_thread {
     /* Current context of the PMD thread. */
     struct dp_netdev_pmd_thread_ctx ctx;
 
-    struct latch exit_latch;        /* For terminating the pmd thread. */
     struct seq *reload_seq;
     uint64_t last_reload_seq;
     atomic_bool reload;             /* Do we need to reload ports? */
+    atomic_bool exit;               /* For terminating the pmd thread. */
     pthread_t thread;
     unsigned core_id;               /* CPU core id of this pmd thread. */
     int numa_id;                    /* numa node id of this pmd thread. */
@@ -1756,7 +1755,7 @@ dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
 
     ovs_mutex_lock(&pmd->cond_mutex);
     seq_change(pmd->reload_seq);
-    atomic_store_relaxed(&pmd->reload, true);
+    atomic_store_explicit(&pmd->reload, true, memory_order_release);
     ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
     ovs_mutex_unlock(&pmd->cond_mutex);
 }
@@ -5468,7 +5467,7 @@ reload:
                 emc_cache_slow_sweep(&((pmd->flow_cache).emc_cache));
             }
 
-            atomic_read_relaxed(&pmd->reload, &reload);
+            atomic_read_explicit(&pmd->reload, &reload, memory_order_acquire);
             if (reload) {
                 break;
             }
@@ -5479,7 +5478,7 @@ reload:
     ovs_mutex_unlock(&pmd->perf_stats.stats_mutex);
 
     poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
-    exiting = latch_is_set(&pmd->exit_latch);
+    atomic_read_relaxed(&pmd->exit, &exiting);
     /* Signal here to make sure the pmd finishes
      * reloading the updated configuration. */
     dp_netdev_pmd_reload_done(pmd);
@@ -5898,7 +5897,7 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     pmd->n_output_batches = 0;
 
     ovs_refcount_init(&pmd->ref_cnt);
-    latch_init(&pmd->exit_latch);
+    atomic_init(&pmd->exit, false);
     pmd->reload_seq = seq_create();
     pmd->last_reload_seq = seq_read(pmd->reload_seq);
     atomic_init(&pmd->reload, false);
@@ -5945,7 +5944,6 @@ dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
     cmap_destroy(&pmd->classifiers);
     cmap_destroy(&pmd->flow_table);
     ovs_mutex_destroy(&pmd->flow_mutex);
-    latch_destroy(&pmd->exit_latch);
     seq_destroy(pmd->reload_seq);
     xpthread_cond_destroy(&pmd->cond);
     ovs_mutex_destroy(&pmd->cond_mutex);
@@ -5967,7 +5965,7 @@ dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
         pmd_free_static_tx_qid(pmd);
         ovs_mutex_unlock(&dp->non_pmd_mutex);
     } else {
-        latch_set(&pmd->exit_latch);
+        atomic_store_relaxed(&pmd->exit, true);
         dp_netdev_reload_pmd__(pmd);
         xpthread_join(pmd->thread, NULL);
     }
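
One detail worth spelling out about the last hunk: the 'exit' flag can
stay relaxed on both sides because it piggybacks on 'reload'. The
relaxed store to 'exit' in dp_netdev_del_pmd() is sequenced before the
release store to 'reload' inside dp_netdev_reload_pmd__(), so once the
pmd thread's acquire read of 'reload' observes true and the poll loop
breaks, the updated 'exit' value is guaranteed to be visible to the
relaxed read that follows.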

