No need for a latch here, since nothing ever waits on it: the pmd
thread only polls for termination, so a simple boolean flag is enough.
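
For illustration, a minimal standalone sketch of this pattern, using
C11 <stdatomic.h> directly rather than the ovs-atomic wrappers; the
names are hypothetical, not the actual OVS ones:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct worker {
        atomic_bool exit;   /* Set by the control thread, polled by the
                             * worker thread; nothing ever blocks on it. */
    };

    static void
    worker_init(struct worker *w)
    {
        atomic_init(&w->exit, false);
    }

    static bool
    worker_should_exit(struct worker *w)
    {
        /* A relaxed load is enough here only because publication of
         * 'exit' is ordered by a separate synchronisation point (see
         * the release-acquire sketch below). */
        return atomic_load_explicit(&w->exit, memory_order_relaxed);
    }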

The memory ordering on the 'reload' flag is changed to release-acquire
so that it serves as a synchronisation point between the pmd threads
and the control thread that requests termination.
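
A minimal sketch of that release-acquire pairing, again in plain C11
and with hypothetical names: the control thread stores 'exit' with
relaxed ordering and then stores 'reload' with release ordering; once
the pmd thread's acquire load of 'reload' observes true, the earlier
'exit' store is guaranteed to be visible to it:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct pmd {
        atomic_bool reload;
        atomic_bool exit;
    };

    /* Control thread: ask the pmd thread to stop. */
    static void
    pmd_request_exit(struct pmd *p)
    {
        atomic_store_explicit(&p->exit, true, memory_order_relaxed);
        /* Release store: all writes above happen-before any acquire
         * load that observes this store. */
        atomic_store_explicit(&p->reload, true, memory_order_release);
    }

    /* Pmd thread: polled from the main packet-processing loop. */
    static bool
    pmd_check_reload(struct pmd *p, bool *exiting)
    {
        if (!atomic_load_explicit(&p->reload, memory_order_acquire)) {
            return false;
        }
        /* Ordered by the acquire load above, so relaxed is enough. */
        *exiting = atomic_load_explicit(&p->exit, memory_order_relaxed);
        return true;
    }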

Fixes: e4cfed38b159 ("dpif-netdev: Add poll-mode-device thread.")
Signed-off-by: David Marchand <david.march...@redhat.com>
Acked-by: Eelco Chaudron <echau...@redhat.com>
Acked-by: Ian Stokes <ian.sto...@intel.com>
Acked-by: Ilya Maximets <i.maxim...@samsung.com>
---
Changelog since v1:
- added acks, no change

Changelog since RFC v2:
- removed now unused latch.h inclusion

Changelog since RFC v1:
- added memory ordering on 'reload' atomic to serve as a synchronisation
  point between control thread and pmd threads

---
 lib/dpif-netdev.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index f4b59e4..8eeec63 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -48,7 +48,6 @@
 #include "hmapx.h"
 #include "id-pool.h"
 #include "ipf.h"
-#include "latch.h"
 #include "netdev.h"
 #include "netdev-offload.h"
 #include "netdev-provider.h"
@@ -684,10 +683,10 @@ struct dp_netdev_pmd_thread {
     /* Current context of the PMD thread. */
     struct dp_netdev_pmd_thread_ctx ctx;
 
-    struct latch exit_latch;        /* For terminating the pmd thread. */
     struct seq *reload_seq;
     uint64_t last_reload_seq;
     atomic_bool reload;             /* Do we need to reload ports? */
+    atomic_bool exit;               /* For terminating the pmd thread. */
     pthread_t thread;
     unsigned core_id;               /* CPU core id of this pmd thread. */
     int numa_id;                    /* numa node id of this pmd thread. */
@@ -1761,7 +1760,7 @@ dp_netdev_reload_pmd__(struct dp_netdev_pmd_thread *pmd)
 
     ovs_mutex_lock(&pmd->cond_mutex);
     seq_change(pmd->reload_seq);
-    atomic_store_relaxed(&pmd->reload, true);
+    atomic_store_explicit(&pmd->reload, true, memory_order_release);
     ovs_mutex_cond_wait(&pmd->cond, &pmd->cond_mutex);
     ovs_mutex_unlock(&pmd->cond_mutex);
 }
@@ -5488,7 +5487,7 @@ reload:
                 emc_cache_slow_sweep(&((pmd->flow_cache).emc_cache));
             }
 
-            atomic_read_relaxed(&pmd->reload, &reload);
+            atomic_read_explicit(&pmd->reload, &reload, memory_order_acquire);
             if (reload) {
                 break;
             }
@@ -5509,7 +5508,7 @@ reload:
     ovs_mutex_unlock(&pmd->perf_stats.stats_mutex);
 
     poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
-    exiting = latch_is_set(&pmd->exit_latch);
+    atomic_read_relaxed(&pmd->exit, &exiting);
     /* Signal here to make sure the pmd finishes
      * reloading the updated configuration. */
     dp_netdev_pmd_reload_done(pmd);
@@ -5928,7 +5927,7 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     pmd->n_output_batches = 0;
 
     ovs_refcount_init(&pmd->ref_cnt);
-    latch_init(&pmd->exit_latch);
+    atomic_init(&pmd->exit, false);
     pmd->reload_seq = seq_create();
     pmd->last_reload_seq = seq_read(pmd->reload_seq);
     atomic_init(&pmd->reload, false);
@@ -5975,7 +5974,6 @@ dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd)
     cmap_destroy(&pmd->classifiers);
     cmap_destroy(&pmd->flow_table);
     ovs_mutex_destroy(&pmd->flow_mutex);
-    latch_destroy(&pmd->exit_latch);
     seq_destroy(pmd->reload_seq);
     xpthread_cond_destroy(&pmd->cond);
     ovs_mutex_destroy(&pmd->cond_mutex);
@@ -5997,7 +5995,7 @@ dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
         pmd_free_static_tx_qid(pmd);
         ovs_mutex_unlock(&dp->non_pmd_mutex);
     } else {
-        latch_set(&pmd->exit_latch);
+        atomic_store_relaxed(&pmd->exit, true);
         dp_netdev_reload_pmd__(pmd);
         xpthread_join(pmd->thread, NULL);
     }
-- 
1.8.3.1
