Simply replace the rte_smp barrier with an atomic thread fence.
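
For context, the release/acquire pairing this fence relies on looks
roughly like the standalone C sketch below. The names (event_slot,
processed_pkts, publish_event, observe_event) are illustrative stand-ins
and not part of this patch; on DPDK's generic (non-x86 special case)
path, rte_atomic_thread_fence() is a thin wrapper around the
__atomic_thread_fence() builtin used here.

    #include <stddef.h>
    #include <stdint.h>

    static void *event_slot;         /* stands in for bufs[count] */
    static uint64_t processed_pkts;  /* stands in for w->processed_pkts */

    /* Worker side: make the pointer store visible before the counter
     * update, as the patched code does with
     * rte_atomic_thread_fence(__ATOMIC_RELEASE).
     */
    static void
    publish_event(void *ev_ptr)
    {
    	event_slot = ev_ptr;
    	__atomic_thread_fence(__ATOMIC_RELEASE);
    	processed_pkts++;
    }

    /* Observer side: pair the release with an acquire fence so that an
     * observed counter value implies the pointer store is also visible.
     */
    static void *
    observe_event(void)
    {
    	uint64_t cnt = processed_pkts;
    	__atomic_thread_fence(__ATOMIC_ACQUIRE);
    	return cnt != 0 ? event_slot : NULL;
    }
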
Signed-off-by: Phil Yang <[email protected]>
Signed-off-by: Feifei Wang <[email protected]>
Reviewed-by: Ruifeng Wang <[email protected]>
---
app/test-eventdev/test_perf_common.h | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index e7233e5a5..9785dc3e2 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -98,11 +98,11 @@ perf_process_last_stage(struct rte_mempool *const pool,
{
bufs[count++] = ev->event_ptr;
- /* wmb here ensures event_prt is stored before
- * updating the number of processed packets
- * for worker lcores
+ /* release fence here ensures event_ptr is
+ * stored before updating the number of
+ * processed packets for worker lcores
*/
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
w->processed_pkts++;
if (unlikely(count == buf_sz)) {
@@ -122,11 +122,11 @@ perf_process_last_stage_latency(struct rte_mempool *const pool,
bufs[count++] = ev->event_ptr;
- /* wmb here ensures event_prt is stored before
- * updating the number of processed packets
- * for worker lcores
+ /* release fence here ensures event_ptr is
+ * stored before updating the number of
+ * processed packets for worker lcores
*/
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
w->processed_pkts++;
if (unlikely(count == buf_sz)) {
--
2.25.1