All accesses to 'pmd->poll_list' should be guarded by 'pmd->port_mutex'. Additionally, fixed inappropriate usage of 'HMAP_FOR_EACH_SAFE' (the hmap is not modified within the loop, so the SAFE variant is unnecessary) and dropped the unneeded local variable 'proc_cycles'.
CC: Nitin Katiyar <nitin.kati...@ericsson.com> Fixes: 5bf84282482a ("Adding support for PMD auto load balancing") Signed-off-by: Ilya Maximets <i.maxim...@samsung.com> --- lib/dpif-netdev.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 4d5bc576a..914b2bb8b 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -3776,9 +3776,12 @@ set_pmd_auto_lb(struct dp_netdev *dp) continue; } + ovs_mutex_lock(&pmd->port_mutex); if (hmap_count(&pmd->poll_list) > 1) { multi_rxq = true; } + ovs_mutex_unlock(&pmd->port_mutex); + if (cnt && multi_rxq) { enable_alb = true; break; @@ -5095,7 +5098,7 @@ pmd_rebalance_dry_run(struct dp_netdev *dp) uint64_t improvement = 0; uint32_t num_pmds; uint32_t *pmd_corelist; - struct rxq_poll *poll, *poll_next; + struct rxq_poll *poll; bool ret; num_pmds = cmap_count(&dp->poll_threads); @@ -5121,13 +5124,14 @@ pmd_rebalance_dry_run(struct dp_netdev *dp) /* Estimate the cycles to cover all intervals. */ total_cycles *= PMD_RXQ_INTERVAL_MAX; - HMAP_FOR_EACH_SAFE (poll, poll_next, node, &pmd->poll_list) { - uint64_t proc_cycles = 0; + ovs_mutex_lock(&pmd->port_mutex); + HMAP_FOR_EACH (poll, node, &pmd->poll_list) { for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) { - proc_cycles += dp_netdev_rxq_get_intrvl_cycles(poll->rxq, i); + total_proc += dp_netdev_rxq_get_intrvl_cycles(poll->rxq, i); } - total_proc += proc_cycles; } + ovs_mutex_unlock(&pmd->port_mutex); + if (total_proc) { curr_pmd_usage[num_pmds] = (total_proc * 100) / total_cycles; } -- 2.17.1 _______________________________________________ dev mailing list d...@openvswitch.org https://mail.openvswitch.org/mailman/listinfo/ovs-dev