Count the cycles used for processing an rxq during the PMD optimization interval. As this is an in-flight counter and PMDs run independently, also store the total cycles used during the last full interval.
Signed-off-by: Kevin Traynor <ktray...@redhat.com> --- lib/dpif-netdev.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c index 7d7abff..3131255 100644 --- a/lib/dpif-netdev.c +++ b/lib/dpif-netdev.c @@ -666,5 +666,6 @@ static void pmd_load_cached_ports(struct dp_netdev_pmd_thread *pmd) OVS_REQUIRES(pmd->port_mutex); static inline void -dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd); +dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, + struct polled_queue *poll_list, int poll_cnt); static void dp_netdev_rxq_set_cyc_curr(struct dp_netdev_rxq *rx, @@ -3137,5 +3138,5 @@ port_reconfigure(struct dp_netdev_port *port) { struct netdev *netdev = port->netdev; - int i, err; + int i, err, last_nrxq; port->need_reconfigure = false; @@ -3146,4 +3147,5 @@ port_reconfigure(struct dp_netdev_port *port) port->rxqs[i].rx = NULL; } + last_nrxq = port->n_rxq; port->n_rxq = 0; @@ -3166,4 +3168,9 @@ port_reconfigure(struct dp_netdev_port *port) for (i = 0; i < netdev_n_rxq(netdev); i++) { port->rxqs[i].port = port; + if (i >= last_nrxq) { + /* Only reset cycle stats for new queues */ + dp_netdev_rxq_set_cyc_curr(&port->rxqs[i], 0); + dp_netdev_rxq_set_cyc_last(&port->rxqs[i], 0); + } err = netdev_rxq_open(netdev, &port->rxqs[i].rx, i); if (err) { @@ -3758,5 +3765,5 @@ reload: dp_netdev_process_rxq_port(pmd, poll_list[i].rxq->rx, poll_list[i].port_no); - cycles_count_intermediate(pmd, NULL, + cycles_count_intermediate(pmd, poll_list[i].rxq, process_packets ? 
PMD_CYCLES_PROCESSING : PMD_CYCLES_IDLE); @@ -3769,5 +3776,5 @@ reload: coverage_try_clear(); - dp_netdev_pmd_try_optimize(pmd); + dp_netdev_pmd_try_optimize(pmd, poll_list, poll_cnt); if (!ovsrcu_try_quiesce()) { emc_cache_slow_sweep(&pmd->flow_cache); @@ -5560,8 +5567,11 @@ dpcls_sort_subtable_vector(struct dpcls *cls) static inline void -dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd) +dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd, + struct polled_queue *poll_list, int poll_cnt) { struct dpcls *cls; long long int now = time_msec(); + int i; + uint64_t rxq_cyc_curr; if (now > pmd->next_optimization) { @@ -5575,4 +5585,11 @@ dp_netdev_pmd_try_optimize(struct dp_netdev_pmd_thread *pmd) ovs_mutex_unlock(&pmd->flow_mutex); } + + /* Get the cycles that were used to process each queue and store. */ + for (i = 0; i < poll_cnt; i++) { + rxq_cyc_curr = dp_netdev_rxq_get_cyc_curr(poll_list[i].rxq); + dp_netdev_rxq_set_cyc_last(poll_list[i].rxq, rxq_cyc_curr); + dp_netdev_rxq_set_cyc_curr(poll_list[i].rxq, 0); + } /* Start new measuring interval */ pmd->next_optimization = now + PMD_OPTIMIZATION_INTERVAL; -- 1.8.3.1 _______________________________________________ dev mailing list d...@openvswitch.org https://mail.openvswitch.org/mailman/listinfo/ovs-dev