git sha: 7feab68b5c2f033aa1e9da80fc5b0f562b7e5526
Author: Eelco Chaudron <[email protected]>
One line subject: dpif-offload: Move the flow_get_n_flows() netdev-offload API to dpif.

This patch moves the flow count APIs from the netdev-offload layer to the
dpif-offload provider.

> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index 79b05bc0d..82df85823 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -4762,11 +4762,8 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
>          [DP_NETDEV_HW_OFFLOADS_STATS_LAT_EMA_STDDEV] =
>              { "  Exponential Latency stddev (us)", 0 },
>      };
> -    struct dp_netdev *dp = get_dp_netdev(dpif);
> -    struct dp_netdev_port *port;
> +
>      unsigned int nb_thread;
> -    uint64_t *port_nb_offloads;
> -    uint64_t *nb_offloads;
>      unsigned int tid;
>      size_t i;
>  
> @@ -4783,29 +4780,11 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
>      stats->size = ARRAY_SIZE(hwol_stats) * (nb_thread + 1);
>      stats->counters = xcalloc(stats->size, sizeof *stats->counters);
>  
> -    nb_offloads = xcalloc(nb_thread, sizeof *nb_offloads);
> -    port_nb_offloads = xcalloc(nb_thread, sizeof *port_nb_offloads);
> -
> -    ovs_rwlock_rdlock(&dp->port_rwlock);
> -    HMAP_FOR_EACH (port, node, &dp->ports) {
> -        memset(port_nb_offloads, 0, nb_thread * sizeof *port_nb_offloads);
> -        /* Do not abort on read error from a port, just report 0. */
> -        if (!netdev_flow_get_n_flows(port->netdev, port_nb_offloads)) {
> -            for (i = 0; i < nb_thread; i++) {
> -                nb_offloads[i] += port_nb_offloads[i];
>              }
> -        }
> -    }
> -    ovs_rwlock_unlock(&dp->port_rwlock);
> -
> -    free(port_nb_offloads);
> -
>      for (tid = 0; tid < nb_thread; tid++) {
>          uint64_t counts[ARRAY_SIZE(hwol_stats)];
>          size_t idx = ((tid + 1) * ARRAY_SIZE(hwol_stats));
>  
>          memset(counts, 0, sizeof counts);
> -        counts[DP_NETDEV_HW_OFFLOADS_STATS_INSERTED] = nb_offloads[tid];
>          if (dp_offload_threads != NULL) {
>              atomic_read_relaxed(&dp_offload_threads[tid].enqueued_item,
>                                  
> &counts[DP_NETDEV_HW_OFFLOADS_STATS_ENQUEUED]);
> @@ -4830,14 +4809,16 @@ dpif_netdev_offload_stats_get(struct dpif *dpif,
>          }
>      }
>  
> -    free(nb_offloads);
> -
>      /* Do an average of the average for the aggregate. */
>      hwol_stats[DP_NETDEV_HW_OFFLOADS_STATS_LAT_CMA_MEAN].total /= nb_thread;
>      hwol_stats[DP_NETDEV_HW_OFFLOADS_STATS_LAT_CMA_STDDEV].total /= 
> nb_thread;
>      hwol_stats[DP_NETDEV_HW_OFFLOADS_STATS_LAT_EMA_MEAN].total /= nb_thread;
>      hwol_stats[DP_NETDEV_HW_OFFLOADS_STATS_LAT_EMA_STDDEV].total /= 
> nb_thread;
>  
> +    /* Get the total offload count. */
> +    hwol_stats[DP_NETDEV_HW_OFFLOADS_STATS_INSERTED].total =
> +        dpif_offload_flow_get_n_offloaded(dpif);
> +

How does the refactoring maintain per-thread flow count breakdowns? The
original code tracked nb_offloads[tid] for each thread, but now
dpif_offload_flow_get_n_offloaded returns only a global total. Would
this lose the per-thread statistics that were previously reported?

[ ... ]

> diff --git a/lib/netdev-offload-dpdk.c b/lib/netdev-offload-dpdk.c
> index 072103596..d1781d8e4 100644
> --- a/lib/netdev-offload-dpdk.c
> +++ b/lib/netdev-offload-dpdk.c
> @@ -2776,24 +2776,24 @@ close_vport_netdev:
>      return ret;
>  }
>  
> -static int
> -netdev_offload_dpdk_get_n_flows(struct netdev *netdev,
> -                                uint64_t *n_flows)
> +uint64_t
> +netdev_offload_dpdk_flow_get_n_offloaded(struct netdev *netdev)
>  {
>      struct netdev_offload_dpdk_data *data;
> +    uint64_t total = 0;
>      unsigned int tid;
>  
>      data = (struct netdev_offload_dpdk_data *)
>          ovsrcu_get(void *, &netdev->hw_info.offload_data);
>      if (!data) {
> -        return -1;
> +        return 0;
>      }
>  
>      for (tid = 0; tid < netdev_offload_thread_nb(); tid++) {
> -        n_flows[tid] = data->rte_flow_counters[tid];
> +        total += data->rte_flow_counters[tid];
>      }
>  
> -    return 0;
> +    return total;
>  }

Can the per-thread counter data be safely accessed here without any
locking? These rte_flow_counters are modified by different threads and
the reads might observe partial updates without atomics or barriers.

[ ... ]

> diff --git a/lib/netdev-offload-tc.c b/lib/netdev-offload-tc.c
> index 8dc6b0c91..553dc734b 100644
> --- a/lib/netdev-offload-tc.c
> +++ b/lib/netdev-offload-tc.c
> @@ -2794,22 +2794,17 @@ netdev_tc_flow_del(struct netdev *netdev OVS_UNUSED,
>      return del_filter_and_ufid_mapping(&id, ufid, stats);
>  }
>  
> -static int
> -netdev_tc_get_n_flows(struct netdev *netdev, uint64_t *n_flows)
> +uint64_t
> +dpif_offload_tc_flow_get_n_offloaded(const struct dpif_offload *offload \
> +                                         OVS_UNUSED)
>  {
> -    struct ufid_tc_data *data;
> -    uint64_t total = 0;
> +    uint64_t total;
>  
>      ovs_mutex_lock(&ufid_lock);
> -    HMAP_FOR_EACH (data, tc_to_ufid_node, &tc_to_ufid) {
> -        if (data->netdev == netdev) {
> -            total++;
> -        }
> -    }
> +    total = hmap_count(&tc_to_ufid);
>      ovs_mutex_unlock(&ufid_lock);
>  
> -    *n_flows = total;
> -    return 0;
> +    return total;
>  }

Why does the new implementation return the global tc_to_ufid count
instead of filtering by netdev like the original? Does this change the
semantics from per-netdev flow counts to global flow counts across all
tc netdevs?
_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev

Reply via email to