git sha: 477a867094ec824a12b84d1bbea7d549eb3ce8ee
Author: Eelco Chaudron <[email protected]>
Subject: dpif-offload: Move the tc flow dump netdev APIs to dpif-offload.

This commit relocates the tc flow dump functionality from netdev-offload
to dpif-offload, centralizing flow dump handling in the dpif-offload 
layer.

> diff --git a/lib/dpif-offload-provider.h b/lib/dpif-offload-provider.h
> index 4ae221828..d5cfa8d28 100644
> --- a/lib/dpif-offload-provider.h
> +++ b/lib/dpif-offload-provider.h
> @@ -281,6 +281,7 @@ bool dpif_offload_port_mgr_add(struct dpif_offload_port_mgr *,
>  struct dpif_offload_port_mgr_port *dpif_offload_port_mgr_remove(
>      struct dpif_offload_port_mgr *, odp_port_t, bool keep_netdev_ref);
>  void dpif_offload_port_mgr_uninit(struct dpif_offload_port_mgr *);
> +size_t dpif_offload_port_mgr_port_count(struct dpif_offload_port_mgr *);
>  struct dpif_offload_port_mgr_port *dpif_offload_port_mgr_find_by_ifindex(
>      struct dpif_offload_port_mgr *, int ifindex);
>  struct dpif_offload_port_mgr_port *dpif_offload_port_mgr_find_by_netdev(
> @@ -292,6 +293,8 @@ void dpif_offload_port_mgr_traverse_ports(
>      bool (*cb)(struct dpif_offload_port_mgr_port *, void *),
>      void *aux);
> 
> +#define DPIF_OFFLOAD_PORT_MGR_PORT_FOR_EACH(PORT, PORT_MGR) \
> +    CMAP_FOR_EACH (PORT, odp_port_node, &(PORT_MGR)->odp_port_to_port)
> 
>  /* Global functions, called by the dpif layer or offload providers. */
>  void dp_offload_initialize(void);

[ ... ]

> diff --git a/lib/dpif-offload-tc.c b/lib/dpif-offload-tc.c
> index a0e6194a9..c74bfb52e 100644
> --- a/lib/dpif-offload-tc.c
> +++ b/lib/dpif-offload-tc.c

[ ... ]

> +static struct dpif_offload_flow_dump *
> +dpif_offload_tc_flow_dump_create(const struct dpif_offload *offload_,
>                                   bool terse)
>  {
> +    struct dpif_offload_tc *offload = dpif_offload_tc_cast(offload_);
> +    struct dpif_offload_port_mgr_port *port;
> +    struct dpif_offload_tc_flow_dump *dump;
> +    size_t added_port_count = 0;
> +    size_t port_count;
> 
> +    port_count = dpif_offload_port_mgr_port_count(offload->port_mgr);
> 
> +    dump = xmalloc(sizeof *dump +
> +                   (port_count * sizeof(struct netdev_flow_dump)));
> 
> +    dpif_offload_flow_dump_init(&dump->dump, offload_, terse);
> 
> +    DPIF_OFFLOAD_PORT_MGR_PORT_FOR_EACH (port, offload->port_mgr) {
> +        if (added_port_count >= port_count) {
> +            break;
> +        }

Can this check ever be true, given that port_count comes from the same
port manager that is being iterated? If it is meant to guard against
ports being added concurrently between dpif_offload_port_mgr_port_count()
and the CMAP walk, a short comment saying so would help.
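
For example, something along these lines (just a suggestion, untested):

    /* 'port_count' was sampled before this walk started; ports added
     * to the port manager afterwards must not overflow the
     * preallocated netdev_dumps[] array, so stop once it is full. */
    if (added_port_count >= port_count) {
        break;
    }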

> +        if (netdev_offload_tc_flow_dump_create(
> +            port->netdev, &dump->netdev_dumps[added_port_count], terse)) {
> +            continue;
> +        }
> +        dump->netdev_dumps[added_port_count]->port = port->port_no;
> +        added_port_count++;
> +    }

Will this code leak netdev_dumps[] entries if
netdev_offload_tc_flow_dump_create() fails for some ports?
netdev_dump_count gets set to added_port_count, but previously created
dumps won't be cleaned up if the create fails.
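
If the intention on failure is to abort the whole dump rather than skip
that port, the per-netdev dumps created so far (and the dump structure
itself) would have to be released first, roughly like this (untested
sketch that ignores whatever else create has already set up by this
point, e.g. the mutex):

    if (netdev_offload_tc_flow_dump_create(
        port->netdev, &dump->netdev_dumps[added_port_count], terse)) {
        /* Rough sketch: abandon the whole dump and release what was
         * created so far, instead of silently skipping this port. */
        for (size_t i = 0; i < added_port_count; i++) {
            netdev_offload_tc_flow_dump_destroy(dump->netdev_dumps[i]);
        }
        free(dump);
        return NULL;
    }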

[ ... ]

> +static void
> +dpif_offload_tc_advance_provider_dump(
> +    struct dpif_offload_tc_flow_dump_thread *thread)
> +{
> +    struct dpif_offload_tc_flow_dump *dump = thread->dump;
> 
> +    ovs_mutex_lock(&dump->netdev_dump_mutex);
> 
> +    /* If we haven't finished (dumped all providers). */
> +    if (dump->netdev_dump_index < dump->netdev_dump_count) {
> +        /* If we are the first to find that current dump is finished
> +         * advance it. */
> +        if (thread->netdev_dump_index == dump->netdev_dump_index) {
> +            thread->netdev_dump_index = ++dump->netdev_dump_index;
> +            /* Did we just finish the last dump? If so we are done. */
> +            if (dump->netdev_dump_index == dump->netdev_dump_count) {
> +                thread->netdev_dump_done = true;
> +            }
> +        } else {
> +            /* Otherwise, we are behind, catch up. */
> +            thread->netdev_dump_index = dump->netdev_dump_index;
> +        }
> +    } else {
> +        /* Some other thread finished. */
> +        thread->netdev_dump_done = true;
> +    }
> 
> +    ovs_mutex_unlock(&dump->netdev_dump_mutex);
> +}

[ ... ]

> +static int
> +dpif_offload_tc_flow_dump_destroy(struct dpif_offload_flow_dump *dump_)
> +{
> +    struct dpif_offload_tc_flow_dump *dump;
> +    int error = 0;
> 
> +    dump = dpif_offload_tc_flow_dump_cast(dump_);
> +    for (int i = 0; i < dump->netdev_dump_count; i++) {
> +        struct netdev_flow_dump *dump_netdev = dump->netdev_dumps[i];
> +        int rc = netdev_offload_tc_flow_dump_destroy(dump_netdev);
> 
> +        if (rc && !error) {
> +            error = rc;
> +        }
> +    }
> +    ovs_mutex_init(&dump->netdev_dump_mutex);

Should this be ovs_mutex_destroy instead of ovs_mutex_init? The mutex was
initialized in dpif_offload_tc_flow_dump_create and should be destroyed
when cleaning up the dump structure.
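
I.e., presumably just:

    ovs_mutex_destroy(&dump->netdev_dump_mutex);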

[ ... ]