On 11/22/2019 6:10 PM, Ilya Maximets wrote:
> On 20.11.2019 16:28, Eli Britstein wrote:
>> From: Ophir Munk <ophi...@mellanox.com>
>>
>> Flow stats are retrieved by revalidator threads; for dpif-netdev this
>> happens via dpif_netdev_flow_dump_next(). When a flow is fully
>> offloaded, reading the PMD software stats alone yields no updates, so
>> the revalidator falsely ages out and deletes the flow.
>> This commit adds a new function, dpif_netdev_offload_used(), which
>> reads the HW counters during the flow_dump_next() call. It calls
>> netdev_flow_stats_get(), which in turn reads the hardware stats via
>> the DPDK rte_flow_query() API.
>>
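For reference, the rte_flow_query() read that netdev_flow_stats_get() is
described as doing would look roughly like this. A sketch only: the helper
name and the COUNT-action setup are assumptions, not taken from this series.

    /* Sketch: query the COUNT action of an offloaded rte_flow and
     * translate the result into OVS dpif stats.  Assumes the flow was
     * installed with a COUNT action.  Needs <rte_flow.h> and "dpif.h". */
    static int
    query_hw_flow_stats(uint16_t dpdk_port_id, struct rte_flow *rte_flow,
                        struct dpif_flow_stats *stats)
    {
        /* reset=1 makes each query return a delta since the last read,
         * which fits the accumulation done in dpif_netdev_offload_used(). */
        struct rte_flow_query_count query = { .reset = 1 };
        const struct rte_flow_action_count count_conf = { 0 };
        /* Action list must be END-terminated; PMDs iterate until END. */
        const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        if (rte_flow_query(dpdk_port_id, rte_flow, actions, &query, &error)) {
            return -1;
        }
        stats->n_packets = query.hits_set ? query.hits : 0;
        stats->n_bytes = query.bytes_set ? query.bytes : 0;
        return 0;
    }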
>> Signed-off-by: Ophir Munk <ophi...@mellanox.com>
>> Reviewed-by: Oz Shlomo <o...@mellanox.com>
>> Signed-off-by: Eli Britstein <el...@mellanox.com>
>> ---
>>   lib/dpif-netdev.c | 40 +++++++++++++++++++++++++++++++++++++++-
>>   1 file changed, 39 insertions(+), 1 deletion(-)
>>
>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
>> index 4720ba1ab..d9f28cfcd 100644
>> --- a/lib/dpif-netdev.c
>> +++ b/lib/dpif-netdev.c
>> @@ -527,6 +527,7 @@ struct dp_netdev_flow {
>>   
>>       bool dead;
>>       uint32_t mark;               /* Unique flow mark assigned to a flow */
>> +    bool actions_offloaded;      /* True if flow is fully offloaded. */
>>   
>>       /* Statistics. */
>>       struct dp_netdev_flow_stats stats;
>> @@ -2422,6 +2423,7 @@ dp_netdev_flow_offload_put(struct dp_flow_offload_item *offload)
>>                             offload->actions_len, &flow->mega_ufid, &info,
>>                             NULL);
>>       ovs_mutex_unlock(&pmd->dp->port_mutex);
>> +    flow->actions_offloaded = info.actions_offloaded;
>>   
>>       if (ret) {
>>           goto err_free;
>> @@ -3073,7 +3075,7 @@ dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
>>       flow->pmd_id = netdev_flow->pmd_id;
>>       get_dpif_flow_stats(netdev_flow, &flow->stats);
>>   
>> -    flow->attrs.offloaded = false;
>> +    flow->attrs.offloaded = netdev_flow->actions_offloaded;
>>       flow->attrs.dp_layer = "ovs";
> We should probably change the 'dp_layer' for fully offloaded flows.
>
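Makes sense. A minimal sketch of what that could look like (the "in_hw"
string is purely illustrative; this patch does not define a name for the
hardware datapath layer):

    /* Hypothetical: report a distinct dp_layer for fully offloaded
     * flows.  The "in_hw" name is an assumption. */
    flow->attrs.dp_layer = netdev_flow->actions_offloaded ? "in_hw" : "ovs";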
>>   }
>>   
>> @@ -3244,6 +3246,7 @@ dp_netdev_flow_add(struct dp_netdev_pmd_thread *pmd,
>>       flow->dead = false;
>>       flow->batch = NULL;
>>       flow->mark = INVALID_FLOW_MARK;
>> +    flow->actions_offloaded = false;
>>       *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
>>       *CONST_CAST(struct flow *, &flow->flow) = match->flow;
>>       *CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
>> @@ -3598,6 +3601,37 @@ dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
>>       free(thread);
>>   }
>>   
>> +static int
>> +dpif_netdev_offload_used(struct dp_netdev_flow *netdev_flow,
>> +                         struct dp_netdev_pmd_thread *pmd)
>> +{
>> +    int ret;
>> +    struct dp_netdev_port *port;
>> +    struct dpif_flow_stats stats;
>> +
>> +    odp_port_t in_port = netdev_flow->flow.in_port.odp_port;
>> +
>> +    ovs_mutex_lock(&pmd->dp->port_mutex);
>> +    port = dp_netdev_lookup_port(pmd->dp, in_port);
> Use netdev_ports_get() instead.
> See: https://patchwork.ozlabs.org/patch/1199525/
OK
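Something along these lines, as a sketch only (assuming netdev_ports_get()
takes the odp port number plus the dpif class and returns a referenced
netdev; please check the exact prototype in the tree):

    struct netdev *netdev;

    /* Hypothetical: look up the netdev through the offload port map
     * instead of dp_netdev_lookup_port(), so pmd->dp->port_mutex is
     * no longer needed here. */
    netdev = netdev_ports_get(in_port, pmd->dp->class);
    if (!netdev) {
        return -1;
    }
    ret = netdev_flow_stats_get(netdev, &netdev_flow->mega_ufid, &stats);
    netdev_close(netdev);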
>
>> +    if (!port) {
>> +        ovs_mutex_unlock(&pmd->dp->port_mutex);
>> +        return -1;
>> +    }
>> +    /* Get offloaded stats. */
>> +    ret = netdev_flow_stats_get(port->netdev, &netdev_flow->mega_ufid,
>> +                                &stats);
>> +    ovs_mutex_unlock(&pmd->dp->port_mutex);
>> +    if (ret) {
>> +        return -1;
>> +    }
>> +    if (stats.n_packets) {
>> +        atomic_store_relaxed(&netdev_flow->stats.used, pmd->ctx.now / 1000);
>> +        non_atomic_ullong_add(&netdev_flow->stats.packet_count,
>> +                              stats.n_packets);
>> +        non_atomic_ullong_add(&netdev_flow->stats.byte_count,
>> +                              stats.n_bytes);
>> +    }
>> +
>> +    return 0;
>> +}
>> +
>>   static int
>>   dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
>>                              struct dpif_flow *flows, int max_flows)
>> @@ -3638,6 +3672,10 @@ dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
>>                   netdev_flows[n_flows] = CONTAINER_OF(node,
>>                                                        struct dp_netdev_flow,
>>                                                        node);
>> +                /* Read hardware stats in case of hardware offload. */
>> +                if (netdev_flows[n_flows]->actions_offloaded) {
>> +                    dpif_netdev_offload_used(netdev_flows[n_flows], pmd);
>> +                }
>>               }
>>               /* When finishing dumping the current pmd thread, moves to
>>                * the next. */
>>