On 21 Feb 2023, at 12:15, Simon Horman wrote:

Thanks for the review, Simon; see my responses inline. Will send out a v4 soon.

//Eelco

> On Fri, Feb 03, 2023 at 12:12:12PM +0100, Eelco Chaudron wrote:
>> When the ukey's action set changes, it could caus the flow to use a
>
> nit: s/caus/cause/

Will fix

>> different datapath, for example, when it moves from tc to kernel.
>> This will cause the cached statistics from the previous datapath to be used.
>>
>> This change will reset the cached statistics when a change in
>> datapath is discovered.
>>
>> Signed-off-by: Eelco Chaudron <echau...@redhat.com>
>
> ...
>
>> diff --git a/lib/dpif-provider.h b/lib/dpif-provider.h
>> index 12477a24f..2795ccc81 100644
>> --- a/lib/dpif-provider.h
>> +++ b/lib/dpif-provider.h
>> @@ -338,6 +338,15 @@ struct dpif_class {
>>      int (*offload_stats_get)(struct dpif *dpif,
>>                               struct netdev_custom_stats *stats);
>>
>> +    /* This function will return true if the specific dpif implementation
>> +     * synchronizes the various datapath implementation layers, i.e., the
>> +     * dpif's layer in combination with the underlying netdev offload layers.
>> +     * For example, dpif-netlink does not sync its kernel flows with the tc
>> +     * ones, i.e., only one gets installed. On the other hand, dpif-netdev
>> +     * installs both flows, and internally keeps track of both, and represents
>
> nit: s/and internally/internally/

Will fix

>
>> +     * them as one. */
>> +    bool (*synced_dp_layers)(struct dpif *dpif);
>> +
>
> Both implementations of this callback simply return a boolean,
> without any logic. I don't know if it makes sense, but it occurs
> to me that synced_dp_layers could be a bool rather than a function pointer.

Changed to a bool as you suggested.
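
For v4 this becomes a plain member of struct dpif_class that each
implementation sets at definition time. A rough sketch of the idea (only
the relevant member is shown; the initializer below is illustrative, not
the exact v4 code):

    struct dpif_class {
        /* Type of dpif in this class, e.g. "system", "netdev", etc. */
        const char *type;

        /* True if the dpif implementation synchronizes the various
         * datapath implementation layers, i.e., the dpif layer in
         * combination with the underlying netdev offload layers. */
        bool synced_dp_layers;

        /* ... remaining callbacks unchanged ... */
    };

    /* dpif-netdev installs and tracks both flavors of a flow itself and
     * represents them as one, so it reports its layers as synced;
     * dpif-netlink would set this to false. */
    const struct dpif_class dpif_netdev_class = {
        .type = "netdev",
        .synced_dp_layers = true,
        /* ... */
    };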

> ...
>
>> diff --git a/lib/dpif.h b/lib/dpif.h
>> index 6cb4dae6d..129cbf6a1 100644
>> --- a/lib/dpif.h
>> +++ b/lib/dpif.h
>
> ...
>
>> @@ -54,11 +54,14 @@ VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
>>  COVERAGE_DEFINE(dumped_duplicate_flow);
>>  COVERAGE_DEFINE(dumped_new_flow);
>>  COVERAGE_DEFINE(handler_duplicate_upcall);
>> -COVERAGE_DEFINE(upcall_ukey_contention);
>> -COVERAGE_DEFINE(upcall_ukey_replace);
>>  COVERAGE_DEFINE(revalidate_missed_dp_flow);
>> +COVERAGE_DEFINE(ukey_dp_change);
>> +COVERAGE_DEFINE(ukey_invalid_stat_reset);
>>  COVERAGE_DEFINE(upcall_flow_limit_hit);
>>  COVERAGE_DEFINE(upcall_flow_limit_kill);
>> +COVERAGE_DEFINE(upcall_ukey_contention);
>> +COVERAGE_DEFINE(upcall_ukey_replace);
>> +
>>
>
> nit: now there are two blank lines here.

Will fix

>>  /* A thread that reads upcalls from dpif, forwards each upcall's packet,
>>   * and possibly sets up a kernel flow as a cache. */
>> @@ -288,6 +291,7 @@ struct udpif_key {
>>
>>      struct ovs_mutex mutex;                   /* Guards the following. */
>>      struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats.*/
>> +    const char *dp_layer OVS_GUARDED;         /* Last known dp_layer. */
>>      long long int created OVS_GUARDED;        /* Estimate of creation time. */
>>      uint64_t dump_seq OVS_GUARDED;            /* Tracks udpif->dump_seq. */
>>      uint64_t reval_seq OVS_GUARDED;           /* Tracks udpif->reval_seq. */
>> @@ -1771,6 +1775,7 @@ ukey_create__(const struct nlattr *key, size_t key_len,
>>      ukey->created = ukey->flow_time = time_msec();
>>      memset(&ukey->stats, 0, sizeof ukey->stats);
>>      ukey->stats.used = used;
>> +    ukey->dp_layer = NULL;
>>      ukey->xcache = NULL;
>>
>>      ukey->offloaded = false;
>> @@ -2356,6 +2361,13 @@ revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
>>                      ? stats->n_bytes - ukey->stats.n_bytes
>>                      : 0);
>>
>> +    if (stats->n_packets < ukey->stats.n_packets &&
>> +        ukey->stats.n_packets < (UINT64_MAX / 4 * 3)) {
>
> Is this logic used elsewhere in the code-base?
> Perhaps it warrants a helper.

Added a macro for this…
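
Something along the lines of the sketch below; the final name in v4 may
differ:

    /* Evaluates to true if the new counter value is lower than the cached
     * one while ruling out a likely wrap-around, i.e., only complain when
     * the cached value is below 3/4 of the uint64_t range. */
    #define COUNTER_RESET_DETECTED(new_cnt, old_cnt) \
        ((new_cnt) < (old_cnt) && (old_cnt) < (UINT64_MAX / 4 * 3))

so that both call sites reduce to:

    if (COUNTER_RESET_DETECTED(stats->n_packets, ukey->stats.n_packets)) {
        COVERAGE_INC(ukey_invalid_stat_reset);
    }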

>> +        /* Report cases where the packet counter is lower than the previous
>> +         * instance, but exclude the potential wrapping of a uint64_t. */
>> +        COVERAGE_INC(ukey_invalid_stat_reset);
>> +    }
>> +
>>      if (need_revalidate) {
>>          if (should_revalidate(udpif, push.n_packets, ukey->stats.used)) {
>>              if (!ukey->xcache) {
>> @@ -2469,6 +2481,15 @@ push_dp_ops(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
>>              push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
>>              push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
>>              push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
>> +
>> +            if (stats->n_packets < op->ukey->stats.n_packets &&
>> +                op->ukey->stats.n_packets < (UINT64_MAX / 4 * 3)) {
>> +                /* Report cases where the packet counter is lower than the
>> +                 * previous instance, but exclude the potential wrapping of a
>
> ... yes, here it is :)
>
>> +                 * uint64_t. */
>> +                COVERAGE_INC(ukey_invalid_stat_reset);
>> +            }
>> +
>>              ovs_mutex_unlock(&op->ukey->mutex);
>>          } else {
>>              push = stats;
>
> ...
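
For reference, the dp_layer handling that triggers the reset boils down to
the logic sketched below in revalidate_ukey(). This is a sketch, not the
exact v4 code: 'flow_dp_layer' stands in for the dp_layer string reported
with the dumped flow (e.g. "ovs" or "tc"), and in the real patch the check
is additionally gated on the dpif not having synced dp layers:

    /* If the flow moved to another datapath implementation, the cached
     * statistics no longer relate to this flow, so reset them. */
    if (ukey->dp_layer && strcmp(ukey->dp_layer, flow_dp_layer)) {
        COVERAGE_INC(ukey_dp_change);
        memset(&ukey->stats, 0, sizeof ukey->stats);
    }
    ukey->dp_layer = flow_dp_layer;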
