Ping again. This was requested to make the sort function behave in a
more standard manner. I don't think there's anything controversial in it.
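
For anyone who wants the context without digging through the diff below: the
"more standard" behaviour is the usual qsort() comparator contract, i.e.
return negative/zero/positive, and return 0 for equal keys so that the
ordering of ties is left to qsort() itself rather than to the compare
function. A rough standalone sketch of that contract for a descending sort
(rxq_stub and compare_rxq_desc are made-up names, not the actual dpif-netdev
code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rxq_stub {              /* Hypothetical stand-in for dp_netdev_rxq. */
        uint64_t cycles;           /* Pre-computed sort key. */
    };

    static int
    compare_rxq_desc(const void *a, const void *b)
    {
        struct rxq_stub *qa = *(struct rxq_stub **) a;
        struct rxq_stub *qb = *(struct rxq_stub **) b;

        if (qa->cycles == qb->cycles) {
            return 0;                            /* Tie: leave the order to qsort(). */
        }
        return qa->cycles > qb->cycles ? -1 : 1; /* Busiest queue first. */
    }

    int
    main(void)
    {
        struct rxq_stub q[] = { { 10 }, { 30 }, { 20 } };
        struct rxq_stub *p[] = { &q[0], &q[1], &q[2] };

        qsort(p, sizeof p / sizeof p[0], sizeof p[0], compare_rxq_desc);
        for (size_t i = 0; i < sizeof p / sizeof p[0]; i++) {
            printf("%"PRIu64"\n", p[i]->cycles); /* Prints 30, 20, 10. */
        }
        return 0;
    }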

thanks,
Kevin.

On 09/22/2017 08:21 PM, Darrell Ball wrote:
> Are there any other comments for this patch?
> 
> 
> On 8/30/17, 11:01 AM, "Darrell Ball" <db...@vmware.com> wrote:
> 
>     Thanks for the patch Kevin
>     
>     On 8/30/17, 10:45 AM, "Kevin Traynor" <ktray...@redhat.com> wrote:
>     
>         rxq_cycle_sort summed the latest cycles from each queue for sorting.
>         While each comparison was correct with the latest cycles, the cycles
>         could change between calls to rxq_cycle_sort. In order to use
>         consistent values through each call to rxq_cycle_sort, sum the cycles
>         prior to rxq_cycle_sort being called.
>     
>     As discussed, these changes are optional and have some tradeoffs, but
>     overall, I don’t see any major issue introduced here, since this is
>     understood to be a rough comparison anyways.
>         
>         Also, change return to 0 when values are equal.
>     
>     As discussed, this means the equal tie-breaker is done by qsort instead of
>     the compare function; the net practical effect of this is nil, but this
>     is more standard.
>     
>         Reported-by: Ilya Maximets <i.maxim...@samsung.com>
>         Signed-off-by: Kevin Traynor <ktray...@redhat.com>
>         ---
>          lib/dpif-netdev.c | 22 +++++++++++++---------
>          1 file changed, 13 insertions(+), 9 deletions(-)
>         
>         diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
>         index 1db9f10..7c21ee5 100644
>         --- a/lib/dpif-netdev.c
>         +++ b/lib/dpif-netdev.c
>         @@ -3432,18 +3432,14 @@ rxq_cycle_sort(const void *a, const void *b)
>              struct dp_netdev_rxq *qb;
>              uint64_t total_qa, total_qb;
>         -    unsigned i;
>          
>              qa = *(struct dp_netdev_rxq **) a;
>              qb = *(struct dp_netdev_rxq **) b;
>          
>         -    total_qa = total_qb = 0;
>         -    for (i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
>         -        total_qa += dp_netdev_rxq_get_intrvl_cycles(qa, i);
>         -        total_qb += dp_netdev_rxq_get_intrvl_cycles(qb, i);
>         -    }
>         -    dp_netdev_rxq_set_cycles(qa, RXQ_CYCLES_PROC_HIST, total_qa);
>         -    dp_netdev_rxq_set_cycles(qb, RXQ_CYCLES_PROC_HIST, total_qb);
>         +    total_qa = dp_netdev_rxq_get_cycles(qa, RXQ_CYCLES_PROC_HIST);
>         +    total_qb = dp_netdev_rxq_get_cycles(qb, RXQ_CYCLES_PROC_HIST);
>          
>         -    if (total_qa >= total_qb) {
>         +    if (total_qa == total_qb) {
>         +        return 0;
>         +    } else if (total_qa > total_qb) {
>                  return -1;
>              }
>         @@ -3493,4 +3489,6 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
>                          }
>                      } else if (!pinned && q->core_id == OVS_CORE_UNSPEC) {
>         +                uint64_t cycle_hist = 0;
>         +
>                          if (n_rxqs == 0) {
>                              rxqs = xmalloc(sizeof *rxqs);
>         @@ -3498,4 +3496,10 @@ rxq_scheduling(struct dp_netdev *dp, bool pinned) OVS_REQUIRES(dp->port_mutex)
>                              rxqs = xrealloc(rxqs, sizeof *rxqs * (n_rxqs + 1));
>                          }
>         +
>         +                for (unsigned i = 0; i < PMD_RXQ_INTERVAL_MAX; i++) {
>         +                    cycle_hist += dp_netdev_rxq_get_intrvl_cycles(q, i);
>         +                }
>         +                dp_netdev_rxq_set_cycles(q, RXQ_CYCLES_PROC_HIST, cycle_hist);
>         +
>                          /* Store the queue. */
>                          rxqs[n_rxqs++] = q;
>         -- 
>         1.8.3.1
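
To spell out the other half of the change for anyone skimming the diff above:
the per-queue totals are now summed once, before qsort() runs, so every
comparator call sees the same values even if the live interval counters keep
moving underneath. A rough sketch of that "snapshot first, sort after"
pattern, with made-up names rather than the actual dpif-netdev helpers:

    #include <stdint.h>
    #include <stdlib.h>

    #define N_INTERVALS 6              /* Made-up stand-in for PMD_RXQ_INTERVAL_MAX. */

    struct rxq_stub {
        uint64_t intrvl[N_INTERVALS];  /* Per-interval cycle counts (may change). */
        uint64_t total;                /* Snapshot used as the sort key. */
    };

    static int
    compare_total_desc(const void *a, const void *b)
    {
        uint64_t ta = (*(struct rxq_stub **) a)->total;
        uint64_t tb = (*(struct rxq_stub **) b)->total;

        return ta == tb ? 0 : (ta > tb ? -1 : 1);
    }

    void
    sort_rxqs(struct rxq_stub **rxqs, size_t n)
    {
        /* Sum each queue's intervals into a single total before sorting, so
         * the comparator works on values that cannot change mid-sort. */
        for (size_t i = 0; i < n; i++) {
            uint64_t total = 0;

            for (unsigned j = 0; j < N_INTERVALS; j++) {
                total += rxqs[i]->intrvl[j];
            }
            rxqs[i]->total = total;
        }
        qsort(rxqs, n, sizeof rxqs[0], compare_total_desc);
    }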
