Thanks for the work on this, George! That’s a nice step forward for Cake’s 
fairness with bi-directional traffic. :)

I’ll try some tests when I get a chance...
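
For anyone following along, here is roughly how the new per-host bulk flow
count ends up weighting each flow's deficit. This is a simplified, standalone
sketch rather than the actual kernel code: it borrows quantum_div[] (roughly
65536/n), CAKE_QUEUES and a 1514-byte flow_quantum from sch_cake, and leaves
out the prandom_u32() dithering used on the dequeue path.

/* Sketch only: how host_load (the per-host bulk flow count) scales the
 * DRR++ deficit each flow receives per round. Not the real sch_cake code. */
#include <stdio.h>

#define CAKE_QUEUES 1024

static unsigned short quantum_div[CAKE_QUEUES + 1];

int main(void)
{
        const unsigned int flow_quantum = 1514; /* one MTU-sized packet */
        const unsigned int loads[] = { 1, 8 };  /* bulk flows per host */
        unsigned int i;

        quantum_div[0] = 65535; /* unused; host_load is always >= 1 */
        for (i = 1; i <= CAKE_QUEUES; i++)
                quantum_div[i] = 65535 / i;     /* ~65536 / i */

        for (i = 0; i < 2; i++) {
                unsigned int host_load = loads[i];
                unsigned int deficit =
                        (flow_quantum * quantum_div[host_load]) >> 16;

                printf("host_load = %u -> per-flow deficit ~ %u bytes\n",
                       host_load, deficit);
        }
        return 0;
}

With host fairness on, a host running eight bulk flows refills each of them
with about an eighth of the quantum per round, which lines up with the
~6 Mbit/s per flow against ~48 Mbit/s aggregate in George's results below.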

> On Feb 14, 2019, at 7:02 PM, George Amanakis <gamana...@gmail.com> wrote:
> 
> I tried Toke's and Jonathan's suggestion, dropping the
> sparse_flow_count. The results are the same (fairness is preserved).
> In this patch the host_bulk_flow_count is not updated on a hash
> collision; does this make sense?
> 
> -------------------8<-------------------
> Client A:
> Data file written to ./tcp_8down-2019-02-14T115248.702453.flent.gz.
> Summary of tcp_8down test run at 2019-02-14 16:52:48.702453:
> 
>                             avg       median          # data pts
> Ping (ms) ICMP   :         0.78         0.69 ms              341
> TCP download avg :         6.00         5.81 Mbits/s         301
> TCP download sum :        48.00        46.51 Mbits/s         301
> TCP download::1  :         6.00         5.82 Mbits/s         297
> TCP download::2  :         5.99         5.82 Mbits/s         297
> TCP download::3  :         6.00         5.82 Mbits/s         297
> TCP download::4  :         6.00         5.82 Mbits/s         297
> TCP download::5  :         6.00         5.82 Mbits/s         297
> TCP download::6  :         6.00         5.82 Mbits/s         298
> TCP download::7  :         6.01         5.82 Mbits/s         297
> TCP download::8  :         6.00         5.82 Mbits/s         298
> Data file written to ./tcp_1up-2019-02-14T115249.700445.flent.gz.
> Summary of tcp_1up test run at 2019-02-14 16:52:49.700445:
> 
>                           avg       median          # data pts
> Ping (ms) ICMP :         0.79         0.69 ms              341
> TCP upload     :        47.69        46.33 Mbits/s         266
> 
> 
> 
> Client B:
> Data file written to ./tcp_1down-2019-02-14T115250.817948.flent.gz.
> Summary of tcp_1down test run at 2019-02-14 16:52:50.817948:
> 
>                           avg       median          # data pts
> Ping (ms) ICMP :         0.83         0.69 ms              341
> TCP download   :        47.82        46.57 Mbits/s         300
> Data file written to ./tcp_8up-2019-02-14T115251.710755.flent.gz.
> Summary of tcp_8up test run at 2019-02-14 16:52:51.710755:
> 
>                           avg       median          # data pts
> Ping (ms) ICMP :         0.78         0.69 ms              341
> TCP upload avg :         5.99         5.79 Mbits/s         301
> TCP upload sum :        47.88        46.30 Mbits/s         301
> TCP upload::1  :         5.98         5.81 Mbits/s         224
> TCP upload::2  :         5.99         5.82 Mbits/s         224
> TCP upload::3  :         5.98         5.77 Mbits/s         219
> TCP upload::4  :         5.98         5.82 Mbits/s         224
> TCP upload::5  :         5.99         5.77 Mbits/s         218
> TCP upload::6  :         5.99         5.77 Mbits/s         221
> TCP upload::7  :         5.98         5.77 Mbits/s         219
> TCP upload::8  :         5.99         5.77 Mbits/s         221
> -------------------8<-------------------
> 
> 
> ---
> sch_cake.c | 84 +++++++++++++++++++++++++++++++++++-------------------
> 1 file changed, 55 insertions(+), 29 deletions(-)
> 
> diff --git a/sch_cake.c b/sch_cake.c
> index d434ae0..ed3fbd9 100644
> --- a/sch_cake.c
> +++ b/sch_cake.c
> @@ -146,8 +146,8 @@ struct cake_flow {
> struct cake_host {
>       u32 srchost_tag;
>       u32 dsthost_tag;
> -     u16 srchost_refcnt;
> -     u16 dsthost_refcnt;
> +     u16 srchost_bulk_flow_count;
> +     u16 dsthost_bulk_flow_count;
> };
> 
> struct cake_heap_entry {
> @@ -844,8 +844,6 @@ skip_hash:
>                * queue, accept the collision, update the host tags.
>                */
>               q->way_collisions++;
> -             q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
> -             q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
>               allocate_src = cake_dsrc(flow_mode);
>               allocate_dst = cake_ddst(flow_mode);
> found:
> @@ -865,13 +863,12 @@ found:
>                       }
>                       for (i = 0; i < CAKE_SET_WAYS;
>                               i++, k = (k + 1) % CAKE_SET_WAYS) {
> -                             if (!q->hosts[outer_hash + k].srchost_refcnt)
> +                             if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
>                                       break;
>                       }
>                       q->hosts[outer_hash + k].srchost_tag = srchost_hash;
> found_src:
>                       srchost_idx = outer_hash + k;
> -                     q->hosts[srchost_idx].srchost_refcnt++;
>                       q->flows[reduced_hash].srchost = srchost_idx;
>               }
> 
> @@ -887,13 +884,12 @@ found_src:
>                       }
>                       for (i = 0; i < CAKE_SET_WAYS;
>                            i++, k = (k + 1) % CAKE_SET_WAYS) {
> -                             if (!q->hosts[outer_hash + k].dsthost_refcnt)
> +                             if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
>                                       break;
>                       }
>                       q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
> found_dst:
>                       dsthost_idx = outer_hash + k;
> -                     q->hosts[dsthost_idx].dsthost_refcnt++;
>                       q->flows[reduced_hash].dsthost = dsthost_idx;
>               }
>       }
> @@ -1913,20 +1909,30 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
>               b->sparse_flow_count++;
> 
>               if (cake_dsrc(q->flow_mode))
> -                     host_load = max(host_load, srchost->srchost_refcnt);
> +                     host_load = max(host_load, srchost->srchost_bulk_flow_count);
> 
>               if (cake_ddst(q->flow_mode))
> -                     host_load = max(host_load, dsthost->dsthost_refcnt);
> +                     host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
> 
>               flow->deficit = (b->flow_quantum *
>                                quantum_div[host_load]) >> 16;
>       } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
> +             struct cake_host *srchost = &b->hosts[flow->srchost];
> +             struct cake_host *dsthost = &b->hosts[flow->dsthost];
> +
>               /* this flow was empty, accounted as a sparse flow, but actually
>                * in the bulk rotation.
>                */
>               flow->set = CAKE_SET_BULK;
>               b->sparse_flow_count--;
>               b->bulk_flow_count++;
> +
> +             if (cake_dsrc(q->flow_mode))
> +                     srchost->srchost_bulk_flow_count++;
> +
> +             if (cake_ddst(q->flow_mode))
> +                     dsthost->dsthost_bulk_flow_count++;
> +
>       }
> 
>       if (q->buffer_used > q->buffer_max_used)
> @@ -2097,23 +2103,8 @@ retry:
>       dsthost = &b->hosts[flow->dsthost];
>       host_load = 1;
> 
> -     if (cake_dsrc(q->flow_mode))
> -             host_load = max(host_load, srchost->srchost_refcnt);
> -
> -     if (cake_ddst(q->flow_mode))
> -             host_load = max(host_load, dsthost->dsthost_refcnt);
> -
> -     WARN_ON(host_load > CAKE_QUEUES);
> -
>       /* flow isolation (DRR++) */
>       if (flow->deficit <= 0) {
> -             /* The shifted prandom_u32() is a way to apply dithering to
> -              * avoid accumulating roundoff errors
> -              */
> -             flow->deficit += (b->flow_quantum * quantum_div[host_load] +
> -                               (prandom_u32() >> 16)) >> 16;
> -             list_move_tail(&flow->flowchain, &b->old_flows);
> -
>               /* Keep all flows with deficits out of the sparse and decaying
>                * rotations.  No non-empty flow can go into the decaying
>                * rotation, so they can't get deficits
> @@ -2122,6 +2113,13 @@ retry:
>                       if (flow->head) {
>                               b->sparse_flow_count--;
>                               b->bulk_flow_count++;
> +
> +                             if (cake_dsrc(q->flow_mode))
> +                                     srchost->srchost_bulk_flow_count++;
> +
> +                             if (cake_ddst(q->flow_mode))
> +                                     dsthost->dsthost_bulk_flow_count++;
> +
>                               flow->set = CAKE_SET_BULK;
>                       } else {
>                               /* we've moved it to the bulk rotation for
> @@ -2131,6 +2129,22 @@ retry:
>                               flow->set = CAKE_SET_SPARSE_WAIT;
>                       }
>               }
> +
> +             if (cake_dsrc(q->flow_mode))
> +                     host_load = max(host_load, srchost->srchost_bulk_flow_count);
> +
> +             if (cake_ddst(q->flow_mode))
> +                     host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
> +
> +             WARN_ON(host_load > CAKE_QUEUES);
> +
> +             /* The shifted prandom_u32() is a way to apply dithering to
> +              * avoid accumulating roundoff errors
> +              */
> +             flow->deficit += (b->flow_quantum * quantum_div[host_load] +
> +                               (prandom_u32() >> 16)) >> 16;
> +             list_move_tail(&flow->flowchain, &b->old_flows);
> +
>               goto retry;
>       }
> 
> @@ -2151,6 +2165,13 @@ retry:
>                                              &b->decaying_flows);
>                               if (flow->set == CAKE_SET_BULK) {
>                                       b->bulk_flow_count--;
> +
> +                                     if (cake_dsrc(q->flow_mode))
> +                                             srchost->srchost_bulk_flow_count--;
> +
> +                                     if (cake_ddst(q->flow_mode))
> +                                             dsthost->dsthost_bulk_flow_count--;
> +
>                                       b->decaying_flow_count++;
>                               } else if (flow->set == CAKE_SET_SPARSE ||
>                                          flow->set == CAKE_SET_SPARSE_WAIT) {
> @@ -2164,14 +2185,19 @@ retry:
>                               if (flow->set == CAKE_SET_SPARSE ||
>                                   flow->set == CAKE_SET_SPARSE_WAIT)
>                                       b->sparse_flow_count--;
> -                             else if (flow->set == CAKE_SET_BULK)
> +                             else if (flow->set == CAKE_SET_BULK) {
>                                       b->bulk_flow_count--;
> -                             else
> +
> +                                     if (cake_dsrc(q->flow_mode))
> +                                             srchost->srchost_bulk_flow_count--;
> +
> +                                     if (cake_ddst(q->flow_mode))
> +                                             dsthost->dsthost_bulk_flow_count--;
> +
> +                             } else
>                                       b->decaying_flow_count--;
> 
>                               flow->set = CAKE_SET_NONE;
> -                             srchost->srchost_refcnt--;
> -                             dsthost->dsthost_refcnt--;
>                       }
>                       goto begin;
>               }
> -- 
> 2.20.1
> 

_______________________________________________
Cake mailing list
Cake@lists.bufferbloat.net
https://lists.bufferbloat.net/listinfo/cake
