On Mon, Jun 27, 2016 at 9:27 AM, Eric Dumazet <eric.duma...@gmail.com> wrote:
> Excellent idea ;)
>
> Here is the v2 patch.
>
> diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
> index 8bd9911fdd16..e00e972c4e6a 100644
> --- a/net/ipv4/tcp_output.c
> +++ b/net/ipv4/tcp_output.c
> @@ -2751,7 +2751,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
>         struct tcp_sock *tp = tcp_sk(sk);
>         struct sk_buff *skb;
>         struct sk_buff *hole = NULL;
> -       u32 last_lost;
> +       u32 max_segs, last_lost;
>         int mib_idx;
>         int fwd_rexmitting = 0;
>
> @@ -2771,6 +2771,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
>                 last_lost = tp->snd_una;
>         }
>
> +       max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
>         tcp_for_write_queue_from(skb, sk) {
>                 __u8 sacked = TCP_SKB_CB(skb)->sacked;
>                 int segs;
> @@ -2784,6 +2785,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
>                 segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
>                 if (segs <= 0)
>                         return;
> +               /* In case tcp_shift_skb_data() has aggregated large skbs,
> +                * make sure we do not send TSO packets that are too big.
> +                */
> +               segs = min_t(int, segs, max_segs);
>
>                 if (fwd_rexmitting) {
>  begin_fwd:
>

Looks great to me. Thanks, Eric!
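
For anyone skimming the thread, here is a minimal standalone sketch of
what the new clamp does (not kernel code; "cwnd_budget" and "max_segs"
are stand-ins for tp->snd_cwnd - tcp_packets_in_flight(tp) and for the
tcp_tso_autosize() value cached before the retransmit loop):

#include <stdio.h>

/* Cap the retransmit burst at the TSO autosizing limit. */
static int clamp_retransmit_segs(int cwnd_budget, int max_segs)
{
        if (cwnd_budget <= 0)
                return 0;       /* the real loop simply returns here */
        return cwnd_budget < max_segs ? cwnd_budget : max_segs;
}

int main(void)
{
        /* cwnd leaves room for 45 segments, but autosizing allows only 10 */
        printf("%d\n", clamp_retransmit_segs(45, 10));  /* prints 10 */
        return 0;
}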

neal
