This patch reuses tcp_sendmsg_noappend() to decide whether a new_segment
is needed before entering the loop.  More checks could be added
later for the tcp_sendpage case to decide if a new_segment is
needed immediately.

Signed-off-by: Martin KaFai Lau <ka...@fb.com>
Cc: Eric Dumazet <eduma...@google.com>
Cc: Neal Cardwell <ncardw...@google.com>
Cc: Soheil Hassas Yeganeh <soheil.k...@gmail.com>
Cc: Willem de Bruijn <will...@google.com>
Cc: Yuchung Cheng <ych...@google.com>
---
 net/ipv4/tcp.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2918f42..6bb33b8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -913,6 +913,9 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto out_err;
 
+       if (tcp_sendmsg_noappend(sk, sk->sk_tsflags))
+               goto new_segment;
+
        while (size > 0) {
                struct sk_buff *skb = tcp_write_queue_tail(sk);
                int copy, i;
@@ -969,7 +972,7 @@ new_segment:
                offset += copy;
                size -= copy;
                if (!size) {
-                       tcp_tx_timestamp(sk, sk->sk_tsflags, skb, 0);
+                       tcp_tx_timestamp(sk, sk->sk_tsflags, skb, flags);
                        goto out;
                }
 
-- 
2.5.1

Reply via email to