This patch adds TCP_NLA_SNDQ_SIZE stat into SCM_TIMESTAMPING_OPT_STATS.
It reports the number of bytes present in the send queue at the time the
timestamp is generated.

Signed-off-by: Priyaranjan Jha <priyar...@google.com>
Signed-off-by: Neal Cardwell <ncardw...@google.com>
Signed-off-by: Yuchung Cheng <ych...@google.com>
Signed-off-by: Soheil Hassas Yeganeh <soh...@google.com>
---
 include/uapi/linux/tcp.h | 1 +
 net/ipv4/tcp.c           | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index b4a4f64635fa..93bad2128ef6 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -241,6 +241,7 @@ enum {
        TCP_NLA_MIN_RTT,        /* minimum RTT */
        TCP_NLA_RECUR_RETRANS,  /* Recurring retransmits for the current pkt */
        TCP_NLA_DELIVERY_RATE_APP_LMT, /* delivery rate application limited ? */
+       TCP_NLA_SNDQ_SIZE,      /* Data (bytes) pending in send queue */
 
 };
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index a33539798bf6..162ba4227446 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3031,7 +3031,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const 
struct sock *sk)
        u32 rate;
 
        stats = alloc_skb(7 * nla_total_size_64bit(sizeof(u64)) +
-                         3 * nla_total_size(sizeof(u32)) +
+                         4 * nla_total_size(sizeof(u32)) +
                          2 * nla_total_size(sizeof(u8)), GFP_ATOMIC);
        if (!stats)
                return NULL;
@@ -3061,6 +3061,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const 
struct sock *sk)
 
        nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, 
inet_csk(sk)->icsk_retransmits);
        nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, 
!!tp->rate_app_limited);
+
+       nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
        return stats;
 }
 
-- 
2.16.2.395.g2e18187dfd-goog

Reply via email to