There are a few places where TCP reads skb->skb_mstamp expecting
a value in usec units.

skb->tstamp (aka skb->skb_mstamp) will soon store a CLOCK_TAI nsec value.

Add tcp_skb_timestamp_us() to provide proper conversion when needed.
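
For illustration, a sketch of what the conversion could become once
skb->tstamp carries nsec values; the skb_mstamp_ns field name below is
an assumption for this example, not part of this patch:

	/* hypothetical nsec-based variant: convert the nsec departure
	 * time stored in the skb to the usec resolution TCP expects.
	 */
	static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
	{
		return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
	}

For now the helper is a plain accessor, so callers converted here need
no further change when the underlying unit switches.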

Signed-off-by: Eric Dumazet <eduma...@google.com>
---
 include/net/tcp.h       |  8 +++++++-
 net/ipv4/tcp_input.c    | 11 ++++++-----
 net/ipv4/tcp_ipv4.c     |  2 +-
 net/ipv4/tcp_output.c   |  2 +-
 net/ipv4/tcp_rate.c     | 17 +++++++++--------
 net/ipv4/tcp_recovery.c |  5 +++--
 6 files changed, 27 insertions(+), 18 deletions(-)

diff --git a/include/net/tcp.h b/include/net/tcp.h
index c6f0bc1dc6782a1976c06932e846b3f6d708ba9f..0ca5ea10dc06f3552597c94de31dcd0c8e0ecc32 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -774,6 +774,12 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
        return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 }
 
+/* provide the departure time in us unit */
+static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
+{
+       return skb->skb_mstamp;
+}
+
 
 #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 
@@ -1940,7 +1946,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
 {
        const struct sk_buff *skb = tcp_rtx_queue_head(sk);
        u32 rto = inet_csk(sk)->icsk_rto;
-       u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+       u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
 
        return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d9034073138ce49c423f7a22143bac415415bc09..d703a0b3b6a2f0efd8607354c1c74ac1a8e78d4f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1305,7 +1305,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
         */
        tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
                        start_seq, end_seq, dup_sack, pcount,
-                       skb->skb_mstamp);
+                       tcp_skb_timestamp_us(skb));
        tcp_rate_skb_delivered(sk, skb, state->rate);
 
        if (skb == tp->lost_skb_hint)
@@ -1580,7 +1580,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                                                TCP_SKB_CB(skb)->end_seq,
                                                dup_sack,
                                                tcp_skb_pcount(skb),
-                                               skb->skb_mstamp);
+                                               tcp_skb_timestamp_us(skb));
                        tcp_rate_skb_delivered(sk, skb, state->rate);
                        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
                                list_del_init(&skb->tcp_tsorted_anchor);
@@ -3103,7 +3103,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                                tp->retrans_out -= acked_pcount;
                        flag |= FLAG_RETRANS_DATA_ACKED;
                } else if (!(sacked & TCPCB_SACKED_ACKED)) {
-                       last_ackt = skb->skb_mstamp;
+                       last_ackt = tcp_skb_timestamp_us(skb);
                        WARN_ON_ONCE(last_ackt == 0);
                        if (!first_ackt)
                                first_ackt = last_ackt;
@@ -3121,7 +3121,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                        tp->delivered += acked_pcount;
                        if (!tcp_skb_spurious_retrans(tp, skb))
                                tcp_rack_advance(tp, sacked, scb->end_seq,
-                                                skb->skb_mstamp);
+                                                tcp_skb_timestamp_us(skb));
                }
                if (sacked & TCPCB_LOST)
                        tp->lost_out -= acked_pcount;
@@ -3215,7 +3215,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                        tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
                }
        } else if (skb && rtt_update && sack_rtt_us >= 0 &&
-                  sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
+                  sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
+                                                   tcp_skb_timestamp_us(skb))) {
                /* Do not re-arm RTO if the sack RTT is measured from data sent
                 * after when the head was last (re)transmitted. Otherwise the
                 * timeout may continue to extend in loss recovery.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 09547ef9c4c644fba0f7887afad0a6393e3dd03a..1f2496e8620dd78cecefbb0dceb8570fc92661e5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -544,7 +544,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                BUG_ON(!skb);
 
                tcp_mstamp_refresh(tp);
-               delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
+               delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
                remaining = icsk->icsk_rto -
                            usecs_to_jiffies(delta_us);
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 597dbd749f05dc72e53962a5821861fc218774d6..b95aa72d88233dd6376a70ccd7cbb13744444889 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1966,7 +1966,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        head = tcp_rtx_queue_head(sk);
        if (!head)
                goto send_now;
-       age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
+       age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
        /* If next ACK is likely to come too late (half srtt), do not defer */
        if (age < (tp->srtt_us >> 4))
                goto send_now;
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index 4dff40dad4dc5ccc372f5108b0d6ba38497ab81f..baed2186c7c623737c739cbc1e35a3c772a8b15a 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -55,8 +55,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
          * bandwidth estimate.
          */
        if (!tp->packets_out) {
-               tp->first_tx_mstamp  = skb->skb_mstamp;
-               tp->delivered_mstamp = skb->skb_mstamp;
+               u64 tstamp_us = tcp_skb_timestamp_us(skb);
+
+               tp->first_tx_mstamp  = tstamp_us;
+               tp->delivered_mstamp = tstamp_us;
        }
 
        TCP_SKB_CB(skb)->tx.first_tx_mstamp     = tp->first_tx_mstamp;
@@ -88,13 +90,12 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
                rs->is_app_limited   = scb->tx.is_app_limited;
                rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
 
-               /* Find the duration of the "send phase" of this window: */
-               rs->interval_us      = tcp_stamp_us_delta(
-                                               skb->skb_mstamp,
-                                               scb->tx.first_tx_mstamp);
-
                /* Record send time of most recently ACKed packet: */
-               tp->first_tx_mstamp  = skb->skb_mstamp;
+               tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
+               /* Find the duration of the "send phase" of this window: */
+               rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
+                                                    scb->tx.first_tx_mstamp);
+
        }
        /* Mark off the skb delivered once it's sacked to avoid being
         * used again when it's cumulatively acked. For acked packets
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index c81aadff769b2c3eee02e6de3a5545c27e8cbc38..fdb715bdd2d11dd33a1474d02892546bbac66f41 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -50,7 +50,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
 s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
 {
        return tp->rack.rtt_us + reo_wnd -
-              tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
+              tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
 }
 
 /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -91,7 +91,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
                    !(scb->sacked & TCPCB_SACKED_RETRANS))
                        continue;
 
-               if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
+               if (!tcp_rack_sent_after(tp->rack.mstamp,
+                                        tcp_skb_timestamp_us(skb),
                                         tp->rack.end_seq, scb->end_seq))
                        break;
 
-- 
2.19.0.444.g18242da7ef-goog
