The commit is pushed to "branch-rh7-3.10.0-229.7.2.vz7.9.x-ovz" and will appear 
at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-229.7.2.vz7.9.17
------>
commit a14cdd2c8d11dc07bc4b6438fd0ceb995b0497e7
Author: Kirill Tkhai <ktk...@odin.com>
Date:   Mon Dec 28 13:58:20 2015 +0400

    ve/net: Revert "ve/net: Virtualize tcp_time_stamp"
    
    This reverts commit f54381c3801c3612822b5687960603cee1bf1e6e
    ("ve/net: Virtualize tcp_time_stamp"), which is no longer needed
    after the following mainstream (ms) commits:
    
    ee684b6f2830047d19877e5547989740f18b1a5d
        ("tcp: send packets with a socket timestamp")
    93be6ce0e91b6a94783e012b1857a347a5e6e9f2
        ("tcp: set and get per-socket timestamp")
    ceaa1fef65a7c2e017b260b879b310dd24888083
        ("tcp: adding a per-socket timestamp offset")
    
    These patches are already applied in the RHEL 3.10.0-327.3.1.el7 kernel.
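
    For context, the per-socket scheme those commits introduce can be
    sketched as follows. This is an illustrative sketch only, not
    mainstream code (the tcp_ts_with_offset() helper is hypothetical);
    tp->tsoffset and the TCP_TIMESTAMP socket option are real and
    visible in the net/ipv4/tcp.c hunks of this revert:

        /* The timestamp offset now lives in struct tcp_sock instead
         * of struct ve_struct, so tcp_time_stamp can go back to the
         * raw low 32 bits of jiffies.  A checkpoint/restore tool
         * shifts the clock per socket:
         *
         *     setsockopt(fd, IPPROTO_TCP, TCP_TIMESTAMP, &val, sizeof(val));
         *     => tp->tsoffset = val - tcp_time_stamp;
         */
        static inline u32 tcp_ts_with_offset(const struct tcp_sock *tp)
        {
                return tcp_time_stamp + tp->tsoffset;
        }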
    
    https://jira.sw.ru/browse/PSBM-42305
    
    Signed-off-by: Kirill Tkhai <ktk...@virtuozzo.com>
---
 include/linux/ve.h                            |  1 -
 include/net/netfilter/nf_conntrack_synproxy.h |  3 +-
 include/net/tcp.h                             | 16 +++-----
 kernel/ve/ve.c                                |  1 -
 net/dccp/ccids/ccid2.c                        |  8 ++--
 net/ipv4/netfilter/ipt_SYNPROXY.c             |  2 +-
 net/ipv4/syncookies.c                         |  4 +-
 net/ipv4/tcp.c                                |  8 ++--
 net/ipv4/tcp_bic.c                            | 10 ++---
 net/ipv4/tcp_cubic.c                          | 14 +++----
 net/ipv4/tcp_htcp.c                           |  2 +-
 net/ipv4/tcp_input.c                          | 58 +++++++++++++--------------
 net/ipv4/tcp_ipv4.c                           | 12 +++---
 net/ipv4/tcp_lp.c                             |  8 ++--
 net/ipv4/tcp_metrics.c                        |  2 +-
 net/ipv4/tcp_output.c                         | 38 +++++++++---------
 net/ipv4/tcp_timer.c                          | 12 +++---
 net/ipv4/tcp_westwood.c                       |  6 +--
 net/ipv6/netfilter/ip6t_SYNPROXY.c            |  2 +-
 net/ipv6/tcp_ipv6.c                           |  6 +--
 net/netfilter/nf_synproxy_core.c              |  5 +--
 21 files changed, 104 insertions(+), 114 deletions(-)

diff --git a/include/linux/ve.h b/include/linux/ve.h
index 87450c1..0cffd4c 100644
--- a/include/linux/ve.h
+++ b/include/linux/ve.h
@@ -88,7 +88,6 @@ struct ve_struct {
        struct timespec         start_timespec;         /* monotonic time */
        struct timespec         real_start_timespec;    /* boot based time */
        u64                     start_jiffies;  /* Deprecated */
-       u32                     jiffies_fixup;
 
        struct kstat_lat_pcpu_struct    sched_lat_ve;
 
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index 6ce2360..6793614 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -63,8 +63,7 @@ unsigned int synproxy_options_size(const struct synproxy_options *opts);
 void synproxy_build_options(struct tcphdr *th,
                            const struct synproxy_options *opts);
 
-void synproxy_init_timestamp_cookie(struct sock *sk,
-                                   const struct xt_synproxy_info *info,
+void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
                                    struct synproxy_options *opts);
 void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 05a5252..22b8d43 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -546,7 +546,7 @@ static inline __u32 cookie_v4_init_sequence(struct sock *sk,
 }
 #endif
 
-extern __u32 cookie_init_timestamp(struct sock *sk, struct request_sock *req);
+extern __u32 cookie_init_timestamp(struct request_sock *req);
 extern bool cookie_check_timestamp(struct tcp_options_received *opt,
                                struct net *net, bool *ecn_ok);
 
@@ -725,12 +725,7 @@ void tcp_send_window_probe(struct sock *sk);
  * to use only the low 32-bits of jiffies and hide the ugly
  * casts with the following macro.
  */
-static inline u32 tcp_time_stamp(const struct sock *sk)
-{
-       struct ve_struct *ve = sock_net(sk)->owner_ve;
-
-       return (__u32)(jiffies) + ve->jiffies_fixup;
-}
+#define tcp_time_stamp         ((__u32)(jiffies))
 
 #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 
@@ -1154,7 +1149,7 @@ static inline void tcp_synack_rtt_meas(struct sock *sk,
 {
        if (tcp_rsk(req)->snt_synack)
                tcp_valid_rtt_meas(sk,
-                   tcp_time_stamp(sk) - tcp_rsk(req)->snt_synack);
+                   tcp_time_stamp - tcp_rsk(req)->snt_synack);
 }
 
 extern void tcp_enter_memory_pressure(struct sock *sk);
@@ -1176,11 +1171,10 @@ static inline int keepalive_probes(const struct tcp_sock *tp)
 
 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
 {
-       const struct sock *sk = (struct sock *)tp;
        const struct inet_connection_sock *icsk = &tp->inet_conn;
 
-       return min_t(u32, tcp_time_stamp(sk) - icsk->icsk_ack.lrcvtime,
-                         tcp_time_stamp(sk) - tp->rcv_tstamp);
+       return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
+                         tcp_time_stamp - tp->rcv_tstamp);
 }
 
 static inline int tcp_fin_time(const struct sock *sk)
diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
index 8f5f905..8f59d01 100644
--- a/kernel/ve/ve.c
+++ b/kernel/ve/ve.c
@@ -64,7 +64,6 @@ static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, ve0_lat_stats);
 struct ve_struct ve0 = {
        .ve_name                = "0",
        .start_jiffies          = INITIAL_JIFFIES,
-       .jiffies_fixup          = 0,
        RCU_POINTER_INITIALIZER(ve_ns, &init_nsproxy),
        .ve_netns               = &init_net,
        .is_running             = 1,
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index eca2f0a..f053198 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -233,7 +233,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
 {
        struct dccp_sock *dp = dccp_sk(sk);
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-       const u32 now = ccid2_time_stamp(sk);
+       const u32 now = ccid2_time_stamp;
        struct ccid2_seq *next;
 
        /* slow-start after idle periods (RFC 2581, RFC 2861) */
@@ -466,7 +466,7 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
         * The cleanest solution is to not use the ccid2s_sent field at all
         * and instead use DCCP timestamps: requires changes in other places.
         */
-       ccid2_rtt_estimator(sk, ccid2_time_stamp(sk) - seqp->ccid2s_sent);
+       ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent);
 }
 
 static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
@@ -478,7 +478,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
                return;
        }
 
-       hc->tx_last_cong = ccid2_time_stamp(sk);
+       hc->tx_last_cong = ccid2_time_stamp;
 
        hc->tx_cwnd      = hc->tx_cwnd / 2 ? : 1U;
        hc->tx_ssthresh  = max(hc->tx_cwnd, 2U);
@@ -731,7 +731,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 
        hc->tx_rto       = DCCP_TIMEOUT_INIT;
        hc->tx_rpdupack  = -1;
-       hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_time_stamp(sk);
+       hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_time_stamp;
        hc->tx_cwnd_used = 0;
        setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
                        (unsigned long)sk);
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index f67dc09..a313c3f 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -280,7 +280,7 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 
                opts.options &= info->options;
                if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
-                       synproxy_init_timestamp_cookie(skb->sk, info, &opts);
+                       synproxy_init_timestamp_cookie(info, &opts);
                else
                        opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
                                          XT_SYNPROXY_OPT_SACK_PERM |
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index d7db4f6..3b64c59 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -64,10 +64,10 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
  * Since subsequent timestamps use the normal tcp_time_stamp value, we
  * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
  */
-__u32 cookie_init_timestamp(struct sock *sk, struct request_sock *req)
+__u32 cookie_init_timestamp(struct request_sock *req)
 {
        struct inet_request_sock *ireq;
-       u32 ts, ts_now = tcp_time_stamp(sk);
+       u32 ts, ts_now = tcp_time_stamp;
        u32 options = 0;
 
        ireq = inet_rsk(req);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2d73a57..e853cfa 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1143,7 +1143,7 @@ new_segment:
                                 * already been sent.
                                 */
                                if (tp->repair)
-                                       TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
+                                       TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
                                /*
                                 * Check whether we can use HW checksum.
@@ -2552,7 +2552,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                if (!tp->repair)
                        err = -EPERM;
                else
-                       tp->tsoffset = val - tcp_time_stamp(sk);
+                       tp->tsoffset = val - tcp_time_stamp;
                break;
        default:
                err = -ENOPROTOOPT;
@@ -2592,7 +2592,7 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
-       u32 now = tcp_time_stamp(sk);
+       u32 now = tcp_time_stamp;
 
        memset(info, 0, sizeof(*info));
 
@@ -2768,7 +2768,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                val = jiffies_to_msecs(icsk->icsk_user_timeout);
                break;
        case TCP_TIMESTAMP:
-               val = tcp_time_stamp(sk) + tp->tsoffset;
+               val = tcp_time_stamp + tp->tsoffset;
                break;
        default:
                return -ENOPROTOOPT;
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 184f5ef..f45e1c2 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -83,17 +83,17 @@ static void bictcp_init(struct sock *sk)
 /*
  * Compute congestion window to use.
  */
-static inline void bictcp_update(struct sock *sk, struct bictcp *ca, u32 cwnd)
+static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 {
        if (ca->last_cwnd == cwnd &&
-           (s32)(tcp_time_stamp(sk) - ca->last_time) <= HZ / 32)
+           (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
                return;
 
        ca->last_cwnd = cwnd;
-       ca->last_time = tcp_time_stamp(sk);
+       ca->last_time = tcp_time_stamp;
 
        if (ca->epoch_start == 0) /* record the beginning of an epoch */
-               ca->epoch_start = tcp_time_stamp(sk);
+               ca->epoch_start = tcp_time_stamp;
 
        /* start off normal */
        if (cwnd <= low_window) {
@@ -151,7 +151,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
        if (tp->snd_cwnd <= tp->snd_ssthresh)
                tcp_slow_start(tp);
        else {
-               bictcp_update(sk, ca, tp->snd_cwnd);
+               bictcp_update(ca, tp->snd_cwnd);
                tcp_cong_avoid_ai(tp, ca->cnt);
        }
 
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 1441cf1..b6ae92a 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -204,7 +204,7 @@ static u32 cubic_root(u64 a)
 /*
  * Compute congestion window to use.
  */
-static inline void bictcp_update(struct sock *sk, struct bictcp *ca, u32 cwnd)
+static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 {
        u32 delta, bic_target, max_cnt;
        u64 offs, t;
@@ -212,14 +212,14 @@ static inline void bictcp_update(struct sock *sk, struct bictcp *ca, u32 cwnd)
        ca->ack_cnt++;  /* count the number of ACKs */
 
        if (ca->last_cwnd == cwnd &&
-           (s32)(tcp_time_stamp(sk) - ca->last_time) <= HZ / 32)
+           (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
                return;
 
        ca->last_cwnd = cwnd;
-       ca->last_time = tcp_time_stamp(sk);
+       ca->last_time = tcp_time_stamp;
 
        if (ca->epoch_start == 0) {
-               ca->epoch_start = tcp_time_stamp(sk);   /* record the beginning of an epoch */
+               ca->epoch_start = tcp_time_stamp;       /* record the beginning of an epoch */
                ca->ack_cnt = 1;                        /* start counting */
                ca->tcp_cwnd = cwnd;                    /* syn with cubic */
 
@@ -250,7 +250,7 @@ static inline void bictcp_update(struct sock *sk, struct bictcp *ca, u32 cwnd)
         * if the cwnd < 1 million packets !!!
         */
 
-       t = (s32)(tcp_time_stamp(sk) - ca->epoch_start);
+       t = (s32)(tcp_time_stamp - ca->epoch_start);
        t += msecs_to_jiffies(ca->delay_min >> 3);
        /* change the unit from HZ to bictcp_HZ */
        t <<= BICTCP_HZ;
@@ -317,7 +317,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                        bictcp_hystart_reset(sk);
                tcp_slow_start(tp);
        } else {
-               bictcp_update(sk, ca, tp->snd_cwnd);
+               bictcp_update(ca, tp->snd_cwnd);
                tcp_cong_avoid_ai(tp, ca->cnt);
        }
 
@@ -416,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                return;
 
        /* Discard delay samples right after fast recovery */
-       if (ca->epoch_start && (s32)(tcp_time_stamp(sk) - ca->epoch_start) < HZ)
+       if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
                return;
 
        delay = (rtt_us << 3) / USEC_PER_MSEC;
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index a826d1f..c1a8175 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -103,7 +103,7 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct htcp *ca = inet_csk_ca(sk);
-       u32 now = tcp_time_stamp(sk);
+       u32 now = tcp_time_stamp;
 
        if (icsk->icsk_ca_state == TCP_CA_Open)
                ca->pkts_acked = pkts_acked;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 11ba8f4..84d34c1 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -402,7 +402,7 @@ void tcp_init_buffer_space(struct sock *sk)
                tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
 
        tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
-       tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 /* 5. Recalculate window clamp after socket hit its memory bounds. */
@@ -491,17 +491,17 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
                tp->rcv_rtt_est.rtt = new_sample;
 }
 
-static inline void tcp_rcv_rtt_measure(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
 {
        if (tp->rcv_rtt_est.time == 0)
                goto new_measure;
        if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
                return;
-       tcp_rcv_rtt_update(tp, tcp_time_stamp(sk) - tp->rcv_rtt_est.time, 1);
+       tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
 
 new_measure:
        tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
-       tp->rcv_rtt_est.time = tcp_time_stamp(sk);
+       tp->rcv_rtt_est.time = tcp_time_stamp;
 }
 
 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
@@ -511,7 +511,7 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
        if (tp->rx_opt.rcv_tsecr &&
            (TCP_SKB_CB(skb)->end_seq -
             TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
-               tcp_rcv_rtt_update(tp, tcp_time_stamp(sk) - tp->rx_opt.rcv_tsecr, 0);
+               tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
 }
 
 /*
@@ -527,7 +527,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
        if (tp->rcvq_space.time == 0)
                goto new_measure;
 
-       time = tcp_time_stamp(sk) - tp->rcvq_space.time;
+       time = tcp_time_stamp - tp->rcvq_space.time;
        if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
                return;
 
@@ -567,7 +567,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 new_measure:
        tp->rcvq_space.seq = tp->copied_seq;
-       tp->rcvq_space.time = tcp_time_stamp(sk);
+       tp->rcvq_space.time = tcp_time_stamp;
 }
 
 /* There is something which you must keep in mind when you analyze the
@@ -590,9 +590,9 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 
        tcp_measure_rcv_mss(sk, skb);
 
-       tcp_rcv_rtt_measure(sk, tp);
+       tcp_rcv_rtt_measure(tp);
 
-       now = tcp_time_stamp(sk);
+       now = tcp_time_stamp;
 
        if (!icsk->icsk_ack.ato) {
                /* The _first_ data packet received, initialize
@@ -1882,7 +1882,7 @@ void tcp_enter_loss(struct sock *sk, int how)
        }
        tp->snd_cwnd       = 1;
        tp->snd_cwnd_cnt   = 0;
-       tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
 
        tcp_clear_retrans_partial(tp);
 
@@ -1999,7 +1999,7 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
 static inline int tcp_skb_timedout(const struct sock *sk,
                                   const struct sk_buff *skb)
 {
-       return tcp_time_stamp(sk) - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
+       return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
 }
 
 static inline int tcp_head_timedout(const struct sock *sk)
@@ -2288,11 +2288,11 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
 /* CWND moderation, preventing bursts due to too big ACKs
  * in dubious situations.
  */
-static inline void tcp_moderate_cwnd(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
 {
        tp->snd_cwnd = min(tp->snd_cwnd,
                           tcp_packets_in_flight(tp) + tcp_max_burst(tp));
-       tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 /* Nothing was retransmitted or returned timestamp is less
@@ -2385,7 +2385,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
        } else {
                tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
        }
-       tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 static inline bool tcp_may_undo(const struct tcp_sock *tp)
@@ -2418,7 +2418,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
                /* Hold old state until something *above* high_seq
                 * is ACKed. For Reno it is MUST to prevent false
                 * fast retransmits (RFC2582). SACK TCP is safe. */
-               tcp_moderate_cwnd(sk, tp);
+               tcp_moderate_cwnd(tp);
                if (!tcp_any_retrans_done(sk))
                        tp->retrans_stamp = 0;
                return true;
@@ -2556,7 +2556,7 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
        if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
            (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
                tp->snd_cwnd = tp->snd_ssthresh;
-               tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+               tp->snd_cwnd_stamp = tcp_time_stamp;
        }
        tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
@@ -2603,7 +2603,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked)
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                tcp_try_keep_open(sk);
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
-                       tcp_moderate_cwnd(sk, tp);
+                       tcp_moderate_cwnd(tp);
        } else {
                tcp_cwnd_reduction(sk, newly_acked_sacked, 0);
        }
@@ -2628,7 +2628,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
                       tcp_mss_to_mtu(sk, tp->mss_cache) /
                       icsk->icsk_mtup.probe_size;
        tp->snd_cwnd_cnt = 0;
-       tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->snd_ssthresh = tcp_current_ssthresh(sk);
 
        icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
@@ -2914,7 +2914,7 @@ static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
         */
        struct tcp_sock *tp = tcp_sk(sk);
 
-       tcp_valid_rtt_meas(sk, tcp_time_stamp(sk) - tp->rx_opt.rcv_tsecr);
+       tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
 }
 
 static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
@@ -2949,7 +2949,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight);
-       tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp(sk);
+       tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 /* Restart timer after forward progress on connection.
@@ -2975,7 +2975,7 @@ void tcp_rearm_rto(struct sock *sk)
                    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                        struct sk_buff *skb = tcp_write_queue_head(sk);
                        const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
-                       s32 delta = (s32)(rto_time_stamp - tcp_time_stamp(sk));
+                       s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
                        /* delta may not be positive if the socket is locked
                         * when the retrans timer fires and is rescheduled.
                         */
@@ -3036,7 +3036,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb;
-       u32 now = tcp_time_stamp(sk);
+       u32 now = tcp_time_stamp;
        int fully_acked = true;
        int flag = 0;
        u32 pkts_acked = 0;
@@ -3437,7 +3437,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
         */
        sk->sk_err_soft = 0;
        icsk->icsk_probes_out = 0;
-       tp->rcv_tstamp = tcp_time_stamp(sk);
+       tp->rcv_tstamp = tcp_time_stamp;
        if (!prior_packets)
                goto no_queue;
 
@@ -4725,7 +4725,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
                }
                tp->snd_cwnd_used = 0;
        }
-       tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 static bool tcp_should_expand_sndbuf(const struct sock *sk)
@@ -4773,7 +4773,7 @@ static void tcp_new_space(struct sock *sk)
                sndmem *= 2 * demanded;
                if (sndmem > sk->sk_sndbuf)
                        sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
-               tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+               tp->snd_cwnd_stamp = tcp_time_stamp;
        }
 
        sk->sk_write_space(sk);
@@ -5287,7 +5287,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
        /* Prevent spurious tcp_cwnd_restart() on first data
         * packet.
         */
-       tp->lsndtime = tcp_time_stamp(sk);
+       tp->lsndtime = tcp_time_stamp;
 
        tcp_init_buffer_space(sk);
 
@@ -5374,7 +5374,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
                if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
                    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
-                            tcp_time_stamp(sk))) {
+                            tcp_time_stamp)) {
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
                        goto reset_and_undo;
                }
@@ -5471,7 +5471,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                         * to stand against the temptation 8)     --ANK
                         */
                        inet_csk_schedule_ack(sk);
-                       icsk->icsk_ack.lrcvtime = tcp_time_stamp(sk);
+                       icsk->icsk_ack.lrcvtime = tcp_time_stamp;
                        tcp_enter_quickack_mode(sk);
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX, TCP_RTO_MAX);
@@ -5716,7 +5716,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                /* Prevent spurious tcp_cwnd_restart() on
                                 * first data packet.
                                 */
-                               tp->lsndtime = tcp_time_stamp(sk);
+                               tp->lsndtime = tcp_time_stamp;
 
                                tcp_initialize_rcv_mss(sk);
                                tcp_fast_path_on(tp);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a946e9b..b8e2dd3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -442,7 +442,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                BUG_ON(!skb);
 
                remaining = icsk->icsk_rto - min(icsk->icsk_rto,
-                               tcp_time_stamp(sk) - TCP_SKB_CB(skb)->when);
+                               tcp_time_stamp - TCP_SKB_CB(skb)->when);
 
                if (remaining) {
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
@@ -786,7 +786,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
        tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >>
                                (tw->tw_rcv_wscale & TW_WSCALE_MASK),
-                       tcp_time_stamp(sk) + tcptw->tw_ts_offset,
+                       tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent,
                        tw->tw_bound_dev_if,
                        tcp_twsk_md5_key(tcptw),
@@ -806,7 +806,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
        tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
                        tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
-                       tcp_time_stamp(sk),
+                       tcp_time_stamp,
                        req->ts_recent,
                        0,
                        tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
@@ -845,7 +845,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                                            ireq->opt);
                err = net_xmit_eval(err);
                if (!tcp_rsk(req)->snt_synack && !err)
-                       tcp_rsk(req)->snt_synack = tcp_time_stamp(sk);
+                       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        }
 
        return err;
@@ -1376,7 +1376,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
                                    ireq->ir_rmt_addr, ireq->opt);
        err = net_xmit_eval(err);
        if (!err)
-               tcp_rsk(req)->snt_synack = tcp_time_stamp(sk);
+               tcp_rsk(req)->snt_synack = tcp_time_stamp;
        /* XXX (TFO) - is it ok to ignore error and continue? */
 
        spin_lock(&queue->fastopenq->lock);
@@ -1597,7 +1597,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                if (err || want_cookie)
                        goto drop_and_free;
 
-               tcp_rsk(req)->snt_synack = tcp_time_stamp(sk);
+               tcp_rsk(req)->snt_synack = tcp_time_stamp;
                tcp_rsk(req)->listener = NULL;
                /* Add the request_sock to the SYN table */
                inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 7d5da92..72f7218 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -269,11 +269,11 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
                tcp_lp_rtt_sample(sk, rtt_us);
 
        /* calc inference */
-       if (tcp_time_stamp(sk) > tp->rx_opt.rcv_tsecr)
-               lp->inference = 3 * (tcp_time_stamp(sk) - tp->rx_opt.rcv_tsecr);
+       if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
+               lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr);
 
        /* test if within inference */
-       if (lp->last_drop && (tcp_time_stamp(sk) - lp->last_drop < lp->inference))
+       if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
                lp->flag |= LP_WITHIN_INF;
        else
                lp->flag &= ~LP_WITHIN_INF;
@@ -310,7 +310,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
                tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U);
 
        /* record this drop time */
-       lp->last_drop = tcp_time_stamp(sk);
+       lp->last_drop = tcp_time_stamp;
 }
 
 static struct tcp_congestion_ops tcp_lp __read_mostly = {
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 935cb33..40cbb0b 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -534,7 +534,7 @@ reset:
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-       tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 02419aa..8aa908c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -138,7 +138,7 @@ static __u16 tcp_advertise_mss(struct sock *sk)
 static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       s32 delta = tcp_time_stamp(sk) - tp->lsndtime;
+       s32 delta = tcp_time_stamp - tp->lsndtime;
        u32 restart_cwnd = tcp_init_cwnd(tp, dst);
        u32 cwnd = tp->snd_cwnd;
 
@@ -150,7 +150,7 @@ static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
        while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        tp->snd_cwnd = max(cwnd, restart_cwnd);
-       tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+       tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->snd_cwnd_used = 0;
 }
 
@@ -159,7 +159,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
                                struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
-       const u32 now = tcp_time_stamp(sk);
+       const u32 now = tcp_time_stamp;
        const struct dst_entry *dst = __sk_dst_get(sk);
 
        if (sysctl_tcp_slow_start_after_idle &&
@@ -1355,14 +1355,14 @@ static void tcp_cwnd_validate(struct sock *sk)
        if (tp->packets_out >= tp->snd_cwnd) {
                /* Network is feed fully. */
                tp->snd_cwnd_used = 0;
-               tp->snd_cwnd_stamp = tcp_time_stamp(sk);
+               tp->snd_cwnd_stamp = tcp_time_stamp;
        } else {
                /* Network starves. */
                if (tp->packets_out > tp->snd_cwnd_used)
                        tp->snd_cwnd_used = tp->packets_out;
 
                if (sysctl_tcp_slow_start_after_idle &&
-                   (s32)(tcp_time_stamp(sk) - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
+                   (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
                        tcp_cwnd_application_limited(sk);
        }
 }
@@ -1775,7 +1775,7 @@ static int tcp_mtu_probe(struct sock *sk)
 
        /* We're ready to send.  If this fails, the probe will
         * be resegmented into mss-sized pieces by tcp_write_xmit(). */
-       TCP_SKB_CB(nskb)->when = tcp_time_stamp(sk);
+       TCP_SKB_CB(nskb)->when = tcp_time_stamp;
        if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
                /* Decrement cwnd here because we are sending
                 * effectively two packets. */
@@ -1895,7 +1895,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
                        break;
 
-               TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
+               TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
                if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
                        break;
@@ -1971,10 +1971,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
        timeout = max_t(u32, timeout, msecs_to_jiffies(10));
 
        /* If RTO is shorter, just schedule TLP in its place. */
-       tlp_time_stamp = tcp_time_stamp(sk) + timeout;
+       tlp_time_stamp = tcp_time_stamp + timeout;
        rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
        if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
-               s32 delta = rto_time_stamp - tcp_time_stamp(sk);
+               s32 delta = rto_time_stamp - tcp_time_stamp;
                if (delta > 0)
                        timeout = delta;
        }
@@ -2372,7 +2372,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        /* Make a copy, if the first transmission SKB clone we made
         * is still in somebody's hands, else make a clone.
         */
-       TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
+       TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
        /* make sure skb->data is aligned on arches that require it
         * and check if ack-trimming & collapsing extended the headroom
@@ -2617,7 +2617,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
        tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
                             TCPHDR_ACK | TCPHDR_RST);
        /* Send it off. */
-       TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
+       TCP_SKB_CB(skb)->when = tcp_time_stamp;
        if (tcp_transmit_skb(sk, skb, 0, priority))
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
 
@@ -2656,7 +2656,7 @@ int tcp_send_synack(struct sock *sk)
                TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
                TCP_ECN_send_synack(tcp_sk(sk), skb);
        }
-       TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
+       TCP_SKB_CB(skb)->when = tcp_time_stamp;
        return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 }
 
@@ -2721,10 +2721,10 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
        if (unlikely(req->cookie_ts))
-               TCP_SKB_CB(skb)->when = cookie_init_timestamp(sk, req);
+               TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
        else
 #endif
-       TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
+       TCP_SKB_CB(skb)->when = tcp_time_stamp;
        tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
                                             foc) + sizeof(*th);
 
@@ -2827,7 +2827,7 @@ void tcp_connect_init(struct sock *sk)
        if (likely(!tp->repair))
                tp->rcv_nxt = 0;
        else
-               tp->rcv_tstamp = tcp_time_stamp(sk);
+               tp->rcv_tstamp = tcp_time_stamp;
        tp->rcv_wup = tp->rcv_nxt;
        tp->copied_seq = tp->rcv_nxt;
 
@@ -2961,7 +2961,7 @@ int tcp_connect(struct sock *sk)
        skb_reserve(buff, MAX_TCP_HEADER);
 
        tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
-       tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp(sk);
+       tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
        tcp_connect_queue_skb(sk, buff);
        TCP_ECN_send_syn(sk, buff);
 
@@ -3068,7 +3068,7 @@ void tcp_send_ack(struct sock *sk)
        tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
 
        /* Send it off, this clears delayed acks for us. */
-       TCP_SKB_CB(buff)->when = tcp_time_stamp(sk);
+       TCP_SKB_CB(buff)->when = tcp_time_stamp;
        tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
 }
 
@@ -3100,7 +3100,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
         * send it.
         */
        tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
-       TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
+       TCP_SKB_CB(skb)->when = tcp_time_stamp;
        return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
 
@@ -3144,7 +3144,7 @@ int tcp_write_wakeup(struct sock *sk)
                        tcp_set_skb_tso_segs(sk, skb, mss);
 
                TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
-               TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
+               TCP_SKB_CB(skb)->when = tcp_time_stamp;
                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
                if (!err)
                        tcp_event_new_data_sent(sk, skb);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 592c37b..1d898ac 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -59,7 +59,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 
        /* If peer does not open window for long time, or did not transmit
         * anything for long time, penalize it. */
-       if ((s32)(tcp_time_stamp(sk) - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
+       if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
                shift++;
 
        /* If some dubious ICMP arrived, penalize even more. */
@@ -69,7 +69,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
        if (tcp_check_oom(sk, shift)) {
                /* Catch exceptional cases, when connection requires reset.
                 *      1. Last segment was sent recently. */
-               if ((s32)(tcp_time_stamp(sk) - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
+               if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
                    /*  2. Window is closed. */
                    (!tp->snd_wnd && !tp->packets_out))
                        do_reset = true;
@@ -149,7 +149,7 @@ static bool retransmits_timed_out(struct sock *sk,
                        timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
                                (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
        }
-       return (tcp_time_stamp(sk) - start_ts) >= timeout;
+       return (tcp_time_stamp - start_ts) >= timeout;
 }
 
 /* A write timeout has occurred. Process the after effects. */
@@ -281,9 +281,9 @@ static void tcp_probe_timer(struct sock *sk)
         */
        start_ts = TCP_SKB_CB(tcp_send_head(sk))->when;
        if (!start_ts)
-               TCP_SKB_CB(tcp_send_head(sk))->when = tcp_time_stamp(sk);
+               TCP_SKB_CB(tcp_send_head(sk))->when = tcp_time_stamp;
        else if (icsk->icsk_user_timeout &&
-                (s32)(tcp_time_stamp(sk) - start_ts) > icsk->icsk_user_timeout)
+                (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
                goto abort;
 
        max_probes = sysctl_tcp_retries2;
@@ -381,7 +381,7 @@ void tcp_retransmit_timer(struct sock *sk)
                                       tp->snd_una, tp->snd_nxt);
                }
 #endif
-               if (tcp_time_stamp(sk) - tp->rcv_tstamp > TCP_RTO_MAX) {
+               if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
                        tcp_write_err(sk);
                        goto out;
                }
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index b3ef319..76a1e23 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -69,7 +69,7 @@ static void tcp_westwood_init(struct sock *sk)
        w->cumul_ack = 0;
        w->reset_rtt_min = 1;
        w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
-       w->rtt_win_sx = tcp_time_stamp(sk);
+       w->rtt_win_sx = tcp_time_stamp;
        w->snd_una = tcp_sk(sk)->snd_una;
        w->first_ack = 1;
 }
@@ -116,7 +116,7 @@ static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
 static void westwood_update_window(struct sock *sk)
 {
        struct westwood *w = inet_csk_ca(sk);
-       s32 delta = tcp_time_stamp(sk) - w->rtt_win_sx;
+       s32 delta = tcp_time_stamp - w->rtt_win_sx;
 
        /* Initialize w->snd_una with the first acked sequence number in order
         * to fix mismatch between tp->snd_una and w->snd_una for the first
@@ -140,7 +140,7 @@ static void westwood_update_window(struct sock *sk)
                westwood_filter(w, delta);
 
                w->bk = 0;
-               w->rtt_win_sx = tcp_time_stamp(sk);
+               w->rtt_win_sx = tcp_time_stamp;
        }
 }
 
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 67f0a33..a0d1727 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -295,7 +295,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 
                opts.options &= info->options;
                if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
-                       synproxy_init_timestamp_cookie(skb->sk, info, &opts);
+                       synproxy_init_timestamp_cookie(info, &opts);
                else
                        opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
                                          XT_SYNPROXY_OPT_SACK_PERM |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b299e76..7123a8b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -896,7 +896,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 
        tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-                       tcp_time_stamp(sk) + tcptw->tw_ts_offset,
+                       tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
                        tw->tw_tclass);
 
@@ -907,7 +907,7 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
 {
        tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
-                       req->rcv_wnd, tcp_time_stamp(sk), req->ts_recent,
+                       req->rcv_wnd, tcp_time_stamp, req->ts_recent,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
 }
 
@@ -1071,7 +1071,7 @@ have_isn:
            want_cookie)
                goto drop_and_free;
 
-       tcp_rsk(req)->snt_synack = tcp_time_stamp(sk);
+       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        tcp_rsk(req)->listener = NULL;
        inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        return 0;
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 32641b8..52e20c9 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -145,12 +145,11 @@ synproxy_build_options(struct tcphdr *th, const struct synproxy_options *opts)
 }
 EXPORT_SYMBOL_GPL(synproxy_build_options);
 
-void synproxy_init_timestamp_cookie(struct sock *sk,
-                                   const struct xt_synproxy_info *info,
+void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
                                    struct synproxy_options *opts)
 {
        opts->tsecr = opts->tsval;
-       opts->tsval = tcp_time_stamp(sk) & ~0x3f;
+       opts->tsval = tcp_time_stamp & ~0x3f;
 
        if (opts->options & XT_SYNPROXY_OPT_WSCALE) {
                opts->tsval |= opts->wscale;