[Devel] [PATCH] net: Virtualize tcp_time_stamp
Konstantin Khorenko
khorenko at virtuozzo.com
Mon Jun 22 02:43:37 PDT 2015
Kirill, waiting for your ack here.
--
Best regards,
Konstantin Khorenko,
Virtuozzo Linux Kernel Team
On 06/19/2015 01:10 PM, Kirill Tkhai wrote:
> When a CT migrates from one node to another, we need
> to synchronize their tcp_time_stamps, as the time
> mustn't go backward.
>
> In 2.6.32 we used a simple trick with set_exec_env()
> to do that. In 3.10 we don't have that primitive,
> so this patch makes tcp_time_stamp "per-VE" in a
> different way.
>
> Furthermore, CRIU should set an appropriate jiffies_fixup
> on restore.
>
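For reference, a minimal sketch of what the restore side could look
like (a hypothetical helper, not part of this patch): CRIU would pass
in the 32-bit timestamp value it saved on the source node at dump
time, and the destination picks jiffies_fixup so that the CT-visible
clock resumes from there:

	/* Hypothetical restore-side helper, illustration only:
	 * choose jiffies_fixup so that tcp_time_stamp() in this VE
	 * continues from the value dumped on the source node and
	 * then keeps ticking with the destination node's jiffies.
	 */
	static void ve_restore_tcp_clock(struct ve_struct *ve, u32 dumped_ts)
	{
		ve->jiffies_fixup = dumped_ts - (u32)jiffies;
	}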
> Also, we may think about submitting this patch to
> mainline.
>
> From patch diff-ve-network-combined-10-rh494-2 of
> https://jira.sw.ru/browse/PSBM-33645
>
> Signed-off-by: Kirill Tkhai <ktkhai at odin.com>
> ---
> include/linux/ve.h | 1 +
> include/net/netfilter/nf_conntrack_synproxy.h | 3 +
> include/net/tcp.h | 16 +++++--
> kernel/ve/ve.c | 1 +
> net/dccp/ccids/ccid2.c | 8 ++-
> net/ipv4/netfilter/ipt_SYNPROXY.c | 2 -
> net/ipv4/syncookies.c | 4 +-
> net/ipv4/tcp.c | 8 ++-
> net/ipv4/tcp_bic.c | 10 ++--
> net/ipv4/tcp_cubic.c | 14 +++---
> net/ipv4/tcp_htcp.c | 2 -
> net/ipv4/tcp_input.c | 58 +++++++++++++------------
> net/ipv4/tcp_ipv4.c | 12 +++--
> net/ipv4/tcp_lp.c | 8 ++-
> net/ipv4/tcp_metrics.c | 2 -
> net/ipv4/tcp_output.c | 38 ++++++++--------
> net/ipv4/tcp_timer.c | 8 ++-
> net/ipv4/tcp_westwood.c | 6 +--
> net/ipv6/netfilter/ip6t_SYNPROXY.c | 2 -
> net/ipv6/tcp_ipv6.c | 6 +--
> net/netfilter/nf_synproxy_core.c | 5 +-
> 21 files changed, 112 insertions(+), 102 deletions(-)
>
> diff --git a/include/linux/ve.h b/include/linux/ve.h
> index e3fc636..de745b0 100644
> --- a/include/linux/ve.h
> +++ b/include/linux/ve.h
> @@ -94,6 +94,7 @@ struct ve_struct {
> struct timespec start_timespec; /* monotonic time */
> struct timespec real_start_timespec; /* boot based time */
> u64 start_jiffies; /* Deprecated */
> + u32 jiffies_fixup;
>
> struct kstat_lat_pcpu_struct sched_lat_ve;
>
> diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
> index f572f31..17750fb 100644
> --- a/include/net/netfilter/nf_conntrack_synproxy.h
> +++ b/include/net/netfilter/nf_conntrack_synproxy.h
> @@ -63,7 +63,8 @@ extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
> extern void synproxy_build_options(struct tcphdr *th,
> const struct synproxy_options *opts);
>
> -extern void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
> +extern void synproxy_init_timestamp_cookie(struct sock *sk,
> + const struct xt_synproxy_info *info,
> struct synproxy_options *opts);
> extern void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
>
> diff --git a/include/net/tcp.h b/include/net/tcp.h
> index f4e704f..5666168 100644
> --- a/include/net/tcp.h
> +++ b/include/net/tcp.h
> @@ -531,7 +531,7 @@ static inline __u32 cookie_v4_init_sequence(struct sock *sk,
> }
> #endif
>
> -extern __u32 cookie_init_timestamp(struct request_sock *req);
> +extern __u32 cookie_init_timestamp(struct sock *sk, struct request_sock *req);
> extern bool cookie_check_timestamp(struct tcp_options_received *opt,
> struct net *net, bool *ecn_ok);
>
> @@ -710,7 +710,12 @@ void tcp_send_window_probe(struct sock *sk);
> * to use only the low 32-bits of jiffies and hide the ugly
> * casts with the following macro.
> */
> -#define tcp_time_stamp ((__u32)(jiffies))
> +static inline u32 tcp_time_stamp(const struct sock *sk)
> +{
> + struct ve_struct *ve = sock_net(sk)->owner_ve;
> +
> + return (__u32)(jiffies) + ve->jiffies_fixup;
> +}
>
> #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
>
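Note that the new helper keeps the usual 32-bit wrap semantics: the
call sites compare timestamps via wrap-safe signed subtraction, e.g.

	if ((s32)(tcp_time_stamp(sk) - ca->last_time) <= HZ / 32)

so the per-VE offset only has to keep the clock monotonic modulo 2^32.
(Nit: the comment above the new helper still says "macro".)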
> @@ -1140,7 +1145,7 @@ static inline void tcp_synack_rtt_meas(struct sock *sk,
> {
> if (tcp_rsk(req)->snt_synack)
> tcp_valid_rtt_meas(sk,
> - tcp_time_stamp - tcp_rsk(req)->snt_synack);
> + tcp_time_stamp(sk) - tcp_rsk(req)->snt_synack);
> }
>
> extern void tcp_enter_memory_pressure(struct sock *sk);
> @@ -1162,10 +1167,11 @@ static inline int keepalive_probes(const struct tcp_sock *tp)
>
> static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
> {
> + const struct sock *sk = (struct sock *)tp;
> const struct inet_connection_sock *icsk = &tp->inet_conn;
>
> - return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
> - tcp_time_stamp - tp->rcv_tstamp);
> + return min_t(u32, tcp_time_stamp(sk) - icsk->icsk_ack.lrcvtime,
> + tcp_time_stamp(sk) - tp->rcv_tstamp);
> }
>
> static inline int tcp_fin_time(const struct sock *sk)
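A side note on the cast above: it works because struct sock is the
leading member all the way down the tcp_sock layering (tcp_sock ->
inet_connection_sock -> inet_sock -> sock), so both pointers refer to
the same address. An equivalent, more explicit spelling would be:

	/* same object, reached through the embedded members */
	const struct sock *sk = &tp->inet_conn.icsk_inet.sk;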
> diff --git a/kernel/ve/ve.c b/kernel/ve/ve.c
> index 0ffd8d8..1710dcd 100644
> --- a/kernel/ve/ve.c
> +++ b/kernel/ve/ve.c
> @@ -66,6 +66,7 @@ static DEFINE_PER_CPU(struct kstat_lat_pcpu_snap_struct, ve0_lat_stats);
> struct ve_struct ve0 = {
> .ve_name = "0",
> .start_jiffies = INITIAL_JIFFIES,
> + .jiffies_fixup = 0,
> RCU_POINTER_INITIALIZER(ve_ns, &init_nsproxy),
> .ve_netns = &init_net,
> .is_running = 1,
> diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
> index f053198..eca2f0a 100644
> --- a/net/dccp/ccids/ccid2.c
> +++ b/net/dccp/ccids/ccid2.c
> @@ -233,7 +233,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
> {
> struct dccp_sock *dp = dccp_sk(sk);
> struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
> - const u32 now = ccid2_time_stamp;
> + const u32 now = ccid2_time_stamp(sk);
> struct ccid2_seq *next;
>
> /* slow-start after idle periods (RFC 2581, RFC 2861) */
> @@ -466,7 +466,7 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
> * The cleanest solution is to not use the ccid2s_sent field at all
> * and instead use DCCP timestamps: requires changes in other places.
> */
> - ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent);
> + ccid2_rtt_estimator(sk, ccid2_time_stamp(sk) - seqp->ccid2s_sent);
> }
>
> static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
> @@ -478,7 +478,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
> return;
> }
>
> - hc->tx_last_cong = ccid2_time_stamp;
> + hc->tx_last_cong = ccid2_time_stamp(sk);
>
> hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
> hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
> @@ -731,7 +731,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
>
> hc->tx_rto = DCCP_TIMEOUT_INIT;
> hc->tx_rpdupack = -1;
> - hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_time_stamp;
> + hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_time_stamp(sk);
> hc->tx_cwnd_used = 0;
> setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
> (unsigned long)sk);
> diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
> index f13bd91..f17a923 100644
> --- a/net/ipv4/netfilter/ipt_SYNPROXY.c
> +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
> @@ -280,7 +280,7 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
>
> opts.options &= info->options;
> if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
> - synproxy_init_timestamp_cookie(info, &opts);
> + synproxy_init_timestamp_cookie(skb->sk, info, &opts);
> else
> opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
> XT_SYNPROXY_OPT_SACK_PERM |
> diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
> index ce93c3b..c78491e 100644
> --- a/net/ipv4/syncookies.c
> +++ b/net/ipv4/syncookies.c
> @@ -64,10 +64,10 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
> * Since subsequent timestamps use the normal tcp_time_stamp value, we
> * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
> */
> -__u32 cookie_init_timestamp(struct request_sock *req)
> +__u32 cookie_init_timestamp(struct sock *sk, struct request_sock *req)
> {
> struct inet_request_sock *ireq;
> - u32 ts, ts_now = tcp_time_stamp;
> + u32 ts, ts_now = tcp_time_stamp(sk);
> u32 options = 0;
>
> ireq = inet_rsk(req);
> diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
> index 074fd3b..9a713df 100644
> --- a/net/ipv4/tcp.c
> +++ b/net/ipv4/tcp.c
> @@ -1144,7 +1144,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
> * already been sent.
> */
> if (tp->repair)
> - TCP_SKB_CB(skb)->when = tcp_time_stamp;
> + TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
>
> /*
> * Check whether we can use HW checksum.
> @@ -2671,7 +2671,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
> if (!tp->repair)
> err = -EPERM;
> else
> - tp->tsoffset = val - tcp_time_stamp;
> + tp->tsoffset = val - tcp_time_stamp(sk);
> break;
> default:
> err = -ENOPROTOOPT;
> @@ -2711,7 +2711,7 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
> {
> const struct tcp_sock *tp = tcp_sk(sk);
> const struct inet_connection_sock *icsk = inet_csk(sk);
> - u32 now = tcp_time_stamp;
> + u32 now = tcp_time_stamp(sk);
>
> memset(info, 0, sizeof(*info));
>
> @@ -2887,7 +2887,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
> val = jiffies_to_msecs(icsk->icsk_user_timeout);
> break;
> case TCP_TIMESTAMP:
> - val = tcp_time_stamp + tp->tsoffset;
> + val = tcp_time_stamp(sk) + tp->tsoffset;
> break;
> default:
> return -ENOPROTOOPT;
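From userspace, these repair-mode hooks are what a dump/restore tool
drives. A minimal sketch, assuming the stock TCP_REPAIR and
TCP_TIMESTAMP socket options (error handling omitted; the real CRIU
flow is more involved):

	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	/* On dump: the kernel returns tcp_time_stamp(sk) + tp->tsoffset. */
	static unsigned int tcp_dump_timestamp(int fd)
	{
		unsigned int ts = 0;
		socklen_t len = sizeof(ts);

		getsockopt(fd, SOL_TCP, TCP_TIMESTAMP, &ts, &len);
		return ts;
	}

	/* On restore: the kernel stores tsoffset = ts - tcp_time_stamp(sk),
	 * so subsequent timestamps continue from the dumped value. */
	static void tcp_restore_timestamp(int fd, unsigned int ts)
	{
		int on = 1;

		setsockopt(fd, SOL_TCP, TCP_REPAIR, &on, sizeof(on));
		setsockopt(fd, SOL_TCP, TCP_TIMESTAMP, &ts, sizeof(ts));
	}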
> diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
> index f45e1c2..184f5ef 100644
> --- a/net/ipv4/tcp_bic.c
> +++ b/net/ipv4/tcp_bic.c
> @@ -83,17 +83,17 @@ static void bictcp_init(struct sock *sk)
> /*
> * Compute congestion window to use.
> */
> -static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
> +static inline void bictcp_update(struct sock *sk, struct bictcp *ca, u32 cwnd)
> {
> if (ca->last_cwnd == cwnd &&
> - (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
> + (s32)(tcp_time_stamp(sk) - ca->last_time) <= HZ / 32)
> return;
>
> ca->last_cwnd = cwnd;
> - ca->last_time = tcp_time_stamp;
> + ca->last_time = tcp_time_stamp(sk);
>
> if (ca->epoch_start == 0) /* record the beginning of an epoch */
> - ca->epoch_start = tcp_time_stamp;
> + ca->epoch_start = tcp_time_stamp(sk);
>
> /* start off normal */
> if (cwnd <= low_window) {
> @@ -151,7 +151,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
> if (tp->snd_cwnd <= tp->snd_ssthresh)
> tcp_slow_start(tp);
> else {
> - bictcp_update(ca, tp->snd_cwnd);
> + bictcp_update(sk, ca, tp->snd_cwnd);
> tcp_cong_avoid_ai(tp, ca->cnt);
> }
>
> diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
> index b6ae92a..1441cf1 100644
> --- a/net/ipv4/tcp_cubic.c
> +++ b/net/ipv4/tcp_cubic.c
> @@ -204,7 +204,7 @@ static u32 cubic_root(u64 a)
> /*
> * Compute congestion window to use.
> */
> -static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
> +static inline void bictcp_update(struct sock *sk, struct bictcp *ca, u32 cwnd)
> {
> u32 delta, bic_target, max_cnt;
> u64 offs, t;
> @@ -212,14 +212,14 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
> ca->ack_cnt++; /* count the number of ACKs */
>
> if (ca->last_cwnd == cwnd &&
> - (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
> + (s32)(tcp_time_stamp(sk) - ca->last_time) <= HZ / 32)
> return;
>
> ca->last_cwnd = cwnd;
> - ca->last_time = tcp_time_stamp;
> + ca->last_time = tcp_time_stamp(sk);
>
> if (ca->epoch_start == 0) {
> - ca->epoch_start = tcp_time_stamp; /* record the beginning of an epoch */
> + ca->epoch_start = tcp_time_stamp(sk); /* record the beginning of an epoch */
> ca->ack_cnt = 1; /* start counting */
> ca->tcp_cwnd = cwnd; /* syn with cubic */
>
> @@ -250,7 +250,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
> * if the cwnd < 1 million packets !!!
> */
>
> - t = (s32)(tcp_time_stamp - ca->epoch_start);
> + t = (s32)(tcp_time_stamp(sk) - ca->epoch_start);
> t += msecs_to_jiffies(ca->delay_min >> 3);
> /* change the unit from HZ to bictcp_HZ */
> t <<= BICTCP_HZ;
> @@ -317,7 +317,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
> bictcp_hystart_reset(sk);
> tcp_slow_start(tp);
> } else {
> - bictcp_update(ca, tp->snd_cwnd);
> + bictcp_update(sk, ca, tp->snd_cwnd);
> tcp_cong_avoid_ai(tp, ca->cnt);
> }
>
> @@ -416,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
> return;
>
> /* Discard delay samples right after fast recovery */
> - if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
> + if (ca->epoch_start && (s32)(tcp_time_stamp(sk) - ca->epoch_start) < HZ)
> return;
>
> delay = (rtt_us << 3) / USEC_PER_MSEC;
> diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
> index c1a8175..a826d1f 100644
> --- a/net/ipv4/tcp_htcp.c
> +++ b/net/ipv4/tcp_htcp.c
> @@ -103,7 +103,7 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt
> const struct inet_connection_sock *icsk = inet_csk(sk);
> const struct tcp_sock *tp = tcp_sk(sk);
> struct htcp *ca = inet_csk_ca(sk);
> - u32 now = tcp_time_stamp;
> + u32 now = tcp_time_stamp(sk);
>
> if (icsk->icsk_ca_state == TCP_CA_Open)
> ca->pkts_acked = pkts_acked;
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> index fa94a5a..56ffe0a 100644
> --- a/net/ipv4/tcp_input.c
> +++ b/net/ipv4/tcp_input.c
> @@ -403,7 +403,7 @@ void tcp_init_buffer_space(struct sock *sk)
> tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
>
> tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
> }
>
> /* 5. Recalculate window clamp after socket hit its memory bounds. */
> @@ -492,17 +492,17 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
> tp->rcv_rtt_est.rtt = new_sample;
> }
>
> -static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
> +static inline void tcp_rcv_rtt_measure(struct sock *sk, struct tcp_sock *tp)
> {
> if (tp->rcv_rtt_est.time == 0)
> goto new_measure;
> if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
> return;
> - tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);
> + tcp_rcv_rtt_update(tp, tcp_time_stamp(sk) - tp->rcv_rtt_est.time, 1);
>
> new_measure:
> tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
> - tp->rcv_rtt_est.time = tcp_time_stamp;
> + tp->rcv_rtt_est.time = tcp_time_stamp(sk);
> }
>
> static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
> @@ -512,7 +512,7 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
> if (tp->rx_opt.rcv_tsecr &&
> (TCP_SKB_CB(skb)->end_seq -
> TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
> - tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
> + tcp_rcv_rtt_update(tp, tcp_time_stamp(sk) - tp->rx_opt.rcv_tsecr, 0);
> }
>
> /*
> @@ -528,7 +528,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
> if (tp->rcvq_space.time == 0)
> goto new_measure;
>
> - time = tcp_time_stamp - tp->rcvq_space.time;
> + time = tcp_time_stamp(sk) - tp->rcvq_space.time;
> if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
> return;
>
> @@ -568,7 +568,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
>
> new_measure:
> tp->rcvq_space.seq = tp->copied_seq;
> - tp->rcvq_space.time = tcp_time_stamp;
> + tp->rcvq_space.time = tcp_time_stamp(sk);
> }
>
> /* There is something which you must keep in mind when you analyze the
> @@ -591,9 +591,9 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
>
> tcp_measure_rcv_mss(sk, skb);
>
> - tcp_rcv_rtt_measure(tp);
> + tcp_rcv_rtt_measure(sk, tp);
>
> - now = tcp_time_stamp;
> + now = tcp_time_stamp(sk);
>
> if (!icsk->icsk_ack.ato) {
> /* The _first_ data packet received, initialize
> @@ -1883,7 +1883,7 @@ void tcp_enter_loss(struct sock *sk, int how)
> }
> tp->snd_cwnd = 1;
> tp->snd_cwnd_cnt = 0;
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
>
> tcp_clear_retrans_partial(tp);
>
> @@ -2000,7 +2000,7 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
> static inline int tcp_skb_timedout(const struct sock *sk,
> const struct sk_buff *skb)
> {
> - return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
> + return tcp_time_stamp(sk) - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
> }
>
> static inline int tcp_head_timedout(const struct sock *sk)
> @@ -2289,11 +2289,11 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
> /* CWND moderation, preventing bursts due to too big ACKs
> * in dubious situations.
> */
> -static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
> +static inline void tcp_moderate_cwnd(struct sock *sk, struct tcp_sock *tp)
> {
> tp->snd_cwnd = min(tp->snd_cwnd,
> tcp_packets_in_flight(tp) + tcp_max_burst(tp));
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
> }
>
> /* Nothing was retransmitted or returned timestamp is less
> @@ -2357,7 +2357,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
> } else {
> tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
> }
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
> }
>
> static inline bool tcp_may_undo(const struct tcp_sock *tp)
> @@ -2390,7 +2390,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
> /* Hold old state until something *above* high_seq
> * is ACKed. For Reno it is MUST to prevent false
> * fast retransmits (RFC2582). SACK TCP is safe. */
> - tcp_moderate_cwnd(tp);
> + tcp_moderate_cwnd(sk, tp);
> return true;
> }
> tcp_set_ca_state(sk, TCP_CA_Open);
> @@ -2555,7 +2555,7 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
> if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
> (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
> tp->snd_cwnd = tp->snd_ssthresh;
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
> }
> tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
> }
> @@ -2602,7 +2602,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked)
> if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
> tcp_try_keep_open(sk);
> if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
> - tcp_moderate_cwnd(tp);
> + tcp_moderate_cwnd(sk, tp);
> } else {
> tcp_cwnd_reduction(sk, newly_acked_sacked, 0);
> }
> @@ -2627,7 +2627,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
> tcp_mss_to_mtu(sk, tp->mss_cache) /
> icsk->icsk_mtup.probe_size;
> tp->snd_cwnd_cnt = 0;
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
> tp->snd_ssthresh = tcp_current_ssthresh(sk);
>
> icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
> @@ -2913,7 +2913,7 @@ static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
> */
> struct tcp_sock *tp = tcp_sk(sk);
>
> - tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
> + tcp_valid_rtt_meas(sk, tcp_time_stamp(sk) - tp->rx_opt.rcv_tsecr);
> }
>
> static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
> @@ -2948,7 +2948,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
> {
> const struct inet_connection_sock *icsk = inet_csk(sk);
> icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight);
> - tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
> + tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp(sk);
> }
>
> /* Restart timer after forward progress on connection.
> @@ -2974,7 +2974,7 @@ void tcp_rearm_rto(struct sock *sk)
> icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
> struct sk_buff *skb = tcp_write_queue_head(sk);
> const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
> - s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
> + s32 delta = (s32)(rto_time_stamp - tcp_time_stamp(sk));
> /* delta may not be positive if the socket is locked
> * when the retrans timer fires and is rescheduled.
> */
> @@ -3035,7 +3035,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
> struct tcp_sock *tp = tcp_sk(sk);
> const struct inet_connection_sock *icsk = inet_csk(sk);
> struct sk_buff *skb;
> - u32 now = tcp_time_stamp;
> + u32 now = tcp_time_stamp(sk);
> int fully_acked = true;
> int flag = 0;
> u32 pkts_acked = 0;
> @@ -3436,7 +3436,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
> */
> sk->sk_err_soft = 0;
> icsk->icsk_probes_out = 0;
> - tp->rcv_tstamp = tcp_time_stamp;
> + tp->rcv_tstamp = tcp_time_stamp(sk);
> if (!prior_packets)
> goto no_queue;
>
> @@ -4724,7 +4724,7 @@ void tcp_cwnd_application_limited(struct sock *sk)
> }
> tp->snd_cwnd_used = 0;
> }
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
> }
>
> static bool tcp_should_expand_sndbuf(const struct sock *sk)
> @@ -4772,7 +4772,7 @@ static void tcp_new_space(struct sock *sk)
> sndmem *= 2 * demanded;
> if (sndmem > sk->sk_sndbuf)
> sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
> }
>
> sk->sk_write_space(sk);
> @@ -5352,7 +5352,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
> /* Prevent spurious tcp_cwnd_restart() on first data
> * packet.
> */
> - tp->lsndtime = tcp_time_stamp;
> + tp->lsndtime = tcp_time_stamp(sk);
>
> tcp_init_buffer_space(sk);
>
> @@ -5439,7 +5439,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
>
> if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
> !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
> - tcp_time_stamp)) {
> + tcp_time_stamp(sk))) {
> NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
> goto reset_and_undo;
> }
> @@ -5536,7 +5536,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
> * to stand against the temptation 8) --ANK
> */
> inet_csk_schedule_ack(sk);
> - icsk->icsk_ack.lrcvtime = tcp_time_stamp;
> + icsk->icsk_ack.lrcvtime = tcp_time_stamp(sk);
> tcp_enter_quickack_mode(sk);
> inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
> TCP_DELACK_MAX, TCP_RTO_MAX);
> @@ -5781,7 +5781,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
> /* Prevent spurious tcp_cwnd_restart() on
> * first data packet.
> */
> - tp->lsndtime = tcp_time_stamp;
> + tp->lsndtime = tcp_time_stamp(sk);
>
> tcp_initialize_rcv_mss(sk);
> tcp_fast_path_on(tp);
> diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
> index 0ada1d5..4125556 100644
> --- a/net/ipv4/tcp_ipv4.c
> +++ b/net/ipv4/tcp_ipv4.c
> @@ -442,7 +442,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
> BUG_ON(!skb);
>
> remaining = icsk->icsk_rto - min(icsk->icsk_rto,
> - tcp_time_stamp - TCP_SKB_CB(skb)->when);
> + tcp_time_stamp(sk) - TCP_SKB_CB(skb)->when);
>
> if (remaining) {
> inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
> @@ -786,7 +786,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
> tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
> tcptw->tw_rcv_wnd >>
> (tw->tw_rcv_wscale & TW_WSCALE_MASK),
> - tcp_time_stamp + tcptw->tw_ts_offset,
> + tcp_time_stamp(sk) + tcptw->tw_ts_offset,
> tcptw->tw_ts_recent,
> tw->tw_bound_dev_if,
> tcp_twsk_md5_key(tcptw),
> @@ -806,7 +806,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
> tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
> tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
> tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
> - tcp_time_stamp,
> + tcp_time_stamp(sk),
> req->ts_recent,
> 0,
> tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
> @@ -845,7 +845,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
> ireq->opt);
> err = net_xmit_eval(err);
> if (!tcp_rsk(req)->snt_synack && !err)
> - tcp_rsk(req)->snt_synack = tcp_time_stamp;
> + tcp_rsk(req)->snt_synack = tcp_time_stamp(sk);
> }
>
> return err;
> @@ -1376,7 +1376,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
> ireq->ir_rmt_addr, ireq->opt);
> err = net_xmit_eval(err);
> if (!err)
> - tcp_rsk(req)->snt_synack = tcp_time_stamp;
> + tcp_rsk(req)->snt_synack = tcp_time_stamp(sk);
> /* XXX (TFO) - is it ok to ignore error and continue? */
>
> spin_lock(&queue->fastopenq->lock);
> @@ -1596,7 +1596,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
> if (err || want_cookie)
> goto drop_and_free;
>
> - tcp_rsk(req)->snt_synack = tcp_time_stamp;
> + tcp_rsk(req)->snt_synack = tcp_time_stamp(sk);
> tcp_rsk(req)->listener = NULL;
> /* Add the request_sock to the SYN table */
> inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
> diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
> index 72f7218..7d5da92 100644
> --- a/net/ipv4/tcp_lp.c
> +++ b/net/ipv4/tcp_lp.c
> @@ -269,11 +269,11 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
> tcp_lp_rtt_sample(sk, rtt_us);
>
> /* calc inference */
> - if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
> - lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr);
> + if (tcp_time_stamp(sk) > tp->rx_opt.rcv_tsecr)
> + lp->inference = 3 * (tcp_time_stamp(sk) - tp->rx_opt.rcv_tsecr);
>
> /* test if within inference */
> - if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
> + if (lp->last_drop && (tcp_time_stamp(sk) - lp->last_drop < lp->inference))
> lp->flag |= LP_WITHIN_INF;
> else
> lp->flag &= ~LP_WITHIN_INF;
> @@ -310,7 +310,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
> tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U);
>
> /* record this drop time */
> - lp->last_drop = tcp_time_stamp;
> + lp->last_drop = tcp_time_stamp(sk);
> }
>
> static struct tcp_congestion_ops tcp_lp __read_mostly = {
> diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
> index 37136f1..a567788 100644
> --- a/net/ipv4/tcp_metrics.c
> +++ b/net/ipv4/tcp_metrics.c
> @@ -534,7 +534,7 @@ void tcp_init_metrics(struct sock *sk)
> tp->snd_cwnd = 1;
> else
> tp->snd_cwnd = tcp_init_cwnd(tp, dst);
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
> }
>
> bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
> diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
> index a217305..38304fb 100644
> --- a/net/ipv4/tcp_output.c
> +++ b/net/ipv4/tcp_output.c
> @@ -138,7 +138,7 @@ static __u16 tcp_advertise_mss(struct sock *sk)
> static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
> {
> struct tcp_sock *tp = tcp_sk(sk);
> - s32 delta = tcp_time_stamp - tp->lsndtime;
> + s32 delta = tcp_time_stamp(sk) - tp->lsndtime;
> u32 restart_cwnd = tcp_init_cwnd(tp, dst);
> u32 cwnd = tp->snd_cwnd;
>
> @@ -150,7 +150,7 @@ static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
> while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
> cwnd >>= 1;
> tp->snd_cwnd = max(cwnd, restart_cwnd);
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
> tp->snd_cwnd_used = 0;
> }
>
> @@ -159,7 +159,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
> struct sock *sk)
> {
> struct inet_connection_sock *icsk = inet_csk(sk);
> - const u32 now = tcp_time_stamp;
> + const u32 now = tcp_time_stamp(sk);
> const struct dst_entry *dst = __sk_dst_get(sk);
>
> if (sysctl_tcp_slow_start_after_idle &&
> @@ -1354,14 +1354,14 @@ static void tcp_cwnd_validate(struct sock *sk)
> if (tp->packets_out >= tp->snd_cwnd) {
> /* Network is feed fully. */
> tp->snd_cwnd_used = 0;
> - tp->snd_cwnd_stamp = tcp_time_stamp;
> + tp->snd_cwnd_stamp = tcp_time_stamp(sk);
> } else {
> /* Network starves. */
> if (tp->packets_out > tp->snd_cwnd_used)
> tp->snd_cwnd_used = tp->packets_out;
>
> if (sysctl_tcp_slow_start_after_idle &&
> - (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
> + (s32)(tcp_time_stamp(sk) - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
> tcp_cwnd_application_limited(sk);
> }
> }
> @@ -1774,7 +1774,7 @@ static int tcp_mtu_probe(struct sock *sk)
>
> /* We're ready to send. If this fails, the probe will
> * be resegmented into mss-sized pieces by tcp_write_xmit(). */
> - TCP_SKB_CB(nskb)->when = tcp_time_stamp;
> + TCP_SKB_CB(nskb)->when = tcp_time_stamp(sk);
> if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
> /* Decrement cwnd here because we are sending
> * effectively two packets. */
> @@ -1886,7 +1886,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
> unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
> break;
>
> - TCP_SKB_CB(skb)->when = tcp_time_stamp;
> + TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
>
> if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
> break;
> @@ -1962,10 +1962,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
> timeout = max_t(u32, timeout, msecs_to_jiffies(10));
>
> /* If RTO is shorter, just schedule TLP in its place. */
> - tlp_time_stamp = tcp_time_stamp + timeout;
> + tlp_time_stamp = tcp_time_stamp(sk) + timeout;
> rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
> if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
> - s32 delta = rto_time_stamp - tcp_time_stamp;
> + s32 delta = rto_time_stamp - tcp_time_stamp(sk);
> if (delta > 0)
> timeout = delta;
> }
> @@ -2363,7 +2363,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
> /* Make a copy, if the first transmission SKB clone we made
> * is still in somebody's hands, else make a clone.
> */
> - TCP_SKB_CB(skb)->when = tcp_time_stamp;
> + TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
>
> /* make sure skb->data is aligned on arches that require it
> * and check if ack-trimming & collapsing extended the headroom
> @@ -2608,7 +2608,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
> tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
> TCPHDR_ACK | TCPHDR_RST);
> /* Send it off. */
> - TCP_SKB_CB(skb)->when = tcp_time_stamp;
> + TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
> if (tcp_transmit_skb(sk, skb, 0, priority))
> NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
>
> @@ -2647,7 +2647,7 @@ int tcp_send_synack(struct sock *sk)
> TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
> TCP_ECN_send_synack(tcp_sk(sk), skb);
> }
> - TCP_SKB_CB(skb)->when = tcp_time_stamp;
> + TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
> return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
> }
>
> @@ -2712,10 +2712,10 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
> memset(&opts, 0, sizeof(opts));
> #ifdef CONFIG_SYN_COOKIES
> if (unlikely(req->cookie_ts))
> - TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
> + TCP_SKB_CB(skb)->when = cookie_init_timestamp(sk, req);
> else
> #endif
> - TCP_SKB_CB(skb)->when = tcp_time_stamp;
> + TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
> tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
> foc) + sizeof(*th);
>
> @@ -2818,7 +2818,7 @@ void tcp_connect_init(struct sock *sk)
> if (likely(!tp->repair))
> tp->rcv_nxt = 0;
> else
> - tp->rcv_tstamp = tcp_time_stamp;
> + tp->rcv_tstamp = tcp_time_stamp(sk);
> tp->rcv_wup = tp->rcv_nxt;
> tp->copied_seq = tp->rcv_nxt;
>
> @@ -2952,7 +2952,7 @@ int tcp_connect(struct sock *sk)
> skb_reserve(buff, MAX_TCP_HEADER);
>
> tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
> - tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
> + tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp(sk);
> tcp_connect_queue_skb(sk, buff);
> TCP_ECN_send_syn(sk, buff);
>
> @@ -3059,7 +3059,7 @@ void tcp_send_ack(struct sock *sk)
> tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
>
> /* Send it off, this clears delayed acks for us. */
> - TCP_SKB_CB(buff)->when = tcp_time_stamp;
> + TCP_SKB_CB(buff)->when = tcp_time_stamp(sk);
> tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
> }
>
> @@ -3091,7 +3091,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
> * send it.
> */
> tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
> - TCP_SKB_CB(skb)->when = tcp_time_stamp;
> + TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
> return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
> }
>
> @@ -3135,7 +3135,7 @@ int tcp_write_wakeup(struct sock *sk)
> tcp_set_skb_tso_segs(sk, skb, mss);
>
> TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
> - TCP_SKB_CB(skb)->when = tcp_time_stamp;
> + TCP_SKB_CB(skb)->when = tcp_time_stamp(sk);
> err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
> if (!err)
> tcp_event_new_data_sent(sk, skb);
> diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
> index af07b5b..682ac62 100644
> --- a/net/ipv4/tcp_timer.c
> +++ b/net/ipv4/tcp_timer.c
> @@ -59,7 +59,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
>
> /* If peer does not open window for long time, or did not transmit
> * anything for long time, penalize it. */
> - if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
> + if ((s32)(tcp_time_stamp(sk) - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
> shift++;
>
> /* If some dubious ICMP arrived, penalize even more. */
> @@ -69,7 +69,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
> if (tcp_check_oom(sk, shift)) {
> /* Catch exceptional cases, when connection requires reset.
> * 1. Last segment was sent recently. */
> - if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
> + if ((s32)(tcp_time_stamp(sk) - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
> /* 2. Window is closed. */
> (!tp->snd_wnd && !tp->packets_out))
> do_reset = 1;
> @@ -149,7 +149,7 @@ static bool retransmits_timed_out(struct sock *sk,
> timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
> (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
> }
> - return (tcp_time_stamp - start_ts) >= timeout;
> + return (tcp_time_stamp(sk) - start_ts) >= timeout;
> }
>
> /* A write timeout has occurred. Process the after effects. */
> @@ -380,7 +380,7 @@ void tcp_retransmit_timer(struct sock *sk)
> tp->snd_una, tp->snd_nxt);
> }
> #endif
> - if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
> + if (tcp_time_stamp(sk) - tp->rcv_tstamp > TCP_RTO_MAX) {
> tcp_write_err(sk);
> goto out;
> }
> diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
> index 76a1e23..b3ef319 100644
> --- a/net/ipv4/tcp_westwood.c
> +++ b/net/ipv4/tcp_westwood.c
> @@ -69,7 +69,7 @@ static void tcp_westwood_init(struct sock *sk)
> w->cumul_ack = 0;
> w->reset_rtt_min = 1;
> w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
> - w->rtt_win_sx = tcp_time_stamp;
> + w->rtt_win_sx = tcp_time_stamp(sk);
> w->snd_una = tcp_sk(sk)->snd_una;
> w->first_ack = 1;
> }
> @@ -116,7 +116,7 @@ static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
> static void westwood_update_window(struct sock *sk)
> {
> struct westwood *w = inet_csk_ca(sk);
> - s32 delta = tcp_time_stamp - w->rtt_win_sx;
> + s32 delta = tcp_time_stamp(sk) - w->rtt_win_sx;
>
> /* Initialize w->snd_una with the first acked sequence number in order
> * to fix mismatch between tp->snd_una and w->snd_una for the first
> @@ -140,7 +140,7 @@ static void westwood_update_window(struct sock *sk)
> westwood_filter(w, delta);
>
> w->bk = 0;
> - w->rtt_win_sx = tcp_time_stamp;
> + w->rtt_win_sx = tcp_time_stamp(sk);
> }
> }
>
> diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
> index f78f41a..1141626 100644
> --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
> +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
> @@ -295,7 +295,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
>
> opts.options &= info->options;
> if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
> - synproxy_init_timestamp_cookie(info, &opts);
> + synproxy_init_timestamp_cookie(skb->sk, info, &opts);
> else
> opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
> XT_SYNPROXY_OPT_SACK_PERM |
> diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
> index 2727bb3..bb72460 100644
> --- a/net/ipv6/tcp_ipv6.c
> +++ b/net/ipv6/tcp_ipv6.c
> @@ -894,7 +894,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
>
> tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
> tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
> - tcp_time_stamp + tcptw->tw_ts_offset,
> + tcp_time_stamp(sk) + tcptw->tw_ts_offset,
> tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
> tw->tw_tclass);
>
> @@ -905,7 +905,7 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
> struct request_sock *req)
> {
> tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
> - req->rcv_wnd, tcp_time_stamp, req->ts_recent,
> + req->rcv_wnd, tcp_time_stamp(sk), req->ts_recent,
> tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
> }
>
> @@ -1068,7 +1068,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
> want_cookie)
> goto drop_and_free;
>
> - tcp_rsk(req)->snt_synack = tcp_time_stamp;
> + tcp_rsk(req)->snt_synack = tcp_time_stamp(sk);
> tcp_rsk(req)->listener = NULL;
> inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
> return 0;
> diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
> index 9858e3e..b3a1960 100644
> --- a/net/netfilter/nf_synproxy_core.c
> +++ b/net/netfilter/nf_synproxy_core.c
> @@ -145,11 +145,12 @@ synproxy_build_options(struct tcphdr *th, const struct synproxy_options *opts)
> }
> EXPORT_SYMBOL_GPL(synproxy_build_options);
>
> -void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
> +void synproxy_init_timestamp_cookie(struct sock *sk,
> + const struct xt_synproxy_info *info,
> struct synproxy_options *opts)
> {
> opts->tsecr = opts->tsval;
> - opts->tsval = tcp_time_stamp & ~0x3f;
> + opts->tsval = tcp_time_stamp(sk) & ~0x3f;
>
> if (opts->options & XT_SYNPROXY_OPT_WSCALE) {
> opts->tsval |= opts->wscale;
>
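One thing worth double-checking: in the SYNPROXY paths the skb may
have no local socket attached (skb->sk is typically NULL there), in
which case sock_net(sk) in the new helper would oops. Something
defensive along these lines may be needed (illustration only; a NULL
socket falls back to the host clock, i.e. ve0 with jiffies_fixup == 0):

	/* Hypothetical NULL-safe wrapper, not part of this patch. */
	static inline u32 tcp_time_stamp_safe(const struct sock *sk)
	{
		return sk ? tcp_time_stamp(sk) : (u32)jiffies;
	}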