Lines Matching +full:tp +full:- +full:sensitive +full:- +full:adjust

1 // SPDX-License-Identifier: GPL-2.0-only
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Florian La Roche, <flla@stud.uni-sb.de>
33 * Cacophonix Gaul : draft-minshall-nagle-01
53 void tcp_mstamp_refresh(struct tcp_sock *tp) in tcp_mstamp_refresh() argument
57 tp->tcp_clock_cache = val; in tcp_mstamp_refresh()
58 tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC); in tcp_mstamp_refresh()
68 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent() local
69 unsigned int prior_packets = tp->packets_out; in tcp_event_new_data_sent()
71 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); in tcp_event_new_data_sent()
73 __skb_unlink(skb, &sk->sk_write_queue); in tcp_event_new_data_sent()
74 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); in tcp_event_new_data_sent()
76 if (tp->highest_sack == NULL) in tcp_event_new_data_sent()
77 tp->highest_sack = skb; in tcp_event_new_data_sent()
79 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
80 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) in tcp_event_new_data_sent()
91 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
97 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq() local
99 if (!before(tcp_wnd_end(tp), tp->snd_nxt) || in tcp_acceptable_seq()
100 (tp->rx_opt.wscale_ok && in tcp_acceptable_seq()
101 ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) in tcp_acceptable_seq()
102 return tp->snd_nxt; in tcp_acceptable_seq()
104 return tcp_wnd_end(tp); in tcp_acceptable_seq()
108 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
111 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
123 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss() local
125 int mss = tp->advmss; in tcp_advertise_mss()
132 tp->advmss = mss; in tcp_advertise_mss()
144 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart() local
145 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_restart()
146 u32 cwnd = tcp_snd_cwnd(tp); in tcp_cwnd_restart()
150 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
153 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) in tcp_cwnd_restart()
155 tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd)); in tcp_cwnd_restart()
156 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_restart()
157 tp->snd_cwnd_used = 0; in tcp_cwnd_restart()
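
A minimal user-space sketch of the restart arithmetic in tcp_cwnd_restart() above, assuming the idle time and the RTO are expressed in the same units; the names are illustrative, not the kernel's:

#include <stdint.h>

/* Halve cwnd once per full RTO spent idle, but never fall below the
 * restart (initial) window -- the same shape as the while loop above. */
static uint32_t cwnd_after_idle(uint32_t cwnd, uint32_t restart_cwnd,
                                int64_t idle, int64_t rto)
{
        while ((idle -= rto) > 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}
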
161 static void tcp_event_data_sent(struct tcp_sock *tp, in tcp_event_data_sent() argument
167 if (tcp_packets_in_flight(tp) == 0) in tcp_event_data_sent()
170 tp->lsndtime = now; in tcp_event_data_sent()
175 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) in tcp_event_data_sent()
182 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_ack_sent() local
184 if (unlikely(tp->compressed_ack)) { in tcp_event_ack_sent()
186 tp->compressed_ack); in tcp_event_ack_sent()
187 tp->compressed_ack = 0; in tcp_event_ack_sent()
188 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) in tcp_event_ack_sent()
192 if (unlikely(rcv_nxt != tp->rcv_nxt)) in tcp_event_ack_sent()
200 * will be offered. Store the results in the tp structure.
225 * we will truncate our initial window offering to 32K-1 in tcp_select_initial_window()
230 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)) in tcp_select_initial_window()
241 space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); in tcp_select_initial_window()
244 *rcv_wscale = clamp_t(int, ilog2(space) - 15, in tcp_select_initial_window()
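
The clamp_t() line above derives the receive window scale from the buffer space: the scale is the smallest shift that lets the buffer fit in the 16-bit window field, capped at the protocol maximum of 14. A hedged sketch using equivalent arithmetic rather than the kernel's ilog2() form:

#include <stdint.h>

static int rcv_wscale_for(uint32_t space)
{
        int scale = 0;

        /* smallest scale such that (space >> scale) fits in 16 bits,
         * never exceeding TCP_MAX_WSCALE (14) */
        while ((space >> scale) > 65535U && scale < 14)
                scale++;
        return scale;
}
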
255 * value can be stuffed directly into th->window for an outgoing
260 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window() local
262 u32 old_win = tp->rcv_wnd; in tcp_select_window()
268 if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) { in tcp_select_window()
269 tp->pred_flags = 0; in tcp_select_window()
270 tp->rcv_wnd = 0; in tcp_select_window()
271 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
275 cur_win = tcp_receive_window(tp); in tcp_select_window()
281 * window in time. --DaveM in tcp_select_window()
285 if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) { in tcp_select_window()
289 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); in tcp_select_window()
293 tp->rcv_wnd = new_win; in tcp_select_window()
294 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
299 if (!tp->rx_opt.rcv_wscale && in tcp_select_window()
300 READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows)) in tcp_select_window()
303 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); in tcp_select_window()
306 new_win >>= tp->rx_opt.rcv_wscale; in tcp_select_window()
310 tp->pred_flags = 0; in tcp_select_window()
320 /* Packet ECN state for a SYN-ACK */
323 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack() local
325 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; in tcp_ecn_send_synack()
326 if (!(tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_send_synack()
327 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; in tcp_ecn_send_synack()
336 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn() local
338 bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 || in tcp_ecn_send_syn()
348 tp->ecn_flags = 0; in tcp_ecn_send_syn()
351 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; in tcp_ecn_send_syn()
352 tp->ecn_flags = TCP_ECN_OK; in tcp_ecn_send_syn()
360 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)) in tcp_ecn_clear_syn()
361 /* tp->ecn_flags are cleared at a later point in time when in tcp_ecn_clear_syn()
364 TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR); in tcp_ecn_clear_syn()
370 if (inet_rsk(req)->ecn_ok) in tcp_ecn_make_synack()
371 th->ece = 1; in tcp_ecn_make_synack()
380 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send() local
382 if (tp->ecn_flags & TCP_ECN_OK) { in tcp_ecn_send()
383 /* Not-retransmitted data segment: set ECT and inject CWR. */ in tcp_ecn_send()
384 if (skb->len != tcp_header_len && in tcp_ecn_send()
385 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in tcp_ecn_send()
387 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { in tcp_ecn_send()
388 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_send()
389 th->cwr = 1; in tcp_ecn_send()
390 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in tcp_ecn_send()
396 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) in tcp_ecn_send()
397 th->ece = 1; in tcp_ecn_send()
401 /* Constructs common control bits of non-data skb. If SYN/FIN is present,
406 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_init_nondata_skb()
408 TCP_SKB_CB(skb)->tcp_flags = flags; in tcp_init_nondata_skb()
412 TCP_SKB_CB(skb)->seq = seq; in tcp_init_nondata_skb()
415 TCP_SKB_CB(skb)->end_seq = seq; in tcp_init_nondata_skb()
418 static inline bool tcp_urg_mode(const struct tcp_sock *tp) in tcp_urg_mode() argument
420 return tp->snd_una != tp->snd_up; in tcp_urg_mode()
460 struct tcp_sock *tp, in mptcp_options_write() argument
464 if (unlikely(OPTION_MPTCP & opts->options)) in mptcp_options_write()
465 mptcp_write_options(th, ptr, tp, &opts->mptcp); in mptcp_options_write()
511 * Thus, "req" is passed here and the cgroup-bpf-progs in bpf_skops_hdr_opt_len()
516 * consistent between fastopen and non-fastopen on in bpf_skops_hdr_opt_len()
539 opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len; in bpf_skops_hdr_opt_len()
541 opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3; in bpf_skops_hdr_opt_len()
543 *remaining -= opts->bpf_opt_len; in bpf_skops_hdr_opt_len()
552 u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len; in bpf_skops_write_hdr_opt()
575 first_opt_off = tcp_hdrlen(skb) - max_opt_len; in bpf_skops_write_hdr_opt()
583 nr_written = max_opt_len - sock_ops.remaining_opt_len; in bpf_skops_write_hdr_opt()
586 memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP, in bpf_skops_write_hdr_opt()
587 max_opt_len - nr_written); in bpf_skops_write_hdr_opt()
610 * Beware: Something in the Internet is very sensitive to the ordering of
612 * Luckily we can at least blame others for their non-compliance but from
613 * inter-operability perspective it seems that we're somewhat stuck with
621 static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp, in tcp_options_write() argument
625 u16 options = opts->options; /* mungable copy */ in tcp_options_write()
631 opts->hash_location = (__u8 *)ptr; in tcp_options_write()
635 if (unlikely(opts->mss)) { in tcp_options_write()
638 opts->mss); in tcp_options_write()
654 *ptr++ = htonl(opts->tsval); in tcp_options_write()
655 *ptr++ = htonl(opts->tsecr); in tcp_options_write()
669 opts->ws); in tcp_options_write()
672 if (unlikely(opts->num_sack_blocks)) { in tcp_options_write()
673 struct tcp_sack_block *sp = tp->rx_opt.dsack ? in tcp_options_write()
674 tp->duplicate_sack : tp->selective_acks; in tcp_options_write()
680 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * in tcp_options_write()
683 for (this_sack = 0; this_sack < opts->num_sack_blocks; in tcp_options_write()
689 tp->rx_opt.dsack = 0; in tcp_options_write()
693 struct tcp_fastopen_cookie *foc = opts->fastopen_cookie; in tcp_options_write()
697 if (foc->exp) { in tcp_options_write()
698 len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; in tcp_options_write()
703 len = TCPOLEN_FASTOPEN_BASE + foc->len; in tcp_options_write()
708 memcpy(p, foc->val, foc->len); in tcp_options_write()
710 p[foc->len] = TCPOPT_NOP; in tcp_options_write()
711 p[foc->len + 1] = TCPOPT_NOP; in tcp_options_write()
718 mptcp_options_write(th, ptr, tp, opts); in tcp_options_write()
721 static void smc_set_option(const struct tcp_sock *tp, in smc_set_option() argument
727 if (tp->syn_smc) { in smc_set_option()
729 opts->options |= OPTION_SMC; in smc_set_option()
730 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; in smc_set_option()
737 static void smc_set_option_cond(const struct tcp_sock *tp, in smc_set_option_cond() argument
744 if (tp->syn_smc && ireq->smc_ok) { in smc_set_option_cond()
746 opts->options |= OPTION_SMC; in smc_set_option_cond()
747 *remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED; in smc_set_option_cond()
761 if (mptcp_synack_options(req, &size, &opts->mptcp)) { in mptcp_set_option_cond()
763 opts->options |= OPTION_MPTCP; in mptcp_set_option_cond()
764 *remaining -= size; in mptcp_set_option_cond()
777 struct tcp_sock *tp = tcp_sk(sk); in tcp_syn_options() local
779 struct tcp_fastopen_request *fastopen = tp->fastopen_req; in tcp_syn_options()
784 rcu_access_pointer(tp->md5sig_info)) { in tcp_syn_options()
785 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_syn_options()
787 opts->options |= OPTION_MD5; in tcp_syn_options()
788 remaining -= TCPOLEN_MD5SIG_ALIGNED; in tcp_syn_options()
795 * advertised. But we subtract them from tp->mss_cache so that in tcp_syn_options()
802 opts->mss = tcp_advertise_mss(sk); in tcp_syn_options()
803 remaining -= TCPOLEN_MSS_ALIGNED; in tcp_syn_options()
805 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) { in tcp_syn_options()
806 opts->options |= OPTION_TS; in tcp_syn_options()
807 opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; in tcp_syn_options()
808 opts->tsecr = tp->rx_opt.ts_recent; in tcp_syn_options()
809 remaining -= TCPOLEN_TSTAMP_ALIGNED; in tcp_syn_options()
811 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) { in tcp_syn_options()
812 opts->ws = tp->rx_opt.rcv_wscale; in tcp_syn_options()
813 opts->options |= OPTION_WSCALE; in tcp_syn_options()
814 remaining -= TCPOLEN_WSCALE_ALIGNED; in tcp_syn_options()
816 if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) { in tcp_syn_options()
817 opts->options |= OPTION_SACK_ADVERTISE; in tcp_syn_options()
818 if (unlikely(!(OPTION_TS & opts->options))) in tcp_syn_options()
819 remaining -= TCPOLEN_SACKPERM_ALIGNED; in tcp_syn_options()
822 if (fastopen && fastopen->cookie.len >= 0) { in tcp_syn_options()
823 u32 need = fastopen->cookie.len; in tcp_syn_options()
825 need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE : in tcp_syn_options()
829 opts->options |= OPTION_FAST_OPEN_COOKIE; in tcp_syn_options()
830 opts->fastopen_cookie = &fastopen->cookie; in tcp_syn_options()
831 remaining -= need; in tcp_syn_options()
832 tp->syn_fastopen = 1; in tcp_syn_options()
833 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; in tcp_syn_options()
837 smc_set_option(tp, opts, &remaining); in tcp_syn_options()
842 if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) { in tcp_syn_options()
844 opts->options |= OPTION_MPTCP; in tcp_syn_options()
845 remaining -= size; in tcp_syn_options()
852 return MAX_TCP_OPTION_SPACE - remaining; in tcp_syn_options()
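
tcp_syn_options() above returns how many option bytes the SYN will carry: each enabled option reserves its 4-byte-aligned length out of the 40-byte option budget, and the result is the budget minus what remains. A self-contained sketch of that bookkeeping, covering only the common options and using the well-known aligned sizes (MSS 4, timestamps 12, window scale 4, SACK-permitted 4):

#define MAX_TCP_OPTION_SPACE 40 /* 60-byte max TCP header minus 20-byte base */

static unsigned int syn_option_bytes(int ts, int wscale, int sackperm)
{
        unsigned int remaining = MAX_TCP_OPTION_SPACE;

        remaining -= 4;          /* MSS option, already aligned */
        if (ts)
                remaining -= 12; /* timestamps, padded to 12 bytes */
        if (wscale)
                remaining -= 4;  /* window scale, padded to 4 bytes */
        if (sackperm && !ts)
                remaining -= 4;  /* SACK-permitted costs space only when
                                  * timestamps are absent */
        return MAX_TCP_OPTION_SPACE - remaining;
}
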
855 /* Set up TCP options for SYN-ACKs. */
870 opts->options |= OPTION_MD5; in tcp_synack_options()
871 remaining -= TCPOLEN_MD5SIG_ALIGNED; in tcp_synack_options()
879 ireq->tstamp_ok &= !ireq->sack_ok; in tcp_synack_options()
884 opts->mss = mss; in tcp_synack_options()
885 remaining -= TCPOLEN_MSS_ALIGNED; in tcp_synack_options()
887 if (likely(ireq->wscale_ok)) { in tcp_synack_options()
888 opts->ws = ireq->rcv_wscale; in tcp_synack_options()
889 opts->options |= OPTION_WSCALE; in tcp_synack_options()
890 remaining -= TCPOLEN_WSCALE_ALIGNED; in tcp_synack_options()
892 if (likely(ireq->tstamp_ok)) { in tcp_synack_options()
893 opts->options |= OPTION_TS; in tcp_synack_options()
894 opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off; in tcp_synack_options()
895 opts->tsecr = READ_ONCE(req->ts_recent); in tcp_synack_options()
896 remaining -= TCPOLEN_TSTAMP_ALIGNED; in tcp_synack_options()
898 if (likely(ireq->sack_ok)) { in tcp_synack_options()
899 opts->options |= OPTION_SACK_ADVERTISE; in tcp_synack_options()
900 if (unlikely(!ireq->tstamp_ok)) in tcp_synack_options()
901 remaining -= TCPOLEN_SACKPERM_ALIGNED; in tcp_synack_options()
903 if (foc != NULL && foc->len >= 0) { in tcp_synack_options()
904 u32 need = foc->len; in tcp_synack_options()
906 need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE : in tcp_synack_options()
910 opts->options |= OPTION_FAST_OPEN_COOKIE; in tcp_synack_options()
911 opts->fastopen_cookie = foc; in tcp_synack_options()
912 remaining -= need; in tcp_synack_options()
923 return MAX_TCP_OPTION_SPACE - remaining; in tcp_synack_options()
933 struct tcp_sock *tp = tcp_sk(sk); in tcp_established_options() local
937 opts->options = 0; in tcp_established_options()
942 rcu_access_pointer(tp->md5sig_info)) { in tcp_established_options()
943 *md5 = tp->af_specific->md5_lookup(sk, sk); in tcp_established_options()
945 opts->options |= OPTION_MD5; in tcp_established_options()
951 if (likely(tp->rx_opt.tstamp_ok)) { in tcp_established_options()
952 opts->options |= OPTION_TS; in tcp_established_options()
953 opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0; in tcp_established_options()
954 opts->tsecr = tp->rx_opt.ts_recent; in tcp_established_options()
965 unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
969 &opts->mptcp)) { in tcp_established_options()
970 opts->options |= OPTION_MPTCP; in tcp_established_options()
975 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; in tcp_established_options()
977 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
982 opts->num_sack_blocks = in tcp_established_options()
984 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / in tcp_established_options()
988 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; in tcp_established_options()
991 if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp, in tcp_established_options()
993 unsigned int remaining = MAX_TCP_OPTION_SPACE - size; in tcp_established_options()
997 size = MAX_TCP_OPTION_SPACE - remaining; in tcp_established_options()
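
The num_sack_blocks computation a few lines up splits the leftover option space among SACK blocks. A small sketch of that division, assuming the aligned SACK base of 4 bytes and 8 bytes per block:

static unsigned int sack_blocks_that_fit(unsigned int remaining,
                                         unsigned int eff_sacks)
{
        unsigned int fit;

        if (remaining < 4 + 8)   /* not even room for one block */
                return 0;
        fit = (remaining - 4) / 8;
        return eff_sacks < fit ? eff_sacks : fit;
}
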
1012 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
1026 if ((1 << sk->sk_state) & in tcp_tsq_write()
1029 struct tcp_sock *tp = tcp_sk(sk); in tcp_tsq_write() local
1031 if (tp->lost_out > tp->retrans_out && in tcp_tsq_write()
1032 tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) { in tcp_tsq_write()
1033 tcp_mstamp_refresh(tp); in tcp_tsq_write()
1037 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, in tcp_tsq_write()
1047 else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) in tcp_tsq_handler()
1054 * transferring tsq->head because tcp_wfree() might
1063 struct tcp_sock *tp; in tcp_tasklet_func() local
1067 list_splice_init(&tsq->head, &list); in tcp_tasklet_func()
1071 tp = list_entry(q, struct tcp_sock, tsq_node); in tcp_tasklet_func()
1072 list_del(&tp->tsq_node); in tcp_tasklet_func()
1074 sk = (struct sock *)tp; in tcp_tasklet_func()
1076 clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags); in tcp_tasklet_func()
1088 * tcp_release_cb - tcp release_sock() callback
1096 unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags); in tcp_release_cb()
1104 } while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags)); in tcp_release_cb()
1114 * 3) socket owned by us (sk->sk_lock.owned == 1) in tcp_release_cb()
1130 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); in tcp_release_cb()
1143 INIT_LIST_HEAD(&tsq->head); in tcp_tasklet_init()
1144 tasklet_setup(&tsq->tasklet, tcp_tasklet_func); in tcp_tasklet_init()
1155 struct sock *sk = skb->sk; in tcp_wfree()
1156 struct tcp_sock *tp = tcp_sk(sk); in tcp_wfree() local
1164 WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc)); in tcp_wfree()
1169 * - less callbacks to tcp_write_xmit(), reducing stress (batches) in tcp_wfree()
1170 * - chance for incoming ACK (processed by another cpu maybe) in tcp_wfree()
1171 * to migrate this flow (skb->ooo_okay will be eventually set) in tcp_wfree()
1173 if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current) in tcp_wfree()
1176 oval = smp_load_acquire(&sk->sk_tsq_flags); in tcp_wfree()
1182 } while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval)); in tcp_wfree()
1187 empty = list_empty(&tsq->head); in tcp_wfree()
1188 list_add(&tp->tsq_node, &tsq->head); in tcp_wfree()
1190 tasklet_schedule(&tsq->tasklet); in tcp_wfree()
1202 struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer); in tcp_pace_kick() local
1203 struct sock *sk = (struct sock *)tp; in tcp_pace_kick()
1214 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_skb_after_send() local
1216 if (sk->sk_pacing_status != SK_PACING_NONE) { in tcp_update_skb_after_send()
1217 unsigned long rate = sk->sk_pacing_rate; in tcp_update_skb_after_send()
1220 * Note that tp->data_segs_out overflows after 2^32 packets, in tcp_update_skb_after_send()
1223 if (rate != ~0UL && rate && tp->data_segs_out >= 10) { in tcp_update_skb_after_send()
1224 u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate); in tcp_update_skb_after_send()
1225 u64 credit = tp->tcp_wstamp_ns - prior_wstamp; in tcp_update_skb_after_send()
1228 len_ns -= min_t(u64, len_ns / 2, credit); in tcp_update_skb_after_send()
1229 tp->tcp_wstamp_ns += len_ns; in tcp_update_skb_after_send()
1232 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_update_skb_after_send()
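
A hedged sketch of the pacing arithmetic above: the next departure timestamp advances by the packet's serialization time at the pacing rate, minus up to half of that time as credit earned while the sender sat idle (the gap between the previous and current stamps). Names are illustrative stand-ins for the fields used above:

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t next_wstamp_ns(uint64_t wstamp_ns, uint64_t prior_wstamp,
                               uint32_t skb_len, uint64_t rate_bytes_per_sec)
{
        uint64_t len_ns = (uint64_t)skb_len * NSEC_PER_SEC / rate_bytes_per_sec;
        uint64_t credit = wstamp_ns - prior_wstamp;
        uint64_t forgive = len_ns / 2 < credit ? len_ns / 2 : credit;

        return wstamp_ns + (len_ns - forgive);
}
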
1255 struct tcp_sock *tp; in __tcp_transmit_skb() local
1266 tp = tcp_sk(sk); in __tcp_transmit_skb()
1267 prior_wstamp = tp->tcp_wstamp_ns; in __tcp_transmit_skb()
1268 tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); in __tcp_transmit_skb()
1269 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); in __tcp_transmit_skb()
1281 return -ENOBUFS; in __tcp_transmit_skb()
1282 /* retransmit skbs might have a non zero value in skb->dev in __tcp_transmit_skb()
1283 * because skb->dev is aliased with skb->rbnode.rb_left in __tcp_transmit_skb()
1285 skb->dev = NULL; in __tcp_transmit_skb()
1292 if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) { in __tcp_transmit_skb()
1301 * and in this case it is better to delay the delivery of 1-MSS in __tcp_transmit_skb()
1306 tcb->tcp_flags |= TCPHDR_PSH; in __tcp_transmit_skb()
1310 /* We set skb->ooo_okay to one if this packet can select in __tcp_transmit_skb()
1314 * if XPS is enabled, or sk->sk_txhash otherwise. in __tcp_transmit_skb()
1323 skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) || in __tcp_transmit_skb()
1331 skb->pfmemalloc = 0; in __tcp_transmit_skb()
1337 skb->sk = sk; in __tcp_transmit_skb()
1338 skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree; in __tcp_transmit_skb()
1339 refcount_add(skb->truesize, &sk->sk_wmem_alloc); in __tcp_transmit_skb()
1341 skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm)); in __tcp_transmit_skb()
1344 th = (struct tcphdr *)skb->data; in __tcp_transmit_skb()
1345 th->source = inet->inet_sport; in __tcp_transmit_skb()
1346 th->dest = inet->inet_dport; in __tcp_transmit_skb()
1347 th->seq = htonl(tcb->seq); in __tcp_transmit_skb()
1348 th->ack_seq = htonl(rcv_nxt); in __tcp_transmit_skb()
1350 tcb->tcp_flags); in __tcp_transmit_skb()
1352 th->check = 0; in __tcp_transmit_skb()
1353 th->urg_ptr = 0; in __tcp_transmit_skb()
1356 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { in __tcp_transmit_skb()
1357 if (before(tp->snd_up, tcb->seq + 0x10000)) { in __tcp_transmit_skb()
1358 th->urg_ptr = htons(tp->snd_up - tcb->seq); in __tcp_transmit_skb()
1359 th->urg = 1; in __tcp_transmit_skb()
1360 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { in __tcp_transmit_skb()
1361 th->urg_ptr = htons(0xFFFF); in __tcp_transmit_skb()
1362 th->urg = 1; in __tcp_transmit_skb()
1366 skb_shinfo(skb)->gso_type = sk->sk_gso_type; in __tcp_transmit_skb()
1367 if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) { in __tcp_transmit_skb()
1368 th->window = htons(tcp_select_window(sk)); in __tcp_transmit_skb()
1374 th->window = htons(min(tp->rcv_wnd, 65535U)); in __tcp_transmit_skb()
1377 tcp_options_write(th, tp, &opts); in __tcp_transmit_skb()
1383 tp->af_specific->calc_md5_hash(opts.hash_location, in __tcp_transmit_skb()
1391 INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, in __tcp_transmit_skb()
1395 if (likely(tcb->tcp_flags & TCPHDR_ACK)) in __tcp_transmit_skb()
1398 if (skb->len != tcp_header_size) { in __tcp_transmit_skb()
1399 tcp_event_data_sent(tp, sk); in __tcp_transmit_skb()
1400 tp->data_segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1401 tp->bytes_sent += skb->len - tcp_header_size; in __tcp_transmit_skb()
1404 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) in __tcp_transmit_skb()
1408 tp->segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1410 /* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */ in __tcp_transmit_skb()
1411 skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb); in __tcp_transmit_skb()
1412 skb_shinfo(skb)->gso_size = tcp_skb_mss(skb); in __tcp_transmit_skb()
1414 /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */ in __tcp_transmit_skb()
1417 memset(skb->cb, 0, max(sizeof(struct inet_skb_parm), in __tcp_transmit_skb()
1420 tcp_add_tx_delay(skb, tp); in __tcp_transmit_skb()
1422 err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, in __tcp_transmit_skb()
1424 sk, skb, &inet->cork.fl); in __tcp_transmit_skb()
1441 tcp_sk(sk)->rcv_nxt); in tcp_transmit_skb()
1451 struct tcp_sock *tp = tcp_sk(sk); in tcp_queue_skb() local
1454 WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); in tcp_queue_skb()
1457 sk_wmem_queued_add(sk, skb->truesize); in tcp_queue_skb()
1458 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
1464 if (skb->len <= mss_now) { in tcp_set_skb_tso_segs()
1466 * non-TSO case. in tcp_set_skb_tso_segs()
1469 TCP_SKB_CB(skb)->tcp_gso_size = 0; in tcp_set_skb_tso_segs()
1471 tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now)); in tcp_set_skb_tso_segs()
1472 TCP_SKB_CB(skb)->tcp_gso_size = mss_now; in tcp_set_skb_tso_segs()
1481 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_pcount() local
1483 tp->packets_out -= decr; in tcp_adjust_pcount()
1485 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_adjust_pcount()
1486 tp->sacked_out -= decr; in tcp_adjust_pcount()
1487 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) in tcp_adjust_pcount()
1488 tp->retrans_out -= decr; in tcp_adjust_pcount()
1489 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) in tcp_adjust_pcount()
1490 tp->lost_out -= decr; in tcp_adjust_pcount()
1493 if (tcp_is_reno(tp) && decr > 0) in tcp_adjust_pcount()
1494 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); in tcp_adjust_pcount()
1496 if (tp->lost_skb_hint && in tcp_adjust_pcount()
1497 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && in tcp_adjust_pcount()
1498 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) in tcp_adjust_pcount()
1499 tp->lost_cnt_hint -= decr; in tcp_adjust_pcount()
1501 tcp_verify_left_out(tp); in tcp_adjust_pcount()
1506 return TCP_SKB_CB(skb)->txstamp_ack || in tcp_has_tx_tstamp()
1507 (skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP); in tcp_has_tx_tstamp()
1515 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) { in tcp_fragment_tstamp()
1517 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_fragment_tstamp()
1519 shinfo->tx_flags &= ~tsflags; in tcp_fragment_tstamp()
1520 shinfo2->tx_flags |= tsflags; in tcp_fragment_tstamp()
1521 swap(shinfo->tskey, shinfo2->tskey); in tcp_fragment_tstamp()
1522 TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack; in tcp_fragment_tstamp()
1523 TCP_SKB_CB(skb)->txstamp_ack = 0; in tcp_fragment_tstamp()
1529 TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor; in tcp_skb_fragment_eor()
1530 TCP_SKB_CB(skb)->eor = 0; in tcp_skb_fragment_eor()
1540 __skb_queue_after(&sk->sk_write_queue, skb, buff); in tcp_insert_write_queue_after()
1542 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_insert_write_queue_after()
1554 struct tcp_sock *tp = tcp_sk(sk); in tcp_fragment() local
1561 if (WARN_ON(len > skb->len)) in tcp_fragment()
1562 return -EINVAL; in tcp_fragment()
1571 limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE); in tcp_fragment()
1572 if (unlikely((sk->sk_wmem_queued >> 1) > limit && in tcp_fragment()
1577 return -ENOMEM; in tcp_fragment()
1581 return -ENOMEM; in tcp_fragment()
1586 return -ENOMEM; /* We'll just try again later. */ in tcp_fragment()
1590 sk_wmem_queued_add(sk, buff->truesize); in tcp_fragment()
1591 sk_mem_charge(sk, buff->truesize); in tcp_fragment()
1592 nlen = skb->len - len; in tcp_fragment()
1593 buff->truesize += nlen; in tcp_fragment()
1594 skb->truesize -= nlen; in tcp_fragment()
1597 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tcp_fragment()
1598 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1599 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1602 flags = TCP_SKB_CB(skb)->tcp_flags; in tcp_fragment()
1603 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tcp_fragment()
1604 TCP_SKB_CB(buff)->tcp_flags = flags; in tcp_fragment()
1605 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; in tcp_fragment()
1610 skb_set_delivery_time(buff, skb->tstamp, true); in tcp_fragment()
1620 TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx; in tcp_fragment()
1623 * adjust the various packet counters. in tcp_fragment()
1625 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1626 int diff = old_factor - tcp_skb_pcount(skb) - in tcp_fragment()
1637 list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor); in tcp_fragment()
1654 for (i = 0; i < shinfo->nr_frags; i++) { in __pskb_trim_head()
1655 int size = skb_frag_size(&shinfo->frags[i]); in __pskb_trim_head()
1659 eat -= size; in __pskb_trim_head()
1661 shinfo->frags[k] = shinfo->frags[i]; in __pskb_trim_head()
1663 skb_frag_off_add(&shinfo->frags[k], eat); in __pskb_trim_head()
1664 skb_frag_size_sub(&shinfo->frags[k], eat); in __pskb_trim_head()
1670 shinfo->nr_frags = k; in __pskb_trim_head()
1672 skb->data_len -= len; in __pskb_trim_head()
1673 skb->len = skb->data_len; in __pskb_trim_head()
1683 return -ENOMEM; in tcp_trim_head()
1687 TCP_SKB_CB(skb)->seq += len; in tcp_trim_head()
1689 skb->truesize -= delta_truesize; in tcp_trim_head()
1690 sk_wmem_queued_add(sk, -delta_truesize); in tcp_trim_head()
1694 /* Any change of skb->len requires recalculation of tso factor. */ in tcp_trim_head()
1704 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_mtu_to_mss() local
1709 It is MMS_S - sizeof(tcphdr) of rfc1122 in __tcp_mtu_to_mss()
1711 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); in __tcp_mtu_to_mss()
1714 if (icsk->icsk_af_ops->net_frag_header_len) { in __tcp_mtu_to_mss()
1718 mss_now -= icsk->icsk_af_ops->net_frag_header_len; in __tcp_mtu_to_mss()
1722 if (mss_now > tp->rx_opt.mss_clamp) in __tcp_mtu_to_mss()
1723 mss_now = tp->rx_opt.mss_clamp; in __tcp_mtu_to_mss()
1726 mss_now -= icsk->icsk_ext_hdr_len; in __tcp_mtu_to_mss()
1730 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss)); in __tcp_mtu_to_mss()
1738 return __tcp_mtu_to_mss(sk, pmtu) - in tcp_mtu_to_mss()
1739 (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); in tcp_mtu_to_mss()
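
A rough restatement of the MTU-to-MSS conversion above, with the header sizes passed in as parameters; the real tcp_mtu_to_mss() additionally subtracts the bytes that TCP options (such as timestamps) occupy in tp->tcp_header_len, as its last two lines show:

static int mtu_to_mss_sketch(int pmtu, int net_hdr_len, int tcp_hdr_len,
                             int ext_hdr_len, int mss_clamp, int min_mss)
{
        int mss = pmtu - net_hdr_len - tcp_hdr_len;

        if (mss > mss_clamp)     /* never exceed what the peer advertised */
                mss = mss_clamp;
        mss -= ext_hdr_len;      /* IP extension/option headers */
        return mss > min_mss ? mss : min_mss;
}
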
1746 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_to_mtu() local
1751 tp->tcp_header_len + in tcp_mss_to_mtu()
1752 icsk->icsk_ext_hdr_len + in tcp_mss_to_mtu()
1753 icsk->icsk_af_ops->net_header_len; in tcp_mss_to_mtu()
1756 if (icsk->icsk_af_ops->net_frag_header_len) { in tcp_mss_to_mtu()
1760 mtu += icsk->icsk_af_ops->net_frag_header_len; in tcp_mss_to_mtu()
1769 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_init() local
1773 icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1; in tcp_mtup_init()
1774 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + in tcp_mtup_init()
1775 icsk->icsk_af_ops->net_header_len; in tcp_mtup_init()
1776 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss)); in tcp_mtup_init()
1777 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_init()
1778 if (icsk->icsk_mtup.enabled) in tcp_mtup_init()
1779 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtup_init()
1785 tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT count
1788 tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1792 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1794 tp->mss_cache is current effective sending mss, including
1797 tp->rx_opt.mss_clamp.
1802 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1803 are READ ONLY outside this function. --ANK (980731)
1807 struct tcp_sock *tp = tcp_sk(sk); in tcp_sync_mss() local
1811 if (icsk->icsk_mtup.search_high > pmtu) in tcp_sync_mss()
1812 icsk->icsk_mtup.search_high = pmtu; in tcp_sync_mss()
1815 mss_now = tcp_bound_to_half_wnd(tp, mss_now); in tcp_sync_mss()
1818 icsk->icsk_pmtu_cookie = pmtu; in tcp_sync_mss()
1819 if (icsk->icsk_mtup.enabled) in tcp_sync_mss()
1820 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); in tcp_sync_mss()
1821 tp->mss_cache = mss_now; in tcp_sync_mss()
1832 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_mss() local
1839 mss_now = tp->mss_cache; in tcp_current_mss()
1843 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) in tcp_current_mss()
1849 /* The mss_cache is sized based on tp->tcp_header_len, which assumes in tcp_current_mss()
1852 * we have to adjust mss_now correspondingly */ in tcp_current_mss()
1853 if (header_len != tp->tcp_header_len) { in tcp_current_mss()
1854 int delta = (int) header_len - tp->tcp_header_len; in tcp_current_mss()
1855 mss_now -= delta; in tcp_current_mss()
1861 /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
1867 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_application_limited() local
1869 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && in tcp_cwnd_application_limited()
1870 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in tcp_cwnd_application_limited()
1872 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_application_limited()
1873 u32 win_used = max(tp->snd_cwnd_used, init_win); in tcp_cwnd_application_limited()
1874 if (win_used < tcp_snd_cwnd(tp)) { in tcp_cwnd_application_limited()
1875 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1876 tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1); in tcp_cwnd_application_limited()
1878 tp->snd_cwnd_used = 0; in tcp_cwnd_application_limited()
1880 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_application_limited()
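
When the application, not the network, has limited sending for a full RTO, the code above decays cwnd halfway toward what was actually used (never below the initial window). A sketch of that update:

#include <stdint.h>

static uint32_t cwnd_app_limited(uint32_t cwnd, uint32_t cwnd_used,
                                 uint32_t init_win)
{
        uint32_t win_used = cwnd_used > init_win ? cwnd_used : init_win;

        return win_used < cwnd ? (cwnd + win_used) >> 1 : cwnd;
}
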
1885 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_cwnd_validate()
1886 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_validate() local
1889 * is fully utilized. If cwnd-limited then remember that fact for the in tcp_cwnd_validate()
1890 * current window. If not cwnd-limited then track the maximum number of in tcp_cwnd_validate()
1891 * outstanding packets in the current window. (If cwnd-limited then we in tcp_cwnd_validate()
1892 * chose to not update tp->max_packets_out to avoid an extra else in tcp_cwnd_validate()
1895 if (!before(tp->snd_una, tp->cwnd_usage_seq) || in tcp_cwnd_validate()
1897 (!tp->is_cwnd_limited && in tcp_cwnd_validate()
1898 tp->packets_out > tp->max_packets_out)) { in tcp_cwnd_validate()
1899 tp->is_cwnd_limited = is_cwnd_limited; in tcp_cwnd_validate()
1900 tp->max_packets_out = tp->packets_out; in tcp_cwnd_validate()
1901 tp->cwnd_usage_seq = tp->snd_nxt; in tcp_cwnd_validate()
1906 tp->snd_cwnd_used = 0; in tcp_cwnd_validate()
1907 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_validate()
1910 if (tp->packets_out > tp->snd_cwnd_used) in tcp_cwnd_validate()
1911 tp->snd_cwnd_used = tp->packets_out; in tcp_cwnd_validate()
1913 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) && in tcp_cwnd_validate()
1914 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && in tcp_cwnd_validate()
1915 !ca_ops->cong_control) in tcp_cwnd_validate()
1925 if (tcp_write_queue_empty(sk) && sk->sk_socket && in tcp_cwnd_validate()
1926 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && in tcp_cwnd_validate()
1927 (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) in tcp_cwnd_validate()
1933 static bool tcp_minshall_check(const struct tcp_sock *tp) in tcp_minshall_check() argument
1935 return after(tp->snd_sml, tp->snd_una) && in tcp_minshall_check()
1936 !after(tp->snd_sml, tp->snd_nxt); in tcp_minshall_check()
1940 * Note that a TSO packet might end with a sub-mss segment
1942 * if ((skb->len % mss) != 0)
1943 * tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1945 * skb_pcount = skb->len / mss_now
1947 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, in tcp_minshall_update() argument
1950 if (skb->len < tcp_skb_pcount(skb) * mss_now) in tcp_minshall_update()
1951 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
1961 static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, in tcp_nagle_check() argument
1966 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); in tcp_nagle_check()
1973 * - For close peers, we rather send bigger packets to reduce
1975 * - For long distance/rtt flows, we would like to get ACK clocking
1979 * in bigger TSO bursts. We cut the RTT-based allowance in half
1980 * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
1989 bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift); in tcp_tso_autosize()
1991 r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log); in tcp_tso_autosize()
1992 if (r < BITS_PER_TYPE(sk->sk_gso_max_size)) in tcp_tso_autosize()
1993 bytes += sk->sk_gso_max_size >> r; in tcp_tso_autosize()
1995 bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size); in tcp_tso_autosize()
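
An illustrative restatement of the autosizing above: budget the bytes the pacing rate would send in about 1 ms (pacing_shift defaults to 10), add a gso_max_size allowance that is halved for every 2^rtt_log microseconds of min RTT (rtt_log defaults to 9, i.e. ~512 us), and cap the result at gso_max_size. The parameters stand in for sk_pacing_shift and sysctl_tcp_tso_rtt_log:

#include <stdint.h>

static uint64_t tso_bytes_budget(uint64_t pacing_rate_Bps, uint32_t min_rtt_us,
                                 uint32_t gso_max_size,
                                 int pacing_shift, int rtt_log)
{
        uint64_t bytes = pacing_rate_Bps >> pacing_shift;
        uint32_t r = min_rtt_us >> rtt_log;

        if (r < 64)              /* keep the shift below the type width */
                bytes += (uint64_t)gso_max_size >> r;
        return bytes < gso_max_size ? bytes : gso_max_size;
}
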
2005 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; in tcp_tso_segs()
2008 min_tso = ca_ops->min_tso_segs ? in tcp_tso_segs()
2009 ca_ops->min_tso_segs(sk) : in tcp_tso_segs()
2010 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs); in tcp_tso_segs()
2013 return min_t(u32, tso_segs, sk->sk_gso_max_segs); in tcp_tso_segs()
2023 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_split_point() local
2026 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
2032 needed = min(skb->len, window); in tcp_mss_split_point()
2042 if (tcp_nagle_check(partial != 0, tp, nonagle)) in tcp_mss_split_point()
2043 return needed - partial; in tcp_mss_split_point()
2051 static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, in tcp_cwnd_test() argument
2057 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && in tcp_cwnd_test()
2061 in_flight = tcp_packets_in_flight(tp); in tcp_cwnd_test()
2062 cwnd = tcp_snd_cwnd(tp); in tcp_cwnd_test()
2070 return min(halfcwnd, cwnd - in_flight); in tcp_cwnd_test()
2092 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, in tcp_nagle_test() argument
2105 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
2108 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) in tcp_nagle_test()
2115 static bool tcp_snd_wnd_test(const struct tcp_sock *tp, in tcp_snd_wnd_test() argument
2119 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_snd_wnd_test()
2121 if (skb->len > cur_mss) in tcp_snd_wnd_test()
2122 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; in tcp_snd_wnd_test()
2124 return !after(end_seq, tcp_wnd_end(tp)); in tcp_snd_wnd_test()
2131 * know that all the data is in scatter-gather pages, and that the
2137 int nlen = skb->len - len; in tso_fragment()
2142 DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len); in tso_fragment()
2146 return -ENOMEM; in tso_fragment()
2150 sk_wmem_queued_add(sk, buff->truesize); in tso_fragment()
2151 sk_mem_charge(sk, buff->truesize); in tso_fragment()
2152 buff->truesize += nlen; in tso_fragment()
2153 skb->truesize -= nlen; in tso_fragment()
2156 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tso_fragment()
2157 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tso_fragment()
2158 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tso_fragment()
2161 flags = TCP_SKB_CB(skb)->tcp_flags; in tso_fragment()
2162 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tso_fragment()
2163 TCP_SKB_CB(buff)->tcp_flags = flags; in tso_fragment()
2193 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_should_defer() local
2198 if (icsk->icsk_ca_state >= TCP_CA_Recovery) in tcp_tso_should_defer()
2203 * Note that tp->tcp_wstamp_ns can be in the future if we have in tcp_tso_should_defer()
2206 delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC; in tcp_tso_should_defer()
2210 in_flight = tcp_packets_in_flight(tp); in tcp_tso_should_defer()
2213 BUG_ON(tcp_snd_cwnd(tp) <= in_flight); in tcp_tso_should_defer()
2215 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
2218 cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache; in tcp_tso_should_defer()
2222 /* If a full-sized TSO skb can be sent, do it. */ in tcp_tso_should_defer()
2223 if (limit >= max_segs * tp->mss_cache) in tcp_tso_should_defer()
2227 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
2230 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); in tcp_tso_should_defer()
2232 u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache); in tcp_tso_should_defer()
2246 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) in tcp_tso_should_defer()
2254 delta = tp->tcp_clock_cache - head->tstamp; in tcp_tso_should_defer()
2256 if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0) in tcp_tso_should_defer()
2261 * 1) We are cwnd-limited in tcp_tso_should_defer()
2262 * 2) We are rwnd-limited in tcp_tso_should_defer()
2266 if (cong_win <= skb->len) { in tcp_tso_should_defer()
2271 if (send_win <= skb->len) { in tcp_tso_should_defer()
2278 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || in tcp_tso_should_defer()
2279 TCP_SKB_CB(skb)->eor) in tcp_tso_should_defer()
2291 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_check_reprobe() local
2296 interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval); in tcp_mtu_check_reprobe()
2297 delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp; in tcp_mtu_check_reprobe()
2302 icsk->icsk_mtup.probe_size = 0; in tcp_mtu_check_reprobe()
2303 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + in tcp_mtu_check_reprobe()
2305 icsk->icsk_af_ops->net_header_len; in tcp_mtu_check_reprobe()
2306 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_check_reprobe()
2309 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtu_check_reprobe()
2319 if (len <= skb->len) in tcp_can_coalesce_send_queue_head()
2325 len -= skb->len; in tcp_can_coalesce_send_queue_head()
2334 skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags; in tcp_clone_payload()
2338 if (!sk_wmem_schedule(sk, to->truesize + probe_size)) in tcp_clone_payload()
2339 return -ENOMEM; in tcp_clone_payload()
2341 skb_queue_walk(&sk->sk_write_queue, skb) { in tcp_clone_payload()
2342 const skb_frag_t *fragfrom = skb_shinfo(skb)->frags; in tcp_clone_payload()
2345 return -EINVAL; in tcp_clone_payload()
2347 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) { in tcp_clone_payload()
2351 probe_size - len); in tcp_clone_payload()
2361 return -E2BIG; in tcp_clone_payload()
2374 skb_shinfo(to)->nr_frags = nr_frags; in tcp_clone_payload()
2375 to->truesize += probe_size; in tcp_clone_payload()
2376 to->len += probe_size; in tcp_clone_payload()
2377 to->data_len += probe_size; in tcp_clone_payload()
2389 * -1 otherwise
2394 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_probe() local
2408 if (likely(!icsk->icsk_mtup.enabled || in tcp_mtu_probe()
2409 icsk->icsk_mtup.probe_size || in tcp_mtu_probe()
2410 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || in tcp_mtu_probe()
2411 tcp_snd_cwnd(tp) < 11 || in tcp_mtu_probe()
2412 tp->rx_opt.num_sacks || tp->rx_opt.dsack)) in tcp_mtu_probe()
2413 return -1; in tcp_mtu_probe()
2416 * and current mss_clamp. if (search_high - search_low) in tcp_mtu_probe()
2420 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + in tcp_mtu_probe()
2421 icsk->icsk_mtup.search_low) >> 1); in tcp_mtu_probe()
2422 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; in tcp_mtu_probe()
2423 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; in tcp_mtu_probe()
2428 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || in tcp_mtu_probe()
2429 interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) { in tcp_mtu_probe()
2434 return -1; in tcp_mtu_probe()
2438 if (tp->write_seq - tp->snd_nxt < size_needed) in tcp_mtu_probe()
2439 return -1; in tcp_mtu_probe()
2441 if (tp->snd_wnd < size_needed) in tcp_mtu_probe()
2442 return -1; in tcp_mtu_probe()
2443 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) in tcp_mtu_probe()
2447 if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) { in tcp_mtu_probe()
2448 if (!tcp_packets_in_flight(tp)) in tcp_mtu_probe()
2449 return -1; in tcp_mtu_probe()
2455 return -1; in tcp_mtu_probe()
2460 return -1; in tcp_mtu_probe()
2466 return -1; in tcp_mtu_probe()
2468 sk_wmem_queued_add(sk, nskb->truesize); in tcp_mtu_probe()
2469 sk_mem_charge(sk, nskb->truesize); in tcp_mtu_probe()
2475 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; in tcp_mtu_probe()
2476 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; in tcp_mtu_probe()
2477 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; in tcp_mtu_probe()
2484 copy = min_t(int, skb->len, probe_size - len); in tcp_mtu_probe()
2486 if (skb->len <= copy) { in tcp_mtu_probe()
2489 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; in tcp_mtu_probe()
2493 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; in tcp_mtu_probe()
2498 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & in tcp_mtu_probe()
2502 TCP_SKB_CB(skb)->seq += copy; in tcp_mtu_probe()
2510 tcp_init_tso_segs(nskb, nskb->len); in tcp_mtu_probe()
2513 * be resegmented into mss-sized pieces by tcp_write_xmit(). in tcp_mtu_probe()
2518 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); in tcp_mtu_probe()
2521 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); in tcp_mtu_probe()
2522 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; in tcp_mtu_probe()
2523 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; in tcp_mtu_probe()
2528 return -1; in tcp_mtu_probe()
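
The probe sizing near the top of tcp_mtu_probe() picks the midpoint of the MTU search range and then requires enough queued data to recover with ordinary MSS-sized segments if the probe is lost. A small sketch, with mtu_to_mss passed in as a stand-in for the conversion shown earlier:

struct probe_plan {
        int probe_size;          /* payload of the single large probe */
        int size_needed;         /* queued bytes required before probing */
};

static struct probe_plan plan_mtu_probe(int search_low, int search_high,
                                        int reordering, int mss_cache,
                                        int (*mtu_to_mss)(int mtu))
{
        struct probe_plan p;

        p.probe_size  = mtu_to_mss((search_high + search_low) >> 1);
        p.size_needed = p.probe_size + (reordering + 1) * mss_cache;
        return p;
}
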
2533 struct tcp_sock *tp = tcp_sk(sk); in tcp_pacing_check() local
2538 if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache) in tcp_pacing_check()
2541 if (!hrtimer_is_queued(&tp->pacing_timer)) { in tcp_pacing_check()
2542 hrtimer_start(&tp->pacing_timer, in tcp_pacing_check()
2543 ns_to_ktime(tp->tcp_wstamp_ns), in tcp_pacing_check()
2552 const struct rb_node *node = sk->tcp_rtx_queue.rb_node; in tcp_rtx_queue_empty_or_single_skb()
2559 return !node->rb_left && !node->rb_right; in tcp_rtx_queue_empty_or_single_skb()
2566 * - better RTT estimation and ACK scheduling
2567 * - faster recovery
2568 * - high rates
2579 2 * skb->truesize, in tcp_small_queue_check()
2580 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift)); in tcp_small_queue_check()
2581 if (sk->sk_pacing_status == SK_PACING_NONE) in tcp_small_queue_check()
2583 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes)); in tcp_small_queue_check()
2587 tcp_sk(sk)->tcp_tx_delay) { in tcp_small_queue_check()
2588 u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay; in tcp_small_queue_check()
2591 * approximate our needs assuming an ~100% skb->truesize overhead. in tcp_small_queue_check()
2595 extra_bytes >>= (20 - 1); in tcp_small_queue_check()
2598 if (refcount_read(&sk->sk_wmem_alloc) > limit) { in tcp_small_queue_check()
2607 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in tcp_small_queue_check()
2613 if (refcount_read(&sk->sk_wmem_alloc) > limit) in tcp_small_queue_check()
2619 static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new) in tcp_chrono_set() argument
2622 enum tcp_chrono old = tp->chrono_type; in tcp_chrono_set()
2625 tp->chrono_stat[old - 1] += now - tp->chrono_start; in tcp_chrono_set()
2626 tp->chrono_start = now; in tcp_chrono_set()
2627 tp->chrono_type = new; in tcp_chrono_set()
2632 struct tcp_sock *tp = tcp_sk(sk); in tcp_chrono_start() local
2639 if (type > tp->chrono_type) in tcp_chrono_start()
2640 tcp_chrono_set(tp, type); in tcp_chrono_start()
2645 struct tcp_sock *tp = tcp_sk(sk); in tcp_chrono_stop() local
2656 tcp_chrono_set(tp, TCP_CHRONO_UNSPEC); in tcp_chrono_stop()
2657 else if (type == tp->chrono_type) in tcp_chrono_stop()
2658 tcp_chrono_set(tp, TCP_CHRONO_BUSY); in tcp_chrono_stop()
2666 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2678 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_xmit() local
2688 tcp_mstamp_refresh(tp); in tcp_write_xmit()
2703 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { in tcp_write_xmit()
2705 tp->tcp_wstamp_ns = tp->tcp_clock_cache; in tcp_write_xmit()
2706 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); in tcp_write_xmit()
2707 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_write_xmit()
2718 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_write_xmit()
2727 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) { in tcp_write_xmit()
2733 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, in tcp_write_xmit()
2745 if (tso_segs > 1 && !tcp_urg_mode(tp)) in tcp_write_xmit()
2752 if (skb->len > limit && in tcp_write_xmit()
2761 * We do not want to send a pure-ack packet and have in tcp_write_xmit()
2764 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) in tcp_write_xmit()
2776 tcp_minshall_update(tp, mss_now, skb); in tcp_write_xmit()
2788 is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)); in tcp_write_xmit()
2794 tp->prr_out += sent_pkts; in tcp_write_xmit()
2801 return !tp->packets_out && !tcp_write_queue_empty(sk); in tcp_write_xmit()
2807 struct tcp_sock *tp = tcp_sk(sk); in tcp_schedule_loss_probe() local
2814 if (rcu_access_pointer(tp->fastopen_rsk)) in tcp_schedule_loss_probe()
2817 early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans); in tcp_schedule_loss_probe()
2822 !tp->packets_out || !tcp_is_sack(tp) || in tcp_schedule_loss_probe()
2823 (icsk->icsk_ca_state != TCP_CA_Open && in tcp_schedule_loss_probe()
2824 icsk->icsk_ca_state != TCP_CA_CWR)) in tcp_schedule_loss_probe()
2831 if (tp->srtt_us) { in tcp_schedule_loss_probe()
2832 timeout_us = tp->srtt_us >> 2; in tcp_schedule_loss_probe()
2833 if (tp->packets_out == 1) in tcp_schedule_loss_probe()
2844 jiffies_to_usecs(inet_csk(sk)->icsk_rto) : in tcp_schedule_loss_probe()
2861 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); in skb_still_in_host_queue()
2877 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_loss_probe() local
2883 if (tp->tlp_high_seq) in tcp_send_loss_probe()
2886 tp->tlp_retrans = 0; in tcp_send_loss_probe()
2888 if (skb && tcp_snd_wnd_test(tp, skb, mss)) { in tcp_send_loss_probe()
2889 pcount = tp->packets_out; in tcp_send_loss_probe()
2891 if (tp->packets_out > pcount) in tcp_send_loss_probe()
2895 skb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_loss_probe()
2897 WARN_ONCE(tp->packets_out, in tcp_send_loss_probe()
2899 tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss); in tcp_send_loss_probe()
2900 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2911 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { in tcp_send_loss_probe()
2913 (pcount - 1) * mss, mss, in tcp_send_loss_probe()
2925 tp->tlp_retrans = 1; in tcp_send_loss_probe()
2929 tp->tlp_high_seq = tp->snd_nxt; in tcp_send_loss_probe()
2933 inet_csk(sk)->icsk_pending = 0; in tcp_send_loss_probe()
2949 if (unlikely(sk->sk_state == TCP_CLOSE)) in __tcp_push_pending_frames()
2964 BUG_ON(!skb || skb->len < mss_now); in tcp_push_one()
2966 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); in tcp_push_one()
2978 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
2984 * since header prediction assumes th->window stays fixed.
2986 * Strictly speaking, keeping th->window fixed violates the receiver
3018 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
3024 struct tcp_sock *tp = tcp_sk(sk); in __tcp_select_window() local
3030 * fluctuations. --SAW 1998/11/1 in __tcp_select_window()
3032 int mss = icsk->icsk_ack.rcv_mss; in __tcp_select_window()
3040 full_space = min_t(int, tp->window_clamp, allowed_space); in __tcp_select_window()
3049 * a non-zero scaling factor in effect. in __tcp_select_window()
3051 if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale) in __tcp_select_window()
3057 icsk->icsk_ack.quick = 0; in __tcp_select_window()
3065 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
3068 * of the maximum allowed, try to move to zero-window, else in __tcp_select_window()
3078 if (free_space > tp->rcv_ssthresh) in __tcp_select_window()
3079 free_space = tp->rcv_ssthresh; in __tcp_select_window()
3084 if (tp->rx_opt.rcv_wscale) { in __tcp_select_window()
3091 window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
3093 window = tp->rcv_wnd; in __tcp_select_window()
3102 if (window <= free_space - mss || window > free_space) in __tcp_select_window()
3113 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
3116 icsk->icsk_ack.quick = 0; in __tcp_select_window()
3123 free_space < (1 << tp->rx_opt.rcv_wscale)) in __tcp_select_window()
3127 if (free_space > tp->rcv_ssthresh) { in __tcp_select_window()
3128 free_space = tp->rcv_ssthresh; in __tcp_select_window()
3133 * the memory-based limit, and rcv_ssthresh is not a hard limit in __tcp_select_window()
3136 free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
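
With window scaling in effect the peer only ever sees the advertised window as a multiple of 1 << rcv_wscale, which is why the lines above round free space to that granularity. A one-line sketch of the round-down case:

static unsigned int round_down_to_wscale(unsigned int space, int rcv_wscale)
{
        return space & ~((1U << rcv_wscale) - 1);
}
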
3150 shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; in tcp_skb_collapse_tstamp()
3151 shinfo->tskey = next_shinfo->tskey; in tcp_skb_collapse_tstamp()
3152 TCP_SKB_CB(skb)->txstamp_ack |= in tcp_skb_collapse_tstamp()
3153 TCP_SKB_CB(next_skb)->txstamp_ack; in tcp_skb_collapse_tstamp()
3160 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_retrans() local
3164 next_skb_size = next_skb->len; in tcp_collapse_retrans()
3174 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; in tcp_collapse_retrans()
3177 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; in tcp_collapse_retrans()
3182 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; in tcp_collapse_retrans()
3183 TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; in tcp_collapse_retrans()
3186 tcp_clear_retrans_hints_partial(tp); in tcp_collapse_retrans()
3187 if (next_skb == tp->retransmit_skb_hint) in tcp_collapse_retrans()
3188 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
3206 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_can_collapse()
3218 struct tcp_sock *tp = tcp_sk(sk); in tcp_retrans_try_collapse() local
3222 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)) in tcp_retrans_try_collapse()
3224 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in tcp_retrans_try_collapse()
3234 space -= skb->len; in tcp_retrans_try_collapse()
3244 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
3253 * state updates are done by the caller. Returns non-zero if an
3259 struct tcp_sock *tp = tcp_sk(sk); in __tcp_retransmit_skb() local
3265 if (icsk->icsk_mtup.probe_size) in __tcp_retransmit_skb()
3266 icsk->icsk_mtup.probe_size = 0; in __tcp_retransmit_skb()
3269 return -EBUSY; in __tcp_retransmit_skb()
3272 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
3273 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in __tcp_retransmit_skb()
3274 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN; in __tcp_retransmit_skb()
3275 TCP_SKB_CB(skb)->seq++; in __tcp_retransmit_skb()
3278 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { in __tcp_retransmit_skb()
3280 return -EINVAL; in __tcp_retransmit_skb()
3282 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
3283 return -ENOMEM; in __tcp_retransmit_skb()
3286 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in __tcp_retransmit_skb()
3287 return -EHOSTUNREACH; /* Routing failure or similar. */ in __tcp_retransmit_skb()
3290 avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in __tcp_retransmit_skb()
3298 if (TCP_SKB_CB(skb)->seq != tp->snd_una) in __tcp_retransmit_skb()
3299 return -EAGAIN; in __tcp_retransmit_skb()
3309 if (skb->len > len) { in __tcp_retransmit_skb()
3312 return -ENOMEM; /* We'll try again later. */ in __tcp_retransmit_skb()
3315 return -ENOMEM; in __tcp_retransmit_skb()
3319 diff -= tcp_skb_pcount(skb); in __tcp_retransmit_skb()
3323 if (skb->len < avail_wnd) in __tcp_retransmit_skb()
3328 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) in __tcp_retransmit_skb()
3334 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in __tcp_retransmit_skb()
3336 tp->total_retrans += segs; in __tcp_retransmit_skb()
3337 tp->bytes_retrans += skb->len; in __tcp_retransmit_skb()
3339 /* make sure skb->data is aligned on arches that require it in __tcp_retransmit_skb()
3340 * and check if ack-trimming & collapsing extended the headroom in __tcp_retransmit_skb()
3343 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || in __tcp_retransmit_skb()
3350 nskb->dev = NULL; in __tcp_retransmit_skb()
3353 err = -ENOBUFS; in __tcp_retransmit_skb()
3358 tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); in __tcp_retransmit_skb()
3368 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; in __tcp_retransmit_skb()
3370 if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG)) in __tcp_retransmit_skb()
3372 TCP_SKB_CB(skb)->seq, segs, err); in __tcp_retransmit_skb()
3376 } else if (err != -EBUSY) { in __tcp_retransmit_skb()
3384 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_skb() local
3389 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { in tcp_retransmit_skb()
3393 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; in tcp_retransmit_skb()
3394 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
3398 if (!tp->retrans_stamp) in tcp_retransmit_skb()
3399 tp->retrans_stamp = tcp_skb_timestamp(skb); in tcp_retransmit_skb()
3401 if (tp->undo_retrans < 0) in tcp_retransmit_skb()
3402 tp->undo_retrans = 0; in tcp_retransmit_skb()
3403 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
3416 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_retransmit_queue() local
3421 if (!tp->packets_out) in tcp_xmit_retransmit_queue()
3425 skb = tp->retransmit_skb_hint ?: rtx_head; in tcp_xmit_retransmit_queue()
3436 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
3438 segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp); in tcp_xmit_retransmit_queue()
3441 sacked = TCP_SKB_CB(skb)->sacked; in tcp_xmit_retransmit_queue()
3447 if (tp->retrans_out >= tp->lost_out) { in tcp_xmit_retransmit_queue()
3455 if (icsk->icsk_ca_state != TCP_CA_Loss) in tcp_xmit_retransmit_queue()
3473 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
3476 icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT) in tcp_xmit_retransmit_queue()
3482 inet_csk(sk)->icsk_rto, in tcp_xmit_retransmit_queue()
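/* Editor's sketch (added, not kernel source): the retransmit loop above only
 * has budget for cwnd minus what is already in flight.  Hypothetical numbers:
 * cwnd = 10 segments with 7 unacked segments in flight -> at most 3 segments
 * may be (re)sent in this pass; once the quota reaches zero the loop stops.
 */
static int rtx_quota_sketch(unsigned int snd_cwnd, unsigned int in_flight)
{
	return (int)snd_cwnd - (int)in_flight;	/* <= 0 means no room left */
}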
3497 delta = size - sk->sk_forward_alloc; in sk_forced_mem_schedule()
3504 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in sk_forced_mem_schedule()
3505 mem_cgroup_charge_skmem(sk->sk_memcg, amt, in sk_forced_mem_schedule()
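/* Editor's sketch (added, hypothetical values): sk_forced_mem_schedule()
 * charges the needed memory unconditionally (used below so the FIN skb can
 * always be accounted even under memory pressure); the shortfall against
 * sk_forward_alloc is rounded up to whole pages before being accounted.
 * E.g. forward_alloc = 1000 bytes, size = 6000 -> delta = 5000 -> 2 pages
 * on a 4096-byte-page system.
 */
static unsigned int forced_charge_pages_sketch(int forward_alloc, int size,
					       unsigned int page_size)
{
	int delta = size - forward_alloc;

	if (delta <= 0)
		return 0;				/* already covered */
	return ((unsigned int)delta + page_size - 1) / page_size;
}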
3515 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_fin() local
3524 tskb = skb_rb_last(&sk->tcp_rtx_queue); in tcp_send_fin()
3527 TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; in tcp_send_fin()
3528 TCP_SKB_CB(tskb)->end_seq++; in tcp_send_fin()
3529 tp->write_seq++; in tcp_send_fin()
 3533 		 * We need to set tp->snd_nxt to the value it would have	 in tcp_send_fin()
 3534 		 * if FIN had been sent. This is because retransmit path	 in tcp_send_fin()
3535 * does not change tp->snd_nxt. in tcp_send_fin()
3537 WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1); in tcp_send_fin()
3547 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); in tcp_send_fin()
3549 sk_forced_mem_schedule(sk, skb->truesize); in tcp_send_fin()
3551 tcp_init_nondata_skb(skb, tp->write_seq, in tcp_send_fin()
3561 * by RFC 2525, section 2.17. -DaveM
3591 /* Send a crossed SYN-ACK during socket establishment.
3602 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in tcp_send_synack()
3604 return -EFAULT; in tcp_send_synack()
3606 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { in tcp_send_synack()
3614 return -ENOMEM; in tcp_send_synack()
3615 INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor); in tcp_send_synack()
3619 tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb); in tcp_send_synack()
3620 sk_wmem_queued_add(sk, nskb->truesize); in tcp_send_synack()
3621 sk_mem_charge(sk, nskb->truesize); in tcp_send_synack()
3625 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; in tcp_send_synack()
3632 * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3648 const struct tcp_sock *tp = tcp_sk(sk); in tcp_make_synack() local
 3677 	 * sk->sk_wmem_alloc is an atomic, we can promote to rw.		 in tcp_make_synack()
3684 mss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); in tcp_make_synack()
3689 if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) in tcp_make_synack()
3696 if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ in tcp_make_synack()
3697 tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb); in tcp_make_synack()
3702 md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); in tcp_make_synack()
3704 skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4); in tcp_make_synack()
3706 TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK; in tcp_make_synack()
3714 th = (struct tcphdr *)skb->data; in tcp_make_synack()
3716 th->syn = 1; in tcp_make_synack()
3717 th->ack = 1; in tcp_make_synack()
3719 th->source = htons(ireq->ir_num); in tcp_make_synack()
3720 th->dest = ireq->ir_rmt_port; in tcp_make_synack()
3721 skb->mark = ireq->ir_mark; in tcp_make_synack()
3722 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_make_synack()
3723 th->seq = htonl(tcp_rsk(req)->snt_isn); in tcp_make_synack()
3725 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); in tcp_make_synack()
3728 th->window = htons(min(req->rsk_rcv_wnd, 65535U)); in tcp_make_synack()
3730 th->doff = (tcp_header_size >> 2); in tcp_make_synack()
3734 /* Okay, we have all we need - do the md5 hash if needed */ in tcp_make_synack()
3736 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, in tcp_make_synack()
3745 tcp_add_tx_delay(skb, tp); in tcp_make_synack()
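/* Editor's note with a small sketch (added, not kernel source): the window
 * field written into the SYN-ACK above is never scaled (RFC 1323/7323: the
 * window in SYN and SYN-ACK segments is not subject to window scaling), so
 * it is clamped to the 16-bit maximum even if the listener's receive window
 * is larger.  Hypothetical value: rsk_rcv_wnd = 131072 -> advertised 65535.
 */
static unsigned short synack_window_sketch(unsigned int rsk_rcv_wnd)
{
	return rsk_rcv_wnd > 65535U ? 65535U : rsk_rcv_wnd;
}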
3762 if (likely(ca && bpf_try_module_get(ca, ca->owner))) { in tcp_ca_dst_init()
3763 bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner); in tcp_ca_dst_init()
3764 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); in tcp_ca_dst_init()
3765 icsk->icsk_ca_ops = ca; in tcp_ca_dst_init()
3774 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_init() local
3781 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_connect_init()
3782 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps)) in tcp_connect_init()
3783 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; in tcp_connect_init()
3786 if (tp->rx_opt.user_mss) in tcp_connect_init()
3787 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; in tcp_connect_init()
3788 tp->max_window = 0; in tcp_connect_init()
3794 if (!tp->window_clamp) in tcp_connect_init()
3795 WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW)); in tcp_connect_init()
3796 tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); in tcp_connect_init()
3801 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && in tcp_connect_init()
3802 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3803 WRITE_ONCE(tp->window_clamp, tcp_full_space(sk)); in tcp_connect_init()
3810 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), in tcp_connect_init()
3811 &tp->rcv_wnd, in tcp_connect_init()
3812 &tp->window_clamp, in tcp_connect_init()
3813 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling), in tcp_connect_init()
3817 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_connect_init()
3818 tp->rcv_ssthresh = tp->rcv_wnd; in tcp_connect_init()
3820 WRITE_ONCE(sk->sk_err, 0); in tcp_connect_init()
3822 tp->snd_wnd = 0; in tcp_connect_init()
3823 tcp_init_wl(tp, 0); in tcp_connect_init()
3825 tp->snd_una = tp->write_seq; in tcp_connect_init()
3826 tp->snd_sml = tp->write_seq; in tcp_connect_init()
3827 tp->snd_up = tp->write_seq; in tcp_connect_init()
3828 WRITE_ONCE(tp->snd_nxt, tp->write_seq); in tcp_connect_init()
3830 if (likely(!tp->repair)) in tcp_connect_init()
3831 tp->rcv_nxt = 0; in tcp_connect_init()
3833 tp->rcv_tstamp = tcp_jiffies32; in tcp_connect_init()
3834 tp->rcv_wup = tp->rcv_nxt; in tcp_connect_init()
3835 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_connect_init()
3837 inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); in tcp_connect_init()
3838 inet_csk(sk)->icsk_retransmits = 0; in tcp_connect_init()
3839 tcp_clear_retrans(tp); in tcp_connect_init()
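/* Editor's sketch (added, hypothetical numbers, not kernel source): when a
 * peer timestamp is cached (ts_recent_stamp set), tcp_connect_init() above
 * subtracts the aligned timestamp option length from advmss before sizing
 * the initial receive window, since every segment is expected to carry the
 * option.  E.g. advmss = 1460 with a 12-byte aligned timestamp option ->
 * 1448 bytes of payload per segment are assumed.
 */
static unsigned int init_window_mss_sketch(unsigned int advmss,
					   int have_ts_recent,
					   unsigned int tstamp_opt_len)
{
	return advmss - (have_ts_recent ? tstamp_opt_len : 0);
}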
3844 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_queue_skb() local
3847 tcb->end_seq += skb->len; in tcp_connect_queue_skb()
3849 sk_wmem_queued_add(sk, skb->truesize); in tcp_connect_queue_skb()
3850 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
3851 WRITE_ONCE(tp->write_seq, tcb->end_seq); in tcp_connect_queue_skb()
3852 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
3856 * queue a data-only packet after the regular SYN, such that regular SYNs
 3857  * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
 3858  * only the SYN sequence, the data are retransmitted in the first ACK.
3865 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_syn_data() local
3866 struct tcp_fastopen_request *fo = tp->fastopen_req; in tcp_send_syn_data()
3871 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ in tcp_send_syn_data()
3872 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) in tcp_send_syn_data()
3875 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and in tcp_send_syn_data()
 3876 	 * user-MSS. Reserve maximum option space for middleboxes that add	 in tcp_send_syn_data()
 3877 	 * private TCP options. The cost is reduced data space in SYN :(	 in tcp_send_syn_data()
3879 tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); in tcp_send_syn_data()
3881 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_send_syn_data()
3883 space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) - in tcp_send_syn_data()
3886 space = min_t(size_t, space, fo->size); in tcp_send_syn_data()
3890 pfrag, sk->sk_allocation)) in tcp_send_syn_data()
3892 syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false); in tcp_send_syn_data()
3895 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); in tcp_send_syn_data()
3897 space = min_t(size_t, space, pfrag->size - pfrag->offset); in tcp_send_syn_data()
3901 space = copy_page_from_iter(pfrag->page, pfrag->offset, in tcp_send_syn_data()
3902 space, &fo->data->msg_iter); in tcp_send_syn_data()
3908 skb_fill_page_desc(syn_data, 0, pfrag->page, in tcp_send_syn_data()
3909 pfrag->offset, space); in tcp_send_syn_data()
3910 page_ref_inc(pfrag->page); in tcp_send_syn_data()
3911 pfrag->offset += space; in tcp_send_syn_data()
3913 skb_zcopy_set(syn_data, fo->uarg, NULL); in tcp_send_syn_data()
3916 if (space == fo->size) in tcp_send_syn_data()
3917 fo->data = NULL; in tcp_send_syn_data()
3918 fo->copied = space; in tcp_send_syn_data()
3921 if (syn_data->len) in tcp_send_syn_data()
3924 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); in tcp_send_syn_data()
3926 skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true); in tcp_send_syn_data()
3933 TCP_SKB_CB(syn_data)->seq++; in tcp_send_syn_data()
3934 TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; in tcp_send_syn_data()
3936 tp->syn_data = (fo->copied > 0); in tcp_send_syn_data()
3937 tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data); in tcp_send_syn_data()
3943 __skb_queue_tail(&sk->sk_write_queue, syn_data); in tcp_send_syn_data()
3944 tp->packets_out -= tcp_skb_pcount(syn_data); in tcp_send_syn_data()
3948 if (fo->cookie.len > 0) in tcp_send_syn_data()
3949 fo->cookie.len = 0; in tcp_send_syn_data()
3950 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); in tcp_send_syn_data()
3952 tp->syn_fastopen = 0; in tcp_send_syn_data()
3954 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ in tcp_send_syn_data()
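/* Editor's sketch (added, hypothetical numbers, not kernel source): the
 * amount of user data that may ride on the Fast Open SYN is what the path
 * MTU derived MSS leaves after reserving the maximum TCP option space,
 * further capped by how much the caller actually queued.  E.g. MSS 1460
 * with 40 bytes of option space reserved and 5000 bytes queued -> 1420
 * bytes go out in the SYN, the rest follows once the handshake completes.
 */
static unsigned int syn_data_space_sketch(unsigned int mss,
					  unsigned int max_opt_space,
					  unsigned int queued)
{
	unsigned int space = mss - max_opt_space;

	return space < queued ? space : queued;
}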
3961 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect() local
3967 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) in tcp_connect()
3968 return -EHOSTUNREACH; /* Routing failure or similar. */ in tcp_connect()
3972 if (unlikely(tp->repair)) { in tcp_connect()
3977 buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true); in tcp_connect()
3979 return -ENOBUFS; in tcp_connect()
3981 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); in tcp_connect()
3982 tcp_mstamp_refresh(tp); in tcp_connect()
3983 tp->retrans_stamp = tcp_time_stamp(tp); in tcp_connect()
3986 tcp_rbtree_insert(&sk->tcp_rtx_queue, buff); in tcp_connect()
3989 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
3990 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); in tcp_connect()
3991 if (err == -ECONNREFUSED) in tcp_connect()
3994 /* We change tp->snd_nxt after the tcp_transmit_skb() call in tcp_connect()
3997 WRITE_ONCE(tp->snd_nxt, tp->write_seq); in tcp_connect()
3998 tp->pushed_seq = tp->write_seq; in tcp_connect()
4001 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq); in tcp_connect()
4002 tp->pushed_seq = TCP_SKB_CB(buff)->seq; in tcp_connect()
4008 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); in tcp_connect()
4016 u32 delack_max = inet_csk(sk)->icsk_delack_max; in tcp_delack_max()
4020 u32 delack_from_rto_min = max_t(int, 1, rto_min - 1); in tcp_delack_max()
4034 int ato = icsk->icsk_ack.ato; in tcp_send_delayed_ack()
4038 const struct tcp_sock *tp = tcp_sk(sk); in tcp_send_delayed_ack() local
4042 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) in tcp_send_delayed_ack()
4048 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements in tcp_send_delayed_ack()
4051 if (tp->srtt_us) { in tcp_send_delayed_ack()
4052 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), in tcp_send_delayed_ack()
4068 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { in tcp_send_delayed_ack()
4070 if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { in tcp_send_delayed_ack()
4075 if (!time_before(timeout, icsk->icsk_ack.timeout)) in tcp_send_delayed_ack()
4076 timeout = icsk->icsk_ack.timeout; in tcp_send_delayed_ack()
4078 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; in tcp_send_delayed_ack()
4079 icsk->icsk_ack.timeout = timeout; in tcp_send_delayed_ack()
4080 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); in tcp_send_delayed_ack()
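/* Editor's sketch (added, not kernel source): tp->srtt_us stores the
 * smoothed RTT left-shifted by 3, so (srtt_us >> 3) above recovers the RTT
 * in microseconds and is used to cap how long a delayed ACK may be held.
 * Hypothetical numbers: stored value 40000 -> smoothed RTT 5000 us, so the
 * ACK is not delayed much beyond one RTT (subject to the usual clamps).
 */
static unsigned int delack_rtt_cap_sketch(unsigned int srtt_us_shifted3,
					  unsigned int max_ato_us)
{
	unsigned int rtt_us = srtt_us_shifted3 >> 3;	/* undo the <<3 scaling */

	return rtt_us < max_ato_us ? rtt_us : max_ato_us;
}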
4089 if (sk->sk_state == TCP_CLOSE) in __tcp_send_ack()
4102 delay = TCP_DELACK_MAX << icsk->icsk_ack.retry; in __tcp_send_ack()
4104 icsk->icsk_ack.retry++; in __tcp_send_ack()
4106 icsk->icsk_ack.ato = TCP_ATO_MIN; in __tcp_send_ack()
4128 __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); in tcp_send_ack()
 4138  * Current solution: to send TWO zero-length segments in urgent mode:
 4139  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
4140 * out-of-date with SND.UNA-1 to probe window.
4144 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_probe_skb() local
4151 return -1; in tcp_xmit_probe_skb()
4159 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
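/* Editor's note with a sketch (added, not kernel source): a window probe
 * reuses a sequence number the peer has already acknowledged, so it carries
 * no new data yet still forces the peer to reply with an ACK advertising its
 * current window.  Hypothetical: snd_una = 1000 -> the probe uses seq 999.
 */
static unsigned int probe_seq_sketch(unsigned int snd_una, int urgent)
{
	/* mirrors tp->snd_una - !urgent above: urgent-mode probes use
	 * SND.UNA itself, ordinary probes use SND.UNA - 1
	 */
	return snd_una - (urgent ? 0 : 1);
}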
4167 if (sk->sk_state == TCP_ESTABLISHED) { in tcp_send_window_probe()
4168 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; in tcp_send_window_probe()
4177 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_wakeup() local
4180 if (sk->sk_state == TCP_CLOSE) in tcp_write_wakeup()
4181 return -1; in tcp_write_wakeup()
4184 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
4187 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
4189 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
4190 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
4196 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || in tcp_write_wakeup()
4197 skb->len > mss) { in tcp_write_wakeup()
4199 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
4202 return -1; in tcp_write_wakeup()
4206 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
4212 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) in tcp_write_wakeup()
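/* Editor's sketch (added, hypothetical numbers, not kernel source):
 * tcp_write_wakeup() above prefers to push real queued data into whatever
 * window is open, splitting the head skb and setting PSH when only part of
 * it fits; only when nothing usable is queued does it fall back to the pure
 * out-of-window probe.  E.g. the window leaves room for 500 bytes and the
 * head skb holds 1400 bytes with mss 1000 -> 500 bytes are sent now.
 */
static unsigned int wakeup_send_len_sketch(unsigned int seg_size,
					   unsigned int mss,
					   unsigned int skb_len)
{
	unsigned int len = seg_size < mss ? seg_size : mss;

	return len < skb_len ? len : skb_len;	/* split the skb if needed */
}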
4224 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_probe0() local
4231 if (tp->packets_out || tcp_write_queue_empty(sk)) { in tcp_send_probe0()
4233 icsk->icsk_probes_out = 0; in tcp_send_probe0()
4234 icsk->icsk_backoff = 0; in tcp_send_probe0()
4235 icsk->icsk_probes_tstamp = 0; in tcp_send_probe0()
4239 icsk->icsk_probes_out++; in tcp_send_probe0()
4241 if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) in tcp_send_probe0()
4242 icsk->icsk_backoff++; in tcp_send_probe0()
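/* Editor's sketch (added, not kernel source): successive zero-window probes
 * back off exponentially, roughly doubling the interval each time up to a
 * cap, while icsk_probes_out counts attempts for the give-up logic.
 * Hypothetical: base 200 ms with backoff 3 -> next probe in ~1600 ms.
 */
static unsigned int probe0_interval_sketch(unsigned int base_ms,
					   unsigned int backoff,
					   unsigned int max_ms)
{
	unsigned long when = (unsigned long)base_ms << backoff;

	return when > max_ms ? max_ms : (unsigned int)when;
}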
4257 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; in tcp_rtx_synack()
4262 if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED) in tcp_rtx_synack()
4263 WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash()); in tcp_rtx_synack()
4264 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL, in tcp_rtx_synack()
4274 tcp_sk_rw(sk)->total_retrans++; in tcp_rtx_synack()