// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim:		ECN support
 *		Andrei Gurtov,
 *		Pasi Sarolahti,
 *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *					engine. Lots of bugs are found.
 *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <linux/errqueue.h>
#include <trace/events/tcp.h>
#include <linux/jump_label_ratelimit.h>
#include <net/busy_poll.h>
#include <net/mptcp.h>

int sysctl_tcp_max_orphans __read_mostly = NR_FILE;

#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
#define FLAG_ECE		0x40 /* ECE in this ACK				*/
#define FLAG_LOST_RETRANS	0x80 /* This ACK marks some retransmission lost */
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked	*/
#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SET_XMIT_TIMER	0x1000 /* Set TLP or RTO timer */
#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
#define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack()	*/
#define FLAG_ACK_MAYBE_DELAYED	0x10000 /* Likely a delayed ACK */
#define FLAG_DSACK_TLP		0x20000 /* DSACK for tail loss probe */

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
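/* Illustrative reading of the composite flags above (annotation, not part
 * of the original defines): an incoming ACK counts as a duplicate only if
 * it carries none of the FLAG_NOT_DUP bits, i.e. no data, no window update
 * and no newly acked data. A bare ACK repeating snd_una that carries a new
 * SACK block sets FLAG_DATA_SACKED, which is both a congestion-control
 * alert (FLAG_CA_ALERT) and forward progress (FLAG_FORWARD_PROGRESS), yet
 * not FLAG_NOT_DUP: it is still treated as a duplicate ACK.
 */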
#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))

#define REXMIT_NONE	0 /* no loss recovery to do */
#define REXMIT_LOST	1 /* retransmit packets marked lost */
#define REXMIT_NEW	2 /* FRTO-style transmit of unsent/new packets */

#if IS_ENABLED(CONFIG_TLS_DEVICE)
static DEFINE_STATIC_KEY_DEFERRED_FALSE(clean_acked_data_enabled, HZ);

void clean_acked_data_enable(struct inet_connection_sock *icsk,
			     void (*cad)(struct sock *sk, u32 ack_seq))
{
	icsk->icsk_clean_acked = cad;
	static_branch_deferred_inc(&clean_acked_data_enabled);
}
EXPORT_SYMBOL_GPL(clean_acked_data_enable);

void clean_acked_data_disable(struct inet_connection_sock *icsk)
{
	static_branch_slow_dec_deferred(&clean_acked_data_enabled);
	icsk->icsk_clean_acked = NULL;
}
EXPORT_SYMBOL_GPL(clean_acked_data_disable);

void clean_acked_data_flush(void)
{
	static_key_deferred_flush(&clean_acked_data_enabled);
}
EXPORT_SYMBOL_GPL(clean_acked_data_flush);
#endif

#ifdef CONFIG_CGROUP_BPF
static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
{
	bool unknown_opt = tcp_sk(sk)->rx_opt.saw_unknown &&
			   BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
						  BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG);
	bool parse_all_opt = BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
						    BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
	struct bpf_sock_ops_kern sock_ops;

	if (likely(!unknown_opt && !parse_all_opt))
		return;

	/* The skb will be handled in the
	 * bpf_skops_established() or
	 * bpf_skops_write_hdr_opt().
	 */
	switch (sk->sk_state) {
	case TCP_SYN_RECV:
	case TCP_SYN_SENT:
	case TCP_LISTEN:
		return;
	}

	sock_owned_by_me(sk);

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
	sock_ops.is_fullsock = 1;
	sock_ops.sk = sk;
	bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));

	BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
}

static void bpf_skops_established(struct sock *sk, int bpf_op,
				  struct sk_buff *skb)
{
	struct bpf_sock_ops_kern sock_ops;

	sock_owned_by_me(sk);

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	sock_ops.op = bpf_op;
	sock_ops.is_fullsock = 1;
	sock_ops.sk = sk;
	/* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
	if (skb)
		bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));

	BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
}
#else
static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
{
}

static void bpf_skops_established(struct sock *sk, int bpf_op,
				  struct sk_buff *skb)
{
}
#endif

static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
			     unsigned int len)
{
	static bool __once __read_mostly;

	if (!__once) {
		struct net_device *dev;

		__once = true;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
		if (!dev || len >= dev->mtu)
			pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
				dev ? dev->name : "Unknown driver");
		rcu_read_unlock();
	}
}

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ? : skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		/* Note: divides are still a bit expensive.
		 * For the moment, only adjust scaling_ratio
		 * when we update icsk_ack.rcv_mss.
		 */
		if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
			u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE;

			do_div(val, skb->truesize);
			tcp_sk(sk)->scaling_ratio = val ? val : 1;
		}
		icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
					       tcp_sk(sk)->advmss);
		/* Account for possibly-removed options */
		if (unlikely(len > icsk->icsk_ack.rcv_mss +
				   MAX_TCP_OPTION_SPACE))
			tcp_gro_dev_warn(sk, skb, len);
		/* If the skb has a len of exactly 1*MSS and has the PSH bit
		 * set then it is likely the end of an application write. So
		 * more data may not be arriving soon, and yet the data sender
		 * may be waiting for an ACK if cwnd-bound or using TX zero
		 * copy. So we set ICSK_ACK_PUSHED here so that
		 * tcp_cleanup_rbuf() will send an ACK immediately if the app
		 * reads all of the data and is not ping-pong. If len > MSS
		 * then this logic does not matter (and does not hurt) because
		 * tcp_cleanup_rbuf() will always ACK immediately if the app
		 * reads data and there is more than an MSS of unACKed data.
		 */
		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	} else {
		/* Otherwise, we make a more careful check, taking into
		 * account that the SACK block size is variable.
		 *
		 * "len" is the invariant segment length, including the
		 * TCP header.
		 */
		len += skb->data - skb_transport_header(skb);
		if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
		    /* If PSH is not set, packet should be
		     * full sized, provided peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows
		     * us to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
			/* Also subtract the invariant (if the peer is
			 * RFC compliant) TCP header plus the fixed
			 * timestamp option length.
			 * The resulting "len" is the MSS, free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}
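/* Illustrative example for the scaling_ratio update in
 * tcp_measure_rcv_mss() (annotation; assumes TCP_RMEM_TO_WIN_SCALE == 8,
 * i.e. scaling_ratio is a fraction of 256). A driver delivering 1448
 * payload bytes in an skb of truesize 2304 yields:
 *
 *	val = (1448 << 8) / 2304 = 160
 *
 * so scaling_ratio ~= 160/256 ~= 62%: roughly 62% of every receive
 * buffer byte is expected to hold actual payload when converting between
 * rcvbuf space and advertised window.
 */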
static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	quickacks = min(quickacks, max_quickacks);
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = quickacks;
}

static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_incr_quickack(sk, max_quickacks);
	inet_csk_exit_pingpong_mode(sk);
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if the "quick" count is not exhausted
 * and the session is not interactive.
 */
static bool tcp_in_quickack_mode(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);

	return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
		(icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
}

static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr) {
		tcp_sk(sk)->ecn_flags &= ~TCP_ECN_DEMAND_CWR;

		/* If the sender is telling us it has entered CWR, then its
		 * cwnd may be very low (even just 1 packet), so we should ACK
		 * immediately.
		 */
		if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
			inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
	}
}

static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
}

static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
	case INET_ECN_NOT_ECT:
		/* Funny extension: if ECT is not set on a segment,
		 * and we have already seen ECT on a previous segment,
		 * it is probably a retransmit.
		 */
		if (tp->ecn_flags & TCP_ECN_SEEN)
			tcp_enter_quickack_mode(sk, 2);
		break;
	case INET_ECN_CE:
		if (tcp_ca_needs_ecn(sk))
			tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);

		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
			/* Better not delay acks, sender can have a very low cwnd */
			tcp_enter_quickack_mode(sk, 2);
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		}
		tp->ecn_flags |= TCP_ECN_SEEN;
		break;
	default:
		if (tcp_ca_needs_ecn(sk))
			tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
		tp->ecn_flags |= TCP_ECN_SEEN;
		break;
	}
}

static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
		__tcp_ecn_check_ce(sk, skb);
}

static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return true;
	return false;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */
static void tcp_sndbuf_expand(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	int sndmem, per_mss;
	u32 nr_segs;

	/* Worst case is non GSO/TSO : each frame consumes one skb
	 * and skb->head is kmalloced using power of two area of memory
	 */
	per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
		  MAX_TCP_HEADER +
		  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	per_mss = roundup_pow_of_two(per_mss) +
		  SKB_DATA_ALIGN(sizeof(struct sk_buff));

	nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp));
	nr_segs = max_t(u32, nr_segs, tp->reordering + 1);

	/* Fast Recovery (RFC 5681 3.2) :
	 * Cubic needs 1.7 factor, rounded to 2 to include
	 * extra cushion (application might react slowly to EPOLLOUT)
	 */
	sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
	sndmem *= nr_segs * per_mss;

	if (sk->sk_sndbuf < sndmem)
		WRITE_ONCE(sk->sk_sndbuf,
			   min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));
}

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All of tcp_full_space() is split into two parts: the "network" buffer,
 * allocated forward and advertised in the receiver window (tp->rcv_wnd),
 * and the "application buffer", required to isolate scheduling/application
 * latencies from the network.
 * window_clamp is the maximal advertised window. It can be less than
 * tcp_full_space(), in which case tcp_full_space() - window_clamp
 * is reserved for the "application" buffer. The smaller window_clamp is,
 * the smoother our behaviour from the viewpoint of the network, but the
 * lower the throughput and the higher the connection's sensitivity to
 * losses. 8)
 *
 * rcv_ssthresh is a stricter window_clamp used during the "slow start"
 * phase to predict the further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at the sender, even when the application
 *   requires some significant "application buffer". It is check #1.
 * - to prevent pruning of the receive queue because of misprediction
 *   of the receiver window. It is check #2.
 *
 * The scheme does not work when the sender sends good segments opening
 * the window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
			     unsigned int skbtruesize)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Optimize this! */
	int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
	int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}
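/* Illustrative walk through __tcp_grow_window() (annotation; for
 * simplicity assume tcp_win_from_space() returns its argument unchanged).
 * With sysctl_tcp_rmem[2] = 4 MB, an skb of truesize 4096 holding
 * 1448 bytes, and rcv_ssthresh = 256 KB:
 *
 *	truesize = 4096 >> 1 = 2048, window = 4 MB >> 1 = 2 MB
 *
 * Both halve in lock step while rcv_ssthresh <= window; after one more
 * halving truesize is 1024 <= 1448, so the window may grow by
 * 2 * rcv_mss. Had window dropped below rcv_ssthresh first, the skb
 * would be considered too much overhead for the current window and no
 * growth would be granted.
 */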
/* Even if skb appears to have a bad len/truesize ratio, TCP coalescing
 * can play nice with us, as sk_buff and skb->head might be either
 * freed or shared with up to MAX_SKB_FRAGS segments.
 * Only give a boost to drivers using page frag(s) to hold the frame(s),
 * and if no payload was pulled in skb->head before reaching us.
 */
static u32 truesize_adjust(bool adjust, const struct sk_buff *skb)
{
	u32 truesize = skb->truesize;

	if (adjust && !skb_headlen(skb)) {
		truesize -= SKB_TRUESIZE(skb_end_offset(skb));
		/* paranoid check, some drivers might be buggy */
		if (unlikely((int)truesize < (int)skb->len))
			truesize = skb->truesize;
	}
	return truesize;
}

static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
			    bool adjust)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int room;

	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;

	if (room <= 0)
		return;

	/* Check #1 */
	if (!tcp_under_memory_pressure(sk)) {
		unsigned int truesize = truesize_adjust(adjust, skb);
		int incr;

		/* Check #2. Increase window, if skb with such overhead
		 * will fit to rcvbuf in future.
		 */
		if (tcp_win_from_space(sk, truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb, truesize);

		if (incr) {
			incr = max_t(int, incr, 2 * skb->len);
			tp->rcv_ssthresh += min(room, incr);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	} else {
		/* Under pressure:
		 * Adjust rcv_ssthresh according to reserved mem
		 */
		tcp_adjust_rcv_ssthresh(sk);
	}
}

/* 3. Try to fixup all. It is done immediately after the connection enters
 *    the established state.
 */
static void tcp_init_buffer_space(struct sock *sk)
{
	int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_sndbuf_expand(sk);

	tcp_mstamp_refresh(tp);
	tp->rcvq_space.time = tp->tcp_mstamp;
	tp->rcvq_space.seq = tp->copied_seq;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
				    (u32)TCP_INIT_CWND * tp->advmss);
}

/* 4. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct net *net = sock_net(sk);
	int rmem2;

	icsk->icsk_ack.quick = 0;
	rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);

	if (sk->sk_rcvbuf < rmem2 &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !tcp_under_memory_pressure(sk) &&
	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
		WRITE_ONCE(sk->sk_rcvbuf,
			   min(atomic_read(&sk->sk_rmem_alloc), rmem2));
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We don't have any direct information about the MSS.
 * It's better to underestimate RCV_MSS rather than overestimate it:
 * overestimations make us ACK less frequently than needed, while
 * underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd / 2);
	hint = min(hint, TCP_MSS_DEFAULT);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
EXPORT_SYMBOL(tcp_initialize_rcv_mss);
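/* Illustrative trace of tcp_initialize_rcv_mss() (annotation; assumes
 * the usual values TCP_MSS_DEFAULT == 536 and TCP_MIN_MSS == 88). For
 * advmss = 1460, mss_cache = 1460, rcv_wnd = 10000:
 *
 *	hint = min(1460, 1460)      = 1460
 *	hint = min(1460, 10000 / 2) = 1460
 *	hint = min(1460, 536)       = 536
 *	hint = max(536, 88)         = 536
 *
 * so rcv_mss starts at a conservative 536 and is only raised by
 * tcp_measure_rcv_mss() once full-sized segments are actually seen.
 */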
/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <https://public.lanl.gov/radiant/pubs.html#DRS>
 *
 * More detail on this code can be found at
 * <http://staff.psc.edu/jheffner/>,
 * though this reference is out of date. A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
	u32 new_sample = tp->rcv_rtt_est.rtt_us;
	long m = sample;

	if (new_sample != 0) {
		/* If we sample in larger samples in the non-timestamp
		 * case, we could grossly overestimate the RTT especially
		 * with chatty applications or bulk transfer apps which
		 * are stalled on filesystem I/O.
		 *
		 * Also, since we are only going for a minimum in the
		 * non-timestamp case, we do not smooth things out,
		 * else with timestamps disabled convergence takes too
		 * long.
		 */
		if (!win_dep) {
			m -= (new_sample >> 3);
			new_sample += m;
		} else {
			m <<= 3;
			if (m < new_sample)
				new_sample = m;
		}
	} else {
		/* No previous measure. */
		new_sample = m << 3;
	}

	tp->rcv_rtt_est.rtt_us = new_sample;
}

static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
	u32 delta_us;

	if (tp->rcv_rtt_est.time == 0)
		goto new_measure;
	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
		return;
	delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
	if (!delta_us)
		delta_us = 1;
	tcp_rcv_rtt_update(tp, delta_us, 1);

new_measure:
	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
	tp->rcv_rtt_est.time = tp->tcp_mstamp;
}

static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
					  const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr)
		return;
	tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;

	if (TCP_SKB_CB(skb)->end_seq -
	    TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
		u32 delta_us;

		if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
			if (!delta)
				delta = 1;
			delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
			tcp_rcv_rtt_update(tp, delta_us, 0);
		}
	}
}

/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 copied;
	int time;

	trace_tcp_rcv_space_adjust(sk);

	tcp_mstamp_refresh(tp);
	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
	if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
		return;

	/* Number of bytes copied to user in last RTT */
	copied = tp->copied_seq - tp->rcvq_space.seq;
	if (copied <= tp->rcvq_space.space)
		goto new_measure;

	/* A bit of theory :
	 * copied = bytes received in previous RTT, our base window
	 * To cope with packet losses, we need a 2x factor
	 * To cope with slow start, and the sender growing its cwnd by
	 * 100% every RTT, we need a 4x factor, because the ACK we are
	 * sending now is for the next RTT, not the current one :
	 * <prev RTT . ><current RTT .. ><next RTT .... >
	 */

	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		u64 rcvwin, grow;
		int rcvbuf;

		/* minimal window to cope with packet losses, assuming
		 * steady state. Add some cushion because of small variations.
		 */
		rcvwin = ((u64)copied << 1) + 16 * tp->advmss;

		/* Accommodate the sender's rate increase (eg. slow start) */
		grow = rcvwin * (copied - tp->rcvq_space.space);
		do_div(grow, tp->rcvq_space.space);
		rcvwin += (grow << 1);

		rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin),
			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
		if (rcvbuf > sk->sk_rcvbuf) {
			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);

			/* Make the window clamp follow along. */
			tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
		}
	}
	tp->rcvq_space.space = copied;

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tp->tcp_mstamp;
}
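/* Illustrative numbers for the rcvwin computation above (annotation).
 * If 100000 bytes were copied to user space in the last RTT, advmss is
 * 1448 and rcvq_space.space was previously 80000:
 *
 *	rcvwin = (100000 << 1) + 16 * 1448         = 223168
 *	grow   = 223168 * (100000 - 80000) / 80000 = 55792
 *	rcvwin += 55792 << 1                       = 334752
 *
 * i.e. twice the last-RTT volume plus cushion, further inflated by the
 * observed growth rate, so the advertised window stays ahead of a
 * slow-starting sender.
 */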
/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval. When a
 * connection starts up, we want to ack as quickly as possible. The
 * problem is that "good" TCPs do slow start at the beginning of data
 * transmission. This means that until we send the first few ACKs the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time. For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue. -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_jiffies32;

	if (!icsk->icsk_ack.ato) {
		/* The _first_ data packet received, initialize
		 * delayed ACK engine.
		 */
		tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long gap. Apparently the sender failed to
			 * restart the window, so send ACKs quickly.
			 */
			tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	tcp_ecn_check_ce(sk, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, skb, true);
}
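/* Illustrative ATO adaptation from the code above (annotation; assumes
 * TCP_ATO_MIN corresponds to ~40 ms, its usual value). With ato = 40 ms
 * and inter-arrival gap m:
 *
 *	m <= 20 ms   : ato = 40/2 + 20 = 40 ms  (stay at the floor)
 *	m  = 30 ms   : ato = 40/2 + 30 = 50 ms  (back off towards m,
 *	                                         capped at icsk_rto)
 *	m  > icsk_rto: re-enter quickack mode, the sender apparently
 *	               had to restart its window
 *
 * so the delayed-ACK timeout tracks the observed packet spacing.
 */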
/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt_us; /* RTT */
	u32 srtt = tp->srtt_us;

	/* The following amusing code comes from Jacobson's
	 * article in SIGCOMM '88. Note that rtt and mdev
	 * are scaled versions of rtt and mean deviation.
	 * This is designed to be as fast as possible
	 * m stands for "measurement".
	 *
	 * In a 1990 paper the rto value was changed to:
	 * RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO when it should be decreased,
	 * increase it too slowly when it should be increased quickly,
	 * decrease it too quickly, etc. I guess in BSD RTO takes ONE
	 * value, so that it absolutely does not matter how to
	 * _calculate_ it. Seems it was a trap that VJ failed to
	 * avoid. 8)
	 */
	if (srtt != 0) {
		m -= (srtt >> 3);	/* m is now error in rtt est */
		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
			/* This is similar to one of Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but also it limits too fast rto decreases,
			 * happening in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
		}
		tp->mdev_us += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev_us > tp->mdev_max_us) {
			tp->mdev_max_us = tp->mdev_us;
			if (tp->mdev_max_us > tp->rttvar_us)
				tp->rttvar_us = tp->mdev_max_us;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max_us < tp->rttvar_us)
				tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max_us = tcp_rto_min_us(sk);

			tcp_bpf_rtt(sk);
		}
	} else {
		/* no previous measure. */
		srtt = m << 3;		/* take the measured time to be rtt */
		tp->mdev_us = m << 1;	/* make sure rto = 3*rtt */
		tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
		tp->mdev_max_us = tp->rttvar_us;
		tp->rtt_seq = tp->snd_nxt;

		tcp_bpf_rtt(sk);
	}
	tp->srtt_us = max(1U, srtt);
}

static void tcp_update_pacing_rate(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u64 rate;

	/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
	rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);

	/* current rate is (cwnd * mss) / srtt
	 * In Slow Start [1], set sk_pacing_rate to 200 % of the current rate.
	 * In Congestion Avoidance phase, set it to 120 % of the current rate.
	 *
	 * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
	 *	 If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
	 *	 end of slow start and should slow down.
	 */
	if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
		rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);
	else
		rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);

	rate *= max(tcp_snd_cwnd(tp), tp->packets_out);

	if (likely(tp->srtt_us))
		do_div(rate, tp->srtt_us);

	/* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate
	 * without any lock. We want to make sure the compiler won't store
	 * intermediate values in this location.
	 */
	WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,
					     sk->sk_max_pacing_rate));
}

/* Calculate rto without backoff. This is the second half of Van Jacobson's
 * routine referred to above.
 */
static void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less than 50 msec, it is
	 *    hallucination. It cannot be less due to utterly erratic
	 *    ACK generation made at least by solaris and freebsd.
	 *    "Erratic ACKs" have _nothing_ to do with delayed acks,
	 *    because at cwnd>2 true delack timeout is invisible.
	 *    Actually, Linux-2.4 also generates erratic ACKs in some
	 *    circumstances.
	 */
	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    all the algo is pure shit and should be replaced
	 *    with a correct one. That is exactly what we pretend to do.
	 */

	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
	 * guarantees that rto is higher.
	 */
	tcp_bound_rto(sk);
}
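/* Illustrative update in tcp_rtt_estimator() (annotation). srtt_us is
 * kept scaled by 8 and mdev_us by 4. Say the smoothed RTT is 100 ms
 * (srtt = 800), mdev is 10 ms (mdev_us = 40), and a 120 ms sample
 * arrives:
 *
 *	m -= srtt >> 3;     m = 120 - 100 = 20     (estimation error)
 *	srtt += m;          srtt = 820 -> 102.5 ms smoothed
 *	m -= mdev_us >> 2;  m = 20 - 10 = 10
 *	mdev_us += m;       mdev_us = 50 -> 12.5 ms deviation
 *
 * which is srtt = 7/8 srtt + 1/8 sample and mdev = 3/4 mdev + 1/4 |err|
 * computed entirely with shifts, per Jacobson's SIGCOMM '88 paper.
 */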
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd)
		cwnd = TCP_INIT_CWND;
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

struct tcp_sacktag_state {
	/* Timestamps for earliest and latest never-retransmitted segment
	 * that was SACKed. RTO needs the earliest RTT to stay conservative,
	 * but congestion control should still get an accurate delay signal.
	 */
	u64	first_sackt;
	u64	last_sackt;
	u32	reord;
	u32	sack_delivered;
	int	flag;
	unsigned int mss_now;
	struct rate_sample *rate;
};

/* Take notice that the peer is sending D-SACKs. Skip the update of data
 * delivery and spurious retransmission information if this DSACK is
 * unlikely to have been caused by the sender's action:
 * - The DSACKed sequence range is larger than the maximum receiver window.
 * - The total number of DSACKed segments exceeds the total number of
 *   retransmitted segments.
 */
static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq,
			  u32 end_seq, struct tcp_sacktag_state *state)
{
	u32 seq_len, dup_segs = 1;

	if (!before(start_seq, end_seq))
		return 0;

	seq_len = end_seq - start_seq;
	/* Dubious DSACK: DSACKed range greater than maximum advertised rwnd */
	if (seq_len > tp->max_window)
		return 0;
	if (seq_len > tp->mss_cache)
		dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache);
	else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq)
		state->flag |= FLAG_DSACK_TLP;

	tp->dsack_dups += dup_segs;
	/* Skip the DSACK if dup segs weren't retransmitted by sender */
	if (tp->dsack_dups > tp->total_retrans)
		return 0;

	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
	/* We increase the RACK ordering window in rounds where we receive
	 * DSACKs that may have been due to reordering causing RACK to trigger
	 * a spurious fast recovery. Thus RACK ignores DSACKs that happen
	 * without having seen reordering, or that match TLP probes (TLP
	 * is timer-driven, not triggered by RACK).
	 */
	if (tp->reord_seen && !(state->flag & FLAG_DSACK_TLP))
		tp->rack.dsack_seen = 1;

	state->flag |= FLAG_DSACKING_ACK;
	/* A spurious retransmission is delivered */
	state->sack_delivered += dup_segs;

	return dup_segs;
}
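/* Illustrative dup_segs estimate from tcp_dsack_seen() (annotation).
 * A D-SACK covering [1000, 4000) with mss_cache = 1200 gives
 *
 *	seq_len  = 3000
 *	dup_segs = DIV_ROUND_UP(3000, 1200) = 3
 *
 * i.e. about three spuriously retransmitted segments are accounted.
 * Should dsack_dups ever exceed total_retrans, the D-SACKs cannot all
 * stem from our retransmissions and the block is dismissed as dubious.
 */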
/* It's reordering when a higher sequence was delivered (i.e. SACKed)
 * before some lower, never-retransmitted sequence ("low_seq"). The maximum
 * reordering distance is approximated in full-mss packet distance
 * ("reordering").
 */
static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
				      const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const u32 mss = tp->mss_cache;
	u32 fack, metric;

	fack = tcp_highest_sack_seq(tp);
	if (!before(low_seq, fack))
		return;

	metric = fack - low_seq;
	if ((metric > tp->reordering * mss) && mss) {
#if FASTRETRANS_DEBUG > 1
		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
			 tp->reordering,
			 0,
			 tp->sacked_out,
			 tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tp->reordering = min_t(u32, (metric + mss - 1) / mss,
				       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
	}

	/* This exciting event is worth remembering. 8) */
	tp->reord_seen++;
	NET_INC_STATS(sock_net(sk),
		      ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
}

/* This must be called before lost_out or retrans_out are updated
 * on a new loss, because we want to know if all skbs previously
 * known to be lost have already been retransmitted, indicating
 * that this newly lost skb is our next skb to retransmit.
 */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
	    (tp->retransmit_skb_hint &&
	     before(TCP_SKB_CB(skb)->seq,
		    TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))
		tp->retransmit_skb_hint = skb;
}

/* Sum the number of packets on the wire we have marked as lost, and
 * notify the congestion control module that the given skb was marked lost.
 */
static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
{
	tp->lost += tcp_skb_pcount(skb);
}

void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	__u8 sacked = TCP_SKB_CB(skb)->sacked;
	struct tcp_sock *tp = tcp_sk(sk);

	if (sacked & TCPCB_SACKED_ACKED)
		return;

	tcp_verify_retransmit_hint(tp, skb);
	if (sacked & TCPCB_LOST) {
		if (sacked & TCPCB_SACKED_RETRANS) {
			/* Account for retransmits that are lost again */
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
			tp->retrans_out -= tcp_skb_pcount(skb);
			NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
				      tcp_skb_pcount(skb));
			tcp_notify_skb_loss_event(tp, skb);
		}
	} else {
		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
		tcp_notify_skb_loss_event(tp, skb);
	}
}

/* Updates the delivered and delivered_ce counts */
static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
				bool ece_ack)
{
	tp->delivered += delivered;
	if (ece_ack)
		tp->delivered_ce += delivered;
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag  InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R  1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form a finite state machine, controlled by the following
 * events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of two flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	B. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be
 * commutative, so that we are allowed not to be bothered by the order
 * of our actions when multiple events arrive simultaneously. (see the
 * function below).
 *
 * Reordering detection.
 * --------------------
 * The reordering metric is the maximal distance which a packet can be
 * displaced by in the packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering.
 * Neither of these heuristics is used in the Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included in the range though it would be valid,
 * because a SACK at SND.UNA means that the receiver is rather inconsistent
 * with itself, reporting SACK reneging when it should advance SND.UNA.
 * Such a SACK block is, however, perfectly valid in light of RFC 2018,
 * which explicitly states that "SACK block MUST reflect the newest segment.
 * Even if the newest segment is going to be discarded ...", not that it
 * looks very clever in case of the head skb. Due to potential
 * receiver-driven attacks, we choose to avoid immediate execution of a
 * walk in the write queue due to reneging and defer the head skb's loss
 * recovery to the standard loss recovery procedure that will eventually
 * trigger (nothing forbids us doing this).
 *
 * This also implements blockage to start_seq wrap-around. The problem lies
 * in the fact that though start_seq (s) is before end_seq (i.e., not
 * reversed), there's no guarantee that it will be before snd_nxt (n). The
 * problem happens when start_seq resides between end_seq wrap (e_w) and
 * snd_nxt wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable but it's better still to discard such
 * crazy SACK blocks. Doing this check for start_seq alone also closes the
 * somewhat similar case (end_seq after snd_nxt wrap), since the earlier
 * reversed check in the snd_nxt wrap -> snd_una region will then become
 * "well defined", i.e., equal to the ideal case (infinite seqno space
 * without wrap caused issues).
 *
 * With D-SACK the lower bound is extended to cover the sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, a D-SACK block must not go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But there all simplicity
 * ends: TCP might receive valid D-SACKs below that. As long as they reside
 * fully below undo_marker they do not affect behavior in any way and can
 * therefore be safely ignored. In rare cases (which are more or less
 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
 * fragmentation and packet reordering past skb's retransmission. To consider
 * them correctly, the acceptable range must be extended even more, though
 * the exact amount is rather hard to quantify. However, tp->max_window can
 * be used as an exaggerated estimate.
 */
static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
				   u32 start_seq, u32 end_seq)
{
	/* Too far in future, or reversed (interpretation is ambiguous) */
	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
		return false;

	/* Nasty start_seq wrap-around check (see comments above) */
	if (!before(start_seq, tp->snd_nxt))
		return false;

	/* In outstanding window? ...This is valid exit for D-SACKs too.
	 * start_seq == snd_una is nonsensical (see comments above)
	 */
	if (after(start_seq, tp->snd_una))
		return true;

	if (!is_dsack || !tp->undo_marker)
		return false;

	/* ...Then it's D-SACK, and must reside below snd_una completely */
	if (after(end_seq, tp->snd_una))
		return false;

	if (!before(start_seq, tp->undo_marker))
		return true;

	/* Too old */
	if (!after(end_seq, tp->undo_marker))
		return false;

	/* Undo_marker boundary crossing (overestimates a lot). Known already:
	 *   start_seq < undo_marker and end_seq >= undo_marker.
	 */
	return !before(start_seq, end_seq - tp->max_window);
}

static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
			    struct tcp_sack_block_wire *sp, int num_sacks,
			    u32 prior_snd_una, struct tcp_sacktag_state *state)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
	u32 dup_segs;

	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
	} else if (num_sacks > 1) {
		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);

		if (after(end_seq_0, end_seq_1) || before(start_seq_0, start_seq_1))
			return false;
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV);
	} else {
		return false;
	}

	dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state);
	if (!dup_segs) {	/* Skip dubious DSACK */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKIGNOREDDUBIOUS);
		return false;
	}

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs);

	/* D-SACK for already forgotten data... Do dumb counting. */
	if (tp->undo_marker && tp->undo_retrans > 0 &&
	    !after(end_seq_0, prior_snd_una) &&
	    after(end_seq_0, tp->undo_marker))
		tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs);

	return true;
}
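/* Illustrative D-SACK patterns handled above (annotation, per RFC 2883).
 * With ack_seq = 5000, a first SACK block [3000, 4000) lies entirely
 * below the cumulative ACK: the receiver reports a duplicate of already
 * acked data (LINUX_MIB_TCPDSACKRECV). Alternatively, a first block
 * [3000, 4000) contained inside a second block [2000, 6000) reports a
 * duplicate within still-outstanding data and is counted as
 * LINUX_MIB_TCPDSACKOFORECV instead.
 */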
/* Check if skb is fully within the SACK block. In the presence of GSO skbs,
 * the incoming SACK may not exactly match but we can find a smaller
 * MSS-aligned portion of it that matches. Therefore we might need to
 * fragment, which may fail and create some hassle (the caller must handle
 * the error case).
 *
 * FIXME: this could be merged to shift decision code
 */
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
				 u32 start_seq, u32 end_seq)
{
	int err;
	bool in_sack;
	unsigned int pkt_len;
	unsigned int mss;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
		mss = tcp_skb_mss(skb);
		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);

		if (!in_sack) {
			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				pkt_len = mss;
		} else {
			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				return -EINVAL;
		}

		/* Round if necessary so that SACKs cover only full MSSes
		 * and/or the remaining small portion (if present)
		 */
		if (pkt_len > mss) {
			unsigned int new_len = (pkt_len / mss) * mss;

			if (!in_sack && new_len < pkt_len)
				new_len += mss;
			pkt_len = new_len;
		}

		if (pkt_len >= skb->len && !in_sack)
			return 0;

		err = tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				   pkt_len, mss, GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	return in_sack;
}
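/* Illustrative rounding from tcp_match_skb_to_sack() (annotation).
 * Take an skb covering [1000, 5000) with mss = 1000 and a SACK block
 * starting at 3500, so only the tail of the skb is SACKed:
 *
 *	pkt_len = 3500 - 1000 = 2500
 *	new_len = (2500 / 1000) * 1000 = 2000, then += mss -> 3000
 *
 * The skb is thus fragmented at a full-MSS boundary (3000 bytes in)
 * rather than mid-segment, keeping pcount bookkeeping aligned to whole
 * MSSes.
 */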
/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
static u8 tcp_sacktag_one(struct sock *sk,
			  struct tcp_sacktag_state *state, u8 sacked,
			  u32 start_seq, u32 end_seq,
			  int dup_sack, int pcount,
			  u64 xmit_time)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Account D-SACK for retransmitted packet. */
	if (dup_sack && (sacked & TCPCB_RETRANS)) {
		if (tp->undo_marker && tp->undo_retrans > 0 &&
		    after(end_seq, tp->undo_marker))
			tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
		if ((sacked & TCPCB_SACKED_ACKED) &&
		    before(start_seq, state->reord))
			state->reord = start_seq;
	}

	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
	if (!after(end_seq, tp->snd_una))
		return sacked;

	if (!(sacked & TCPCB_SACKED_ACKED)) {
		tcp_rack_advance(tp, sacked, end_seq, xmit_time);

		if (sacked & TCPCB_SACKED_RETRANS) {
			/* If the segment is not tagged as lost,
			 * we do not clear RETRANS, believing
			 * that retransmission is still in flight.
			 */
			if (sacked & TCPCB_LOST) {
				sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
				tp->lost_out -= pcount;
				tp->retrans_out -= pcount;
			}
		} else {
			if (!(sacked & TCPCB_RETRANS)) {
				/* New SACK for a non-retransmitted frame,
				 * which was in a hole. It is reordering.
				 */
				if (before(start_seq,
					   tcp_highest_sack_seq(tp)) &&
				    before(start_seq, state->reord))
					state->reord = start_seq;

				if (!after(end_seq, tp->high_seq))
					state->flag |= FLAG_ORIG_SACK_ACKED;
				if (state->first_sackt == 0)
					state->first_sackt = xmit_time;
				state->last_sackt = xmit_time;
			}

			if (sacked & TCPCB_LOST) {
				sacked &= ~TCPCB_LOST;
				tp->lost_out -= pcount;
			}
		}

		sacked |= TCPCB_SACKED_ACKED;
		state->flag |= FLAG_DATA_SACKED;
		tp->sacked_out += pcount;
		/* Out-of-order packets delivered */
		state->sack_delivered += pcount;

		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
		if (tp->lost_skb_hint &&
		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
			tp->lost_cnt_hint += pcount;
	}

	/* D-SACK. We can detect redundant retransmission in S|R and plain R
	 * frames and clear it. undo_retrans is decreased above, L|R frames
	 * are accounted above as well.
	 */
	if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
		sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= pcount;
	}

	return sacked;
}

/* Shift newly-SACKed bytes from this skb to the immediately previous
 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
 */
static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
			    struct sk_buff *skb,
			    struct tcp_sacktag_state *state,
			    unsigned int pcount, int shifted, int mss,
			    bool dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */

	BUG_ON(!pcount);

	/* Adjust counters and hints for the newly sacked sequence
	 * range but discard the return value since prev is already
	 * marked. We must tag the range first because the seq
	 * advancement below implicitly advances
	 * tcp_highest_sack_seq() when skb is highest_sack.
	 */
	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
			start_seq, end_seq, dup_sack, pcount,
			tcp_skb_timestamp_us(skb));
	tcp_rate_skb_delivered(sk, skb, state->rate);

	if (skb == tp->lost_skb_hint)
		tp->lost_cnt_hint += pcount;

	TCP_SKB_CB(prev)->end_seq += shifted;
	TCP_SKB_CB(skb)->seq += shifted;

	tcp_skb_pcount_add(prev, pcount);
	WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
	tcp_skb_pcount_add(skb, -pcount);

	/* When we're adding to gso_segs == 1, gso_size will be zero,
	 * in theory this shouldn't be necessary, but as long as DSACK
	 * code can come after this skb later on it's better to keep
	 * setting gso_size to something.
	 */
	if (!TCP_SKB_CB(prev)->tcp_gso_size)
		TCP_SKB_CB(prev)->tcp_gso_size = mss;

	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
	if (tcp_skb_pcount(skb) <= 1)
		TCP_SKB_CB(skb)->tcp_gso_size = 0;

	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);

	if (skb->len > 0) {
		BUG_ON(!tcp_skb_pcount(skb));
		NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
		return false;
	}

	/* Whole SKB was eaten :-) */

	if (skb == tp->retransmit_skb_hint)
		tp->retransmit_skb_hint = prev;
	if (skb == tp->lost_skb_hint) {
		tp->lost_skb_hint = prev;
		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
	}

	TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		TCP_SKB_CB(prev)->end_seq++;

	if (skb == tcp_highest_sack(sk))
		tcp_advance_highest_sack(sk, skb);

	tcp_skb_collapse_tstamp(prev, skb);
	if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
		TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;

	tcp_rtx_queue_unlink_and_free(skb, sk);

	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);

	return true;
}

/* I wish gso_size would have a bit saner initialization than
 * something-or-zero, which complicates things
 */
static int tcp_skb_seglen(const struct sk_buff *skb)
{
	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}

/* Shifting pages past head area doesn't work */
static int skb_can_shift(const struct sk_buff *skb)
{
	return !skb_headlen(skb) && skb_is_nonlinear(skb);
}

int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
		  int pcount, int shiftlen)
{
	/* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
	 * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
	 * to make sure not storing more than 65535 * 8 bytes per skb,
	 * even if current MSS is bigger.
	 */
	if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
		return 0;
	if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
		return 0;
	return skb_shift(to, from, shiftlen);
}
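/* Quick arithmetic behind the two caps above (annotation). tcp_gso_segs
 * is a 16-bit field, so a merged skb may never account for more than
 * 65535 segments; with the 8-byte TCP_MIN_GSO_SIZE floor this bounds
 * the byte count at 65535 * 8 = 524280. E.g. shifting 10000 bytes onto
 * a 520000-byte skb is refused (530000 >= 524280) even though both
 * skbs are individually valid.
 */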
*/ 1580 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1581 goto fallback; 1582 1583 /* Can only happen with delayed DSACK + discard craziness */ 1584 prev = skb_rb_prev(skb); 1585 if (!prev) 1586 goto fallback; 1587 1588 if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) 1589 goto fallback; 1590 1591 if (!tcp_skb_can_collapse(prev, skb)) 1592 goto fallback; 1593 1594 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && 1595 !before(end_seq, TCP_SKB_CB(skb)->end_seq); 1596 1597 if (in_sack) { 1598 len = skb->len; 1599 pcount = tcp_skb_pcount(skb); 1600 mss = tcp_skb_seglen(skb); 1601 1602 /* TODO: Fix DSACKs to not fragment already SACKed and we can 1603 * drop this restriction as unnecessary 1604 */ 1605 if (mss != tcp_skb_seglen(prev)) 1606 goto fallback; 1607 } else { 1608 if (!after(TCP_SKB_CB(skb)->end_seq, start_seq)) 1609 goto noop; 1610 /* CHECKME: This is non-MSS split case only?, this will 1611 * cause skipped skbs due to advancing loop btw, original 1612 * has that feature too 1613 */ 1614 if (tcp_skb_pcount(skb) <= 1) 1615 goto noop; 1616 1617 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); 1618 if (!in_sack) { 1619 /* TODO: head merge to next could be attempted here 1620 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)), 1621 * though it might not be worth of the additional hassle 1622 * 1623 * ...we can probably just fallback to what was done 1624 * previously. We could try merging non-SACKed ones 1625 * as well but it probably isn't going to buy off 1626 * because later SACKs might again split them, and 1627 * it would make skb timestamp tracking considerably 1628 * harder problem. 1629 */ 1630 goto fallback; 1631 } 1632 1633 len = end_seq - TCP_SKB_CB(skb)->seq; 1634 BUG_ON(len < 0); 1635 BUG_ON(len > skb->len); 1636 1637 /* MSS boundaries should be honoured or else pcount will 1638 * severely break even though it makes things bit trickier. 
1639		 * Optimize common case to avoid most of the divides
1640		 */
1641		mss = tcp_skb_mss(skb);
1642
1643		/* TODO: Fix DSACKs not to fragment already-SACKed data; then
1644		 * we can drop this restriction as unnecessary
1645		 */
1646		if (mss != tcp_skb_seglen(prev))
1647			goto fallback;
1648
1649		if (len == mss) {
1650			pcount = 1;
1651		} else if (len < mss) {
1652			goto noop;
1653		} else {
1654			pcount = len / mss;
1655			len = pcount * mss;
1656		}
1657	}
1658
1659	/* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
1660	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
1661		goto fallback;
1662
1663	if (!tcp_skb_shift(prev, skb, pcount, len))
1664		goto fallback;
1665	if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
1666		goto out;
1667
1668	/* A filled hole allows collapsing with the next skb as well; this
1669	 * is very useful when a hole-on-every-nth-skb pattern happens
1670	 */
1671	skb = skb_rb_next(prev);
1672	if (!skb)
1673		goto out;
1674
1675	if (!skb_can_shift(skb) ||
1676	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
1677	    (mss != tcp_skb_seglen(skb)))
1678		goto out;
1679
1680	if (!tcp_skb_can_collapse(prev, skb))
1681		goto out;
1682	len = skb->len;
1683	pcount = tcp_skb_pcount(skb);
1684	if (tcp_skb_shift(prev, skb, pcount, len))
1685		tcp_shifted_skb(sk, prev, skb, state, pcount,
1686				len, mss, 0);
1687
1688 out:
1689	return prev;
1690
1691 noop:
1692	return skb;
1693
1694 fallback:
1695	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
1696	return NULL;
1697 }
1698
1699 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1700					 struct tcp_sack_block *next_dup,
1701					 struct tcp_sacktag_state *state,
1702					 u32 start_seq, u32 end_seq,
1703					 bool dup_sack_in)
1704 {
1705	struct tcp_sock *tp = tcp_sk(sk);
1706	struct sk_buff *tmp;
1707
1708	skb_rbtree_walk_from(skb) {
1709		int in_sack = 0;
1710		bool dup_sack = dup_sack_in;
1711
1712		/* queue is in-order => we can short-circuit the walk early */
1713		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
1714			break;
1715
1716		if (next_dup &&
1717		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
1718			in_sack = tcp_match_skb_to_sack(sk, skb,
1719							next_dup->start_seq,
1720							next_dup->end_seq);
1721			if (in_sack > 0)
1722				dup_sack = true;
1723		}
1724
1725		/* The skb reference here is a bit tricky to get right, since
1726		 * shifting can eat and free both this skb and the next, so
1727		 * not even the _safe variant of the loop is enough.
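		 * tcp_shift_skb_data() therefore tells us where to resume:
		 * it returns prev when data was merged (this skb may have
		 * been freed), the skb itself for a no-op, or NULL when
		 * shifting fell back and the skb still has to be matched
		 * against the SACK block below.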
1728 */ 1729 if (in_sack <= 0) { 1730 tmp = tcp_shift_skb_data(sk, skb, state, 1731 start_seq, end_seq, dup_sack); 1732 if (tmp) { 1733 if (tmp != skb) { 1734 skb = tmp; 1735 continue; 1736 } 1737 1738 in_sack = 0; 1739 } else { 1740 in_sack = tcp_match_skb_to_sack(sk, skb, 1741 start_seq, 1742 end_seq); 1743 } 1744 } 1745 1746 if (unlikely(in_sack < 0)) 1747 break; 1748 1749 if (in_sack) { 1750 TCP_SKB_CB(skb)->sacked = 1751 tcp_sacktag_one(sk, 1752 state, 1753 TCP_SKB_CB(skb)->sacked, 1754 TCP_SKB_CB(skb)->seq, 1755 TCP_SKB_CB(skb)->end_seq, 1756 dup_sack, 1757 tcp_skb_pcount(skb), 1758 tcp_skb_timestamp_us(skb)); 1759 tcp_rate_skb_delivered(sk, skb, state->rate); 1760 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1761 list_del_init(&skb->tcp_tsorted_anchor); 1762 1763 if (!before(TCP_SKB_CB(skb)->seq, 1764 tcp_highest_sack_seq(tp))) 1765 tcp_advance_highest_sack(sk, skb); 1766 } 1767 } 1768 return skb; 1769 } 1770 1771 static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk, u32 seq) 1772 { 1773 struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node; 1774 struct sk_buff *skb; 1775 1776 while (*p) { 1777 parent = *p; 1778 skb = rb_to_skb(parent); 1779 if (before(seq, TCP_SKB_CB(skb)->seq)) { 1780 p = &parent->rb_left; 1781 continue; 1782 } 1783 if (!before(seq, TCP_SKB_CB(skb)->end_seq)) { 1784 p = &parent->rb_right; 1785 continue; 1786 } 1787 return skb; 1788 } 1789 return NULL; 1790 } 1791 1792 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, 1793 u32 skip_to_seq) 1794 { 1795 if (skb && after(TCP_SKB_CB(skb)->seq, skip_to_seq)) 1796 return skb; 1797 1798 return tcp_sacktag_bsearch(sk, skip_to_seq); 1799 } 1800 1801 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, 1802 struct sock *sk, 1803 struct tcp_sack_block *next_dup, 1804 struct tcp_sacktag_state *state, 1805 u32 skip_to_seq) 1806 { 1807 if (!next_dup) 1808 return skb; 1809 1810 if (before(next_dup->start_seq, skip_to_seq)) { 1811 skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq); 1812 skb = tcp_sacktag_walk(skb, sk, NULL, state, 1813 next_dup->start_seq, next_dup->end_seq, 1814 1); 1815 } 1816 1817 return skb; 1818 } 1819 1820 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) 1821 { 1822 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1823 } 1824 1825 static int 1826 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, 1827 u32 prior_snd_una, struct tcp_sacktag_state *state) 1828 { 1829 struct tcp_sock *tp = tcp_sk(sk); 1830 const unsigned char *ptr = (skb_transport_header(ack_skb) + 1831 TCP_SKB_CB(ack_skb)->sacked); 1832 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); 1833 struct tcp_sack_block sp[TCP_NUM_SACKS]; 1834 struct tcp_sack_block *cache; 1835 struct sk_buff *skb; 1836 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); 1837 int used_sacks; 1838 bool found_dup_sack = false; 1839 int i, j; 1840 int first_sack_index; 1841 1842 state->flag = 0; 1843 state->reord = tp->snd_nxt; 1844 1845 if (!tp->sacked_out) 1846 tcp_highest_sack_reset(sk); 1847 1848 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, 1849 num_sacks, prior_snd_una, state); 1850 1851 /* Eliminate too old ACKs, but take into 1852 * account more or less fresh ones, they can 1853 * contain valid SACK info. 
1854 */ 1855 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) 1856 return 0; 1857 1858 if (!tp->packets_out) 1859 goto out; 1860 1861 used_sacks = 0; 1862 first_sack_index = 0; 1863 for (i = 0; i < num_sacks; i++) { 1864 bool dup_sack = !i && found_dup_sack; 1865 1866 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); 1867 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); 1868 1869 if (!tcp_is_sackblock_valid(tp, dup_sack, 1870 sp[used_sacks].start_seq, 1871 sp[used_sacks].end_seq)) { 1872 int mib_idx; 1873 1874 if (dup_sack) { 1875 if (!tp->undo_marker) 1876 mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; 1877 else 1878 mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; 1879 } else { 1880 /* Don't count olds caused by ACK reordering */ 1881 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && 1882 !after(sp[used_sacks].end_seq, tp->snd_una)) 1883 continue; 1884 mib_idx = LINUX_MIB_TCPSACKDISCARD; 1885 } 1886 1887 NET_INC_STATS(sock_net(sk), mib_idx); 1888 if (i == 0) 1889 first_sack_index = -1; 1890 continue; 1891 } 1892 1893 /* Ignore very old stuff early */ 1894 if (!after(sp[used_sacks].end_seq, prior_snd_una)) { 1895 if (i == 0) 1896 first_sack_index = -1; 1897 continue; 1898 } 1899 1900 used_sacks++; 1901 } 1902 1903 /* order SACK blocks to allow in order walk of the retrans queue */ 1904 for (i = used_sacks - 1; i > 0; i--) { 1905 for (j = 0; j < i; j++) { 1906 if (after(sp[j].start_seq, sp[j + 1].start_seq)) { 1907 swap(sp[j], sp[j + 1]); 1908 1909 /* Track where the first SACK block goes to */ 1910 if (j == first_sack_index) 1911 first_sack_index = j + 1; 1912 } 1913 } 1914 } 1915 1916 state->mss_now = tcp_current_mss(sk); 1917 skb = NULL; 1918 i = 0; 1919 1920 if (!tp->sacked_out) { 1921 /* It's already past, so skip checking against it */ 1922 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1923 } else { 1924 cache = tp->recv_sack_cache; 1925 /* Skip empty blocks in at head of the cache */ 1926 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && 1927 !cache->end_seq) 1928 cache++; 1929 } 1930 1931 while (i < used_sacks) { 1932 u32 start_seq = sp[i].start_seq; 1933 u32 end_seq = sp[i].end_seq; 1934 bool dup_sack = (found_dup_sack && (i == first_sack_index)); 1935 struct tcp_sack_block *next_dup = NULL; 1936 1937 if (found_dup_sack && ((i + 1) == first_sack_index)) 1938 next_dup = &sp[i + 1]; 1939 1940 /* Skip too early cached blocks */ 1941 while (tcp_sack_cache_ok(tp, cache) && 1942 !before(start_seq, cache->end_seq)) 1943 cache++; 1944 1945 /* Can skip some work by looking recv_sack_cache? */ 1946 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && 1947 after(end_seq, cache->start_seq)) { 1948 1949 /* Head todo? */ 1950 if (before(start_seq, cache->start_seq)) { 1951 skb = tcp_sacktag_skip(skb, sk, start_seq); 1952 skb = tcp_sacktag_walk(skb, sk, next_dup, 1953 state, 1954 start_seq, 1955 cache->start_seq, 1956 dup_sack); 1957 } 1958 1959 /* Rest of the block already fully processed? */ 1960 if (!after(end_seq, cache->end_seq)) 1961 goto advance_sp; 1962 1963 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, 1964 state, 1965 cache->end_seq); 1966 1967 /* ...tail remains todo... */ 1968 if (tcp_highest_sack_seq(tp) == cache->end_seq) { 1969 /* ...but better entrypoint exists! 
				 */
1970				skb = tcp_highest_sack(sk);
1971				if (!skb)
1972					break;
1973				cache++;
1974				goto walk;
1975			}
1976
1977			skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
1978			/* Check overlap against next cached too (past this one already) */
1979			cache++;
1980			continue;
1981		}
1982
1983		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
1984			skb = tcp_highest_sack(sk);
1985			if (!skb)
1986				break;
1987		}
1988		skb = tcp_sacktag_skip(skb, sk, start_seq);
1989
1990 walk:
1991		skb = tcp_sacktag_walk(skb, sk, next_dup, state,
1992				       start_seq, end_seq, dup_sack);
1993
1994 advance_sp:
1995		i++;
1996	}
1997
1998	/* Clear the head of the cache sack blocks so we can skip it next time */
1999	for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
2000		tp->recv_sack_cache[i].start_seq = 0;
2001		tp->recv_sack_cache[i].end_seq = 0;
2002	}
2003	for (j = 0; j < used_sacks; j++)
2004		tp->recv_sack_cache[i++] = sp[j];
2005
2006	if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker)
2007		tcp_check_sack_reordering(sk, state->reord, 0);
2008
2009	tcp_verify_left_out(tp);
2010 out:
2011
2012 #if FASTRETRANS_DEBUG > 0
2013	WARN_ON((int)tp->sacked_out < 0);
2014	WARN_ON((int)tp->lost_out < 0);
2015	WARN_ON((int)tp->retrans_out < 0);
2016	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
2017 #endif
2018	return state->flag;
2019 }
2020
2021 /* Limits sacked_out so that sum with lost_out isn't ever larger than
2022  * packets_out. Returns false if sacked_out adjustment wasn't necessary.
2023  */
2024 static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
2025 {
2026	u32 holes;
2027
2028	holes = max(tp->lost_out, 1U);
2029	holes = min(holes, tp->packets_out);
2030
2031	if ((tp->sacked_out + holes) > tp->packets_out) {
2032		tp->sacked_out = tp->packets_out - holes;
2033		return true;
2034	}
2035	return false;
2036 }
2037
2038 /* If we receive more dupacks than we expected (counting segments
2039  * under the assumption of no reordering), interpret this as reordering.
2040  * The only other reason could be a bug in the receiver's TCP.
2041  */
2042 static void tcp_check_reno_reordering(struct sock *sk, const int addend)
2043 {
2044	struct tcp_sock *tp = tcp_sk(sk);
2045
2046	if (!tcp_limit_reno_sacked(tp))
2047		return;
2048
2049	tp->reordering = min_t(u32, tp->packets_out + addend,
2050			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
2051	tp->reord_seen++;
2052	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
2053 }
2054
2055 /* Emulate SACKs for SACKless connection: account for a new dupack. */
2056
2057 static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack)
2058 {
2059	if (num_dupack) {
2060		struct tcp_sock *tp = tcp_sk(sk);
2061		u32 prior_sacked = tp->sacked_out;
2062		s32 delivered;
2063
2064		tp->sacked_out += num_dupack;
2065		tcp_check_reno_reordering(sk, 0);
2066		delivered = tp->sacked_out - prior_sacked;
2067		if (delivered > 0)
2068			tcp_count_delivered(tp, delivered, ece_ack);
2069		tcp_verify_left_out(tp);
2070	}
2071 }
2072
2073 /* Account for ACK, ACKing some data in Reno Recovery phase. */
2074
2075 static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack)
2076 {
2077	struct tcp_sock *tp = tcp_sk(sk);
2078
2079	if (acked > 0) {
2080		/* One ACK acked hole. The rest eat duplicate ACKs.
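		 * e.g. acked = 5 with sacked_out = 3: one segment fills the
		 * hole, the remaining four eat the three emulated (Reno)
		 * SACKs, sacked_out drops to 0, and tcp_count_delivered()
		 * records max(5 - 3, 1) = 2 newly delivered segments.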
		 */
2081		tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1),
2082				    ece_ack);
2083		if (acked - 1 >= tp->sacked_out)
2084			tp->sacked_out = 0;
2085		else
2086			tp->sacked_out -= acked - 1;
2087	}
2088	tcp_check_reno_reordering(sk, acked);
2089	tcp_verify_left_out(tp);
2090 }
2091
2092 static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
2093 {
2094	tp->sacked_out = 0;
2095 }
2096
2097 void tcp_clear_retrans(struct tcp_sock *tp)
2098 {
2099	tp->retrans_out = 0;
2100	tp->lost_out = 0;
2101	tp->undo_marker = 0;
2102	tp->undo_retrans = -1;
2103	tp->sacked_out = 0;
2104 }
2105
2106 static inline void tcp_init_undo(struct tcp_sock *tp)
2107 {
2108	tp->undo_marker = tp->snd_una;
2109	/* A retransmission still in flight may cause DSACKs later. */
2110	tp->undo_retrans = tp->retrans_out ? : -1;
2111 }
2112
2113 static bool tcp_is_rack(const struct sock *sk)
2114 {
2115	return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
2116		TCP_RACK_LOSS_DETECTION;
2117 }
2118
2119 /* If we detect SACK reneging, forget all SACK information
2120  * and reset tags completely; otherwise preserve SACKs. If the receiver
2121  * dropped its ofo queue, we will learn this via reneging detection.
2122  */
2123 static void tcp_timeout_mark_lost(struct sock *sk)
2124 {
2125	struct tcp_sock *tp = tcp_sk(sk);
2126	struct sk_buff *skb, *head;
2127	bool is_reneg;			/* is receiver reneging on SACKs? */
2128
2129	head = tcp_rtx_queue_head(sk);
2130	is_reneg = head && (TCP_SKB_CB(head)->sacked & TCPCB_SACKED_ACKED);
2131	if (is_reneg) {
2132		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
2133		tp->sacked_out = 0;
2134		/* Mark SACK reneging until we recover from this loss event. */
2135		tp->is_sack_reneg = 1;
2136	} else if (tcp_is_reno(tp)) {
2137		tcp_reset_reno_sack(tp);
2138	}
2139
2140	skb = head;
2141	skb_rbtree_walk_from(skb) {
2142		if (is_reneg)
2143			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
2144		else if (tcp_is_rack(sk) && skb != head &&
2145			 tcp_rack_skb_timeout(tp, skb, 0) > 0)
2146			continue; /* Don't mark recently sent ones lost yet */
2147		tcp_mark_skb_lost(sk, skb);
2148	}
2149	tcp_verify_left_out(tp);
2150	tcp_clear_all_retrans_hints(tp);
2151 }
2152
2153 /* Enter Loss state. */
2154 void tcp_enter_loss(struct sock *sk)
2155 {
2156	const struct inet_connection_sock *icsk = inet_csk(sk);
2157	struct tcp_sock *tp = tcp_sk(sk);
2158	struct net *net = sock_net(sk);
2159	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
2160	u8 reordering;
2161
2162	tcp_timeout_mark_lost(sk);
2163
2164	/* Reduce ssthresh if it has not yet been made inside this window. */
2165	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
2166	    !after(tp->high_seq, tp->snd_una) ||
2167	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
2168		tp->prior_ssthresh = tcp_current_ssthresh(sk);
2169		tp->prior_cwnd = tcp_snd_cwnd(tp);
2170		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2171		tcp_ca_event(sk, CA_EVENT_LOSS);
2172		tcp_init_undo(tp);
2173	}
2174	tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1);
2175	tp->snd_cwnd_cnt = 0;
2176	tp->snd_cwnd_stamp = tcp_jiffies32;
2177
2178	/* Timeout in disordered state after receiving substantial DUPACKs
2179	 * suggests that the degree of reordering is over-estimated.
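	 * In that case tp->reordering is clamped back down to the sysctl
	 * default (net.ipv4.tcp_reordering, normally 3) below.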
2180	 */
2181	reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
2182	if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
2183	    tp->sacked_out >= reordering)
2184		tp->reordering = min_t(unsigned int, tp->reordering,
2185				       reordering);
2186
2187	tcp_set_ca_state(sk, TCP_CA_Loss);
2188	tp->high_seq = tp->snd_nxt;
2189	tcp_ecn_queue_cwr(tp);
2190
2191	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
2192	 * loss recovery is underway except recurring timeout(s) on
2193	 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
2194	 */
2195	tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
2196		   (new_recovery || icsk->icsk_retransmits) &&
2197		   !inet_csk(sk)->icsk_mtup.probe_size;
2198 }
2199
2200 /* If an ACK arrives pointing to a remembered SACK, it means that our
2201  * remembered SACKs do not reflect the real state of the receiver, i.e.
2202  * the receiver _host_ is heavily congested (or buggy).
2203  *
2204  * To avoid big spurious retransmission bursts due to transient SACK
2205  * scoreboard oddities that look like reneging, we give the receiver a
2206  * little time (max(RTT/2, 10ms)) to send us some more ACKs that will
2207  * restore sanity to the SACK scoreboard. If the apparent reneging
2208  * persists until this RTO then we'll clear the SACK scoreboard.
2209  */
2210 static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
2211 {
2212	if (*ack_flag & FLAG_SACK_RENEGING &&
2213	    *ack_flag & FLAG_SND_UNA_ADVANCED) {
2214		struct tcp_sock *tp = tcp_sk(sk);
2215		unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
2216					  msecs_to_jiffies(10));
2217
2218		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2219					  delay, TCP_RTO_MAX);
2220		*ack_flag &= ~FLAG_SET_XMIT_TIMER;
2221		return true;
2222	}
2223	return false;
2224 }
2225
2226 /* Heuristics to calculate the number of duplicate ACKs. There's no dupACK
2227  * counter when SACK is enabled (without SACK, sacked_out is used for
2228  * that purpose).
2229  *
2230  * With reordering, holes may still be in flight, so RFC3517 recovery
2231  * uses pure sacked_out (total number of SACKed segments) even though
2232  * the RFC itself is written in terms of duplicate ACKs. The two are
2233  * often equal, but when e.g. out-of-window ACKs or packet duplication
2234  * occurs they differ. Since neither occurs due to loss, TCP should
2235  * really ignore them.
2236  */
2237 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
2238 {
2239	return tp->sacked_out + 1;
2240 }
2241
2242 /* Linux NewReno/SACK/ECN state machine.
2243  * --------------------------------------
2244  *
2245  * "Open"	Normal state, no dubious events, fast path.
2246  * "Disorder"   In all respects it is "Open",
2247  *		but requires a bit more attention. It is entered when
2248  *		we see some SACKs or dupacks. It is split off from "Open"
2249  *		mainly to move some processing from the fast path to the slow one.
2250  * "CWR"	CWND was reduced due to some Congestion Notification event.
2251  *		It can be ECN, ICMP source quench, local device congestion.
2252  * "Recovery"	CWND was reduced, we are fast-retransmitting.
2253  * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
2254  *
2255  * tcp_fastretrans_alert() is entered:
2256  * - on each incoming ACK, if state is not "Open"
2257  * - when the arriving ACK is unusual, namely:
2258  *	* SACK
2259  *	* Duplicate ACK.
2260  *	* ECN ECE.
2261  *
2262  * Counting packets in flight is pretty simple.
2263  *
2264  *	in_flight = packets_out - left_out + retrans_out
2265  *
2266  *	packets_out is SND.NXT-SND.UNA counted in packets.
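 *	For example: packets_out = 10, sacked_out = 3, lost_out = 2 and
 *	retrans_out = 1 give left_out = 3 + 2 = 5 and therefore
 *	in_flight = 10 - 5 + 1 = 6 segments presumed still in the network.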
2267  *
2268  *	retrans_out is the number of retransmitted segments.
2269  *
2270  *	left_out is the number of segments that left the network but are not ACKed yet.
2271  *
2272  *		left_out = sacked_out + lost_out
2273  *
2274  *	sacked_out: Packets which arrived at the receiver out of order
2275  *		   and hence were not cumulatively ACKed. With SACKs this number is simply the
2276  *		   amount of SACKed data. Even without SACKs
2277  *		   it is easy to give a pretty reliable estimate of this number,
2278  *		   counting duplicate ACKs.
2279  *
2280  *	lost_out: Packets lost by the network. TCP has no explicit
2281  *		   "loss notification" feedback from the network (for now).
2282  *		   It means that this number can only be _guessed_.
2283  *		   Actually, it is the heuristic used to predict loss that
2284  *		   distinguishes the different algorithms.
2285  *
2286  *	E.g. after an RTO, when the whole queue is considered lost,
2287  *	lost_out = packets_out and in_flight = retrans_out.
2288  *
2289  *	Essentially, we now have a few algorithms detecting
2290  *	lost packets.
2291  *
2292  *	If the receiver supports SACK:
2293  *
2294  *	RFC6675/3517: It is the conventional algorithm. A packet is
2295  *	considered lost if the number of higher-sequence packets
2296  *	SACKed is greater than or equal to the DUPACK threshold
2297  *	(reordering). This is implemented in tcp_mark_head_lost and
2298  *	tcp_update_scoreboard.
2299  *
2300  *	RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm
2301  *	(2017-) that checks timing instead of counting DUPACKs.
2302  *	Essentially a packet is considered lost if it's not S/ACKed
2303  *	after RTT + reordering_window, where both metrics are
2304  *	dynamically measured and adjusted. This is implemented in
2305  *	tcp_rack_mark_lost.
2306  *
2307  *	If the receiver does not support SACK:
2308  *
2309  *	NewReno (RFC6582): in Recovery we assume that one segment
2310  *	is lost (classic Reno). While we are in Recovery and
2311  *	a partial ACK arrives, we assume that one more packet
2312  *	is lost (NewReno). These heuristics are the same in NewReno
2313  *	and SACK.
2314  *
2315  * The really tricky (and carefully tuned) part of the algorithm
2316  * is hidden in tcp_time_to_recover() and tcp_xmit_retransmit_queue().
2317  * The first determines the moment _when_ we should reduce CWND and,
2318  * hence, slow down forward transmission. In fact, it determines the moment
2319  * when we decide that a hole is caused by loss, rather than by reordering.
2320  *
2321  * tcp_xmit_retransmit_queue() decides _what_ we should retransmit to fill
2322  * the holes caused by lost packets.
2323  *
2324  * And the most logically complicated part of the algorithm is the undo
2325  * heuristics. We detect false retransmits due to both too-early
2326  * fast retransmit (reordering) and underestimated RTO, by analyzing
2327  * timestamps and D-SACKs. When we detect that some segments were
2328  * retransmitted by mistake and the CWND reduction was wrong, we undo the
2329  * window reduction and abort the recovery phase. This logic is hidden
2330  * inside several functions named tcp_try_undo_<something>.
2331  */
2332
2333 /* This function decides when we should leave the Disorder state
2334  * and enter the Recovery phase, reducing the congestion window.
2335  *
2336  * Main question: may we further continue forward transmission
2337  * with the same cwnd?
2338  */
2339 static bool tcp_time_to_recover(struct sock *sk, int flag)
2340 {
2341	struct tcp_sock *tp = tcp_sk(sk);
2342
2343	/* Trick#1: The loss is proven. */
2344	if (tp->lost_out)
2345		return true;
2346
2347	/* Not-A-Trick#2 : Classic rule...
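	 * (RFC6675/3517): enter Recovery once enough dupACK-equivalents
	 * have been seen, i.e. tcp_dupack_heuristics() exceeds
	 * tp->reordering, e.g. sacked_out = 3 gives 4 > 3 against the
	 * default reordering degree.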
	 */
2348	if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering)
2349		return true;
2350
2351	return false;
2352 }
2353
2354 /* Detect loss in event "A" above by marking the head of the queue as lost.
2355  * For RFC3517 SACK, a segment is considered lost if it
2356  * has at least tp->reordering SACKed segments above it; "packets" refers to
2357  * the maximum SACKed segments to pass before reaching this limit.
2358  */
2359 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2360 {
2361	struct tcp_sock *tp = tcp_sk(sk);
2362	struct sk_buff *skb;
2363	int cnt;
2364	/* Use SACK to deduce losses of new sequences sent during recovery */
2365	const u32 loss_high = tp->snd_nxt;
2366
2367	WARN_ON(packets > tp->packets_out);
2368	skb = tp->lost_skb_hint;
2369	if (skb) {
2370		/* Head already handled? */
2371		if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una))
2372			return;
2373		cnt = tp->lost_cnt_hint;
2374	} else {
2375		skb = tcp_rtx_queue_head(sk);
2376		cnt = 0;
2377	}
2378
2379	skb_rbtree_walk_from(skb) {
2380		/* TODO: do this better */
2381		/* this is not the most efficient way to do this... */
2382		tp->lost_skb_hint = skb;
2383		tp->lost_cnt_hint = cnt;
2384
2385		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
2386			break;
2387
2388		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2389			cnt += tcp_skb_pcount(skb);
2390
2391		if (cnt > packets)
2392			break;
2393
2394		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST))
2395			tcp_mark_skb_lost(sk, skb);
2396
2397		if (mark_head)
2398			break;
2399	}
2400	tcp_verify_left_out(tp);
2401 }
2402
2403 /* Account newly detected lost packet(s) */
2404
2405 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2406 {
2407	struct tcp_sock *tp = tcp_sk(sk);
2408
2409	if (tcp_is_sack(tp)) {
2410		int sacked_upto = tp->sacked_out - tp->reordering;
2411		if (sacked_upto >= 0)
2412			tcp_mark_head_lost(sk, sacked_upto, 0);
2413		else if (fast_rexmit)
2414			tcp_mark_head_lost(sk, 1, 1);
2415	}
2416 }
2417
2418 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
2419 {
2420	return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2421	       before(tp->rx_opt.rcv_tsecr, when);
2422 }
2423
2424 /* An skb was spuriously retransmitted if the returned timestamp echo
2425  * reply is earlier than the skb's transmission time
2426  */
2427 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
2428				      const struct sk_buff *skb)
2429 {
2430	return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
2431	       tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
2432 }
2433
2434 /* Nothing was retransmitted, or the returned timestamp is less
2435  * than the timestamp of the first retransmission.
2436  */
2437 static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
2438 {
2439	return tp->retrans_stamp &&
2440	       tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
2441 }
2442
2443 /* Undo procedures. */
2444
2445 /* We can clear retrans_stamp when there are no retransmissions in the
2446  * window. It would seem that it is trivially available for us in
2447  * tp->retrans_out; however, that kind of assumption doesn't consider
2448  * what will happen if errors occur when sending a retransmission for the
2449  * second time. ...It could be that such a segment has only
2450  * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2451  * the head skb is enough except for some reneging corner cases that
2452  * are not worth the effort.
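 * (Hence tcp_any_retrans_done() below checks tp->retrans_out first
 * and then falls back to the head skb's TCPCB_EVER_RETRANS bit.)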
2453  *
2454  * The main reason for all this complexity is the fact that connection dying
2455  * time now depends on the validity of the retrans_stamp, in particular,
2456  * that successive retransmissions of a segment must not advance
2457  * retrans_stamp under any conditions.
2458  */
2459 static bool tcp_any_retrans_done(const struct sock *sk)
2460 {
2461	const struct tcp_sock *tp = tcp_sk(sk);
2462	struct sk_buff *skb;
2463
2464	if (tp->retrans_out)
2465		return true;
2466
2467	skb = tcp_rtx_queue_head(sk);
2468	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2469		return true;
2470
2471	return false;
2472 }
2473
2474 static void DBGUNDO(struct sock *sk, const char *msg)
2475 {
2476 #if FASTRETRANS_DEBUG > 1
2477	struct tcp_sock *tp = tcp_sk(sk);
2478	struct inet_sock *inet = inet_sk(sk);
2479
2480	if (sk->sk_family == AF_INET) {
2481		pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
2482			 msg,
2483			 &inet->inet_daddr, ntohs(inet->inet_dport),
2484			 tcp_snd_cwnd(tp), tcp_left_out(tp),
2485			 tp->snd_ssthresh, tp->prior_ssthresh,
2486			 tp->packets_out);
2487	}
2488 #if IS_ENABLED(CONFIG_IPV6)
2489	else if (sk->sk_family == AF_INET6) {
2490		pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
2491			 msg,
2492			 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
2493			 tcp_snd_cwnd(tp), tcp_left_out(tp),
2494			 tp->snd_ssthresh, tp->prior_ssthresh,
2495			 tp->packets_out);
2496	}
2497 #endif
2498 #endif
2499 }
2500
2501 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
2502 {
2503	struct tcp_sock *tp = tcp_sk(sk);
2504
2505	if (unmark_loss) {
2506		struct sk_buff *skb;
2507
2508		skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
2509			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
2510		}
2511		tp->lost_out = 0;
2512		tcp_clear_all_retrans_hints(tp);
2513	}
2514
2515	if (tp->prior_ssthresh) {
2516		const struct inet_connection_sock *icsk = inet_csk(sk);
2517
2518		tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk));
2519
2520		if (tp->prior_ssthresh > tp->snd_ssthresh) {
2521			tp->snd_ssthresh = tp->prior_ssthresh;
2522			tcp_ecn_withdraw_cwr(tp);
2523		}
2524	}
2525	tp->snd_cwnd_stamp = tcp_jiffies32;
2526	tp->undo_marker = 0;
2527	tp->rack.advanced = 1; /* Force RACK to re-examine losses */
2528 }
2529
2530 static inline bool tcp_may_undo(const struct tcp_sock *tp)
2531 {
2532	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2533 }
2534
2535 static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
2536 {
2537	struct tcp_sock *tp = tcp_sk(sk);
2538
2539	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2540		/* Hold old state until something *above* high_seq
2541		 * is ACKed. For Reno this is a MUST to prevent false
2542		 * fast retransmits (RFC2582). SACK TCP is safe. */
2543		if (!tcp_any_retrans_done(sk))
2544			tp->retrans_stamp = 0;
2545		return true;
2546	}
2547	return false;
2548 }
2549
2550 /* People celebrate: "We love our President!" */
2551 static bool tcp_try_undo_recovery(struct sock *sk)
2552 {
2553	struct tcp_sock *tp = tcp_sk(sk);
2554
2555	if (tcp_may_undo(tp)) {
2556		int mib_idx;
2557
2558		/* Happy end! We did not retransmit anything
2559		 * or our original transmission succeeded.
2560		 */
2561		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ?
"loss" : "retrans"); 2562 tcp_undo_cwnd_reduction(sk, false); 2563 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) 2564 mib_idx = LINUX_MIB_TCPLOSSUNDO; 2565 else 2566 mib_idx = LINUX_MIB_TCPFULLUNDO; 2567 2568 NET_INC_STATS(sock_net(sk), mib_idx); 2569 } else if (tp->rack.reo_wnd_persist) { 2570 tp->rack.reo_wnd_persist--; 2571 } 2572 if (tcp_is_non_sack_preventing_reopen(sk)) 2573 return true; 2574 tcp_set_ca_state(sk, TCP_CA_Open); 2575 tp->is_sack_reneg = 0; 2576 return false; 2577 } 2578 2579 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ 2580 static bool tcp_try_undo_dsack(struct sock *sk) 2581 { 2582 struct tcp_sock *tp = tcp_sk(sk); 2583 2584 if (tp->undo_marker && !tp->undo_retrans) { 2585 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH, 2586 tp->rack.reo_wnd_persist + 1); 2587 DBGUNDO(sk, "D-SACK"); 2588 tcp_undo_cwnd_reduction(sk, false); 2589 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); 2590 return true; 2591 } 2592 return false; 2593 } 2594 2595 /* Undo during loss recovery after partial ACK or using F-RTO. */ 2596 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) 2597 { 2598 struct tcp_sock *tp = tcp_sk(sk); 2599 2600 if (frto_undo || tcp_may_undo(tp)) { 2601 tcp_undo_cwnd_reduction(sk, true); 2602 2603 DBGUNDO(sk, "partial loss"); 2604 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); 2605 if (frto_undo) 2606 NET_INC_STATS(sock_net(sk), 2607 LINUX_MIB_TCPSPURIOUSRTOS); 2608 inet_csk(sk)->icsk_retransmits = 0; 2609 if (tcp_is_non_sack_preventing_reopen(sk)) 2610 return true; 2611 if (frto_undo || tcp_is_sack(tp)) { 2612 tcp_set_ca_state(sk, TCP_CA_Open); 2613 tp->is_sack_reneg = 0; 2614 } 2615 return true; 2616 } 2617 return false; 2618 } 2619 2620 /* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937. 2621 * It computes the number of packets to send (sndcnt) based on packets newly 2622 * delivered: 2623 * 1) If the packets in flight is larger than ssthresh, PRR spreads the 2624 * cwnd reductions across a full RTT. 2625 * 2) Otherwise PRR uses packet conservation to send as much as delivered. 2626 * But when SND_UNA is acked without further losses, 2627 * slow starts cwnd up to ssthresh to speed up the recovery. 2628 */ 2629 static void tcp_init_cwnd_reduction(struct sock *sk) 2630 { 2631 struct tcp_sock *tp = tcp_sk(sk); 2632 2633 tp->high_seq = tp->snd_nxt; 2634 tp->tlp_high_seq = 0; 2635 tp->snd_cwnd_cnt = 0; 2636 tp->prior_cwnd = tcp_snd_cwnd(tp); 2637 tp->prr_delivered = 0; 2638 tp->prr_out = 0; 2639 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); 2640 tcp_ecn_queue_cwr(tp); 2641 } 2642 2643 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag) 2644 { 2645 struct tcp_sock *tp = tcp_sk(sk); 2646 int sndcnt = 0; 2647 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); 2648 2649 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) 2650 return; 2651 2652 tp->prr_delivered += newly_acked_sacked; 2653 if (delta < 0) { 2654 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + 2655 tp->prior_cwnd - 1; 2656 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; 2657 } else { 2658 sndcnt = max_t(int, tp->prr_delivered - tp->prr_out, 2659 newly_acked_sacked); 2660 if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost) 2661 sndcnt++; 2662 sndcnt = min(delta, sndcnt); 2663 } 2664 /* Force a fast retransmit upon entering fast recovery */ 2665 sndcnt = max(sndcnt, (tp->prr_out ? 
			      0 : 1));
2666	tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt);
2667 }
2668
2669 static inline void tcp_end_cwnd_reduction(struct sock *sk)
2670 {
2671	struct tcp_sock *tp = tcp_sk(sk);
2672
2673	if (inet_csk(sk)->icsk_ca_ops->cong_control)
2674		return;
2675
2676	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
2677	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
2678	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
2679		tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
2680		tp->snd_cwnd_stamp = tcp_jiffies32;
2681	}
2682	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2683 }
2684
2685 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
2686 void tcp_enter_cwr(struct sock *sk)
2687 {
2688	struct tcp_sock *tp = tcp_sk(sk);
2689
2690	tp->prior_ssthresh = 0;
2691	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2692		tp->undo_marker = 0;
2693		tcp_init_cwnd_reduction(sk);
2694		tcp_set_ca_state(sk, TCP_CA_CWR);
2695	}
2696 }
2697 EXPORT_SYMBOL(tcp_enter_cwr);
2698
2699 static void tcp_try_keep_open(struct sock *sk)
2700 {
2701	struct tcp_sock *tp = tcp_sk(sk);
2702	int state = TCP_CA_Open;
2703
2704	if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
2705		state = TCP_CA_Disorder;
2706
2707	if (inet_csk(sk)->icsk_ca_state != state) {
2708		tcp_set_ca_state(sk, state);
2709		tp->high_seq = tp->snd_nxt;
2710	}
2711 }
2712
2713 static void tcp_try_to_open(struct sock *sk, int flag)
2714 {
2715	struct tcp_sock *tp = tcp_sk(sk);
2716
2717	tcp_verify_left_out(tp);
2718
2719	if (!tcp_any_retrans_done(sk))
2720		tp->retrans_stamp = 0;
2721
2722	if (flag & FLAG_ECE)
2723		tcp_enter_cwr(sk);
2724
2725	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
2726		tcp_try_keep_open(sk);
2727	}
2728 }
2729
2730 static void tcp_mtup_probe_failed(struct sock *sk)
2731 {
2732	struct inet_connection_sock *icsk = inet_csk(sk);
2733
2734	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
2735	icsk->icsk_mtup.probe_size = 0;
2736	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
2737 }
2738
2739 static void tcp_mtup_probe_success(struct sock *sk)
2740 {
2741	struct tcp_sock *tp = tcp_sk(sk);
2742	struct inet_connection_sock *icsk = inet_csk(sk);
2743	u64 val;
2744
2745	tp->prior_ssthresh = tcp_current_ssthresh(sk);
2746
2747	val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache);
2748	do_div(val, icsk->icsk_mtup.probe_size);
2749	DEBUG_NET_WARN_ON_ONCE((u32)val != val);
2750	tcp_snd_cwnd_set(tp, max_t(u32, 1U, val));
2751
2752	tp->snd_cwnd_cnt = 0;
2753	tp->snd_cwnd_stamp = tcp_jiffies32;
2754	tp->snd_ssthresh = tcp_current_ssthresh(sk);
2755
2756	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
2757	icsk->icsk_mtup.probe_size = 0;
2758	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
2759	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
2760 }
2761
2762 /* Do a simple retransmit without using the backoff mechanisms in
2763  * tcp_timer. This is used for path mtu discovery.
2764  * The socket is already locked here.
2765  */
2766 void tcp_simple_retransmit(struct sock *sk)
2767 {
2768	const struct inet_connection_sock *icsk = inet_csk(sk);
2769	struct tcp_sock *tp = tcp_sk(sk);
2770	struct sk_buff *skb;
2771	int mss;
2772
2773	/* A fastopen SYN request is stored as two separate packets within
2774	 * the retransmit queue, this is done by tcp_send_syn_data().
2775	 * As a result simply checking the MSS of the frames in the queue
2776	 * will not work for the SYN packet.
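	 * (With mss = -1, the tcp_skb_seglen(skb) > mss test below is true
	 * for every frame, so the whole retransmit queue is marked lost.)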
2777 * 2778 * Us being here is an indication of a path MTU issue so we can 2779 * assume that the fastopen SYN was lost and just mark all the 2780 * frames in the retransmit queue as lost. We will use an MSS of 2781 * -1 to mark all frames as lost, otherwise compute the current MSS. 2782 */ 2783 if (tp->syn_data && sk->sk_state == TCP_SYN_SENT) 2784 mss = -1; 2785 else 2786 mss = tcp_current_mss(sk); 2787 2788 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 2789 if (tcp_skb_seglen(skb) > mss) 2790 tcp_mark_skb_lost(sk, skb); 2791 } 2792 2793 tcp_clear_retrans_hints_partial(tp); 2794 2795 if (!tp->lost_out) 2796 return; 2797 2798 if (tcp_is_reno(tp)) 2799 tcp_limit_reno_sacked(tp); 2800 2801 tcp_verify_left_out(tp); 2802 2803 /* Don't muck with the congestion window here. 2804 * Reason is that we do not increase amount of _data_ 2805 * in network, but units changed and effective 2806 * cwnd/ssthresh really reduced now. 2807 */ 2808 if (icsk->icsk_ca_state != TCP_CA_Loss) { 2809 tp->high_seq = tp->snd_nxt; 2810 tp->snd_ssthresh = tcp_current_ssthresh(sk); 2811 tp->prior_ssthresh = 0; 2812 tp->undo_marker = 0; 2813 tcp_set_ca_state(sk, TCP_CA_Loss); 2814 } 2815 tcp_xmit_retransmit_queue(sk); 2816 } 2817 EXPORT_SYMBOL(tcp_simple_retransmit); 2818 2819 void tcp_enter_recovery(struct sock *sk, bool ece_ack) 2820 { 2821 struct tcp_sock *tp = tcp_sk(sk); 2822 int mib_idx; 2823 2824 if (tcp_is_reno(tp)) 2825 mib_idx = LINUX_MIB_TCPRENORECOVERY; 2826 else 2827 mib_idx = LINUX_MIB_TCPSACKRECOVERY; 2828 2829 NET_INC_STATS(sock_net(sk), mib_idx); 2830 2831 tp->prior_ssthresh = 0; 2832 tcp_init_undo(tp); 2833 2834 if (!tcp_in_cwnd_reduction(sk)) { 2835 if (!ece_ack) 2836 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2837 tcp_init_cwnd_reduction(sk); 2838 } 2839 tcp_set_ca_state(sk, TCP_CA_Recovery); 2840 } 2841 2842 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are 2843 * recovered or spurious. Otherwise retransmits more on partial ACKs. 2844 */ 2845 static void tcp_process_loss(struct sock *sk, int flag, int num_dupack, 2846 int *rexmit) 2847 { 2848 struct tcp_sock *tp = tcp_sk(sk); 2849 bool recovered = !before(tp->snd_una, tp->high_seq); 2850 2851 if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) && 2852 tcp_try_undo_loss(sk, false)) 2853 return; 2854 2855 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ 2856 /* Step 3.b. A timeout is spurious if not all data are 2857 * lost, i.e., never-retransmitted data are (s)acked. 2858 */ 2859 if ((flag & FLAG_ORIG_SACK_ACKED) && 2860 tcp_try_undo_loss(sk, true)) 2861 return; 2862 2863 if (after(tp->snd_nxt, tp->high_seq)) { 2864 if (flag & FLAG_DATA_SACKED || num_dupack) 2865 tp->frto = 0; /* Step 3.a. loss was real */ 2866 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) { 2867 tp->high_seq = tp->snd_nxt; 2868 /* Step 2.b. Try send new data (but deferred until cwnd 2869 * is updated in tcp_ack()). Otherwise fall back to 2870 * the conventional recovery. 2871 */ 2872 if (!tcp_write_queue_empty(sk) && 2873 after(tcp_wnd_end(tp), tp->snd_nxt)) { 2874 *rexmit = REXMIT_NEW; 2875 return; 2876 } 2877 tp->frto = 0; 2878 } 2879 } 2880 2881 if (recovered) { 2882 /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */ 2883 tcp_try_undo_recovery(sk); 2884 return; 2885 } 2886 if (tcp_is_reno(tp)) { 2887 /* A Reno DUPACK means new data in F-RTO step 2.b above are 2888 * delivered. Lower inflight to clock out (re)transmissions. 
2889 */ 2890 if (after(tp->snd_nxt, tp->high_seq) && num_dupack) 2891 tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE); 2892 else if (flag & FLAG_SND_UNA_ADVANCED) 2893 tcp_reset_reno_sack(tp); 2894 } 2895 *rexmit = REXMIT_LOST; 2896 } 2897 2898 static bool tcp_force_fast_retransmit(struct sock *sk) 2899 { 2900 struct tcp_sock *tp = tcp_sk(sk); 2901 2902 return after(tcp_highest_sack_seq(tp), 2903 tp->snd_una + tp->reordering * tp->mss_cache); 2904 } 2905 2906 /* Undo during fast recovery after partial ACK. */ 2907 static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una, 2908 bool *do_lost) 2909 { 2910 struct tcp_sock *tp = tcp_sk(sk); 2911 2912 if (tp->undo_marker && tcp_packet_delayed(tp)) { 2913 /* Plain luck! Hole if filled with delayed 2914 * packet, rather than with a retransmit. Check reordering. 2915 */ 2916 tcp_check_sack_reordering(sk, prior_snd_una, 1); 2917 2918 /* We are getting evidence that the reordering degree is higher 2919 * than we realized. If there are no retransmits out then we 2920 * can undo. Otherwise we clock out new packets but do not 2921 * mark more packets lost or retransmit more. 2922 */ 2923 if (tp->retrans_out) 2924 return true; 2925 2926 if (!tcp_any_retrans_done(sk)) 2927 tp->retrans_stamp = 0; 2928 2929 DBGUNDO(sk, "partial recovery"); 2930 tcp_undo_cwnd_reduction(sk, true); 2931 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); 2932 tcp_try_keep_open(sk); 2933 } else { 2934 /* Partial ACK arrived. Force fast retransmit. */ 2935 *do_lost = tcp_force_fast_retransmit(sk); 2936 } 2937 return false; 2938 } 2939 2940 static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag) 2941 { 2942 struct tcp_sock *tp = tcp_sk(sk); 2943 2944 if (tcp_rtx_queue_empty(sk)) 2945 return; 2946 2947 if (unlikely(tcp_is_reno(tp))) { 2948 tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED); 2949 } else if (tcp_is_rack(sk)) { 2950 u32 prior_retrans = tp->retrans_out; 2951 2952 if (tcp_rack_mark_lost(sk)) 2953 *ack_flag &= ~FLAG_SET_XMIT_TIMER; 2954 if (prior_retrans > tp->retrans_out) 2955 *ack_flag |= FLAG_LOST_RETRANS; 2956 } 2957 } 2958 2959 /* Process an event, which can update packets-in-flight not trivially. 2960 * Main goal of this function is to calculate new estimate for left_out, 2961 * taking into account both packets sitting in receiver's buffer and 2962 * packets lost by network. 2963 * 2964 * Besides that it updates the congestion state when packet loss or ECN 2965 * is detected. But it does not reduce the cwnd, it is done by the 2966 * congestion control later. 2967 * 2968 * It does _not_ decide what to send, it is made in function 2969 * tcp_xmit_retransmit_queue(). 2970 */ 2971 static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una, 2972 int num_dupack, int *ack_flag, int *rexmit) 2973 { 2974 struct inet_connection_sock *icsk = inet_csk(sk); 2975 struct tcp_sock *tp = tcp_sk(sk); 2976 int fast_rexmit = 0, flag = *ack_flag; 2977 bool ece_ack = flag & FLAG_ECE; 2978 bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) && 2979 tcp_force_fast_retransmit(sk)); 2980 2981 if (!tp->packets_out && tp->sacked_out) 2982 tp->sacked_out = 0; 2983 2984 /* Now state machine starts. 2985 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ 2986 if (ece_ack) 2987 tp->prior_ssthresh = 0; 2988 2989 /* B. In all the states check for reneging SACKs. */ 2990 if (tcp_check_sack_reneging(sk, ack_flag)) 2991 return; 2992 2993 /* C. Check consistency of the current state. 
	 */
2994	tcp_verify_left_out(tp);
2995
2996	/* D. Check state exit conditions. State can be terminated
2997	 *    when high_seq is ACKed. */
2998	if (icsk->icsk_ca_state == TCP_CA_Open) {
2999		WARN_ON(tp->retrans_out != 0 && !tp->syn_data);
3000		tp->retrans_stamp = 0;
3001	} else if (!before(tp->snd_una, tp->high_seq)) {
3002		switch (icsk->icsk_ca_state) {
3003		case TCP_CA_CWR:
3004			/* CWR should be held until something *above* high_seq
3005			 * is ACKed, for the CWR bit to reach the receiver. */
3006			if (tp->snd_una != tp->high_seq) {
3007				tcp_end_cwnd_reduction(sk);
3008				tcp_set_ca_state(sk, TCP_CA_Open);
3009			}
3010			break;
3011
3012		case TCP_CA_Recovery:
3013			if (tcp_is_reno(tp))
3014				tcp_reset_reno_sack(tp);
3015			if (tcp_try_undo_recovery(sk))
3016				return;
3017			tcp_end_cwnd_reduction(sk);
3018			break;
3019		}
3020	}
3021
3022	/* E. Process state. */
3023	switch (icsk->icsk_ca_state) {
3024	case TCP_CA_Recovery:
3025		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
3026			if (tcp_is_reno(tp))
3027				tcp_add_reno_sack(sk, num_dupack, ece_ack);
3028		} else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost))
3029			return;
3030
3031		if (tcp_try_undo_dsack(sk))
3032			tcp_try_keep_open(sk);
3033
3034		tcp_identify_packet_loss(sk, ack_flag);
3035		if (icsk->icsk_ca_state != TCP_CA_Recovery) {
3036			if (!tcp_time_to_recover(sk, flag))
3037				return;
3038			/* Undo reverts the recovery state. If loss is evident,
3039			 * start a new recovery (e.g. reordering then loss).
3040			 */
3041			tcp_enter_recovery(sk, ece_ack);
3042		}
3043		break;
3044	case TCP_CA_Loss:
3045		tcp_process_loss(sk, flag, num_dupack, rexmit);
3046		tcp_identify_packet_loss(sk, ack_flag);
3047		if (!(icsk->icsk_ca_state == TCP_CA_Open ||
3048		      (*ack_flag & FLAG_LOST_RETRANS)))
3049			return;
3050		/* Change state if cwnd is undone or retransmits are lost */
3051		fallthrough;
3052	default:
3053		if (tcp_is_reno(tp)) {
3054			if (flag & FLAG_SND_UNA_ADVANCED)
3055				tcp_reset_reno_sack(tp);
3056			tcp_add_reno_sack(sk, num_dupack, ece_ack);
3057		}
3058
3059		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3060			tcp_try_undo_dsack(sk);
3061
3062		tcp_identify_packet_loss(sk, ack_flag);
3063		if (!tcp_time_to_recover(sk, flag)) {
3064			tcp_try_to_open(sk, flag);
3065			return;
3066		}
3067
3068		/* MTU probe failure: don't reduce cwnd */
3069		if (icsk->icsk_ca_state < TCP_CA_CWR &&
3070		    icsk->icsk_mtup.probe_size &&
3071		    tp->snd_una == tp->mtu_probe.probe_seq_start) {
3072			tcp_mtup_probe_failed(sk);
3073			/* Restores the reduction we did in tcp_mtup_probe() */
3074			tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
3075			tcp_simple_retransmit(sk);
3076			return;
3077		}
3078
3079		/* Otherwise enter Recovery state */
3080		tcp_enter_recovery(sk, ece_ack);
3081		fast_rexmit = 1;
3082	}
3083
3084	if (!tcp_is_rack(sk) && do_lost)
3085		tcp_update_scoreboard(sk, fast_rexmit);
3086	*rexmit = REXMIT_LOST;
3087 }
3088
3089 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
3090 {
3091	u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
3092	struct tcp_sock *tp = tcp_sk(sk);
3093
3094	if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
3095		/* If the remote keeps returning delayed ACKs, eventually
3096		 * the min filter would pick it up and overestimate the
3097		 * propagation delay when it expires. Skip suspected delayed ACKs.
3098		 */
3099		return;
3100	}
3101	minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
3102			   rtt_us ?
			   : jiffies_to_usecs(1));
3103 }
3104
3105 static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
3106				long seq_rtt_us, long sack_rtt_us,
3107				long ca_rtt_us, struct rate_sample *rs)
3108 {
3109	const struct tcp_sock *tp = tcp_sk(sk);
3110
3111	/* Prefer RTT measured from ACK's timing to TS-ECR. This is because
3112	 * broken middle-boxes or peers may corrupt TS-ECR fields. But
3113	 * Karn's algorithm forbids taking RTT if some retransmitted data
3114	 * is acked (RFC6298).
3115	 */
3116	if (seq_rtt_us < 0)
3117		seq_rtt_us = sack_rtt_us;
3118
3119	/* RTTM Rule: A TSecr value received in a segment is used to
3120	 * update the averaged RTT measurement only if the segment
3121	 * acknowledges some new data, i.e., only if it advances the
3122	 * left edge of the send window.
3123	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
3124	 */
3125	if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
3126	    flag & FLAG_ACKED) {
3127		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
3128
3129		if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
3130			if (!delta)
3131				delta = 1;
3132			seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
3133			ca_rtt_us = seq_rtt_us;
3134		}
3135	}
3136	rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
3137	if (seq_rtt_us < 0)
3138		return false;
3139
3140	/* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
3141	 * always taken together with ACK, SACK, or TS-opts. Any negative
3142	 * values will be skipped with the seq_rtt_us < 0 check above.
3143	 */
3144	tcp_update_rtt_min(sk, ca_rtt_us, flag);
3145	tcp_rtt_estimator(sk, seq_rtt_us);
3146	tcp_set_rto(sk);
3147
3148	/* RFC6298: only reset backoff on valid RTT measurement. */
3149	inet_csk(sk)->icsk_backoff = 0;
3150	return true;
3151 }
3152
3153 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
3154 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
3155 {
3156	struct rate_sample rs;
3157	long rtt_us = -1L;
3158
3159	if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
3160		rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
3161
3162	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs);
3163 }
3164
3165
3166 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
3167 {
3168	const struct inet_connection_sock *icsk = inet_csk(sk);
3169
3170	icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
3171	tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32;
3172 }
3173
3174 /* Restart timer after forward progress on connection.
3175  * RFC2988 recommends restarting the timer to now + rto.
3176  */
3177 void tcp_rearm_rto(struct sock *sk)
3178 {
3179	const struct inet_connection_sock *icsk = inet_csk(sk);
3180	struct tcp_sock *tp = tcp_sk(sk);
3181
3182	/* If the retrans timer is currently being used by Fast Open
3183	 * for SYN-ACK retrans purposes, stay put.
3184	 */
3185	if (rcu_access_pointer(tp->fastopen_rsk))
3186		return;
3187
3188	if (!tp->packets_out) {
3189		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
3190	} else {
3191		u32 rto = inet_csk(sk)->icsk_rto;
3192		/* Offset the time elapsed after installing regular RTO */
3193		if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
3194		    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
3195			s64 delta_us = tcp_rto_delta_us(sk);
3196			/* delta_us may not be positive if the socket is locked
3197			 * when the retrans timer fires and is rescheduled.
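			 * (Hence the max_t(int, delta_us, 1) clamp below,
			 * which re-arms the timer to fire almost immediately
			 * rather than in the past.)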
3198			 */
3199			rto = usecs_to_jiffies(max_t(int, delta_us, 1));
3200		}
3201		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
3202				     TCP_RTO_MAX);
3203	}
3204 }
3205
3206 /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
3207 static void tcp_set_xmit_timer(struct sock *sk)
3208 {
3209	if (!tcp_schedule_loss_probe(sk, true))
3210		tcp_rearm_rto(sk);
3211 }
3212
3213 /* If we get here, the whole TSO packet has not been acked. */
3214 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
3215 {
3216	struct tcp_sock *tp = tcp_sk(sk);
3217	u32 packets_acked;
3218
3219	BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));
3220
3221	packets_acked = tcp_skb_pcount(skb);
3222	if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3223		return 0;
3224	packets_acked -= tcp_skb_pcount(skb);
3225
3226	if (packets_acked) {
3227		BUG_ON(tcp_skb_pcount(skb) == 0);
3228		BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
3229	}
3230
3231	return packets_acked;
3232 }
3233
3234 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3235			    const struct sk_buff *ack_skb, u32 prior_snd_una)
3236 {
3237	const struct skb_shared_info *shinfo;
3238
3239	/* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
3240	if (likely(!TCP_SKB_CB(skb)->txstamp_ack))
3241		return;
3242
3243	shinfo = skb_shinfo(skb);
3244	if (!before(shinfo->tskey, prior_snd_una) &&
3245	    before(shinfo->tskey, tcp_sk(sk)->snd_una)) {
3246		tcp_skb_tsorted_save(skb) {
3247			__skb_tstamp_tx(skb, ack_skb, NULL, sk, SCM_TSTAMP_ACK);
3248		} tcp_skb_tsorted_restore(skb);
3249	}
3250 }
3251
3252 /* Remove acknowledged frames from the retransmission queue. If our packet
3253  * is before the ack sequence we can discard it as it's confirmed to have
3254  * arrived at the other end.
3255  */
3256 static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
3257				u32 prior_fack, u32 prior_snd_una,
3258				struct tcp_sacktag_state *sack, bool ece_ack)
3259 {
3260	const struct inet_connection_sock *icsk = inet_csk(sk);
3261	u64 first_ackt, last_ackt;
3262	struct tcp_sock *tp = tcp_sk(sk);
3263	u32 prior_sacked = tp->sacked_out;
3264	u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */
3265	struct sk_buff *skb, *next;
3266	bool fully_acked = true;
3267	long sack_rtt_us = -1L;
3268	long seq_rtt_us = -1L;
3269	long ca_rtt_us = -1L;
3270	u32 pkts_acked = 0;
3271	bool rtt_update;
3272	int flag = 0;
3273
3274	first_ackt = 0;
3275
3276	for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) {
3277		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
3278		const u32 start_seq = scb->seq;
3279		u8 sacked = scb->sacked;
3280		u32 acked_pcount;
3281
3282		/* Determine how many packets and what bytes were acked, TSO and otherwise */
3283		if (after(scb->end_seq, tp->snd_una)) {
3284			if (tcp_skb_pcount(skb) == 1 ||
3285			    !after(tp->snd_una, scb->seq))
3286				break;
3287
3288			acked_pcount = tcp_tso_acked(sk, skb);
3289			if (!acked_pcount)
3290				break;
3291			fully_acked = false;
3292		} else {
3293			acked_pcount = tcp_skb_pcount(skb);
3294		}
3295
3296		if (unlikely(sacked & TCPCB_RETRANS)) {
3297			if (sacked & TCPCB_SACKED_RETRANS)
3298				tp->retrans_out -= acked_pcount;
3299			flag |= FLAG_RETRANS_DATA_ACKED;
3300		} else if (!(sacked & TCPCB_SACKED_ACKED)) {
3301			last_ackt = tcp_skb_timestamp_us(skb);
3302			WARN_ON_ONCE(last_ackt == 0);
3303			if (!first_ackt)
3304				first_ackt = last_ackt;
3305
3306			if (before(start_seq, reord))
3307				reord = start_seq;
3308			if (!after(scb->end_seq, tp->high_seq))
3309				flag |= FLAG_ORIG_SACK_ACKED;
3310		}
3311
3312		if (sacked & TCPCB_SACKED_ACKED) {
3313			tp->sacked_out -= acked_pcount;
3314		} else if (tcp_is_sack(tp)) {
3315			tcp_count_delivered(tp, acked_pcount, ece_ack);
3316			if (!tcp_skb_spurious_retrans(tp, skb))
3317				tcp_rack_advance(tp, sacked, scb->end_seq,
3318						 tcp_skb_timestamp_us(skb));
3319		}
3320		if (sacked & TCPCB_LOST)
3321			tp->lost_out -= acked_pcount;
3322
3323		tp->packets_out -= acked_pcount;
3324		pkts_acked += acked_pcount;
3325		tcp_rate_skb_delivered(sk, skb, sack->rate);
3326
3327		/* Initial outgoing SYN's get put onto the write_queue
3328		 * just like anything else we transmit. It is not
3329		 * true data, and if we misinform our callers that
3330		 * this ACK acks real data, we will erroneously exit
3331		 * connection startup slow start one packet too
3332		 * quickly. This is severely frowned upon behavior.
3333 */ 3334 if (likely(!(scb->tcp_flags & TCPHDR_SYN))) { 3335 flag |= FLAG_DATA_ACKED; 3336 } else { 3337 flag |= FLAG_SYN_ACKED; 3338 tp->retrans_stamp = 0; 3339 } 3340 3341 if (!fully_acked) 3342 break; 3343 3344 tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una); 3345 3346 next = skb_rb_next(skb); 3347 if (unlikely(skb == tp->retransmit_skb_hint)) 3348 tp->retransmit_skb_hint = NULL; 3349 if (unlikely(skb == tp->lost_skb_hint)) 3350 tp->lost_skb_hint = NULL; 3351 tcp_highest_sack_replace(sk, skb, next); 3352 tcp_rtx_queue_unlink_and_free(skb, sk); 3353 } 3354 3355 if (!skb) 3356 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 3357 3358 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) 3359 tp->snd_up = tp->snd_una; 3360 3361 if (skb) { 3362 tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una); 3363 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 3364 flag |= FLAG_SACK_RENEGING; 3365 } 3366 3367 if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) { 3368 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); 3369 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt); 3370 3371 if (pkts_acked == 1 && fully_acked && !prior_sacked && 3372 (tp->snd_una - prior_snd_una) < tp->mss_cache && 3373 sack->rate->prior_delivered + 1 == tp->delivered && 3374 !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) { 3375 /* Conservatively mark a delayed ACK. It's typically 3376 * from a lone runt packet over the round trip to 3377 * a receiver w/o out-of-order or CE events. 3378 */ 3379 flag |= FLAG_ACK_MAYBE_DELAYED; 3380 } 3381 } 3382 if (sack->first_sackt) { 3383 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt); 3384 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt); 3385 } 3386 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us, 3387 ca_rtt_us, sack->rate); 3388 3389 if (flag & FLAG_ACKED) { 3390 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ 3391 if (unlikely(icsk->icsk_mtup.probe_size && 3392 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { 3393 tcp_mtup_probe_success(sk); 3394 } 3395 3396 if (tcp_is_reno(tp)) { 3397 tcp_remove_reno_sacks(sk, pkts_acked, ece_ack); 3398 3399 /* If any of the cumulatively ACKed segments was 3400 * retransmitted, non-SACK case cannot confirm that 3401 * progress was due to original transmission due to 3402 * lack of TCPCB_SACKED_ACKED bits even if some of 3403 * the packets may have been never retransmitted. 3404 */ 3405 if (flag & FLAG_RETRANS_DATA_ACKED) 3406 flag &= ~FLAG_ORIG_SACK_ACKED; 3407 } else { 3408 int delta; 3409 3410 /* Non-retransmitted hole got filled? That's reordering */ 3411 if (before(reord, prior_fack)) 3412 tcp_check_sack_reordering(sk, reord, 0); 3413 3414 delta = prior_sacked - tp->sacked_out; 3415 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); 3416 } 3417 } else if (skb && rtt_update && sack_rtt_us >= 0 && 3418 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, 3419 tcp_skb_timestamp_us(skb))) { 3420 /* Do not re-arm RTO if the sack RTT is measured from data sent 3421 * after when the head was last (re)transmitted. Otherwise the 3422 * timeout may continue to extend in loss recovery. 
3423 */ 3424 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ 3425 } 3426 3427 if (icsk->icsk_ca_ops->pkts_acked) { 3428 struct ack_sample sample = { .pkts_acked = pkts_acked, 3429 .rtt_us = sack->rate->rtt_us }; 3430 3431 sample.in_flight = tp->mss_cache * 3432 (tp->delivered - sack->rate->prior_delivered); 3433 icsk->icsk_ca_ops->pkts_acked(sk, &sample); 3434 } 3435 3436 #if FASTRETRANS_DEBUG > 0 3437 WARN_ON((int)tp->sacked_out < 0); 3438 WARN_ON((int)tp->lost_out < 0); 3439 WARN_ON((int)tp->retrans_out < 0); 3440 if (!tp->packets_out && tcp_is_sack(tp)) { 3441 icsk = inet_csk(sk); 3442 if (tp->lost_out) { 3443 pr_debug("Leak l=%u %d\n", 3444 tp->lost_out, icsk->icsk_ca_state); 3445 tp->lost_out = 0; 3446 } 3447 if (tp->sacked_out) { 3448 pr_debug("Leak s=%u %d\n", 3449 tp->sacked_out, icsk->icsk_ca_state); 3450 tp->sacked_out = 0; 3451 } 3452 if (tp->retrans_out) { 3453 pr_debug("Leak r=%u %d\n", 3454 tp->retrans_out, icsk->icsk_ca_state); 3455 tp->retrans_out = 0; 3456 } 3457 } 3458 #endif 3459 return flag; 3460 } 3461 3462 static void tcp_ack_probe(struct sock *sk) 3463 { 3464 struct inet_connection_sock *icsk = inet_csk(sk); 3465 struct sk_buff *head = tcp_send_head(sk); 3466 const struct tcp_sock *tp = tcp_sk(sk); 3467 3468 /* Was it a usable window open? */ 3469 if (!head) 3470 return; 3471 if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) { 3472 icsk->icsk_backoff = 0; 3473 icsk->icsk_probes_tstamp = 0; 3474 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); 3475 /* Socket must be waked up by subsequent tcp_data_snd_check(). 3476 * This function is not for random using! 3477 */ 3478 } else { 3479 unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX); 3480 3481 when = tcp_clamp_probe0_to_user_timeout(sk, when); 3482 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX); 3483 } 3484 } 3485 3486 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag) 3487 { 3488 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 3489 inet_csk(sk)->icsk_ca_state != TCP_CA_Open; 3490 } 3491 3492 /* Decide wheather to run the increase function of congestion control. */ 3493 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag) 3494 { 3495 /* If reordering is high then always grow cwnd whenever data is 3496 * delivered regardless of its ordering. Otherwise stay conservative 3497 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/ 3498 * new SACK or ECE mark may first advance cwnd here and later reduce 3499 * cwnd in tcp_fastretrans_alert() based on more states. 3500 */ 3501 if (tcp_sk(sk)->reordering > 3502 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering)) 3503 return flag & FLAG_FORWARD_PROGRESS; 3504 3505 return flag & FLAG_DATA_ACKED; 3506 } 3507 3508 /* The "ultimate" congestion control function that aims to replace the rigid 3509 * cwnd increase and decrease control (tcp_cong_avoid,tcp_*cwnd_reduction). 3510 * It's called toward the end of processing an ACK with precise rate 3511 * information. All transmission or retransmission are delayed afterwards. 
3512 */ 3513 static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked, 3514 int flag, const struct rate_sample *rs) 3515 { 3516 const struct inet_connection_sock *icsk = inet_csk(sk); 3517 3518 if (icsk->icsk_ca_ops->cong_control) { 3519 icsk->icsk_ca_ops->cong_control(sk, rs); 3520 return; 3521 } 3522 3523 if (tcp_in_cwnd_reduction(sk)) { 3524 /* Reduce cwnd if state mandates */ 3525 tcp_cwnd_reduction(sk, acked_sacked, rs->losses, flag); 3526 } else if (tcp_may_raise_cwnd(sk, flag)) { 3527 /* Advance cwnd if state allows */ 3528 tcp_cong_avoid(sk, ack, acked_sacked); 3529 } 3530 tcp_update_pacing_rate(sk); 3531 } 3532 3533 /* Check that window update is acceptable. 3534 * The function assumes that snd_una<=ack<=snd_next. 3535 */ 3536 static inline bool tcp_may_update_window(const struct tcp_sock *tp, 3537 const u32 ack, const u32 ack_seq, 3538 const u32 nwin) 3539 { 3540 return after(ack, tp->snd_una) || 3541 after(ack_seq, tp->snd_wl1) || 3542 (ack_seq == tp->snd_wl1 && (nwin > tp->snd_wnd || !nwin)); 3543 } 3544 3545 /* If we update tp->snd_una, also update tp->bytes_acked */ 3546 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) 3547 { 3548 u32 delta = ack - tp->snd_una; 3549 3550 sock_owned_by_me((struct sock *)tp); 3551 tp->bytes_acked += delta; 3552 tp->snd_una = ack; 3553 } 3554 3555 /* If we update tp->rcv_nxt, also update tp->bytes_received */ 3556 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) 3557 { 3558 u32 delta = seq - tp->rcv_nxt; 3559 3560 sock_owned_by_me((struct sock *)tp); 3561 tp->bytes_received += delta; 3562 WRITE_ONCE(tp->rcv_nxt, seq); 3563 } 3564 3565 /* Update our send window. 3566 * 3567 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 3568 * and in FreeBSD. NetBSD's one is even worse.) is wrong. 3569 */ 3570 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, 3571 u32 ack_seq) 3572 { 3573 struct tcp_sock *tp = tcp_sk(sk); 3574 int flag = 0; 3575 u32 nwin = ntohs(tcp_hdr(skb)->window); 3576 3577 if (likely(!tcp_hdr(skb)->syn)) 3578 nwin <<= tp->rx_opt.snd_wscale; 3579 3580 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { 3581 flag |= FLAG_WIN_UPDATE; 3582 tcp_update_wl(tp, ack_seq); 3583 3584 if (tp->snd_wnd != nwin) { 3585 tp->snd_wnd = nwin; 3586 3587 /* Note, it is the only place, where 3588 * fast path is recovered for sending TCP. 3589 */ 3590 tp->pred_flags = 0; 3591 tcp_fast_path_check(sk); 3592 3593 if (!tcp_write_queue_empty(sk)) 3594 tcp_slow_start_after_idle_check(sk); 3595 3596 if (nwin > tp->max_window) { 3597 tp->max_window = nwin; 3598 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); 3599 } 3600 } 3601 } 3602 3603 tcp_snd_una_update(tp, ack); 3604 3605 return flag; 3606 } 3607 3608 static bool __tcp_oow_rate_limited(struct net *net, int mib_idx, 3609 u32 *last_oow_ack_time) 3610 { 3611 /* Paired with the WRITE_ONCE() in this function. */ 3612 u32 val = READ_ONCE(*last_oow_ack_time); 3613 3614 if (val) { 3615 s32 elapsed = (s32)(tcp_jiffies32 - val); 3616 3617 if (0 <= elapsed && 3618 elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) { 3619 NET_INC_STATS(net, mib_idx); 3620 return true; /* rate-limited: don't send yet! */ 3621 } 3622 } 3623 3624 /* Paired with the prior READ_ONCE() and with itself, 3625 * as we might be lockless. 3626 */ 3627 WRITE_ONCE(*last_oow_ack_time, tcp_jiffies32); 3628 3629 return false; /* not rate-limited: go ahead, send dupack now! 
*/ 3630 } 3631 3632 /* Return true if we're currently rate-limiting out-of-window ACKs and 3633 * thus shouldn't send a dupack right now. We rate-limit dupacks in 3634 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS 3635 * attacks that send repeated SYNs or ACKs for the same connection. To 3636 * do this, we do not send a duplicate SYNACK or ACK if the remote 3637 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate. 3638 */ 3639 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, 3640 int mib_idx, u32 *last_oow_ack_time) 3641 { 3642 /* Data packets without SYNs are not likely part of an ACK loop. */ 3643 if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && 3644 !tcp_hdr(skb)->syn) 3645 return false; 3646 3647 return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time); 3648 } 3649 3650 /* RFC 5961 7 [ACK Throttling] */ 3651 static void tcp_send_challenge_ack(struct sock *sk) 3652 { 3653 struct tcp_sock *tp = tcp_sk(sk); 3654 struct net *net = sock_net(sk); 3655 u32 count, now, ack_limit; 3656 3657 /* First check our per-socket dupack rate limit. */ 3658 if (__tcp_oow_rate_limited(net, 3659 LINUX_MIB_TCPACKSKIPPEDCHALLENGE, 3660 &tp->last_oow_ack_time)) 3661 return; 3662 3663 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit); 3664 if (ack_limit == INT_MAX) 3665 goto send_ack; 3666 3667 /* Then check host-wide RFC 5961 rate limit. */ 3668 now = jiffies / HZ; 3669 if (now != READ_ONCE(net->ipv4.tcp_challenge_timestamp)) { 3670 u32 half = (ack_limit + 1) >> 1; 3671 3672 WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now); 3673 WRITE_ONCE(net->ipv4.tcp_challenge_count, 3674 get_random_u32_inclusive(half, ack_limit + half - 1)); 3675 } 3676 count = READ_ONCE(net->ipv4.tcp_challenge_count); 3677 if (count > 0) { 3678 WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1); 3679 send_ack: 3680 NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK); 3681 tcp_send_ack(sk); 3682 } 3683 } 3684 3685 static void tcp_store_ts_recent(struct tcp_sock *tp) 3686 { 3687 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 3688 tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); 3689 } 3690 3691 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) 3692 { 3693 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { 3694 /* PAWS bug workaround wrt. ACK frames, the PAWS discard 3695 * extra check below makes sure this can only happen 3696 * for pure ACK frames. -DaveM 3697 * 3698 * Not only, also it occurs for expired timestamps. 3699 */ 3700 3701 if (tcp_paws_check(&tp->rx_opt, 0)) 3702 tcp_store_ts_recent(tp); 3703 } 3704 } 3705 3706 /* This routine deals with acks during a TLP episode and ends an episode by 3707 * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack 3708 */ 3709 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) 3710 { 3711 struct tcp_sock *tp = tcp_sk(sk); 3712 3713 if (before(ack, tp->tlp_high_seq)) 3714 return; 3715 3716 if (!tp->tlp_retrans) { 3717 /* TLP of new data has been acknowledged */ 3718 tp->tlp_high_seq = 0; 3719 } else if (flag & FLAG_DSACK_TLP) { 3720 /* This DSACK means original and TLP probe arrived; no loss */ 3721 tp->tlp_high_seq = 0; 3722 } else if (after(ack, tp->tlp_high_seq)) { 3723 /* ACK advances: there was a loss, so reduce cwnd. 
Reset 3724 * tlp_high_seq in tcp_init_cwnd_reduction() 3725 */ 3726 tcp_init_cwnd_reduction(sk); 3727 tcp_set_ca_state(sk, TCP_CA_CWR); 3728 tcp_end_cwnd_reduction(sk); 3729 tcp_try_keep_open(sk); 3730 NET_INC_STATS(sock_net(sk), 3731 LINUX_MIB_TCPLOSSPROBERECOVERY); 3732 } else if (!(flag & (FLAG_SND_UNA_ADVANCED | 3733 FLAG_NOT_DUP | FLAG_DATA_SACKED))) { 3734 /* Pure dupack: original and TLP probe arrived; no loss */ 3735 tp->tlp_high_seq = 0; 3736 } 3737 } 3738 3739 static inline void tcp_in_ack_event(struct sock *sk, u32 flags) 3740 { 3741 const struct inet_connection_sock *icsk = inet_csk(sk); 3742 3743 if (icsk->icsk_ca_ops->in_ack_event) 3744 icsk->icsk_ca_ops->in_ack_event(sk, flags); 3745 } 3746 3747 /* Congestion control has updated the cwnd already. So if we're in 3748 * loss recovery then now we do any new sends (for FRTO) or 3749 * retransmits (for CA_Loss or CA_recovery) that make sense. 3750 */ 3751 static void tcp_xmit_recovery(struct sock *sk, int rexmit) 3752 { 3753 struct tcp_sock *tp = tcp_sk(sk); 3754 3755 if (rexmit == REXMIT_NONE || sk->sk_state == TCP_SYN_SENT) 3756 return; 3757 3758 if (unlikely(rexmit == REXMIT_NEW)) { 3759 __tcp_push_pending_frames(sk, tcp_current_mss(sk), 3760 TCP_NAGLE_OFF); 3761 if (after(tp->snd_nxt, tp->high_seq)) 3762 return; 3763 tp->frto = 0; 3764 } 3765 tcp_xmit_retransmit_queue(sk); 3766 } 3767 3768 /* Returns the number of packets newly acked or sacked by the current ACK */ 3769 static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag) 3770 { 3771 const struct net *net = sock_net(sk); 3772 struct tcp_sock *tp = tcp_sk(sk); 3773 u32 delivered; 3774 3775 delivered = tp->delivered - prior_delivered; 3776 NET_ADD_STATS(net, LINUX_MIB_TCPDELIVERED, delivered); 3777 if (flag & FLAG_ECE) 3778 NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, delivered); 3779 3780 return delivered; 3781 } 3782 3783 /* This routine deals with incoming acks, but not outgoing ones. */ 3784 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) 3785 { 3786 struct inet_connection_sock *icsk = inet_csk(sk); 3787 struct tcp_sock *tp = tcp_sk(sk); 3788 struct tcp_sacktag_state sack_state; 3789 struct rate_sample rs = { .prior_delivered = 0 }; 3790 u32 prior_snd_una = tp->snd_una; 3791 bool is_sack_reneg = tp->is_sack_reneg; 3792 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3793 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3794 int num_dupack = 0; 3795 int prior_packets = tp->packets_out; 3796 u32 delivered = tp->delivered; 3797 u32 lost = tp->lost; 3798 int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */ 3799 u32 prior_fack; 3800 3801 sack_state.first_sackt = 0; 3802 sack_state.rate = &rs; 3803 sack_state.sack_delivered = 0; 3804 3805 /* We very likely will need to access rtx queue. */ 3806 prefetch(sk->tcp_rtx_queue.rb_node); 3807 3808 /* If the ack is older than previous acks 3809 * then we can probably ignore it. 3810 */ 3811 if (before(ack, prior_snd_una)) { 3812 u32 max_window; 3813 3814 /* do not accept ACK for bytes we never sent. */ 3815 max_window = min_t(u64, tp->max_window, tp->bytes_acked); 3816 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ 3817 if (before(ack, prior_snd_una - max_window)) { 3818 if (!(flag & FLAG_NO_CHALLENGE_ACK)) 3819 tcp_send_challenge_ack(sk); 3820 return -SKB_DROP_REASON_TCP_TOO_OLD_ACK; 3821 } 3822 goto old_ack; 3823 } 3824 3825 /* If the ack includes data we haven't sent yet, discard 3826 * this segment (RFC793 Section 3.9). 
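	 * For example: snd_nxt == 1000 but the ACK claims 1500, i.e. bytes
	 * we never transmitted, so the ACK cannot be genuine.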
3827 */ 3828 if (after(ack, tp->snd_nxt)) 3829 return -SKB_DROP_REASON_TCP_ACK_UNSENT_DATA; 3830 3831 if (after(ack, prior_snd_una)) { 3832 flag |= FLAG_SND_UNA_ADVANCED; 3833 icsk->icsk_retransmits = 0; 3834 3835 #if IS_ENABLED(CONFIG_TLS_DEVICE) 3836 if (static_branch_unlikely(&clean_acked_data_enabled.key)) 3837 if (icsk->icsk_clean_acked) 3838 icsk->icsk_clean_acked(sk, ack); 3839 #endif 3840 } 3841 3842 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una; 3843 rs.prior_in_flight = tcp_packets_in_flight(tp); 3844 3845 /* ts_recent update must be made after we are sure that the packet 3846 * is in window. 3847 */ 3848 if (flag & FLAG_UPDATE_TS_RECENT) 3849 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 3850 3851 if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) == 3852 FLAG_SND_UNA_ADVANCED) { 3853 /* Window is constant, pure forward advance. 3854 * No more checks are required. 3855 * Note, we use the fact that SND.UNA>=SND.WL2. 3856 */ 3857 tcp_update_wl(tp, ack_seq); 3858 tcp_snd_una_update(tp, ack); 3859 flag |= FLAG_WIN_UPDATE; 3860 3861 tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); 3862 3863 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); 3864 } else { 3865 u32 ack_ev_flags = CA_ACK_SLOWPATH; 3866 3867 if (ack_seq != TCP_SKB_CB(skb)->end_seq) 3868 flag |= FLAG_DATA; 3869 else 3870 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); 3871 3872 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); 3873 3874 if (TCP_SKB_CB(skb)->sacked) 3875 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, 3876 &sack_state); 3877 3878 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { 3879 flag |= FLAG_ECE; 3880 ack_ev_flags |= CA_ACK_ECE; 3881 } 3882 3883 if (sack_state.sack_delivered) 3884 tcp_count_delivered(tp, sack_state.sack_delivered, 3885 flag & FLAG_ECE); 3886 3887 if (flag & FLAG_WIN_UPDATE) 3888 ack_ev_flags |= CA_ACK_WIN_UPDATE; 3889 3890 tcp_in_ack_event(sk, ack_ev_flags); 3891 } 3892 3893 /* This is a deviation from RFC3168 since it states that: 3894 * "When the TCP data sender is ready to set the CWR bit after reducing 3895 * the congestion window, it SHOULD set the CWR bit only on the first 3896 * new data packet that it transmits." 3897 * We accept CWR on pure ACKs to be more robust 3898 * with widely-deployed TCP implementations that do this. 3899 */ 3900 tcp_ecn_accept_cwr(sk, skb); 3901 3902 /* We passed data and got it acked, remove any soft error 3903 * log. Something worked... 3904 */ 3905 WRITE_ONCE(sk->sk_err_soft, 0); 3906 icsk->icsk_probes_out = 0; 3907 tp->rcv_tstamp = tcp_jiffies32; 3908 if (!prior_packets) 3909 goto no_queue; 3910 3911 /* See if we can take anything off of the retransmit queue. */ 3912 flag |= tcp_clean_rtx_queue(sk, skb, prior_fack, prior_snd_una, 3913 &sack_state, flag & FLAG_ECE); 3914 3915 tcp_rack_update_reo_wnd(sk, &rs); 3916 3917 if (tp->tlp_high_seq) 3918 tcp_process_tlp_ack(sk, ack, flag); 3919 3920 if (tcp_ack_is_dubious(sk, flag)) { 3921 if (!(flag & (FLAG_SND_UNA_ADVANCED | 3922 FLAG_NOT_DUP | FLAG_DSACKING_ACK))) { 3923 num_dupack = 1; 3924 /* Consider if pure acks were aggregated in tcp_add_backlog() */ 3925 if (!(flag & FLAG_DATA)) 3926 num_dupack = max_t(u16, 1, skb_shinfo(skb)->gso_segs); 3927 } 3928 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag, 3929 &rexmit); 3930 } 3931 3932 /* If needed, reset TLP/RTO timer when RACK doesn't set. 
*/ 3933 if (flag & FLAG_SET_XMIT_TIMER) 3934 tcp_set_xmit_timer(sk); 3935 3936 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3937 sk_dst_confirm(sk); 3938 3939 delivered = tcp_newly_delivered(sk, delivered, flag); 3940 lost = tp->lost - lost; /* freshly marked lost */ 3941 rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED); 3942 tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate); 3943 tcp_cong_control(sk, ack, delivered, flag, sack_state.rate); 3944 tcp_xmit_recovery(sk, rexmit); 3945 return 1; 3946 3947 no_queue: 3948 /* If data was DSACKed, see if we can undo a cwnd reduction. */ 3949 if (flag & FLAG_DSACKING_ACK) { 3950 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag, 3951 &rexmit); 3952 tcp_newly_delivered(sk, delivered, flag); 3953 } 3954 /* If this ack opens up a zero window, clear backoff. It was 3955 * being used to time the probes, and is probably far higher than 3956 * it needs to be for normal retransmission. 3957 */ 3958 tcp_ack_probe(sk); 3959 3960 if (tp->tlp_high_seq) 3961 tcp_process_tlp_ack(sk, ack, flag); 3962 return 1; 3963 3964 old_ack: 3965 /* If data was SACKed, tag it and see if we should send more data. 3966 * If data was DSACKed, see if we can undo a cwnd reduction. 3967 */ 3968 if (TCP_SKB_CB(skb)->sacked) { 3969 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, 3970 &sack_state); 3971 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag, 3972 &rexmit); 3973 tcp_newly_delivered(sk, delivered, flag); 3974 tcp_xmit_recovery(sk, rexmit); 3975 } 3976 3977 return 0; 3978 } 3979 3980 static void tcp_parse_fastopen_option(int len, const unsigned char *cookie, 3981 bool syn, struct tcp_fastopen_cookie *foc, 3982 bool exp_opt) 3983 { 3984 /* Valid only in SYN or SYN-ACK with an even length. */ 3985 if (!foc || !syn || len < 0 || (len & 1)) 3986 return; 3987 3988 if (len >= TCP_FASTOPEN_COOKIE_MIN && 3989 len <= TCP_FASTOPEN_COOKIE_MAX) 3990 memcpy(foc->val, cookie, len); 3991 else if (len != 0) 3992 len = -1; 3993 foc->len = len; 3994 foc->exp = exp_opt; 3995 } 3996 3997 static bool smc_parse_options(const struct tcphdr *th, 3998 struct tcp_options_received *opt_rx, 3999 const unsigned char *ptr, 4000 int opsize) 4001 { 4002 #if IS_ENABLED(CONFIG_SMC) 4003 if (static_branch_unlikely(&tcp_have_smc)) { 4004 if (th->syn && !(opsize & 1) && 4005 opsize >= TCPOLEN_EXP_SMC_BASE && 4006 get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC) { 4007 opt_rx->smc_ok = 1; 4008 return true; 4009 } 4010 } 4011 #endif 4012 return false; 4013 } 4014 4015 /* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped 4016 * value on success. 
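 *
 * Worked example (illustrative): option bytes 0x02 0x04 0x05 0xb4 decode as
 * kind=2 (MSS), len=4, value 0x05b4 = 1460. If user_mss is 1400, the result
 * is clamped down to 1400.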
4017 */ 4018 u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss) 4019 { 4020 const unsigned char *ptr = (const unsigned char *)(th + 1); 4021 int length = (th->doff * 4) - sizeof(struct tcphdr); 4022 u16 mss = 0; 4023 4024 while (length > 0) { 4025 int opcode = *ptr++; 4026 int opsize; 4027 4028 switch (opcode) { 4029 case TCPOPT_EOL: 4030 return mss; 4031 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 4032 length--; 4033 continue; 4034 default: 4035 if (length < 2) 4036 return mss; 4037 opsize = *ptr++; 4038 if (opsize < 2) /* "silly options" */ 4039 return mss; 4040 if (opsize > length) 4041 return mss; /* fail on partial options */ 4042 if (opcode == TCPOPT_MSS && opsize == TCPOLEN_MSS) { 4043 u16 in_mss = get_unaligned_be16(ptr); 4044 4045 if (in_mss) { 4046 if (user_mss && user_mss < in_mss) 4047 in_mss = user_mss; 4048 mss = in_mss; 4049 } 4050 } 4051 ptr += opsize - 2; 4052 length -= opsize; 4053 } 4054 } 4055 return mss; 4056 } 4057 EXPORT_SYMBOL_GPL(tcp_parse_mss_option); 4058 4059 /* Look for tcp options. Normally only called on SYN and SYNACK packets. 4060 * But, this can also be called on packets in the established flow when 4061 * the fast version below fails. 4062 */ 4063 void tcp_parse_options(const struct net *net, 4064 const struct sk_buff *skb, 4065 struct tcp_options_received *opt_rx, int estab, 4066 struct tcp_fastopen_cookie *foc) 4067 { 4068 const unsigned char *ptr; 4069 const struct tcphdr *th = tcp_hdr(skb); 4070 int length = (th->doff * 4) - sizeof(struct tcphdr); 4071 4072 ptr = (const unsigned char *)(th + 1); 4073 opt_rx->saw_tstamp = 0; 4074 opt_rx->saw_unknown = 0; 4075 4076 while (length > 0) { 4077 int opcode = *ptr++; 4078 int opsize; 4079 4080 switch (opcode) { 4081 case TCPOPT_EOL: 4082 return; 4083 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 4084 length--; 4085 continue; 4086 default: 4087 if (length < 2) 4088 return; 4089 opsize = *ptr++; 4090 if (opsize < 2) /* "silly options" */ 4091 return; 4092 if (opsize > length) 4093 return; /* don't parse partial options */ 4094 switch (opcode) { 4095 case TCPOPT_MSS: 4096 if (opsize == TCPOLEN_MSS && th->syn && !estab) { 4097 u16 in_mss = get_unaligned_be16(ptr); 4098 if (in_mss) { 4099 if (opt_rx->user_mss && 4100 opt_rx->user_mss < in_mss) 4101 in_mss = opt_rx->user_mss; 4102 opt_rx->mss_clamp = in_mss; 4103 } 4104 } 4105 break; 4106 case TCPOPT_WINDOW: 4107 if (opsize == TCPOLEN_WINDOW && th->syn && 4108 !estab && READ_ONCE(net->ipv4.sysctl_tcp_window_scaling)) { 4109 __u8 snd_wscale = *(__u8 *)ptr; 4110 opt_rx->wscale_ok = 1; 4111 if (snd_wscale > TCP_MAX_WSCALE) { 4112 net_info_ratelimited("%s: Illegal window scaling value %d > %u received\n", 4113 __func__, 4114 snd_wscale, 4115 TCP_MAX_WSCALE); 4116 snd_wscale = TCP_MAX_WSCALE; 4117 } 4118 opt_rx->snd_wscale = snd_wscale; 4119 } 4120 break; 4121 case TCPOPT_TIMESTAMP: 4122 if ((opsize == TCPOLEN_TIMESTAMP) && 4123 ((estab && opt_rx->tstamp_ok) || 4124 (!estab && READ_ONCE(net->ipv4.sysctl_tcp_timestamps)))) { 4125 opt_rx->saw_tstamp = 1; 4126 opt_rx->rcv_tsval = get_unaligned_be32(ptr); 4127 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); 4128 } 4129 break; 4130 case TCPOPT_SACK_PERM: 4131 if (opsize == TCPOLEN_SACK_PERM && th->syn && 4132 !estab && READ_ONCE(net->ipv4.sysctl_tcp_sack)) { 4133 opt_rx->sack_ok = TCP_SACK_SEEN; 4134 tcp_sack_reset(opt_rx); 4135 } 4136 break; 4137 4138 case TCPOPT_SACK: 4139 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && 4140 !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && 4141 
opt_rx->sack_ok) { 4142 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; 4143 } 4144 break; 4145 #ifdef CONFIG_TCP_MD5SIG 4146 case TCPOPT_MD5SIG: 4147 /* The MD5 Hash has already been 4148 * checked (see tcp_v{4,6}_rcv()). 4149 */ 4150 break; 4151 #endif 4152 case TCPOPT_FASTOPEN: 4153 tcp_parse_fastopen_option( 4154 opsize - TCPOLEN_FASTOPEN_BASE, 4155 ptr, th->syn, foc, false); 4156 break; 4157 4158 case TCPOPT_EXP: 4159 /* Fast Open option shares code 254 using a 4160 * 16 bits magic number. 4161 */ 4162 if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE && 4163 get_unaligned_be16(ptr) == 4164 TCPOPT_FASTOPEN_MAGIC) { 4165 tcp_parse_fastopen_option(opsize - 4166 TCPOLEN_EXP_FASTOPEN_BASE, 4167 ptr + 2, th->syn, foc, true); 4168 break; 4169 } 4170 4171 if (smc_parse_options(th, opt_rx, ptr, opsize)) 4172 break; 4173 4174 opt_rx->saw_unknown = 1; 4175 break; 4176 4177 default: 4178 opt_rx->saw_unknown = 1; 4179 } 4180 ptr += opsize-2; 4181 length -= opsize; 4182 } 4183 } 4184 } 4185 EXPORT_SYMBOL(tcp_parse_options); 4186 4187 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) 4188 { 4189 const __be32 *ptr = (const __be32 *)(th + 1); 4190 4191 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 4192 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 4193 tp->rx_opt.saw_tstamp = 1; 4194 ++ptr; 4195 tp->rx_opt.rcv_tsval = ntohl(*ptr); 4196 ++ptr; 4197 if (*ptr) 4198 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; 4199 else 4200 tp->rx_opt.rcv_tsecr = 0; 4201 return true; 4202 } 4203 return false; 4204 } 4205 4206 /* Fast parse options. This hopes to only see timestamps. 4207 * If it is wrong it falls back on tcp_parse_options(). 4208 */ 4209 static bool tcp_fast_parse_options(const struct net *net, 4210 const struct sk_buff *skb, 4211 const struct tcphdr *th, struct tcp_sock *tp) 4212 { 4213 /* In the spirit of fast parsing, compare doff directly to constant 4214 * values. Because equality is used, short doff can be ignored here. 4215 */ 4216 if (th->doff == (sizeof(*th) / 4)) { 4217 tp->rx_opt.saw_tstamp = 0; 4218 return false; 4219 } else if (tp->rx_opt.tstamp_ok && 4220 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { 4221 if (tcp_parse_aligned_timestamp(tp, th)) 4222 return true; 4223 } 4224 4225 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL); 4226 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 4227 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 4228 4229 return true; 4230 } 4231 4232 #ifdef CONFIG_TCP_MD5SIG 4233 /* 4234 * Parse MD5 Signature option 4235 */ 4236 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) 4237 { 4238 int length = (th->doff << 2) - sizeof(*th); 4239 const u8 *ptr = (const u8 *)(th + 1); 4240 4241 /* If not enough data remaining, we can short cut */ 4242 while (length >= TCPOLEN_MD5SIG) { 4243 int opcode = *ptr++; 4244 int opsize; 4245 4246 switch (opcode) { 4247 case TCPOPT_EOL: 4248 return NULL; 4249 case TCPOPT_NOP: 4250 length--; 4251 continue; 4252 default: 4253 opsize = *ptr++; 4254 if (opsize < 2 || opsize > length) 4255 return NULL; 4256 if (opcode == TCPOPT_MD5SIG) 4257 return opsize == TCPOLEN_MD5SIG ? ptr : NULL; 4258 } 4259 ptr += opsize - 2; 4260 length -= opsize; 4261 } 4262 return NULL; 4263 } 4264 EXPORT_SYMBOL(tcp_parse_md5sig_option); 4265 #endif 4266 4267 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM 4268 * 4269 * It is not fatal. If this ACK does _not_ change critical state (seqs, window) 4270 * it can pass through stack. 
So, the following predicate verifies that
 * this segment is not used for anything but congestion avoidance or
 * fast retransmit. Moreover, we are even able to eliminate most of such
 * second order effects, if we apply some small "replay" window (~RTO)
 * to the timestamp space.
 *
 * All these measures still do not guarantee that we reject wrapped ACKs
 * on networks with high bandwidth, when sequence space is recycled quickly,
 * but they guarantee that such events will be very rare and do not affect
 * the connection seriously. This doesn't look nice, but alas, PAWS is really
 * a buggy extension.
 *
 * [ Later note. Even worse! It is buggy for segments _with_ data. The RFC
 * states that events where a retransmit arrives after the original data are
 * rare. That is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
 * the biggest problem on networks with a large bandwidth-delay product, even
 * with minor reordering. OK, let's give it a small replay window. If the peer
 * clock ticks even at 1 Hz, it is safe up to a bandwidth of roughly
 * 18 Gigabit/sec: sequence numbers wrap after 2^32 bytes, and the
 * before()/after() comparisons stay unambiguous below half of that per
 * timestamp tick, i.e. 2^31 bytes/sec ~= 17 Gbit/sec. 8) ]
 */

static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = TCP_SKB_CB(skb)->seq;
	u32 ack = TCP_SKB_CB(skb)->ack_seq;

	return (/* 1. Pure ACK with correct sequence number. */
		(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&

		/* 2. ... and duplicate ACK. */
		ack == tp->snd_una &&

		/* 3. ... and does not update window. */
		!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&

		/* 4. ... and sits in replay window. */
		(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
}

static inline bool tcp_paws_discard(const struct sock *sk,
				    const struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
	       !tcp_disordered_ack(sk, skb);
}

/* Check segment sequence number for validity.
 *
 * Segment controls are considered valid if the segment
 * fits within the window after truncation to the window. Acceptability
 * of data (and SYN, FIN, of course) is checked separately.
 * See tcp_data_queue(), for example.
 *
 * Also, controls (the RST being the main one) are accepted using RCV.WUP
 * instead of RCV.NXT. The peer has not yet advanced its SND.UNA when we
 * sent a delayed ACK, so that its SND.UNA <= our RCV.WUP.
 * (borrowed from freebsd)
 */

static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp,
					 u32 seq, u32 end_seq)
{
	if (before(end_seq, tp->rcv_wup))
		return SKB_DROP_REASON_TCP_OLD_SEQUENCE;

	if (after(seq, tp->rcv_nxt + tcp_receive_window(tp)))
		return SKB_DROP_REASON_TCP_INVALID_SEQUENCE;

	return SKB_NOT_DROPPED_YET;
}

/* When we get a reset we do this. */
void tcp_reset(struct sock *sk, struct sk_buff *skb)
{
	trace_tcp_receive_reset(sk);

	/* mptcp can't tell us to ignore reset pkts,
	 * so just ignore the return value of mptcp_incoming_options().
	 */
	if (sk_is_mptcp(sk))
		mptcp_incoming_options(sk, skb);

	/* We want the right error as BSD sees it (and indeed as we do).
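	 * E.g. a RST while in SYN_SENT means our connection attempt was
	 * refused (ECONNREFUSED), while a RST on an established flow is a
	 * plain reset (ECONNRESET).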
*/ 4356 switch (sk->sk_state) { 4357 case TCP_SYN_SENT: 4358 WRITE_ONCE(sk->sk_err, ECONNREFUSED); 4359 break; 4360 case TCP_CLOSE_WAIT: 4361 WRITE_ONCE(sk->sk_err, EPIPE); 4362 break; 4363 case TCP_CLOSE: 4364 return; 4365 default: 4366 WRITE_ONCE(sk->sk_err, ECONNRESET); 4367 } 4368 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 4369 smp_wmb(); 4370 4371 tcp_write_queue_purge(sk); 4372 tcp_done(sk); 4373 4374 if (!sock_flag(sk, SOCK_DEAD)) 4375 sk_error_report(sk); 4376 } 4377 4378 /* 4379 * Process the FIN bit. This now behaves as it is supposed to work 4380 * and the FIN takes effect when it is validly part of sequence 4381 * space. Not before when we get holes. 4382 * 4383 * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT 4384 * (and thence onto LAST-ACK and finally, CLOSE, we never enter 4385 * TIME-WAIT) 4386 * 4387 * If we are in FINWAIT-1, a received FIN indicates simultaneous 4388 * close and we go into CLOSING (and later onto TIME-WAIT) 4389 * 4390 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. 4391 */ 4392 void tcp_fin(struct sock *sk) 4393 { 4394 struct tcp_sock *tp = tcp_sk(sk); 4395 4396 inet_csk_schedule_ack(sk); 4397 4398 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN); 4399 sock_set_flag(sk, SOCK_DONE); 4400 4401 switch (sk->sk_state) { 4402 case TCP_SYN_RECV: 4403 case TCP_ESTABLISHED: 4404 /* Move to CLOSE_WAIT */ 4405 tcp_set_state(sk, TCP_CLOSE_WAIT); 4406 inet_csk_enter_pingpong_mode(sk); 4407 break; 4408 4409 case TCP_CLOSE_WAIT: 4410 case TCP_CLOSING: 4411 /* Received a retransmission of the FIN, do 4412 * nothing. 4413 */ 4414 break; 4415 case TCP_LAST_ACK: 4416 /* RFC793: Remain in the LAST-ACK state. */ 4417 break; 4418 4419 case TCP_FIN_WAIT1: 4420 /* This case occurs when a simultaneous close 4421 * happens, we must ack the received FIN and 4422 * enter the CLOSING state. 4423 */ 4424 tcp_send_ack(sk); 4425 tcp_set_state(sk, TCP_CLOSING); 4426 break; 4427 case TCP_FIN_WAIT2: 4428 /* Received a FIN -- send ACK and enter TIME_WAIT. */ 4429 tcp_send_ack(sk); 4430 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 4431 break; 4432 default: 4433 /* Only TCP_LISTEN and TCP_CLOSE are left, in these 4434 * cases we should never reach this piece of code. 4435 */ 4436 pr_err("%s: Impossible, sk->sk_state=%d\n", 4437 __func__, sk->sk_state); 4438 break; 4439 } 4440 4441 /* It _is_ possible, that we have something out-of-order _after_ FIN. 4442 * Probably, we should reset in this case. For now drop them. 4443 */ 4444 skb_rbtree_purge(&tp->out_of_order_queue); 4445 if (tcp_is_sack(tp)) 4446 tcp_sack_reset(&tp->rx_opt); 4447 4448 if (!sock_flag(sk, SOCK_DEAD)) { 4449 sk->sk_state_change(sk); 4450 4451 /* Do not send POLL_HUP for half duplex close. 
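		 * (After the peer's FIN we may still have data to send in
		 * CLOSE_WAIT, so only a fully shut down socket, i.e.
		 * SHUTDOWN_MASK or TCP_CLOSE below, reports POLL_HUP; a half
		 * closed one signals POLL_IN only.)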
*/ 4452 if (sk->sk_shutdown == SHUTDOWN_MASK || 4453 sk->sk_state == TCP_CLOSE) 4454 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 4455 else 4456 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 4457 } 4458 } 4459 4460 static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, 4461 u32 end_seq) 4462 { 4463 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { 4464 if (before(seq, sp->start_seq)) 4465 sp->start_seq = seq; 4466 if (after(end_seq, sp->end_seq)) 4467 sp->end_seq = end_seq; 4468 return true; 4469 } 4470 return false; 4471 } 4472 4473 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 4474 { 4475 struct tcp_sock *tp = tcp_sk(sk); 4476 4477 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { 4478 int mib_idx; 4479 4480 if (before(seq, tp->rcv_nxt)) 4481 mib_idx = LINUX_MIB_TCPDSACKOLDSENT; 4482 else 4483 mib_idx = LINUX_MIB_TCPDSACKOFOSENT; 4484 4485 NET_INC_STATS(sock_net(sk), mib_idx); 4486 4487 tp->rx_opt.dsack = 1; 4488 tp->duplicate_sack[0].start_seq = seq; 4489 tp->duplicate_sack[0].end_seq = end_seq; 4490 } 4491 } 4492 4493 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) 4494 { 4495 struct tcp_sock *tp = tcp_sk(sk); 4496 4497 if (!tp->rx_opt.dsack) 4498 tcp_dsack_set(sk, seq, end_seq); 4499 else 4500 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 4501 } 4502 4503 static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb) 4504 { 4505 /* When the ACK path fails or drops most ACKs, the sender would 4506 * timeout and spuriously retransmit the same segment repeatedly. 4507 * The receiver remembers and reflects via DSACKs. Leverage the 4508 * DSACK state and change the txhash to re-route speculatively. 4509 */ 4510 if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq && 4511 sk_rethink_txhash(sk)) 4512 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH); 4513 } 4514 4515 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) 4516 { 4517 struct tcp_sock *tp = tcp_sk(sk); 4518 4519 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 4520 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4521 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4522 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); 4523 4524 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { 4525 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4526 4527 tcp_rcv_spurious_retrans(sk, skb); 4528 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 4529 end_seq = tp->rcv_nxt; 4530 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); 4531 } 4532 } 4533 4534 tcp_send_ack(sk); 4535 } 4536 4537 /* These routines update the SACK block as out-of-order packets arrive or 4538 * in-order packets close up the sequence space. 4539 */ 4540 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) 4541 { 4542 int this_sack; 4543 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4544 struct tcp_sack_block *swalk = sp + 1; 4545 4546 /* See if the recent change to the first SACK eats into 4547 * or hits the sequence space of other SACK blocks, if so coalesce. 4548 */ 4549 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { 4550 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { 4551 int i; 4552 4553 /* Zap SWALK, by moving every further SACK up by one slot. 4554 * Decrease num_sacks. 
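			 * (Illustrative: if the first block was [1000,2000)
			 * and swalk is [1500,2500), tcp_sack_extend() has
			 * just grown the first block to [1000,2500), so
			 * swalk is now redundant.)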
 */
			tp->rx_opt.num_sacks--;
			for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
				sp[i] = sp[i + 1];
			continue;
		}
		this_sack++;
		swalk++;
	}
}

void tcp_sack_compress_send_ack(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->compressed_ack)
		return;

	if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
		__sock_put(sk);

	/* Since we have to send one ack finally,
	 * subtract one from tp->compressed_ack to keep
	 * LINUX_MIB_TCPACKCOMPRESSED accurate.
	 */
	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
		      tp->compressed_ack - 1);

	tp->compressed_ack = 0;
	tcp_send_ack(sk);
}

/* Reasonable number of SACK blocks included in the TCP SACK option.
 * The max is 4, but this becomes 3 if TCP timestamps are there.
 * Given that SACK packets might be lost, be conservative and use 2.
 */
#define TCP_SACK_BLOCKS_EXPECTED 2

static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_sack_block *sp = &tp->selective_acks[0];
	int cur_sacks = tp->rx_opt.num_sacks;
	int this_sack;

	if (!cur_sacks)
		goto new_sack;

	for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
		if (tcp_sack_extend(sp, seq, end_seq)) {
			if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
				tcp_sack_compress_send_ack(sk);
			/* Rotate this_sack to the first one. */
			for (; this_sack > 0; this_sack--, sp--)
				swap(*sp, *(sp - 1));
			if (cur_sacks > 1)
				tcp_sack_maybe_coalesce(tp);
			return;
		}
	}

	if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
		tcp_sack_compress_send_ack(sk);

	/* Could not find an adjacent existing SACK, build a new one,
	 * put it at the front, and shift everyone else down. We
	 * always know there is at least one SACK present already here.
	 *
	 * If the sack array is full, forget about the last one.
	 */
	if (this_sack >= TCP_NUM_SACKS) {
		this_sack--;
		tp->rx_opt.num_sacks--;
		sp--;
	}
	for (; this_sack > 0; this_sack--, sp--)
		*sp = *(sp - 1);

new_sack:
	/* Build the new head SACK, and we're done. */
	sp->start_seq = seq;
	sp->end_seq = end_seq;
	tp->rx_opt.num_sacks++;
}

/* RCV.NXT advances, some SACKs should be eaten. */

static void tcp_sack_remove(struct tcp_sock *tp)
{
	struct tcp_sack_block *sp = &tp->selective_acks[0];
	int num_sacks = tp->rx_opt.num_sacks;
	int this_sack;

	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
	if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
		tp->rx_opt.num_sacks = 0;
		return;
	}

	for (this_sack = 0; this_sack < num_sacks;) {
		/* Check if the start of the sack is covered by RCV.NXT. */
		if (!before(tp->rcv_nxt, sp->start_seq)) {
			int i;

			/* RCV.NXT must cover all the block! */
			WARN_ON(before(tp->rcv_nxt, sp->end_seq));

			/* Zap this SACK, by moving forward any other SACKS.
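			 * (E.g. rcv_nxt advanced to 3000: a block
			 * [1500,2500) is now fully covered by in-order data,
			 * so reporting it would be redundant.)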
 */
			for (i = this_sack+1; i < num_sacks; i++)
				tp->selective_acks[i-1] = tp->selective_acks[i];
			num_sacks--;
			continue;
		}
		this_sack++;
		sp++;
	}
	tp->rx_opt.num_sacks = num_sacks;
}

/**
 * tcp_try_coalesce - try to merge skb to prior one
 * @sk: socket
 * @to: prior buffer
 * @from: buffer to add in queue
 * @fragstolen: pointer to boolean
 *
 * Before queueing skb @from after @to, try to merge them
 * to reduce overall memory use and queue lengths, if cost is small.
 * Packets in ofo or receive queues can stay a long time.
 * Better try to coalesce them right now to avoid future collapses.
 * Returns true if caller should free @from instead of queueing it.
 */
static bool tcp_try_coalesce(struct sock *sk,
			     struct sk_buff *to,
			     struct sk_buff *from,
			     bool *fragstolen)
{
	int delta;

	*fragstolen = false;

	/* It's possible this segment overlaps with a prior segment in the queue */
	if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
		return false;

	if (!mptcp_skb_can_collapse(to, from))
		return false;

#ifdef CONFIG_TLS_DEVICE
	if (from->decrypted != to->decrypted)
		return false;
#endif

	if (!skb_try_coalesce(to, from, fragstolen, &delta))
		return false;

	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);
	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
	TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;

	if (TCP_SKB_CB(from)->has_rxtstamp) {
		TCP_SKB_CB(to)->has_rxtstamp = true;
		to->tstamp = from->tstamp;
		skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
	}

	return true;
}

static bool tcp_ooo_try_coalesce(struct sock *sk,
				 struct sk_buff *to,
				 struct sk_buff *from,
				 bool *fragstolen)
{
	bool res = tcp_try_coalesce(sk, to, from, fragstolen);

	/* In case tcp_drop_reason() is called later, update to->gso_segs */
	if (res) {
		u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
			       max_t(u16, 1, skb_shinfo(from)->gso_segs);

		skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
	}
	return res;
}

static void tcp_drop_reason(struct sock *sk, struct sk_buff *skb,
			    enum skb_drop_reason reason)
{
	sk_drops_add(sk, skb);
	kfree_skb_reason(skb, reason);
}

/* This one checks to see if we can put data from the
 * out_of_order queue into the receive_queue.
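 *
 * E.g. with rcv_nxt == 1000 and ofo skbs [1000,1500) and [1500,2000),
 * both are moved to sk_receive_queue and rcv_nxt advances to 2000;
 * fully duplicate skbs found on the way are dropped and reported via
 * D-SACK instead.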
4753 */ 4754 static void tcp_ofo_queue(struct sock *sk) 4755 { 4756 struct tcp_sock *tp = tcp_sk(sk); 4757 __u32 dsack_high = tp->rcv_nxt; 4758 bool fin, fragstolen, eaten; 4759 struct sk_buff *skb, *tail; 4760 struct rb_node *p; 4761 4762 p = rb_first(&tp->out_of_order_queue); 4763 while (p) { 4764 skb = rb_to_skb(p); 4765 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 4766 break; 4767 4768 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { 4769 __u32 dsack = dsack_high; 4770 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) 4771 dsack_high = TCP_SKB_CB(skb)->end_seq; 4772 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); 4773 } 4774 p = rb_next(p); 4775 rb_erase(&skb->rbnode, &tp->out_of_order_queue); 4776 4777 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { 4778 tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_DROP); 4779 continue; 4780 } 4781 4782 tail = skb_peek_tail(&sk->sk_receive_queue); 4783 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); 4784 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); 4785 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; 4786 if (!eaten) 4787 __skb_queue_tail(&sk->sk_receive_queue, skb); 4788 else 4789 kfree_skb_partial(skb, fragstolen); 4790 4791 if (unlikely(fin)) { 4792 tcp_fin(sk); 4793 /* tcp_fin() purges tp->out_of_order_queue, 4794 * so we must end this loop right now. 4795 */ 4796 break; 4797 } 4798 } 4799 } 4800 4801 static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb); 4802 static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb); 4803 4804 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, 4805 unsigned int size) 4806 { 4807 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 4808 !sk_rmem_schedule(sk, skb, size)) { 4809 4810 if (tcp_prune_queue(sk, skb) < 0) 4811 return -1; 4812 4813 while (!sk_rmem_schedule(sk, skb, size)) { 4814 if (!tcp_prune_ofo_queue(sk, skb)) 4815 return -1; 4816 } 4817 } 4818 return 0; 4819 } 4820 4821 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) 4822 { 4823 struct tcp_sock *tp = tcp_sk(sk); 4824 struct rb_node **p, *parent; 4825 struct sk_buff *skb1; 4826 u32 seq, end_seq; 4827 bool fragstolen; 4828 4829 tcp_ecn_check_ce(sk, skb); 4830 4831 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { 4832 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); 4833 sk->sk_data_ready(sk); 4834 tcp_drop_reason(sk, skb, SKB_DROP_REASON_PROTO_MEM); 4835 return; 4836 } 4837 4838 /* Disable header prediction. */ 4839 tp->pred_flags = 0; 4840 inet_csk_schedule_ack(sk); 4841 4842 tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs); 4843 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); 4844 seq = TCP_SKB_CB(skb)->seq; 4845 end_seq = TCP_SKB_CB(skb)->end_seq; 4846 4847 p = &tp->out_of_order_queue.rb_node; 4848 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { 4849 /* Initial out of order segment, build 1 SACK. */ 4850 if (tcp_is_sack(tp)) { 4851 tp->rx_opt.num_sacks = 1; 4852 tp->selective_acks[0].start_seq = seq; 4853 tp->selective_acks[0].end_seq = end_seq; 4854 } 4855 rb_link_node(&skb->rbnode, NULL, p); 4856 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); 4857 tp->ooo_last_skb = skb; 4858 goto end; 4859 } 4860 4861 /* In the typical case, we are adding an skb to the end of the list. 4862 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. 
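	 * (When a hole is followed by an in-order burst, every new segment
	 * lands right after ooo_last_skb, so try to coalesce with it
	 * directly before falling back to the full rbtree walk below.)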
4863 */ 4864 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, 4865 skb, &fragstolen)) { 4866 coalesce_done: 4867 /* For non sack flows, do not grow window to force DUPACK 4868 * and trigger fast retransmit. 4869 */ 4870 if (tcp_is_sack(tp)) 4871 tcp_grow_window(sk, skb, true); 4872 kfree_skb_partial(skb, fragstolen); 4873 skb = NULL; 4874 goto add_sack; 4875 } 4876 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */ 4877 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) { 4878 parent = &tp->ooo_last_skb->rbnode; 4879 p = &parent->rb_right; 4880 goto insert; 4881 } 4882 4883 /* Find place to insert this segment. Handle overlaps on the way. */ 4884 parent = NULL; 4885 while (*p) { 4886 parent = *p; 4887 skb1 = rb_to_skb(parent); 4888 if (before(seq, TCP_SKB_CB(skb1)->seq)) { 4889 p = &parent->rb_left; 4890 continue; 4891 } 4892 if (before(seq, TCP_SKB_CB(skb1)->end_seq)) { 4893 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4894 /* All the bits are present. Drop. */ 4895 NET_INC_STATS(sock_net(sk), 4896 LINUX_MIB_TCPOFOMERGE); 4897 tcp_drop_reason(sk, skb, 4898 SKB_DROP_REASON_TCP_OFOMERGE); 4899 skb = NULL; 4900 tcp_dsack_set(sk, seq, end_seq); 4901 goto add_sack; 4902 } 4903 if (after(seq, TCP_SKB_CB(skb1)->seq)) { 4904 /* Partial overlap. */ 4905 tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq); 4906 } else { 4907 /* skb's seq == skb1's seq and skb covers skb1. 4908 * Replace skb1 with skb. 4909 */ 4910 rb_replace_node(&skb1->rbnode, &skb->rbnode, 4911 &tp->out_of_order_queue); 4912 tcp_dsack_extend(sk, 4913 TCP_SKB_CB(skb1)->seq, 4914 TCP_SKB_CB(skb1)->end_seq); 4915 NET_INC_STATS(sock_net(sk), 4916 LINUX_MIB_TCPOFOMERGE); 4917 tcp_drop_reason(sk, skb1, 4918 SKB_DROP_REASON_TCP_OFOMERGE); 4919 goto merge_right; 4920 } 4921 } else if (tcp_ooo_try_coalesce(sk, skb1, 4922 skb, &fragstolen)) { 4923 goto coalesce_done; 4924 } 4925 p = &parent->rb_right; 4926 } 4927 insert: 4928 /* Insert segment into RB tree. */ 4929 rb_link_node(&skb->rbnode, parent, p); 4930 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); 4931 4932 merge_right: 4933 /* Remove other segments covered by skb. */ 4934 while ((skb1 = skb_rb_next(skb)) != NULL) { 4935 if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) 4936 break; 4937 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4938 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4939 end_seq); 4940 break; 4941 } 4942 rb_erase(&skb1->rbnode, &tp->out_of_order_queue); 4943 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4944 TCP_SKB_CB(skb1)->end_seq); 4945 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); 4946 tcp_drop_reason(sk, skb1, SKB_DROP_REASON_TCP_OFOMERGE); 4947 } 4948 /* If there is no skb after us, we are the last_skb ! */ 4949 if (!skb1) 4950 tp->ooo_last_skb = skb; 4951 4952 add_sack: 4953 if (tcp_is_sack(tp)) 4954 tcp_sack_new_ofo_skb(sk, seq, end_seq); 4955 end: 4956 if (skb) { 4957 /* For non sack flows, do not grow window to force DUPACK 4958 * and trigger fast retransmit. 4959 */ 4960 if (tcp_is_sack(tp)) 4961 tcp_grow_window(sk, skb, false); 4962 skb_condense(skb); 4963 skb_set_owner_r(skb, sk); 4964 } 4965 } 4966 4967 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, 4968 bool *fragstolen) 4969 { 4970 int eaten; 4971 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); 4972 4973 eaten = (tail && 4974 tcp_try_coalesce(sk, tail, 4975 skb, fragstolen)) ? 
1 : 0; 4976 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); 4977 if (!eaten) { 4978 __skb_queue_tail(&sk->sk_receive_queue, skb); 4979 skb_set_owner_r(skb, sk); 4980 } 4981 return eaten; 4982 } 4983 4984 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) 4985 { 4986 struct sk_buff *skb; 4987 int err = -ENOMEM; 4988 int data_len = 0; 4989 bool fragstolen; 4990 4991 if (size == 0) 4992 return 0; 4993 4994 if (size > PAGE_SIZE) { 4995 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); 4996 4997 data_len = npages << PAGE_SHIFT; 4998 size = data_len + (size & ~PAGE_MASK); 4999 } 5000 skb = alloc_skb_with_frags(size - data_len, data_len, 5001 PAGE_ALLOC_COSTLY_ORDER, 5002 &err, sk->sk_allocation); 5003 if (!skb) 5004 goto err; 5005 5006 skb_put(skb, size - data_len); 5007 skb->data_len = data_len; 5008 skb->len = size; 5009 5010 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { 5011 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); 5012 goto err_free; 5013 } 5014 5015 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); 5016 if (err) 5017 goto err_free; 5018 5019 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; 5020 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; 5021 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; 5022 5023 if (tcp_queue_rcv(sk, skb, &fragstolen)) { 5024 WARN_ON_ONCE(fragstolen); /* should not happen */ 5025 __kfree_skb(skb); 5026 } 5027 return size; 5028 5029 err_free: 5030 kfree_skb(skb); 5031 err: 5032 return err; 5033 5034 } 5035 5036 void tcp_data_ready(struct sock *sk) 5037 { 5038 if (tcp_epollin_ready(sk, sk->sk_rcvlowat) || sock_flag(sk, SOCK_DONE)) 5039 sk->sk_data_ready(sk); 5040 } 5041 5042 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 5043 { 5044 struct tcp_sock *tp = tcp_sk(sk); 5045 enum skb_drop_reason reason; 5046 bool fragstolen; 5047 int eaten; 5048 5049 /* If a subflow has been reset, the packet should not continue 5050 * to be processed, drop the packet. 5051 */ 5052 if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) { 5053 __kfree_skb(skb); 5054 return; 5055 } 5056 5057 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { 5058 __kfree_skb(skb); 5059 return; 5060 } 5061 skb_dst_drop(skb); 5062 __skb_pull(skb, tcp_hdr(skb)->doff * 4); 5063 5064 reason = SKB_DROP_REASON_NOT_SPECIFIED; 5065 tp->rx_opt.dsack = 0; 5066 5067 /* Queue data for delivery to the user. 5068 * Packets in sequence go to the receive queue. 5069 * Out of sequence packets to the out_of_order_queue. 5070 */ 5071 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 5072 if (tcp_receive_window(tp) == 0) { 5073 reason = SKB_DROP_REASON_TCP_ZEROWINDOW; 5074 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); 5075 goto out_of_window; 5076 } 5077 5078 /* Ok. In sequence. In window. */ 5079 queue_and_out: 5080 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) { 5081 /* TODO: maybe ratelimit these WIN 0 ACK ? 
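			 * We are out of receive memory here: force an
			 * immediate ACK below so the sender promptly sees the
			 * shrunken window, wake the reader, and drop the
			 * segment only if the receive queue already holds
			 * data the application could consume.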
*/ 5082 inet_csk(sk)->icsk_ack.pending |= 5083 (ICSK_ACK_NOMEM | ICSK_ACK_NOW); 5084 inet_csk_schedule_ack(sk); 5085 sk->sk_data_ready(sk); 5086 5087 if (skb_queue_len(&sk->sk_receive_queue)) { 5088 reason = SKB_DROP_REASON_PROTO_MEM; 5089 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP); 5090 goto drop; 5091 } 5092 sk_forced_mem_schedule(sk, skb->truesize); 5093 } 5094 5095 eaten = tcp_queue_rcv(sk, skb, &fragstolen); 5096 if (skb->len) 5097 tcp_event_data_recv(sk, skb); 5098 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 5099 tcp_fin(sk); 5100 5101 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) { 5102 tcp_ofo_queue(sk); 5103 5104 /* RFC5681. 4.2. SHOULD send immediate ACK, when 5105 * gap in queue is filled. 5106 */ 5107 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) 5108 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; 5109 } 5110 5111 if (tp->rx_opt.num_sacks) 5112 tcp_sack_remove(tp); 5113 5114 tcp_fast_path_check(sk); 5115 5116 if (eaten > 0) 5117 kfree_skb_partial(skb, fragstolen); 5118 if (!sock_flag(sk, SOCK_DEAD)) 5119 tcp_data_ready(sk); 5120 return; 5121 } 5122 5123 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 5124 tcp_rcv_spurious_retrans(sk, skb); 5125 /* A retransmit, 2nd most common case. Force an immediate ack. */ 5126 reason = SKB_DROP_REASON_TCP_OLD_DATA; 5127 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 5128 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 5129 5130 out_of_window: 5131 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); 5132 inet_csk_schedule_ack(sk); 5133 drop: 5134 tcp_drop_reason(sk, skb, reason); 5135 return; 5136 } 5137 5138 /* Out of window. F.e. zero window probe. */ 5139 if (!before(TCP_SKB_CB(skb)->seq, 5140 tp->rcv_nxt + tcp_receive_window(tp))) { 5141 reason = SKB_DROP_REASON_TCP_OVERWINDOW; 5142 goto out_of_window; 5143 } 5144 5145 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 5146 /* Partial packet, seq < rcv_next < end_seq */ 5147 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 5148 5149 /* If window is closed, drop tail of packet. But after 5150 * remembering D-SACK for its head made in previous line. 5151 */ 5152 if (!tcp_receive_window(tp)) { 5153 reason = SKB_DROP_REASON_TCP_ZEROWINDOW; 5154 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); 5155 goto out_of_window; 5156 } 5157 goto queue_and_out; 5158 } 5159 5160 tcp_data_queue_ofo(sk, skb); 5161 } 5162 5163 static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list) 5164 { 5165 if (list) 5166 return !skb_queue_is_last(list, skb) ? 
skb->next : NULL; 5167 5168 return skb_rb_next(skb); 5169 } 5170 5171 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 5172 struct sk_buff_head *list, 5173 struct rb_root *root) 5174 { 5175 struct sk_buff *next = tcp_skb_next(skb, list); 5176 5177 if (list) 5178 __skb_unlink(skb, list); 5179 else 5180 rb_erase(&skb->rbnode, root); 5181 5182 __kfree_skb(skb); 5183 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); 5184 5185 return next; 5186 } 5187 5188 /* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */ 5189 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb) 5190 { 5191 struct rb_node **p = &root->rb_node; 5192 struct rb_node *parent = NULL; 5193 struct sk_buff *skb1; 5194 5195 while (*p) { 5196 parent = *p; 5197 skb1 = rb_to_skb(parent); 5198 if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq)) 5199 p = &parent->rb_left; 5200 else 5201 p = &parent->rb_right; 5202 } 5203 rb_link_node(&skb->rbnode, parent, p); 5204 rb_insert_color(&skb->rbnode, root); 5205 } 5206 5207 /* Collapse contiguous sequence of skbs head..tail with 5208 * sequence numbers start..end. 5209 * 5210 * If tail is NULL, this means until the end of the queue. 5211 * 5212 * Segments with FIN/SYN are not collapsed (only because this 5213 * simplifies code) 5214 */ 5215 static void 5216 tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root, 5217 struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end) 5218 { 5219 struct sk_buff *skb = head, *n; 5220 struct sk_buff_head tmp; 5221 bool end_of_skbs; 5222 5223 /* First, check that queue is collapsible and find 5224 * the point where collapsing can be useful. 5225 */ 5226 restart: 5227 for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) { 5228 n = tcp_skb_next(skb, list); 5229 5230 /* No new bits? It is possible on ofo queue. */ 5231 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 5232 skb = tcp_collapse_one(sk, skb, list, root); 5233 if (!skb) 5234 break; 5235 goto restart; 5236 } 5237 5238 /* The first skb to collapse is: 5239 * - not SYN/FIN and 5240 * - bloated or contains data before "start" or 5241 * overlaps to the next one and mptcp allow collapsing. 5242 */ 5243 if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) && 5244 (tcp_win_from_space(sk, skb->truesize) > skb->len || 5245 before(TCP_SKB_CB(skb)->seq, start))) { 5246 end_of_skbs = false; 5247 break; 5248 } 5249 5250 if (n && n != tail && mptcp_skb_can_collapse(skb, n) && 5251 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) { 5252 end_of_skbs = false; 5253 break; 5254 } 5255 5256 /* Decided to skip this, advance start seq. */ 5257 start = TCP_SKB_CB(skb)->end_seq; 5258 } 5259 if (end_of_skbs || 5260 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) 5261 return; 5262 5263 __skb_queue_head_init(&tmp); 5264 5265 while (before(start, end)) { 5266 int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start); 5267 struct sk_buff *nskb; 5268 5269 nskb = alloc_skb(copy, GFP_ATOMIC); 5270 if (!nskb) 5271 break; 5272 5273 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 5274 #ifdef CONFIG_TLS_DEVICE 5275 nskb->decrypted = skb->decrypted; 5276 #endif 5277 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 5278 if (list) 5279 __skb_queue_before(list, skb, nskb); 5280 else 5281 __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */ 5282 skb_set_owner_r(nskb, sk); 5283 mptcp_skb_ext_move(nskb, skb); 5284 5285 /* Copy data, releasing collapsed skbs. 
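		 * (Illustrative: several tiny skbs whose truesize dwarfs
		 * their payload get copied into the freshly allocated nskb
		 * and are freed one by one, shrinking rmem usage without
		 * losing a single byte.)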
*/ 5286 while (copy > 0) { 5287 int offset = start - TCP_SKB_CB(skb)->seq; 5288 int size = TCP_SKB_CB(skb)->end_seq - start; 5289 5290 BUG_ON(offset < 0); 5291 if (size > 0) { 5292 size = min(copy, size); 5293 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) 5294 BUG(); 5295 TCP_SKB_CB(nskb)->end_seq += size; 5296 copy -= size; 5297 start += size; 5298 } 5299 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 5300 skb = tcp_collapse_one(sk, skb, list, root); 5301 if (!skb || 5302 skb == tail || 5303 !mptcp_skb_can_collapse(nskb, skb) || 5304 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) 5305 goto end; 5306 #ifdef CONFIG_TLS_DEVICE 5307 if (skb->decrypted != nskb->decrypted) 5308 goto end; 5309 #endif 5310 } 5311 } 5312 } 5313 end: 5314 skb_queue_walk_safe(&tmp, skb, n) 5315 tcp_rbtree_insert(root, skb); 5316 } 5317 5318 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs 5319 * and tcp_collapse() them until all the queue is collapsed. 5320 */ 5321 static void tcp_collapse_ofo_queue(struct sock *sk) 5322 { 5323 struct tcp_sock *tp = tcp_sk(sk); 5324 u32 range_truesize, sum_tiny = 0; 5325 struct sk_buff *skb, *head; 5326 u32 start, end; 5327 5328 skb = skb_rb_first(&tp->out_of_order_queue); 5329 new_range: 5330 if (!skb) { 5331 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue); 5332 return; 5333 } 5334 start = TCP_SKB_CB(skb)->seq; 5335 end = TCP_SKB_CB(skb)->end_seq; 5336 range_truesize = skb->truesize; 5337 5338 for (head = skb;;) { 5339 skb = skb_rb_next(skb); 5340 5341 /* Range is terminated when we see a gap or when 5342 * we are at the queue end. 5343 */ 5344 if (!skb || 5345 after(TCP_SKB_CB(skb)->seq, end) || 5346 before(TCP_SKB_CB(skb)->end_seq, start)) { 5347 /* Do not attempt collapsing tiny skbs */ 5348 if (range_truesize != head->truesize || 5349 end - start >= SKB_WITH_OVERHEAD(PAGE_SIZE)) { 5350 tcp_collapse(sk, NULL, &tp->out_of_order_queue, 5351 head, skb, start, end); 5352 } else { 5353 sum_tiny += range_truesize; 5354 if (sum_tiny > sk->sk_rcvbuf >> 3) 5355 return; 5356 } 5357 goto new_range; 5358 } 5359 5360 range_truesize += skb->truesize; 5361 if (unlikely(before(TCP_SKB_CB(skb)->seq, start))) 5362 start = TCP_SKB_CB(skb)->seq; 5363 if (after(TCP_SKB_CB(skb)->end_seq, end)) 5364 end = TCP_SKB_CB(skb)->end_seq; 5365 } 5366 } 5367 5368 /* 5369 * Clean the out-of-order queue to make room. 5370 * We drop high sequences packets to : 5371 * 1) Let a chance for holes to be filled. 5372 * This means we do not drop packets from ooo queue if their sequence 5373 * is before incoming packet sequence. 5374 * 2) not add too big latencies if thousands of packets sit there. 5375 * (But if application shrinks SO_RCVBUF, we could still end up 5376 * freeing whole queue here) 5377 * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks. 5378 * 5379 * Return true if queue has shrunk. 5380 */ 5381 static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb) 5382 { 5383 struct tcp_sock *tp = tcp_sk(sk); 5384 struct rb_node *node, *prev; 5385 bool pruned = false; 5386 int goal; 5387 5388 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) 5389 return false; 5390 5391 goal = sk->sk_rcvbuf >> 3; 5392 node = &tp->ooo_last_skb->rbnode; 5393 5394 do { 5395 struct sk_buff *skb = rb_to_skb(node); 5396 5397 /* If incoming skb would land last in ofo queue, stop pruning. 
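 * (Editorial note: we evict the highest sequences first. Once the
 * incoming skb would sort behind every remaining skb, evicting more of
 * the queue would only free room for data even farther from the hole,
 * so pruning stops here.)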
*/ 5398 if (after(TCP_SKB_CB(in_skb)->seq, TCP_SKB_CB(skb)->seq)) 5399 break; 5400 pruned = true; 5401 prev = rb_prev(node); 5402 rb_erase(node, &tp->out_of_order_queue); 5403 goal -= skb->truesize; 5404 tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE); 5405 tp->ooo_last_skb = rb_to_skb(prev); 5406 if (!prev || goal <= 0) { 5407 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 5408 !tcp_under_memory_pressure(sk)) 5409 break; 5410 goal = sk->sk_rcvbuf >> 3; 5411 } 5412 node = prev; 5413 } while (node); 5414 5415 if (pruned) { 5416 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); 5417 /* Reset SACK state. A conforming SACK implementation will 5418 * do the same at a timeout based retransmit. When a connection 5419 * is in a sad state like this, we care only about integrity 5420 * of the connection not performance. 5421 */ 5422 if (tp->rx_opt.sack_ok) 5423 tcp_sack_reset(&tp->rx_opt); 5424 } 5425 return pruned; 5426 } 5427 5428 /* Reduce allocated memory if we can, trying to get 5429 * the socket within its memory limits again. 5430 * 5431 * Return less than zero if we should start dropping frames 5432 * until the socket owning process reads some of the data 5433 * to stabilize the situation. 5434 */ 5435 static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb) 5436 { 5437 struct tcp_sock *tp = tcp_sk(sk); 5438 5439 NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); 5440 5441 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 5442 tcp_clamp_window(sk); 5443 else if (tcp_under_memory_pressure(sk)) 5444 tcp_adjust_rcv_ssthresh(sk); 5445 5446 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 5447 return 0; 5448 5449 tcp_collapse_ofo_queue(sk); 5450 if (!skb_queue_empty(&sk->sk_receive_queue)) 5451 tcp_collapse(sk, &sk->sk_receive_queue, NULL, 5452 skb_peek(&sk->sk_receive_queue), 5453 NULL, 5454 tp->copied_seq, tp->rcv_nxt); 5455 5456 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 5457 return 0; 5458 5459 /* Collapsing did not help, destructive actions follow. 5460 * This must not ever occur. */ 5461 5462 tcp_prune_ofo_queue(sk, in_skb); 5463 5464 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 5465 return 0; 5466 5467 /* If we are really being abused, tell the caller to silently 5468 * drop receive data on the floor. It will get retransmitted 5469 * and hopefully then we'll have sufficient space. 5470 */ 5471 NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED); 5472 5473 /* Massive buffer overcommit. */ 5474 tp->pred_flags = 0; 5475 return -1; 5476 } 5477 5478 static bool tcp_should_expand_sndbuf(struct sock *sk) 5479 { 5480 const struct tcp_sock *tp = tcp_sk(sk); 5481 5482 /* If the user specified a specific send buffer setting, do 5483 * not modify it. 5484 */ 5485 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) 5486 return false; 5487 5488 /* If we are under global TCP memory pressure, do not expand. */ 5489 if (tcp_under_memory_pressure(sk)) { 5490 int unused_mem = sk_unused_reserved_mem(sk); 5491 5492 /* Adjust sndbuf according to reserved mem. But make sure 5493 * it never goes below SOCK_MIN_SNDBUF. 5494 * See sk_stream_moderate_sndbuf() for more details. 5495 */ 5496 if (unused_mem > SOCK_MIN_SNDBUF) 5497 WRITE_ONCE(sk->sk_sndbuf, unused_mem); 5498 5499 return false; 5500 } 5501 5502 /* If we are under soft global TCP memory pressure, do not expand. */ 5503 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) 5504 return false; 5505 5506 /* If we filled the congestion window, do not expand. 
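 * (Editorial note: once tcp_packets_in_flight() reaches tcp_snd_cwnd(),
 * the congestion window rather than the send buffer is the limiting
 * factor, so growing sndbuf could not increase throughput.)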
*/ 5507 if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)) 5508 return false; 5509 5510 return true; 5511 } 5512 5513 static void tcp_new_space(struct sock *sk) 5514 { 5515 struct tcp_sock *tp = tcp_sk(sk); 5516 5517 if (tcp_should_expand_sndbuf(sk)) { 5518 tcp_sndbuf_expand(sk); 5519 tp->snd_cwnd_stamp = tcp_jiffies32; 5520 } 5521 5522 INDIRECT_CALL_1(sk->sk_write_space, sk_stream_write_space, sk); 5523 } 5524 5525 /* Caller made space either from: 5526 * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced) 5527 * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt) 5528 * 5529 * We might be able to generate EPOLLOUT to the application if: 5530 * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2 5531 * 2) notsent amount (tp->write_seq - tp->snd_nxt) became 5532 * small enough that tcp_stream_memory_free() decides it 5533 * is time to generate EPOLLOUT. 5534 */ 5535 void tcp_check_space(struct sock *sk) 5536 { 5537 /* pairs with tcp_poll() */ 5538 smp_mb(); 5539 if (sk->sk_socket && 5540 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 5541 tcp_new_space(sk); 5542 if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) 5543 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 5544 } 5545 } 5546 5547 static inline void tcp_data_snd_check(struct sock *sk) 5548 { 5549 tcp_push_pending_frames(sk); 5550 tcp_check_space(sk); 5551 } 5552 5553 /* 5554 * Check if sending an ack is needed. 5555 */ 5556 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) 5557 { 5558 struct tcp_sock *tp = tcp_sk(sk); 5559 unsigned long rtt, delay; 5560 5561 /* More than one full frame received... */ 5562 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && 5563 /* ... and right edge of window advances far enough. 5564 * (tcp_recvmsg() will send ACK otherwise). 5565 * If application uses SO_RCVLOWAT, we want send ack now if 5566 * we have not received enough bytes to satisfy the condition. 5567 */ 5568 (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat || 5569 __tcp_select_window(sk) >= tp->rcv_wnd)) || 5570 /* We ACK each frame or... 
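 * (That is tcp_in_quickack_mode() below; quick-ACK mode is entered
 * elsewhere in this file, e.g. for out-of-window segments and during
 * connection setup, and forces immediate ACKs for a limited number of
 * segments.)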
*/
5571 tcp_in_quickack_mode(sk) ||
5572 /* Protocol state mandates a one-time immediate ACK */
5573 inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) {
5574 send_now:
5575 tcp_send_ack(sk);
5576 return;
5577 }
5578
5579 if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5580 tcp_send_delayed_ack(sk);
5581 return;
5582 }
5583
5584 if (!tcp_is_sack(tp) ||
5585 tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
5586 goto send_now;
5587
5588 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
5589 tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
5590 tp->dup_ack_counter = 0;
5591 }
5592 if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) {
5593 tp->dup_ack_counter++;
5594 goto send_now;
5595 }
5596 tp->compressed_ack++;
5597 if (hrtimer_is_queued(&tp->compressed_ack_timer))
5598 return;
5599
5600 /* Compressed ACK timer: 5% of RTT, but no more than tcp_comp_sack_delay_ns */
5601
5602 rtt = tp->rcv_rtt_est.rtt_us;
5603 if (tp->srtt_us && tp->srtt_us < rtt)
5604 rtt = tp->srtt_us;
5605
5606 delay = min_t(unsigned long,
5607 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
5608 rtt * (NSEC_PER_USEC >> 3)/20);
5609 sock_hold(sk);
5610 hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
5611 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns),
5612 HRTIMER_MODE_REL_PINNED_SOFT);
5613 }
5614
5615 static inline void tcp_ack_snd_check(struct sock *sk)
5616 {
5617 if (!inet_csk_ack_scheduled(sk)) {
5618 /* We sent a data segment already. */
5619 return;
5620 }
5621 __tcp_ack_snd_check(sk, 1);
5622 }
5623
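/* Editorial note, not part of the original source: a worked example of
 * the compressed-ACK delay computed in __tcp_ack_snd_check() above,
 * assuming (as elsewhere in this file) that srtt_us stores the smoothed
 * RTT in microseconds left-shifted by 3. For a 40 ms smoothed RTT:
 *
 *	rtt   = 40000 << 3 = 320000
 *	delay = rtt * (NSEC_PER_USEC >> 3) / 20
 *	      = 320000 * (1000 >> 3) / 20
 *	      = 320000 * 125 / 20 = 2000000 ns = 2 ms
 *
 * i.e. 5% of the RTT, before being clamped to tcp_comp_sack_delay_ns.
 */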
5624 /*
5625 * This routine is only called when we have urgent data
5626 * signaled. It's the 'slow' part of tcp_urg. It could be
5627 * moved inline now as tcp_urg is only called from one
5628 * place. We handle URGent data wrong; we have to, as
5629 * BSD still doesn't use the correction from RFC961.
5630 * For 1003.1g we should support a new option TCP_STDURG to permit
5631 * either form (or just set the sysctl tcp_stdurg).
5632 */
5633
5634 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
5635 {
5636 struct tcp_sock *tp = tcp_sk(sk);
5637 u32 ptr = ntohs(th->urg_ptr);
5638
5639 if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg))
5640 ptr--;
5641 ptr += ntohl(th->seq);
5642
5643 /* Ignore urgent data that we've already seen and read. */
5644 if (after(tp->copied_seq, ptr))
5645 return;
5646
5647 /* Do not replay urg ptr.
5648 *
5649 * NOTE: interesting situation not covered by specs.
5650 * A misbehaving sender may send an urg ptr pointing into a segment
5651 * which we already have in the ofo queue. We are not able to fetch
5652 * such data and will stay in TCP_URG_NOTYET until it is eaten
5653 * by recvmsg(). It seems we are not obliged to handle such wicked
5654 * situations. But it is worth thinking about the possibility of
5655 * DoS attacks exploiting some hypothetical application-level deadlock.
5656 */
5657 if (before(ptr, tp->rcv_nxt))
5658 return;
5659
5660 /* Do we already have a newer (or duplicate) urgent pointer? */
5661 if (tp->urg_data && !after(ptr, tp->urg_seq))
5662 return;
5663
5664 /* Tell the world about our new urgent pointer. */
5665 sk_send_sigurg(sk);
5666
5667 /* We may be adding urgent data when the last byte read was
5668 * urgent. Doing this requires some care: we cannot just ignore
5669 * tp->copied_seq since we would read the last urgent byte again
5670 * as data, nor can we alter copied_seq until this data arrives
5671 * or we break the semantics of SIOCATMARK (and thus sockatmark()).
5672 *
5673 * NOTE. Double Dutch. Rendering to plain English: the author of the
5674 * comment above did something like send("A", MSG_OOB); send("B", MSG_OOB);
5675 * and expected both A and B to disappear from the stream. This is _wrong_.
5676 * Though this happens in BSD with high probability, it is not guaranteed.
5677 * Any application relying on it is buggy. Note also that the fix "works"
5678 * only in this artificial test. Insert some normal data between A and B and
5679 * we will diverge from BSD again. Verdict: it is better to remove this to
5680 * trap buggy users.
5681 */
5682 if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
5683 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
5684 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
5685 tp->copied_seq++;
5686 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
5687 __skb_unlink(skb, &sk->sk_receive_queue);
5688 __kfree_skb(skb);
5689 }
5690 }
5691
5692 WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET);
5693 WRITE_ONCE(tp->urg_seq, ptr);
5694
5695 /* Disable header prediction. */
5696 tp->pred_flags = 0;
5697 }
5698
5699 /* This is the 'fast' part of urgent handling. */
5700 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
5701 {
5702 struct tcp_sock *tp = tcp_sk(sk);
5703
5704 /* Check if we get a new urgent pointer - normally not. */
5705 if (unlikely(th->urg))
5706 tcp_check_urg(sk, th);
5707
5708 /* Do we wait for any urgent data? - normally not... */
5709 if (unlikely(tp->urg_data == TCP_URG_NOTYET)) {
5710 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
5711 th->syn;
5712
5713 /* Is the urgent pointer pointing into this packet? */
5714 if (ptr < skb->len) {
5715 u8 tmp;
5716 if (skb_copy_bits(skb, ptr, &tmp, 1))
5717 BUG();
5718 WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp);
5719 if (!sock_flag(sk, SOCK_DEAD))
5720 sk->sk_data_ready(sk);
5721 }
5722 }
5723 }
5724
5725 /* Accept RST for rcv_nxt - 1 after a FIN.
5726 * When TCP connections are abruptly terminated from Mac OSX (via ^C), a
5727 * FIN is sent followed by a RST packet. The RST is sent with the same
5728 * sequence number as the FIN, and thus according to RFC 5961 a challenge
5729 * ACK should be sent. However, Mac OSX rate limits replies to challenge
5730 * ACKs on the closed socket. In addition middleboxes can drop either the
5731 * challenge ACK or a subsequent RST.
5732 */
5733 static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
5734 {
5735 const struct tcp_sock *tp = tcp_sk(sk);
5736
5737 return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
5738 (1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
5739 TCPF_CLOSING));
5740 }
5741
5742 /* Does PAWS and seqno based validation of an incoming segment; flags
5743 * play a significant role here.
5744 */
5745 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5746 const struct tcphdr *th, int syn_inerr)
5747 {
5748 struct tcp_sock *tp = tcp_sk(sk);
5749 SKB_DR(reason);
5750
5751 /* RFC1323: H1. Apply PAWS check first.
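 * (PAWS = Protect Against Wrapped Sequences, RFC 7323, which obsoleted
 * RFC 1323: a timestamped segment whose TSval is older than ts_recent
 * is presumed to be an old duplicate from a previous sequence-number
 * wrap and is discarded, unless it carries a RST.)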
*/ 5752 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) && 5753 tp->rx_opt.saw_tstamp && 5754 tcp_paws_discard(sk, skb)) { 5755 if (!th->rst) { 5756 if (unlikely(th->syn)) 5757 goto syn_challenge; 5758 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); 5759 if (!tcp_oow_rate_limited(sock_net(sk), skb, 5760 LINUX_MIB_TCPACKSKIPPEDPAWS, 5761 &tp->last_oow_ack_time)) 5762 tcp_send_dupack(sk, skb); 5763 SKB_DR_SET(reason, TCP_RFC7323_PAWS); 5764 goto discard; 5765 } 5766 /* Reset is accepted even if it did not pass PAWS. */ 5767 } 5768 5769 /* Step 1: check sequence number */ 5770 reason = tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 5771 if (reason) { 5772 /* RFC793, page 37: "In all states except SYN-SENT, all reset 5773 * (RST) segments are validated by checking their SEQ-fields." 5774 * And page 69: "If an incoming segment is not acceptable, 5775 * an acknowledgment should be sent in reply (unless the RST 5776 * bit is set, if so drop the segment and return)". 5777 */ 5778 if (!th->rst) { 5779 if (th->syn) 5780 goto syn_challenge; 5781 if (!tcp_oow_rate_limited(sock_net(sk), skb, 5782 LINUX_MIB_TCPACKSKIPPEDSEQ, 5783 &tp->last_oow_ack_time)) 5784 tcp_send_dupack(sk, skb); 5785 } else if (tcp_reset_check(sk, skb)) { 5786 goto reset; 5787 } 5788 goto discard; 5789 } 5790 5791 /* Step 2: check RST bit */ 5792 if (th->rst) { 5793 /* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a 5794 * FIN and SACK too if available): 5795 * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or 5796 * the right-most SACK block, 5797 * then 5798 * RESET the connection 5799 * else 5800 * Send a challenge ACK 5801 */ 5802 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt || 5803 tcp_reset_check(sk, skb)) 5804 goto reset; 5805 5806 if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { 5807 struct tcp_sack_block *sp = &tp->selective_acks[0]; 5808 int max_sack = sp[0].end_seq; 5809 int this_sack; 5810 5811 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; 5812 ++this_sack) { 5813 max_sack = after(sp[this_sack].end_seq, 5814 max_sack) ? 5815 sp[this_sack].end_seq : max_sack; 5816 } 5817 5818 if (TCP_SKB_CB(skb)->seq == max_sack) 5819 goto reset; 5820 } 5821 5822 /* Disable TFO if RST is out-of-order 5823 * and no data has been received 5824 * for current active TFO socket 5825 */ 5826 if (tp->syn_fastopen && !tp->data_segs_in && 5827 sk->sk_state == TCP_ESTABLISHED) 5828 tcp_fastopen_active_disable(sk); 5829 tcp_send_challenge_ack(sk); 5830 SKB_DR_SET(reason, TCP_RESET); 5831 goto discard; 5832 } 5833 5834 /* step 3: check security and precedence [ignored] */ 5835 5836 /* step 4: Check for a SYN 5837 * RFC 5961 4.2 : Send a challenge ack 5838 */ 5839 if (th->syn) { 5840 syn_challenge: 5841 if (syn_inerr) 5842 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 5843 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); 5844 tcp_send_challenge_ack(sk); 5845 SKB_DR_SET(reason, TCP_INVALID_SYN); 5846 goto discard; 5847 } 5848 5849 bpf_skops_parse_hdr(sk, skb); 5850 5851 return true; 5852 5853 discard: 5854 tcp_drop_reason(sk, skb, reason); 5855 return false; 5856 5857 reset: 5858 tcp_reset(sk, skb); 5859 __kfree_skb(skb); 5860 return false; 5861 } 5862 5863 /* 5864 * TCP receive function for the ESTABLISHED state. 5865 * 5866 * It is split into a fast path and a slow path. The fast path is 5867 * disabled when: 5868 * - A zero window was announced from us - zero window probing 5869 * is only handled properly in the slow path. 5870 * - Out of order segments arrived. 
5871 * - Urgent data is expected. 5872 * - There is no buffer space left 5873 * - Unexpected TCP flags/window values/header lengths are received 5874 * (detected by checking the TCP header against pred_flags) 5875 * - Data is sent in both directions. Fast path only supports pure senders 5876 * or pure receivers (this means either the sequence number or the ack 5877 * value must stay constant) 5878 * - Unexpected TCP option. 5879 * 5880 * When these conditions are not satisfied it drops into a standard 5881 * receive procedure patterned after RFC793 to handle all cases. 5882 * The first three cases are guaranteed by proper pred_flags setting, 5883 * the rest is checked inline. Fast processing is turned on in 5884 * tcp_data_queue when everything is OK. 5885 */ 5886 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) 5887 { 5888 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; 5889 const struct tcphdr *th = (const struct tcphdr *)skb->data; 5890 struct tcp_sock *tp = tcp_sk(sk); 5891 unsigned int len = skb->len; 5892 5893 /* TCP congestion window tracking */ 5894 trace_tcp_probe(sk, skb); 5895 5896 tcp_mstamp_refresh(tp); 5897 if (unlikely(!rcu_access_pointer(sk->sk_rx_dst))) 5898 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); 5899 /* 5900 * Header prediction. 5901 * The code loosely follows the one in the famous 5902 * "30 instruction TCP receive" Van Jacobson mail. 5903 * 5904 * Van's trick is to deposit buffers into socket queue 5905 * on a device interrupt, to call tcp_recv function 5906 * on the receive process context and checksum and copy 5907 * the buffer to user space. smart... 5908 * 5909 * Our current scheme is not silly either but we take the 5910 * extra cost of the net_bh soft interrupt processing... 5911 * We do checksum and copy also but from device to kernel. 5912 */ 5913 5914 tp->rx_opt.saw_tstamp = 0; 5915 5916 /* pred_flags is 0xS?10 << 16 + snd_wnd 5917 * if header_prediction is to be made 5918 * 'S' will always be tp->tcp_header_len >> 2 5919 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to 5920 * turn it off (when there are holes in the receive 5921 * space for instance) 5922 * PSH flag is ignored. 5923 */ 5924 5925 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && 5926 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && 5927 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { 5928 int tcp_header_len = tp->tcp_header_len; 5929 5930 /* Timestamp header prediction: tcp_header_len 5931 * is automatically equal to th->doff*4 due to pred_flags 5932 * match. 5933 */ 5934 5935 /* Check timestamp */ 5936 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 5937 /* No? Slow path! */ 5938 if (!tcp_parse_aligned_timestamp(tp, th)) 5939 goto slow_path; 5940 5941 /* If PAWS failed, check it more carefully in slow path */ 5942 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 5943 goto slow_path; 5944 5945 /* DO NOT update ts_recent here, if checksum fails 5946 * and timestamp was corrupted part, it will result 5947 * in a hung connection since we will drop all 5948 * future packets due to the PAWS test. 5949 */ 5950 } 5951 5952 if (len <= tcp_header_len) { 5953 /* Bulk data transfer: sender */ 5954 if (len == tcp_header_len) { 5955 /* Predicted packet is in window by definition. 5956 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 
5957 * Hence, check seq<=rcv_wup reduces to: 5958 */ 5959 if (tcp_header_len == 5960 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 5961 tp->rcv_nxt == tp->rcv_wup) 5962 tcp_store_ts_recent(tp); 5963 5964 /* We know that such packets are checksummed 5965 * on entry. 5966 */ 5967 tcp_ack(sk, skb, 0); 5968 __kfree_skb(skb); 5969 tcp_data_snd_check(sk); 5970 /* When receiving pure ack in fast path, update 5971 * last ts ecr directly instead of calling 5972 * tcp_rcv_rtt_measure_ts() 5973 */ 5974 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; 5975 return; 5976 } else { /* Header too small */ 5977 reason = SKB_DROP_REASON_PKT_TOO_SMALL; 5978 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 5979 goto discard; 5980 } 5981 } else { 5982 int eaten = 0; 5983 bool fragstolen = false; 5984 5985 if (tcp_checksum_complete(skb)) 5986 goto csum_error; 5987 5988 if ((int)skb->truesize > sk->sk_forward_alloc) 5989 goto step5; 5990 5991 /* Predicted packet is in window by definition. 5992 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5993 * Hence, check seq<=rcv_wup reduces to: 5994 */ 5995 if (tcp_header_len == 5996 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 5997 tp->rcv_nxt == tp->rcv_wup) 5998 tcp_store_ts_recent(tp); 5999 6000 tcp_rcv_rtt_measure_ts(sk, skb); 6001 6002 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); 6003 6004 /* Bulk data transfer: receiver */ 6005 skb_dst_drop(skb); 6006 __skb_pull(skb, tcp_header_len); 6007 eaten = tcp_queue_rcv(sk, skb, &fragstolen); 6008 6009 tcp_event_data_recv(sk, skb); 6010 6011 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { 6012 /* Well, only one small jumplet in fast path... */ 6013 tcp_ack(sk, skb, FLAG_DATA); 6014 tcp_data_snd_check(sk); 6015 if (!inet_csk_ack_scheduled(sk)) 6016 goto no_ack; 6017 } else { 6018 tcp_update_wl(tp, TCP_SKB_CB(skb)->seq); 6019 } 6020 6021 __tcp_ack_snd_check(sk, 0); 6022 no_ack: 6023 if (eaten) 6024 kfree_skb_partial(skb, fragstolen); 6025 tcp_data_ready(sk); 6026 return; 6027 } 6028 } 6029 6030 slow_path: 6031 if (len < (th->doff << 2) || tcp_checksum_complete(skb)) 6032 goto csum_error; 6033 6034 if (!th->ack && !th->rst && !th->syn) { 6035 reason = SKB_DROP_REASON_TCP_FLAGS; 6036 goto discard; 6037 } 6038 6039 /* 6040 * Standard slow path. 6041 */ 6042 6043 if (!tcp_validate_incoming(sk, skb, th, 1)) 6044 return; 6045 6046 step5: 6047 reason = tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT); 6048 if ((int)reason < 0) { 6049 reason = -reason; 6050 goto discard; 6051 } 6052 tcp_rcv_rtt_measure_ts(sk, skb); 6053 6054 /* Process urgent data. */ 6055 tcp_urg(sk, skb, th); 6056 6057 /* step 7: process the segment text */ 6058 tcp_data_queue(sk, skb); 6059 6060 tcp_data_snd_check(sk); 6061 tcp_ack_snd_check(sk); 6062 return; 6063 6064 csum_error: 6065 reason = SKB_DROP_REASON_TCP_CSUM; 6066 trace_tcp_bad_csum(skb); 6067 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); 6068 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 6069 6070 discard: 6071 tcp_drop_reason(sk, skb, reason); 6072 } 6073 EXPORT_SYMBOL(tcp_rcv_established); 6074 6075 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb) 6076 { 6077 struct inet_connection_sock *icsk = inet_csk(sk); 6078 struct tcp_sock *tp = tcp_sk(sk); 6079 6080 tcp_mtup_init(sk); 6081 icsk->icsk_af_ops->rebuild_header(sk); 6082 tcp_init_metrics(sk); 6083 6084 /* Initialize the congestion window to start the transfer. 6085 * Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been 6086 * retransmitted. 
In light of RFC6298 more aggressive 1sec 6087 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK 6088 * retransmission has occurred. 6089 */ 6090 if (tp->total_retrans > 1 && tp->undo_marker) 6091 tcp_snd_cwnd_set(tp, 1); 6092 else 6093 tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk))); 6094 tp->snd_cwnd_stamp = tcp_jiffies32; 6095 6096 bpf_skops_established(sk, bpf_op, skb); 6097 /* Initialize congestion control unless BPF initialized it already: */ 6098 if (!icsk->icsk_ca_initialized) 6099 tcp_init_congestion_control(sk); 6100 tcp_init_buffer_space(sk); 6101 } 6102 6103 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) 6104 { 6105 struct tcp_sock *tp = tcp_sk(sk); 6106 struct inet_connection_sock *icsk = inet_csk(sk); 6107 6108 tcp_set_state(sk, TCP_ESTABLISHED); 6109 icsk->icsk_ack.lrcvtime = tcp_jiffies32; 6110 6111 if (skb) { 6112 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); 6113 security_inet_conn_established(sk, skb); 6114 sk_mark_napi_id(sk, skb); 6115 } 6116 6117 tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, skb); 6118 6119 /* Prevent spurious tcp_cwnd_restart() on first data 6120 * packet. 6121 */ 6122 tp->lsndtime = tcp_jiffies32; 6123 6124 if (sock_flag(sk, SOCK_KEEPOPEN)) 6125 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); 6126 6127 if (!tp->rx_opt.snd_wscale) 6128 __tcp_fast_path_on(tp, tp->snd_wnd); 6129 else 6130 tp->pred_flags = 0; 6131 } 6132 6133 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, 6134 struct tcp_fastopen_cookie *cookie) 6135 { 6136 struct tcp_sock *tp = tcp_sk(sk); 6137 struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL; 6138 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0; 6139 bool syn_drop = false; 6140 6141 if (mss == tp->rx_opt.user_mss) { 6142 struct tcp_options_received opt; 6143 6144 /* Get original SYNACK MSS value if user MSS sets mss_clamp */ 6145 tcp_clear_options(&opt); 6146 opt.user_mss = opt.mss_clamp = 0; 6147 tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL); 6148 mss = opt.mss_clamp; 6149 } 6150 6151 if (!tp->syn_fastopen) { 6152 /* Ignore an unsolicited cookie */ 6153 cookie->len = -1; 6154 } else if (tp->total_retrans) { 6155 /* SYN timed out and the SYN-ACK neither has a cookie nor 6156 * acknowledges data. Presumably the remote received only 6157 * the retransmitted (regular) SYNs: either the original 6158 * SYN-data or the corresponding SYN-ACK was dropped. 6159 */ 6160 syn_drop = (cookie->len < 0 && data); 6161 } else if (cookie->len < 0 && !tp->syn_data) { 6162 /* We requested a cookie but didn't get it. If we did not use 6163 * the (old) exp opt format then try so next time (try_exp=1). 6164 * Otherwise we go back to use the RFC7413 opt (try_exp=2). 6165 */ 6166 try_exp = tp->syn_fastopen_exp ? 
2 : 1; 6167 } 6168 6169 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp); 6170 6171 if (data) { /* Retransmit unacked data in SYN */ 6172 if (tp->total_retrans) 6173 tp->fastopen_client_fail = TFO_SYN_RETRANSMITTED; 6174 else 6175 tp->fastopen_client_fail = TFO_DATA_NOT_ACKED; 6176 skb_rbtree_walk_from(data) 6177 tcp_mark_skb_lost(sk, data); 6178 tcp_xmit_retransmit_queue(sk); 6179 tp->retrans_stamp = 0; 6180 NET_INC_STATS(sock_net(sk), 6181 LINUX_MIB_TCPFASTOPENACTIVEFAIL); 6182 return true; 6183 } 6184 tp->syn_data_acked = tp->syn_data; 6185 if (tp->syn_data_acked) { 6186 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE); 6187 /* SYN-data is counted as two separate packets in tcp_ack() */ 6188 if (tp->delivered > 1) 6189 --tp->delivered; 6190 } 6191 6192 tcp_fastopen_add_skb(sk, synack); 6193 6194 return false; 6195 } 6196 6197 static void smc_check_reset_syn(struct tcp_sock *tp) 6198 { 6199 #if IS_ENABLED(CONFIG_SMC) 6200 if (static_branch_unlikely(&tcp_have_smc)) { 6201 if (tp->syn_smc && !tp->rx_opt.smc_ok) 6202 tp->syn_smc = 0; 6203 } 6204 #endif 6205 } 6206 6207 static void tcp_try_undo_spurious_syn(struct sock *sk) 6208 { 6209 struct tcp_sock *tp = tcp_sk(sk); 6210 u32 syn_stamp; 6211 6212 /* undo_marker is set when SYN or SYNACK times out. The timeout is 6213 * spurious if the ACK's timestamp option echo value matches the 6214 * original SYN timestamp. 6215 */ 6216 syn_stamp = tp->retrans_stamp; 6217 if (tp->undo_marker && syn_stamp && tp->rx_opt.saw_tstamp && 6218 syn_stamp == tp->rx_opt.rcv_tsecr) 6219 tp->undo_marker = 0; 6220 } 6221 6222 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 6223 const struct tcphdr *th) 6224 { 6225 struct inet_connection_sock *icsk = inet_csk(sk); 6226 struct tcp_sock *tp = tcp_sk(sk); 6227 struct tcp_fastopen_cookie foc = { .len = -1 }; 6228 int saved_clamp = tp->rx_opt.mss_clamp; 6229 bool fastopen_fail; 6230 SKB_DR(reason); 6231 6232 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc); 6233 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 6234 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 6235 6236 if (th->ack) { 6237 /* rfc793: 6238 * "If the state is SYN-SENT then 6239 * first check the ACK bit 6240 * If the ACK bit is set 6241 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send 6242 * a reset (unless the RST bit is set, if so drop 6243 * the segment and return)" 6244 */ 6245 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || 6246 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { 6247 /* Previous FIN/ACK or RST/ACK might be ignored. */ 6248 if (icsk->icsk_retransmits == 0) 6249 inet_csk_reset_xmit_timer(sk, 6250 ICSK_TIME_RETRANS, 6251 TCP_TIMEOUT_MIN, TCP_RTO_MAX); 6252 goto reset_and_undo; 6253 } 6254 6255 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 6256 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, 6257 tcp_time_stamp(tp))) { 6258 NET_INC_STATS(sock_net(sk), 6259 LINUX_MIB_PAWSACTIVEREJECTED); 6260 goto reset_and_undo; 6261 } 6262 6263 /* Now ACK is acceptable. 6264 * 6265 * "If the RST bit is set 6266 * If the ACK was acceptable then signal the user "error: 6267 * connection reset", drop the segment, enter CLOSED state, 6268 * delete TCB, and return." 6269 */ 6270 6271 if (th->rst) { 6272 tcp_reset(sk, skb); 6273 consume: 6274 __kfree_skb(skb); 6275 return 0; 6276 } 6277 6278 /* rfc793: 6279 * "fifth, if neither of the SYN or RST bits is set then 6280 * drop the segment and return." 6281 * 6282 * See note below! 
6283 * --ANK(990513) 6284 */ 6285 if (!th->syn) { 6286 SKB_DR_SET(reason, TCP_FLAGS); 6287 goto discard_and_undo; 6288 } 6289 /* rfc793: 6290 * "If the SYN bit is on ... 6291 * are acceptable then ... 6292 * (our SYN has been ACKed), change the connection 6293 * state to ESTABLISHED..." 6294 */ 6295 6296 tcp_ecn_rcv_synack(tp, th); 6297 6298 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); 6299 tcp_try_undo_spurious_syn(sk); 6300 tcp_ack(sk, skb, FLAG_SLOWPATH); 6301 6302 /* Ok.. it's good. Set up sequence numbers and 6303 * move to established. 6304 */ 6305 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); 6306 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 6307 6308 /* RFC1323: The window in SYN & SYN/ACK segments is 6309 * never scaled. 6310 */ 6311 tp->snd_wnd = ntohs(th->window); 6312 6313 if (!tp->rx_opt.wscale_ok) { 6314 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; 6315 tp->window_clamp = min(tp->window_clamp, 65535U); 6316 } 6317 6318 if (tp->rx_opt.saw_tstamp) { 6319 tp->rx_opt.tstamp_ok = 1; 6320 tp->tcp_header_len = 6321 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 6322 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 6323 tcp_store_ts_recent(tp); 6324 } else { 6325 tp->tcp_header_len = sizeof(struct tcphdr); 6326 } 6327 6328 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 6329 tcp_initialize_rcv_mss(sk); 6330 6331 /* Remember, tcp_poll() does not lock socket! 6332 * Change state from SYN-SENT only after copied_seq 6333 * is initialized. */ 6334 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 6335 6336 smc_check_reset_syn(tp); 6337 6338 smp_mb(); 6339 6340 tcp_finish_connect(sk, skb); 6341 6342 fastopen_fail = (tp->syn_fastopen || tp->syn_data) && 6343 tcp_rcv_fastopen_synack(sk, skb, &foc); 6344 6345 if (!sock_flag(sk, SOCK_DEAD)) { 6346 sk->sk_state_change(sk); 6347 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); 6348 } 6349 if (fastopen_fail) 6350 return -1; 6351 if (sk->sk_write_pending || 6352 READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept) || 6353 inet_csk_in_pingpong_mode(sk)) { 6354 /* Save one ACK. Data will be ready after 6355 * several ticks, if write_pending is set. 6356 * 6357 * It may be deleted, but with this feature tcpdumps 6358 * look so _wonderfully_ clever, that I was not able 6359 * to stand against the temptation 8) --ANK 6360 */ 6361 inet_csk_schedule_ack(sk); 6362 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); 6363 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 6364 TCP_DELACK_MAX, TCP_RTO_MAX); 6365 goto consume; 6366 } 6367 tcp_send_ack(sk); 6368 return -1; 6369 } 6370 6371 /* No ACK in the segment */ 6372 6373 if (th->rst) { 6374 /* rfc793: 6375 * "If the RST bit is set 6376 * 6377 * Otherwise (no ACK) drop the segment and return." 6378 */ 6379 SKB_DR_SET(reason, TCP_RESET); 6380 goto discard_and_undo; 6381 } 6382 6383 /* PAWS check. */ 6384 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && 6385 tcp_paws_reject(&tp->rx_opt, 0)) { 6386 SKB_DR_SET(reason, TCP_RFC7323_PAWS); 6387 goto discard_and_undo; 6388 } 6389 if (th->syn) { 6390 /* We see SYN without ACK. It is attempt of 6391 * simultaneous connect with crossed SYNs. 6392 * Particularly, it can be connect to self. 
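 * (Editorial note: this is the simultaneous-open case of RFC 793
 * section 3.4; both ends sent a SYN before seeing the peer's, so each
 * side moves from SYN-SENT to SYN-RECEIVED and answers with a SYN-ACK,
 * as done just below via tcp_send_synack().)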
6393 */ 6394 tcp_set_state(sk, TCP_SYN_RECV); 6395 6396 if (tp->rx_opt.saw_tstamp) { 6397 tp->rx_opt.tstamp_ok = 1; 6398 tcp_store_ts_recent(tp); 6399 tp->tcp_header_len = 6400 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 6401 } else { 6402 tp->tcp_header_len = sizeof(struct tcphdr); 6403 } 6404 6405 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); 6406 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 6407 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 6408 6409 /* RFC1323: The window in SYN & SYN/ACK segments is 6410 * never scaled. 6411 */ 6412 tp->snd_wnd = ntohs(th->window); 6413 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 6414 tp->max_window = tp->snd_wnd; 6415 6416 tcp_ecn_rcv_syn(tp, th); 6417 6418 tcp_mtup_init(sk); 6419 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 6420 tcp_initialize_rcv_mss(sk); 6421 6422 tcp_send_synack(sk); 6423 #if 0 6424 /* Note, we could accept data and URG from this segment. 6425 * There are no obstacles to make this (except that we must 6426 * either change tcp_recvmsg() to prevent it from returning data 6427 * before 3WHS completes per RFC793, or employ TCP Fast Open). 6428 * 6429 * However, if we ignore data in ACKless segments sometimes, 6430 * we have no reasons to accept it sometimes. 6431 * Also, seems the code doing it in step6 of tcp_rcv_state_process 6432 * is not flawless. So, discard packet for sanity. 6433 * Uncomment this return to process the data. 6434 */ 6435 return -1; 6436 #else 6437 goto consume; 6438 #endif 6439 } 6440 /* "fifth, if neither of the SYN or RST bits is set then 6441 * drop the segment and return." 6442 */ 6443 6444 discard_and_undo: 6445 tcp_clear_options(&tp->rx_opt); 6446 tp->rx_opt.mss_clamp = saved_clamp; 6447 tcp_drop_reason(sk, skb, reason); 6448 return 0; 6449 6450 reset_and_undo: 6451 tcp_clear_options(&tp->rx_opt); 6452 tp->rx_opt.mss_clamp = saved_clamp; 6453 return 1; 6454 } 6455 6456 static void tcp_rcv_synrecv_state_fastopen(struct sock *sk) 6457 { 6458 struct tcp_sock *tp = tcp_sk(sk); 6459 struct request_sock *req; 6460 6461 /* If we are still handling the SYNACK RTO, see if timestamp ECR allows 6462 * undo. If peer SACKs triggered fast recovery, we can't undo here. 6463 */ 6464 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out) 6465 tcp_try_undo_recovery(sk); 6466 6467 /* Reset rtx states to prevent spurious retransmits_timed_out() */ 6468 tp->retrans_stamp = 0; 6469 inet_csk(sk)->icsk_retransmits = 0; 6470 6471 /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1, 6472 * we no longer need req so release it. 6473 */ 6474 req = rcu_dereference_protected(tp->fastopen_rsk, 6475 lockdep_sock_is_held(sk)); 6476 reqsk_fastopen_remove(sk, req, false); 6477 6478 /* Re-arm the timer because data may have been sent out. 6479 * This is similar to the regular data transmission case 6480 * when new data has just been ack'ed. 6481 * 6482 * (TFO) - we could try to be more aggressive and 6483 * retransmitting any data sooner based on when they 6484 * are sent out. 6485 */ 6486 tcp_rearm_rto(sk); 6487 } 6488 6489 /* 6490 * This function implements the receiving procedure of RFC 793 for 6491 * all states except ESTABLISHED and TIME_WAIT. 6492 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be 6493 * address independent. 
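 * (Editorial note: the states handled below are CLOSE, LISTEN and
 * SYN-SENT up front, then SYN-RECV, FIN-WAIT-1, FIN-WAIT-2, CLOSING,
 * LAST-ACK and CLOSE-WAIT through the common validate/ACK/URG/data
 * steps.)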
6494 */ 6495 6496 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) 6497 { 6498 struct tcp_sock *tp = tcp_sk(sk); 6499 struct inet_connection_sock *icsk = inet_csk(sk); 6500 const struct tcphdr *th = tcp_hdr(skb); 6501 struct request_sock *req; 6502 int queued = 0; 6503 bool acceptable; 6504 SKB_DR(reason); 6505 6506 switch (sk->sk_state) { 6507 case TCP_CLOSE: 6508 SKB_DR_SET(reason, TCP_CLOSE); 6509 goto discard; 6510 6511 case TCP_LISTEN: 6512 if (th->ack) 6513 return 1; 6514 6515 if (th->rst) { 6516 SKB_DR_SET(reason, TCP_RESET); 6517 goto discard; 6518 } 6519 if (th->syn) { 6520 if (th->fin) { 6521 SKB_DR_SET(reason, TCP_FLAGS); 6522 goto discard; 6523 } 6524 /* It is possible that we process SYN packets from backlog, 6525 * so we need to make sure to disable BH and RCU right there. 6526 */ 6527 rcu_read_lock(); 6528 local_bh_disable(); 6529 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; 6530 local_bh_enable(); 6531 rcu_read_unlock(); 6532 6533 if (!acceptable) 6534 return 1; 6535 consume_skb(skb); 6536 return 0; 6537 } 6538 SKB_DR_SET(reason, TCP_FLAGS); 6539 goto discard; 6540 6541 case TCP_SYN_SENT: 6542 tp->rx_opt.saw_tstamp = 0; 6543 tcp_mstamp_refresh(tp); 6544 queued = tcp_rcv_synsent_state_process(sk, skb, th); 6545 if (queued >= 0) 6546 return queued; 6547 6548 /* Do step6 onward by hand. */ 6549 tcp_urg(sk, skb, th); 6550 __kfree_skb(skb); 6551 tcp_data_snd_check(sk); 6552 return 0; 6553 } 6554 6555 tcp_mstamp_refresh(tp); 6556 tp->rx_opt.saw_tstamp = 0; 6557 req = rcu_dereference_protected(tp->fastopen_rsk, 6558 lockdep_sock_is_held(sk)); 6559 if (req) { 6560 bool req_stolen; 6561 6562 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && 6563 sk->sk_state != TCP_FIN_WAIT1); 6564 6565 if (!tcp_check_req(sk, skb, req, true, &req_stolen)) { 6566 SKB_DR_SET(reason, TCP_FASTOPEN); 6567 goto discard; 6568 } 6569 } 6570 6571 if (!th->ack && !th->rst && !th->syn) { 6572 SKB_DR_SET(reason, TCP_FLAGS); 6573 goto discard; 6574 } 6575 if (!tcp_validate_incoming(sk, skb, th, 0)) 6576 return 0; 6577 6578 /* step 5: check the ACK field */ 6579 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | 6580 FLAG_UPDATE_TS_RECENT | 6581 FLAG_NO_CHALLENGE_ACK) > 0; 6582 6583 if (!acceptable) { 6584 if (sk->sk_state == TCP_SYN_RECV) 6585 return 1; /* send one RST */ 6586 tcp_send_challenge_ack(sk); 6587 SKB_DR_SET(reason, TCP_OLD_ACK); 6588 goto discard; 6589 } 6590 switch (sk->sk_state) { 6591 case TCP_SYN_RECV: 6592 tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */ 6593 if (!tp->srtt_us) 6594 tcp_synack_rtt_meas(sk, req); 6595 6596 if (req) { 6597 tcp_rcv_synrecv_state_fastopen(sk); 6598 } else { 6599 tcp_try_undo_spurious_syn(sk); 6600 tp->retrans_stamp = 0; 6601 tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, 6602 skb); 6603 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 6604 } 6605 smp_mb(); 6606 tcp_set_state(sk, TCP_ESTABLISHED); 6607 sk->sk_state_change(sk); 6608 6609 /* Note, that this wakeup is only for marginal crossed SYN case. 6610 * Passively open sockets are not waked up, because 6611 * sk->sk_sleep == NULL and sk->sk_socket == NULL. 
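 * (A passively opened child only gets a struct socket attached once
 * accept() grafts it, so sk->sk_socket is still NULL at that point.)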
6612 */ 6613 if (sk->sk_socket) 6614 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); 6615 6616 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; 6617 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; 6618 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); 6619 6620 if (tp->rx_opt.tstamp_ok) 6621 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 6622 6623 if (!inet_csk(sk)->icsk_ca_ops->cong_control) 6624 tcp_update_pacing_rate(sk); 6625 6626 /* Prevent spurious tcp_cwnd_restart() on first data packet */ 6627 tp->lsndtime = tcp_jiffies32; 6628 6629 tcp_initialize_rcv_mss(sk); 6630 tcp_fast_path_on(tp); 6631 if (sk->sk_shutdown & SEND_SHUTDOWN) 6632 tcp_shutdown(sk, SEND_SHUTDOWN); 6633 break; 6634 6635 case TCP_FIN_WAIT1: { 6636 int tmo; 6637 6638 if (req) 6639 tcp_rcv_synrecv_state_fastopen(sk); 6640 6641 if (tp->snd_una != tp->write_seq) 6642 break; 6643 6644 tcp_set_state(sk, TCP_FIN_WAIT2); 6645 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN); 6646 6647 sk_dst_confirm(sk); 6648 6649 if (!sock_flag(sk, SOCK_DEAD)) { 6650 /* Wake up lingering close() */ 6651 sk->sk_state_change(sk); 6652 break; 6653 } 6654 6655 if (READ_ONCE(tp->linger2) < 0) { 6656 tcp_done(sk); 6657 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 6658 return 1; 6659 } 6660 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 6661 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { 6662 /* Receive out of order FIN after close() */ 6663 if (tp->syn_fastopen && th->fin) 6664 tcp_fastopen_active_disable(sk); 6665 tcp_done(sk); 6666 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 6667 return 1; 6668 } 6669 6670 tmo = tcp_fin_time(sk); 6671 if (tmo > TCP_TIMEWAIT_LEN) { 6672 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); 6673 } else if (th->fin || sock_owned_by_user(sk)) { 6674 /* Bad case. We could lose such FIN otherwise. 6675 * It is not a big problem, but it looks confusing 6676 * and not so rare event. We still can lose it now, 6677 * if it spins in bh_lock_sock(), but it is really 6678 * marginal case. 6679 */ 6680 inet_csk_reset_keepalive_timer(sk, tmo); 6681 } else { 6682 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 6683 goto consume; 6684 } 6685 break; 6686 } 6687 6688 case TCP_CLOSING: 6689 if (tp->snd_una == tp->write_seq) { 6690 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 6691 goto consume; 6692 } 6693 break; 6694 6695 case TCP_LAST_ACK: 6696 if (tp->snd_una == tp->write_seq) { 6697 tcp_update_metrics(sk); 6698 tcp_done(sk); 6699 goto consume; 6700 } 6701 break; 6702 } 6703 6704 /* step 6: check the URG bit */ 6705 tcp_urg(sk, skb, th); 6706 6707 /* step 7: process the segment text */ 6708 switch (sk->sk_state) { 6709 case TCP_CLOSE_WAIT: 6710 case TCP_CLOSING: 6711 case TCP_LAST_ACK: 6712 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 6713 /* If a subflow has been reset, the packet should not 6714 * continue to be processed, drop the packet. 6715 */ 6716 if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) 6717 goto discard; 6718 break; 6719 } 6720 fallthrough; 6721 case TCP_FIN_WAIT1: 6722 case TCP_FIN_WAIT2: 6723 /* RFC 793 says to queue data in these states, 6724 * RFC 1122 says we MUST send a reset. 6725 * BSD 4.4 also does reset. 
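 * (Editorial note: the reset below is only sent when the receive side
 * has been shut down and the segment still carries new data beyond
 * rcv_nxt, i.e. data we can no longer deliver to the application.)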
6726 */ 6727 if (sk->sk_shutdown & RCV_SHUTDOWN) { 6728 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 6729 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { 6730 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 6731 tcp_reset(sk, skb); 6732 return 1; 6733 } 6734 } 6735 fallthrough; 6736 case TCP_ESTABLISHED: 6737 tcp_data_queue(sk, skb); 6738 queued = 1; 6739 break; 6740 } 6741 6742 /* tcp_data could move socket to TIME-WAIT */ 6743 if (sk->sk_state != TCP_CLOSE) { 6744 tcp_data_snd_check(sk); 6745 tcp_ack_snd_check(sk); 6746 } 6747 6748 if (!queued) { 6749 discard: 6750 tcp_drop_reason(sk, skb, reason); 6751 } 6752 return 0; 6753 6754 consume: 6755 __kfree_skb(skb); 6756 return 0; 6757 } 6758 EXPORT_SYMBOL(tcp_rcv_state_process); 6759 6760 static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) 6761 { 6762 struct inet_request_sock *ireq = inet_rsk(req); 6763 6764 if (family == AF_INET) 6765 net_dbg_ratelimited("drop open request from %pI4/%u\n", 6766 &ireq->ir_rmt_addr, port); 6767 #if IS_ENABLED(CONFIG_IPV6) 6768 else if (family == AF_INET6) 6769 net_dbg_ratelimited("drop open request from %pI6/%u\n", 6770 &ireq->ir_v6_rmt_addr, port); 6771 #endif 6772 } 6773 6774 /* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set 6775 * 6776 * If we receive a SYN packet with these bits set, it means a 6777 * network is playing bad games with TOS bits. In order to 6778 * avoid possible false congestion notifications, we disable 6779 * TCP ECN negotiation. 6780 * 6781 * Exception: tcp_ca wants ECN. This is required for DCTCP 6782 * congestion control: Linux DCTCP asserts ECT on all packets, 6783 * including SYN, which is most optimal solution; however, 6784 * others, such as FreeBSD do not. 6785 * 6786 * Exception: At least one of the reserved bits of the TCP header (th->res1) is 6787 * set, indicating the use of a future TCP extension (such as AccECN). See 6788 * RFC8311 §4.3 which updates RFC3168 to allow the development of such 6789 * extensions. 6790 */ 6791 static void tcp_ecn_create_request(struct request_sock *req, 6792 const struct sk_buff *skb, 6793 const struct sock *listen_sk, 6794 const struct dst_entry *dst) 6795 { 6796 const struct tcphdr *th = tcp_hdr(skb); 6797 const struct net *net = sock_net(listen_sk); 6798 bool th_ecn = th->ece && th->cwr; 6799 bool ect, ecn_ok; 6800 u32 ecn_ok_dst; 6801 6802 if (!th_ecn) 6803 return; 6804 6805 ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield); 6806 ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK); 6807 ecn_ok = READ_ONCE(net->ipv4.sysctl_tcp_ecn) || ecn_ok_dst; 6808 6809 if (((!ect || th->res1) && ecn_ok) || tcp_ca_needs_ecn(listen_sk) || 6810 (ecn_ok_dst & DST_FEATURE_ECN_CA) || 6811 tcp_bpf_ca_needs_ecn((struct sock *)req)) 6812 inet_rsk(req)->ecn_ok = 1; 6813 } 6814 6815 static void tcp_openreq_init(struct request_sock *req, 6816 const struct tcp_options_received *rx_opt, 6817 struct sk_buff *skb, const struct sock *sk) 6818 { 6819 struct inet_request_sock *ireq = inet_rsk(req); 6820 6821 req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */ 6822 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; 6823 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 6824 tcp_rsk(req)->snt_synack = 0; 6825 tcp_rsk(req)->last_oow_ack_time = 0; 6826 req->mss = rx_opt->mss_clamp; 6827 req->ts_recent = rx_opt->saw_tstamp ? 
rx_opt->rcv_tsval : 0; 6828 ireq->tstamp_ok = rx_opt->tstamp_ok; 6829 ireq->sack_ok = rx_opt->sack_ok; 6830 ireq->snd_wscale = rx_opt->snd_wscale; 6831 ireq->wscale_ok = rx_opt->wscale_ok; 6832 ireq->acked = 0; 6833 ireq->ecn_ok = 0; 6834 ireq->ir_rmt_port = tcp_hdr(skb)->source; 6835 ireq->ir_num = ntohs(tcp_hdr(skb)->dest); 6836 ireq->ir_mark = inet_request_mark(sk, skb); 6837 #if IS_ENABLED(CONFIG_SMC) 6838 ireq->smc_ok = rx_opt->smc_ok && !(tcp_sk(sk)->smc_hs_congested && 6839 tcp_sk(sk)->smc_hs_congested(sk)); 6840 #endif 6841 } 6842 6843 struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, 6844 struct sock *sk_listener, 6845 bool attach_listener) 6846 { 6847 struct request_sock *req = reqsk_alloc(ops, sk_listener, 6848 attach_listener); 6849 6850 if (req) { 6851 struct inet_request_sock *ireq = inet_rsk(req); 6852 6853 ireq->ireq_opt = NULL; 6854 #if IS_ENABLED(CONFIG_IPV6) 6855 ireq->pktopts = NULL; 6856 #endif 6857 atomic64_set(&ireq->ir_cookie, 0); 6858 ireq->ireq_state = TCP_NEW_SYN_RECV; 6859 write_pnet(&ireq->ireq_net, sock_net(sk_listener)); 6860 ireq->ireq_family = sk_listener->sk_family; 6861 req->timeout = TCP_TIMEOUT_INIT; 6862 } 6863 6864 return req; 6865 } 6866 EXPORT_SYMBOL(inet_reqsk_alloc); 6867 6868 /* 6869 * Return true if a syncookie should be sent 6870 */ 6871 static bool tcp_syn_flood_action(const struct sock *sk, const char *proto) 6872 { 6873 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; 6874 const char *msg = "Dropping request"; 6875 struct net *net = sock_net(sk); 6876 bool want_cookie = false; 6877 u8 syncookies; 6878 6879 syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies); 6880 6881 #ifdef CONFIG_SYN_COOKIES 6882 if (syncookies) { 6883 msg = "Sending cookies"; 6884 want_cookie = true; 6885 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES); 6886 } else 6887 #endif 6888 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP); 6889 6890 if (!READ_ONCE(queue->synflood_warned) && syncookies != 2 && 6891 xchg(&queue->synflood_warned, 1) == 0) { 6892 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_family == AF_INET6) { 6893 net_info_ratelimited("%s: Possible SYN flooding on port [%pI6c]:%u. %s.\n", 6894 proto, inet6_rcv_saddr(sk), 6895 sk->sk_num, msg); 6896 } else { 6897 net_info_ratelimited("%s: Possible SYN flooding on port %pI4:%u. %s.\n", 6898 proto, &sk->sk_rcv_saddr, 6899 sk->sk_num, msg); 6900 } 6901 } 6902 6903 return want_cookie; 6904 } 6905 6906 static void tcp_reqsk_record_syn(const struct sock *sk, 6907 struct request_sock *req, 6908 const struct sk_buff *skb) 6909 { 6910 if (tcp_sk(sk)->save_syn) { 6911 u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb); 6912 struct saved_syn *saved_syn; 6913 u32 mac_hdrlen; 6914 void *base; 6915 6916 if (tcp_sk(sk)->save_syn == 2) { /* Save full header. */ 6917 base = skb_mac_header(skb); 6918 mac_hdrlen = skb_mac_header_len(skb); 6919 len += mac_hdrlen; 6920 } else { 6921 base = skb_network_header(skb); 6922 mac_hdrlen = 0; 6923 } 6924 6925 saved_syn = kmalloc(struct_size(saved_syn, data, len), 6926 GFP_ATOMIC); 6927 if (saved_syn) { 6928 saved_syn->mac_hdrlen = mac_hdrlen; 6929 saved_syn->network_hdrlen = skb_network_header_len(skb); 6930 saved_syn->tcp_hdrlen = tcp_hdrlen(skb); 6931 memcpy(saved_syn->data, base, len); 6932 req->saved_syn = saved_syn; 6933 } 6934 } 6935 } 6936 6937 /* If a SYN cookie is required and supported, returns a clamped MSS value to be 6938 * used for SYN cookie generation. 
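 * (Editorial note: a return value of 0 means "no cookie": either the
 * request queue is not under pressure, no cookie should be sent, or
 * the accept queue has already overflowed and the SYN is dropped
 * anyway.)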
6939 */ 6940 u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops, 6941 const struct tcp_request_sock_ops *af_ops, 6942 struct sock *sk, struct tcphdr *th) 6943 { 6944 struct tcp_sock *tp = tcp_sk(sk); 6945 u16 mss; 6946 6947 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 && 6948 !inet_csk_reqsk_queue_is_full(sk)) 6949 return 0; 6950 6951 if (!tcp_syn_flood_action(sk, rsk_ops->slab_name)) 6952 return 0; 6953 6954 if (sk_acceptq_is_full(sk)) { 6955 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 6956 return 0; 6957 } 6958 6959 mss = tcp_parse_mss_option(th, tp->rx_opt.user_mss); 6960 if (!mss) 6961 mss = af_ops->mss_clamp; 6962 6963 return mss; 6964 } 6965 EXPORT_SYMBOL_GPL(tcp_get_syncookie_mss); 6966 6967 int tcp_conn_request(struct request_sock_ops *rsk_ops, 6968 const struct tcp_request_sock_ops *af_ops, 6969 struct sock *sk, struct sk_buff *skb) 6970 { 6971 struct tcp_fastopen_cookie foc = { .len = -1 }; 6972 __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn; 6973 struct tcp_options_received tmp_opt; 6974 struct tcp_sock *tp = tcp_sk(sk); 6975 struct net *net = sock_net(sk); 6976 struct sock *fastopen_sk = NULL; 6977 struct request_sock *req; 6978 bool want_cookie = false; 6979 struct dst_entry *dst; 6980 struct flowi fl; 6981 u8 syncookies; 6982 6983 syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies); 6984 6985 /* TW buckets are converted to open requests without 6986 * limitations, they conserve resources and peer is 6987 * evidently real one. 6988 */ 6989 if ((syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) { 6990 want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name); 6991 if (!want_cookie) 6992 goto drop; 6993 } 6994 6995 if (sk_acceptq_is_full(sk)) { 6996 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 6997 goto drop; 6998 } 6999 7000 req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie); 7001 if (!req) 7002 goto drop; 7003 7004 req->syncookie = want_cookie; 7005 tcp_rsk(req)->af_specific = af_ops; 7006 tcp_rsk(req)->ts_off = 0; 7007 #if IS_ENABLED(CONFIG_MPTCP) 7008 tcp_rsk(req)->is_mptcp = 0; 7009 #endif 7010 7011 tcp_clear_options(&tmp_opt); 7012 tmp_opt.mss_clamp = af_ops->mss_clamp; 7013 tmp_opt.user_mss = tp->rx_opt.user_mss; 7014 tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, 7015 want_cookie ? NULL : &foc); 7016 7017 if (want_cookie && !tmp_opt.saw_tstamp) 7018 tcp_clear_options(&tmp_opt); 7019 7020 if (IS_ENABLED(CONFIG_SMC) && want_cookie) 7021 tmp_opt.smc_ok = 0; 7022 7023 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 7024 tcp_openreq_init(req, &tmp_opt, skb, sk); 7025 inet_rsk(req)->no_srccheck = inet_test_bit(TRANSPARENT, sk); 7026 7027 /* Note: tcp_v6_init_req() might override ir_iif for link locals */ 7028 inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb); 7029 7030 dst = af_ops->route_req(sk, skb, &fl, req); 7031 if (!dst) 7032 goto drop_and_free; 7033 7034 if (tmp_opt.tstamp_ok) 7035 tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb); 7036 7037 if (!want_cookie && !isn) { 7038 int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog); 7039 7040 /* Kill the following clause, if you dislike this way. */ 7041 if (!syncookies && 7042 (max_syn_backlog - inet_csk_reqsk_queue_len(sk) < 7043 (max_syn_backlog >> 2)) && 7044 !tcp_peer_is_proven(req, dst)) { 7045 /* Without syncookies last quarter of 7046 * backlog is filled with destinations, 7047 * proven to be alive. 7048 * It means that we continue to communicate 7049 * to destinations, already remembered 7050 * to the moment of synflood. 
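 * (Editorial note: a worked example: with sysctl_max_syn_backlog set to
 * 1024, max_syn_backlog >> 2 is 256, so SYNs from unproven peers are
 * dropped once fewer than 256 request slots remain free.)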
7051 */ 7052 pr_drop_req(req, ntohs(tcp_hdr(skb)->source), 7053 rsk_ops->family); 7054 goto drop_and_release; 7055 } 7056 7057 isn = af_ops->init_seq(skb); 7058 } 7059 7060 tcp_ecn_create_request(req, skb, sk, dst); 7061 7062 if (want_cookie) { 7063 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); 7064 if (!tmp_opt.tstamp_ok) 7065 inet_rsk(req)->ecn_ok = 0; 7066 } 7067 7068 tcp_rsk(req)->snt_isn = isn; 7069 tcp_rsk(req)->txhash = net_tx_rndhash(); 7070 tcp_rsk(req)->syn_tos = TCP_SKB_CB(skb)->ip_dsfield; 7071 tcp_openreq_init_rwin(req, sk, dst); 7072 sk_rx_queue_set(req_to_sk(req), skb); 7073 if (!want_cookie) { 7074 tcp_reqsk_record_syn(sk, req, skb); 7075 fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); 7076 } 7077 if (fastopen_sk) { 7078 af_ops->send_synack(fastopen_sk, dst, &fl, req, 7079 &foc, TCP_SYNACK_FASTOPEN, skb); 7080 /* Add the child socket directly into the accept queue */ 7081 if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) { 7082 reqsk_fastopen_remove(fastopen_sk, req, false); 7083 bh_unlock_sock(fastopen_sk); 7084 sock_put(fastopen_sk); 7085 goto drop_and_free; 7086 } 7087 sk->sk_data_ready(sk); 7088 bh_unlock_sock(fastopen_sk); 7089 sock_put(fastopen_sk); 7090 } else { 7091 tcp_rsk(req)->tfo_listener = false; 7092 if (!want_cookie) { 7093 req->timeout = tcp_timeout_init((struct sock *)req); 7094 inet_csk_reqsk_queue_hash_add(sk, req, req->timeout); 7095 } 7096 af_ops->send_synack(sk, dst, &fl, req, &foc, 7097 !want_cookie ? TCP_SYNACK_NORMAL : 7098 TCP_SYNACK_COOKIE, 7099 skb); 7100 if (want_cookie) { 7101 reqsk_free(req); 7102 return 0; 7103 } 7104 } 7105 reqsk_put(req); 7106 return 0; 7107 7108 drop_and_release: 7109 dst_release(dst); 7110 drop_and_free: 7111 __reqsk_free(req); 7112 drop: 7113 tcp_listendrop(sk); 7114 return 0; 7115 } 7116 EXPORT_SYMBOL(tcp_conn_request); 7117
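/* Editorial summary, not part of the original source: the SYN handling
 * policy implemented by tcp_conn_request() above, as a decision tree:
 *
 *	SYN arrives (isn == 0 unless recovered from TIME-WAIT)
 *	  1. request queue full or sysctl_tcp_syncookies == 2, and !isn:
 *	     tcp_syn_flood_action() decides; send a syncookie if enabled,
 *	     otherwise drop.
 *	  2. accept queue full: drop (LINUX_MIB_LISTENOVERFLOWS).
 *	  3. allocate the request sock, parse options, route the reply.
 *	  4. without syncookies, when less than a quarter of the SYN
 *	     backlog remains free: drop unless tcp_peer_is_proven().
 *	  5. send the SYN-ACK (normal, Fast Open or cookie flavour).
 */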