// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim:		ECN support
 *		Andrei Gurtov,
 *		Pasi Sarolahti,
 *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *					engine. Lots of bugs are found.
 *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <linux/errqueue.h>
#include <trace/events/tcp.h>
#include <linux/static_key.h>

int sysctl_tcp_max_orphans __read_mostly = NR_FILE;

#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
#define FLAG_ECE		0x40 /* ECE in this ACK				*/
#define FLAG_LOST_RETRANS	0x80 /* This ACK marks some retransmission lost */
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update. */
#define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked	*/
#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SET_XMIT_TIMER	0x1000 /* Set TLP or RTO timer */
#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
#define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack()	*/

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))

#define REXMIT_NONE	0 /* no loss recovery to do */
#define REXMIT_LOST	1 /* retransmit packets marked lost */
#define REXMIT_NEW	2 /* FRTO-style transmit of unsent/new packets */

static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
			     unsigned int len)
{
	static bool __once __read_mostly;

	if (!__once) {
		struct net_device *dev;

		__once = true;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
		if (!dev || len >= dev->mtu)
			pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
				dev ? dev->name : "Unknown driver");
		rcu_read_unlock();
	}
}

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ? : skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
					       tcp_sk(sk)->advmss);
		/* Account for possibly-removed options */
		if (unlikely(len > icsk->icsk_ack.rcv_mss +
				   MAX_TCP_OPTION_SPACE))
			tcp_gro_dev_warn(sk, skb, len);
	} else {
		/* Otherwise, we make a more careful check, taking into
		 * account that SACK blocks are variable in size.
		 *
		 * "len" is invariant segment length, including TCP header.
		 */
		len += skb->data - skb_transport_header(skb);
		if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
		    /* If PSH is not set, packet should be
		     * full sized, provided peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows us
		     * to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
			/* Subtract also invariant (if peer is RFC compliant),
			 * tcp header plus fixed timestamp option length.
			 * Resulting "len" is MSS free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}

static void tcp_incr_quickack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

static void tcp_enter_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_incr_quickack(sk);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */
static bool tcp_in_quickack_mode(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);

	return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
		(icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
}

static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr)
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
	case INET_ECN_NOT_ECT:
		/* Funny extension: if ECT is not set on a segment,
		 * and we have already seen ECT on a previous segment,
		 * it is probably a retransmit.
		 */
		if (tp->ecn_flags & TCP_ECN_SEEN)
			tcp_enter_quickack_mode((struct sock *)tp);
		break;
	case INET_ECN_CE:
		if (tcp_ca_needs_ecn((struct sock *)tp))
			tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE);

		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
			/* Better not delay acks, sender can have a very low cwnd */
			tcp_enter_quickack_mode((struct sock *)tp);
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		}
		tp->ecn_flags |= TCP_ECN_SEEN;
		break;
	default:
		if (tcp_ca_needs_ecn((struct sock *)tp))
			tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE);
		tp->ecn_flags |= TCP_ECN_SEEN;
		break;
	}
}

static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		__tcp_ecn_check_ce(tp, skb);
}

static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return true;
	return false;
}
/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */
static void tcp_sndbuf_expand(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	int sndmem, per_mss;
	u32 nr_segs;

	/* Worst case is non GSO/TSO : each frame consumes one skb
	 * and skb->head is kmalloced using power of two area of memory
	 */
	per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
		  MAX_TCP_HEADER +
		  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	per_mss = roundup_pow_of_two(per_mss) +
		  SKB_DATA_ALIGN(sizeof(struct sk_buff));

	nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
	nr_segs = max_t(u32, nr_segs, tp->reordering + 1);

	/* Fast Recovery (RFC 5681 3.2) :
	 * Cubic needs 1.7 factor, rounded to 2 to include
	 * extra cushion (application might react slowly to POLLOUT)
	 */
	sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
	sndmem *= nr_segs * per_mss;

	if (sk->sk_sndbuf < sndmem)
		sk->sk_sndbuf = min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]);
}
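/* A rough worked example of the sizing above (illustrative numbers only):
 * with mss_cache = 1460, adding MAX_TCP_HEADER and the skb_shared_info
 * overhead lands per_mss near the 2 KB boundary, so roundup_pow_of_two()
 * yields 2048 or 4096 depending on the config; the sk_buff overhead is
 * then added on top. With snd_cwnd = TCP_INIT_CWND (10) and the default
 * factor of 2, sndmem comes out to a few tens of kilobytes before being
 * clamped by tcp_wmem[2]. Congestion control modules may supply a larger
 * factor via ca_ops->sndbuf_expand (BBR, for instance, does).
 */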
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All tcp_full_space() is split to two parts: "network" buffer, allocated
 * forward and advertised in receiver window (tp->rcv_wnd) and
 * "application buffer", required to isolate scheduling/application
 * latencies from network.
 * window_clamp is maximal advertised window. It can be less than
 * tcp_full_space(), in this case tcp_full_space() - window_clamp
 * is reserved for "application" buffer. The smaller window_clamp is,
 * the smoother our behaviour from viewpoint of network, but the lower
 * throughput and the higher sensitivity of the connection to losses. 8)
 *
 * rcv_ssthresh is more strict window_clamp used at "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at sender, even when application
 *   requires some significant "application buffer". It is check #1.
 * - to prevent pruning of receive queue because of misprediction
 *   of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Optimize this! */
	int truesize = tcp_win_from_space(sk, skb->truesize) >> 1;
	int window = tcp_win_from_space(sk, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !tcp_under_memory_pressure(sk)) {
		int incr;

		/* Check #2. Increase window, if skb with such overhead
		 * will fit to rcvbuf in future.
		 */
		if (tcp_win_from_space(sk, skb->truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb);

		if (incr) {
			incr = max_t(int, incr, 2 * skb->len);
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when connection enters established state. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
	u32 mss = tcp_sk(sk)->advmss;
	int rcvmem;

	rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
		 tcp_default_init_rwnd(mss);

	/* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency
	 * Allow enough cushion so that sender is not limited by our window
	 */
	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)
		rcvmem <<= 2;

	if (sk->sk_rcvbuf < rcvmem)
		sk->sk_rcvbuf = min(rcvmem, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
}

/* 4. Try to fixup all. It is made immediately after connection enters
 *    established state.
 */
void tcp_init_buffer_space(struct sock *sk)
{
	int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_sndbuf_expand(sk);

	tp->rcvq_space.space = tp->rcv_wnd;
	tcp_mstamp_refresh(tp);
	tp->rcvq_space.time = tp->tcp_mstamp;
	tp->rcvq_space.seq = tp->copied_seq;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_jiffies32;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct net *net = sock_net(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !tcp_under_memory_pressure(sk) &&
	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    net->ipv4.sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}
/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We have no direct information about it.
 * It's better to underestimate the RCV_MSS than to overestimate it.
 * Overestimating makes us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd / 2);
	hint = min(hint, TCP_MSS_DEFAULT);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
EXPORT_SYMBOL(tcp_initialize_rcv_mss);

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://public.lanl.gov/radiant/pubs.html#DRS>
 *
 * More detail on this code can be found at
 * <http://staff.psc.edu/jheffner/>,
 * though this reference is out of date. A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
	u32 new_sample = tp->rcv_rtt_est.rtt_us;
	long m = sample;

	if (m == 0)
		m = 1;

	if (new_sample != 0) {
		/* If we collect larger samples in the non-timestamp
		 * case, we could grossly overestimate the RTT especially
		 * with chatty applications or bulk transfer apps which
		 * are stalled on filesystem I/O.
		 *
		 * Also, since we are only going for a minimum in the
		 * non-timestamp case, we do not smooth things out
		 * else with timestamps disabled convergence takes too
		 * long.
		 */
		if (!win_dep) {
			m -= (new_sample >> 3);
			new_sample += m;
		} else {
			m <<= 3;
			if (m < new_sample)
				new_sample = m;
		}
	} else {
		/* No previous measure. */
		new_sample = m << 3;
	}

	tp->rcv_rtt_est.rtt_us = new_sample;
}

static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
	u32 delta_us;

	if (tp->rcv_rtt_est.time == 0)
		goto new_measure;
	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
		return;
	delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
	tcp_rcv_rtt_update(tp, delta_us, 1);

new_measure:
	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
	tp->rcv_rtt_est.time = tp->tcp_mstamp;
}

static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
					  const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->rx_opt.rcv_tsecr &&
	    (TCP_SKB_CB(skb)->end_seq -
	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
		u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
		u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);

		tcp_rcv_rtt_update(tp, delta_us, 0);
	}
}
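/* A short worked example of the fixed point above (illustrative numbers):
 * rcv_rtt_est.rtt_us stores the RTT left-shifted by 3, i.e. 8 * RTT.
 * In the windowed (win_dep) case the filter is a running minimum: a 40 ms
 * sample arriving while new_sample encodes 50 ms (400000) replaces it with
 * 40000 << 3 = 320000. In the timestamp (!win_dep) case it is the usual
 * EWMA, new = old + (m - old/8), equivalent to new/8 = 7/8 * old/8 + m/8,
 * so the same 40 ms sample against a 50 ms estimate moves it to 48.75 ms.
 */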
/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time;
	int copied;

	tcp_mstamp_refresh(tp);
	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
	if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
		return;

	/* Number of bytes copied to user in last RTT */
	copied = tp->copied_seq - tp->rcvq_space.seq;
	if (copied <= tp->rcvq_space.space)
		goto new_measure;

	/* A bit of theory :
	 * copied = bytes received in previous RTT, our base window
	 * To cope with packet losses, we need a 2x factor
	 * To cope with slow start, and sender growing its cwin by 100 %
	 * every RTT, we need a 4x factor, because the ACK we are sending
	 * now is for the next RTT, not the current one :
	 * <prev RTT . ><current RTT .. ><next RTT .... >
	 */

	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvwin, rcvmem, rcvbuf;

		/* minimal window to cope with packet losses, assuming
		 * steady state. Add some cushion because of small variations.
		 */
		rcvwin = (copied << 1) + 16 * tp->advmss;

		/* If rate increased by 25 %,
		 *	assume slow start, rcvwin = 3 * copied
		 * If rate increased by 50 %,
		 *	assume sender can use 2x growth, rcvwin = 4 * copied
		 */
		if (copied >=
		    tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
			if (copied >=
			    tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
				rcvwin <<= 1;
			else
				rcvwin += (rcvwin >> 1);
		}

		rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
		while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
			rcvmem += 128;

		rcvbuf = min(rcvwin / tp->advmss * rcvmem,
			     sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
		if (rcvbuf > sk->sk_rcvbuf) {
			sk->sk_rcvbuf = rcvbuf;

			/* Make the window clamp follow along. */
			tp->window_clamp = rcvwin;
		}
	}
	tp->rcvq_space.space = copied;

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tp->tcp_mstamp;
}
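/* Illustrative arithmetic for the sizing above (example numbers only):
 * with advmss = 1448 and copied = 1 MB in the last RTT, the base is
 * rcvwin = 2 MB + 16 * 1448 ~= 2.02 MB. If copied grew by 50 % or more
 * versus the previous measurement, rcvwin doubles to ~4 MB. rcvmem then
 * converts the window from payload bytes to truesize bytes, one
 * SKB_TRUESIZE(advmss + MAX_TCP_HEADER) per advmss of window, so the
 * requested sk_rcvbuf is larger than rcvwin itself before being clamped
 * by tcp_rmem[2].
 */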
/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval. When a
 * connection starts up, we want to ack as quickly as possible. The
 * problem is that "good" TCP's do slow start at the beginning of data
 * transmission. This means that until we send the first few ACKs the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time. For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue. -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_jiffies32;

	if (!icsk->icsk_ack.ato) {
		/* The _first_ data packet received, initialize
		 * delayed ACK engine.
		 */
		tcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long gap. Apparently sender failed to
			 * restart window, so that we send ACKs quickly.
			 */
			tcp_incr_quickack(sk);
			sk_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	tcp_ecn_check_ce(tp, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, skb);
}

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt_us;	/* RTT */
	u32 srtt = tp->srtt_us;

	/* The following amusing code comes from Jacobson's
	 * article in SIGCOMM '88. Note that rtt and mdev
	 * are scaled versions of rtt and mean deviation.
	 * This is designed to be as fast as possible
	 * m stands for "measurement".
	 *
	 * In a 1990 paper the rto value was changed to:
	 * RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO, when it should be decreased, increase
	 * too slowly, when it should be increased quickly, decrease too quickly
	 * etc. I guess in BSD RTO takes ONE value, so that it absolutely
	 * does not matter how to _calculate_ it. It seems it was a trap
	 * that VJ failed to avoid. 8)
	 */
	if (srtt != 0) {
		m -= (srtt >> 3);	/* m is now error in rtt est */
		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
			/* This is similar to one of Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but also it limits too fast rto decreases,
			 * happening in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
		}
		tp->mdev_us += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev_us > tp->mdev_max_us) {
			tp->mdev_max_us = tp->mdev_us;
			if (tp->mdev_max_us > tp->rttvar_us)
				tp->rttvar_us = tp->mdev_max_us;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max_us < tp->rttvar_us)
				tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max_us = tcp_rto_min_us(sk);
		}
	} else {
		/* no previous measure. */
		srtt = m << 3;		/* take the measured time to be rtt */
		tp->mdev_us = m << 1;	/* make sure rto = 3*rtt */
		tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
		tp->mdev_max_us = tp->rttvar_us;
		tp->rtt_seq = tp->snd_nxt;
	}
	tp->srtt_us = max(1U, srtt);
}
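/* Worked example of the scaling above (illustrative numbers): srtt_us
 * stores 8 * srtt and mdev_us stores 4 * mdev. With srtt = 100 ms
 * (srtt_us = 800000) and a new sample m = 120 ms, m -= srtt >> 3 leaves
 * +20000, so srtt_us becomes 820000, i.e. srtt = 102.5 ms, the familiar
 * 7/8 * 100 + 1/8 * 120. The 20 ms error then feeds mdev the same way at
 * gain 1/4. The RTO later derives from srtt_us/8 + rttvar_us, matching
 * the 1990 "rtt + 4 * mdev" rule since rttvar tracks 4 * mdev.
 */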
static void tcp_update_pacing_rate(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u64 rate;

	/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
	rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);

	/* current rate is (cwnd * mss) / srtt
	 * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.
	 * In Congestion Avoidance phase, set it to 120 % the current rate.
	 *
	 * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
	 *	 If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
	 *	 end of slow start and should slow down.
	 */
	if (tp->snd_cwnd < tp->snd_ssthresh / 2)
		rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
	else
		rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;

	rate *= max(tp->snd_cwnd, tp->packets_out);

	if (likely(tp->srtt_us))
		do_div(rate, tp->srtt_us);

	/* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate
	 * without any lock. We want to make sure compiler wont store
	 * intermediate values in this location.
	 */
	WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,
					     sk->sk_max_pacing_rate));
}
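/* The pacing arithmetic, spelled out with example numbers: the constant
 * (USEC_PER_SEC / 100) << 3 folds together the percent scaling of the
 * ss/ca ratio sysctls and the 8x fixed point of srtt_us. With mss = 1448,
 * cwnd = 100 and srtt = 10 ms, the raw rate is 1448 * 100 / 0.01 s, about
 * 14.5 MB/s; in slow start the default pacing_ss_ratio of 200 doubles it,
 * while in congestion avoidance the default pacing_ca_ratio of 120 yields
 * roughly 17.4 MB/s.
 */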
/* Calculate rto without backoff. This is the second half of Van Jacobson's
 * routine referred to above.
 */
static void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less than 50 msec, it is hallucination.
	 *    It cannot be less due to utterly erratic ACK generation made
	 *    at least by solaris and freebsd. "Erratic ACKs" have _nothing_
	 *    to do with delayed acks, because at cwnd>2 true delack timeout
	 *    is invisible. Actually, Linux-2.4 also generates erratic
	 *    ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    all the algo is pure shit and should be replaced
	 *    with correct one. That is exactly what we pretend to do.
	 */

	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
	 * guarantees that rto is higher.
	 */
	tcp_bound_rto(sk);
}

__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd)
		cwnd = TCP_INIT_CWND;
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

/* Take notice that the peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
	tp->rack.dsack_seen = 1;
}

/* It's reordering when higher sequence was delivered (i.e. sacked) before
 * some lower never-retransmitted sequence ("low_seq"). The maximum reordering
 * distance is approximated in full-mss packet distance ("reordering").
 */
static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
				      const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const u32 mss = tp->mss_cache;
	u32 fack, metric;

	fack = tcp_highest_sack_seq(tp);
	if (!before(low_seq, fack))
		return;

	metric = fack - low_seq;
	if ((metric > tp->reordering * mss) && mss) {
#if FASTRETRANS_DEBUG > 1
		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
			 tp->reordering,
			 0,
			 tp->sacked_out,
			 tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tp->reordering = min_t(u32, (metric + mss - 1) / mss,
				       sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
	}

	tp->rack.reord = 1;
	/* This exciting event is worth remembering. 8) */
	NET_INC_STATS(sock_net(sk),
		      ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
}

/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!tp->retransmit_skb_hint ||
	    before(TCP_SKB_CB(skb)->seq,
		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
		tp->retransmit_skb_hint = skb;
}

/* Sum the number of packets on the wire we have marked as lost.
 * There are two cases we care about here:
 * a) Packet hasn't been marked lost (nor retransmitted),
 *    and this is the first loss.
 * b) Packet has been marked both lost and retransmitted,
 *    and this means we think it was lost again.
 */
static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	__u8 sacked = TCP_SKB_CB(skb)->sacked;

	if (!(sacked & TCPCB_LOST) ||
	    ((sacked & TCPCB_LOST) && (sacked & TCPCB_SACKED_RETRANS)))
		tp->lost += tcp_skb_pcount(skb);
}

static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tcp_verify_retransmit_hint(tp, skb);

		tp->lost_out += tcp_skb_pcount(skb);
		tcp_sum_lost(tp, skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
{
	tcp_verify_retransmit_hint(tp, skb);

	tcp_sum_lost(tp, skb);
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag	InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R	1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of two flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	B. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * Reordering metric is maximal distance, which a packet can be displaced
 * in packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering..
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits to
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included to the range though being valid because
 * it means that the receiver is rather inconsistent with itself reporting
 * SACK reneging when it should advance SND.UNA. Such a SACK block is
 * otherwise perfectly valid, however, in light of RFC 2018 which explicitly
 * states that "SACK block MUST reflect the newest segment. Even if the newest
 * segment is going to be discarded ...", not that it looks very clever
 * in case of head skb. Due to potential receiver-driven attacks, we
 * choose to avoid immediate execution of a walk in write queue due to
 * reneging and defer head skb's loss recovery to standard loss recovery
 * procedure that will eventually trigger (nothing forbids us from doing this).
 *
 * This also implements a safeguard against start_seq wrap-around. The problem
 * lies in the fact that though start_seq (s) is before end_seq (i.e., not
 * reversed), there's no guarantee that it will be before snd_nxt (n). The
 * problem happens when start_seq resides between end_seq wrap (e_w) and
 * snd_nxt wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable but it's still better to discard such
 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
 * equal to the ideal case (infinite seqno space without wrap caused issues).
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, D-SACK block must not cross snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But there all simplicity
 * ends, TCP might receive valid D-SACKs below that. As long as they reside
 * fully below undo_marker they do not affect behavior in any way and can
 * therefore be safely ignored. In rare cases (which are more or less
 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
 * fragmentation and packet reordering past skb's retransmission. To consider
 * them correctly, the acceptable range must be extended even further, though
 * the exact amount is rather hard to quantify. However, tp->max_window can
 * be used as an exaggerated estimate.
 */
static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
				   u32 start_seq, u32 end_seq)
{
	/* Too far in future, or reversed (interpretation is ambiguous) */
	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
		return false;

	/* Nasty start_seq wrap-around check (see comments above) */
	if (!before(start_seq, tp->snd_nxt))
		return false;

	/* In outstanding window? ...This is valid exit for D-SACKs too.
	 * start_seq == snd_una is non-sensical (see comments above)
	 */
	if (after(start_seq, tp->snd_una))
		return true;

	if (!is_dsack || !tp->undo_marker)
		return false;

	/* ...Then it's D-SACK, and must reside below snd_una completely */
	if (after(end_seq, tp->snd_una))
		return false;

	if (!before(start_seq, tp->undo_marker))
		return true;

	/* Too old */
	if (!after(end_seq, tp->undo_marker))
		return false;

	/* Undo_marker boundary crossing (overestimates a lot). Known already:
	 *   start_seq < undo_marker and end_seq >= undo_marker.
	 */
	return !before(start_seq, end_seq - tp->max_window);
}
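/* The before()/after() helpers compare 32-bit sequence numbers modulo 2^32
 * via signed subtraction, so the checks above survive wrap-around. A short
 * illustrative case: with snd_una = 0xfffff000 and snd_nxt = 0x00000800
 * (the window spans the wrap), a SACK block [0x00000100, 0x00000400)
 * still validates, because after(0x400, 0x800) is false
 * ((s32)(0x400 - 0x800) < 0) while after(0x100, 0xfffff000) is true
 * ((s32)(0x100 - 0xfffff000) > 0).
 */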
static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
			    struct tcp_sack_block_wire *sp, int num_sacks,
			    u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
	bool dup_sack = false;

	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
		dup_sack = true;
		tcp_dsack_seen(tp);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
	} else if (num_sacks > 1) {
		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);

		if (!after(end_seq_0, end_seq_1) &&
		    !before(start_seq_0, start_seq_1)) {
			dup_sack = true;
			tcp_dsack_seen(tp);
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPDSACKOFORECV);
		}
	}

	/* D-SACK for already forgotten data... Do dumb counting. */
	if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
	    !after(end_seq_0, prior_snd_una) &&
	    after(end_seq_0, tp->undo_marker))
		tp->undo_retrans--;

	return dup_sack;
}

struct tcp_sacktag_state {
	u32	reord;
	/* Timestamps for earliest and latest never-retransmitted segment
	 * that was SACKed. RTO needs the earliest RTT to stay conservative,
	 * but congestion control should still get an accurate delay signal.
	 */
	u64	first_sackt;
	u64	last_sackt;
	struct rate_sample *rate;
	int	flag;
	unsigned int mss_now;
};

/* Check if skb is fully within the SACK block. In presence of GSO skbs,
 * the incoming SACK may not exactly match but we can find smaller MSS
 * aligned portion of it that matches. Therefore we might need to fragment,
 * which may fail and create some hassle (the caller must handle error
 * returns).
 *
 * FIXME: this could be merged to shift decision code
 */
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
				 u32 start_seq, u32 end_seq)
{
	int err;
	bool in_sack;
	unsigned int pkt_len;
	unsigned int mss;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
		mss = tcp_skb_mss(skb);
		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);

		if (!in_sack) {
			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				pkt_len = mss;
		} else {
			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				return -EINVAL;
		}

		/* Round if necessary so that SACKs cover only full MSSes
		 * and/or the remaining small portion (if present)
		 */
		if (pkt_len > mss) {
			unsigned int new_len = (pkt_len / mss) * mss;

			if (!in_sack && new_len < pkt_len)
				new_len += mss;
			pkt_len = new_len;
		}

		if (pkt_len >= skb->len && !in_sack)
			return 0;

		err = tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				   pkt_len, mss, GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	return in_sack;
}
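/* Rounding example for the fragmentation above (illustrative numbers):
 * take a GSO skb covering [1000, 6792) with mss = 1448 (four segments)
 * and a SACK block starting at start_seq = 3000. The unSACKed head is
 * pkt_len = 3000 - 1000 = 2000 bytes; rounding up to an MSS multiple
 * gives (2000 / 1448) * 1448 + 1448 = 2896, so the skb is split after
 * exactly two full segments and tagging proceeds from there.
 */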
/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
static u8 tcp_sacktag_one(struct sock *sk,
			  struct tcp_sacktag_state *state, u8 sacked,
			  u32 start_seq, u32 end_seq,
			  int dup_sack, int pcount,
			  u64 xmit_time)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Account D-SACK for retransmitted packet. */
	if (dup_sack && (sacked & TCPCB_RETRANS)) {
		if (tp->undo_marker && tp->undo_retrans > 0 &&
		    after(end_seq, tp->undo_marker))
			tp->undo_retrans--;
		if ((sacked & TCPCB_SACKED_ACKED) &&
		    before(start_seq, state->reord))
			state->reord = start_seq;
	}

	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
	if (!after(end_seq, tp->snd_una))
		return sacked;

	if (!(sacked & TCPCB_SACKED_ACKED)) {
		tcp_rack_advance(tp, sacked, end_seq, xmit_time);

		if (sacked & TCPCB_SACKED_RETRANS) {
			/* If the segment is not tagged as lost,
			 * we do not clear RETRANS, believing
			 * that retransmission is still in flight.
			 */
			if (sacked & TCPCB_LOST) {
				sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
				tp->lost_out -= pcount;
				tp->retrans_out -= pcount;
			}
		} else {
			if (!(sacked & TCPCB_RETRANS)) {
				/* New sack for not retransmitted frame,
				 * which was in hole. It is reordering.
				 */
				if (before(start_seq,
					   tcp_highest_sack_seq(tp)) &&
				    before(start_seq, state->reord))
					state->reord = start_seq;

				if (!after(end_seq, tp->high_seq))
					state->flag |= FLAG_ORIG_SACK_ACKED;
				if (state->first_sackt == 0)
					state->first_sackt = xmit_time;
				state->last_sackt = xmit_time;
			}

			if (sacked & TCPCB_LOST) {
				sacked &= ~TCPCB_LOST;
				tp->lost_out -= pcount;
			}
		}

		sacked |= TCPCB_SACKED_ACKED;
		state->flag |= FLAG_DATA_SACKED;
		tp->sacked_out += pcount;
		tp->delivered += pcount;  /* Out-of-order packets delivered */

		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
		if (tp->lost_skb_hint &&
		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
			tp->lost_cnt_hint += pcount;
	}

	/* D-SACK. We can detect redundant retransmission in S|R and plain R
	 * frames and clear it. undo_retrans is decreased above, L|R frames
	 * are accounted above as well.
	 */
	if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
		sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= pcount;
	}

	return sacked;
}
/* Shift newly-SACKed bytes from this skb to the immediately previous
 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
 */
static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
			    struct sk_buff *skb,
			    struct tcp_sacktag_state *state,
			    unsigned int pcount, int shifted, int mss,
			    bool dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */

	BUG_ON(!pcount);

	/* Adjust counters and hints for the newly sacked sequence
	 * range but discard the return value since prev is already
	 * marked. We must tag the range first because the seq
	 * advancement below implicitly advances
	 * tcp_highest_sack_seq() when skb is highest_sack.
	 */
	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
			start_seq, end_seq, dup_sack, pcount,
			skb->skb_mstamp);
	tcp_rate_skb_delivered(sk, skb, state->rate);

	if (skb == tp->lost_skb_hint)
		tp->lost_cnt_hint += pcount;

	TCP_SKB_CB(prev)->end_seq += shifted;
	TCP_SKB_CB(skb)->seq += shifted;

	tcp_skb_pcount_add(prev, pcount);
	BUG_ON(tcp_skb_pcount(skb) < pcount);
	tcp_skb_pcount_add(skb, -pcount);

	/* When we're adding to gso_segs == 1, gso_size will be zero,
	 * in theory this shouldn't be necessary but as long as DSACK
	 * code can come after this skb later on it's better to keep
	 * setting gso_size to something.
	 */
	if (!TCP_SKB_CB(prev)->tcp_gso_size)
		TCP_SKB_CB(prev)->tcp_gso_size = mss;

	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
	if (tcp_skb_pcount(skb) <= 1)
		TCP_SKB_CB(skb)->tcp_gso_size = 0;

	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);

	if (skb->len > 0) {
		BUG_ON(!tcp_skb_pcount(skb));
		NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
		return false;
	}

	/* Whole SKB was eaten :-) */

	if (skb == tp->retransmit_skb_hint)
		tp->retransmit_skb_hint = prev;
	if (skb == tp->lost_skb_hint) {
		tp->lost_skb_hint = prev;
		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
	}

	TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		TCP_SKB_CB(prev)->end_seq++;

	if (skb == tcp_highest_sack(sk))
		tcp_advance_highest_sack(sk, skb);

	tcp_skb_collapse_tstamp(prev, skb);
	if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
		TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;

	tcp_rtx_queue_unlink_and_free(skb, sk);

	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);

	return true;
}

/* I wish gso_size would have a bit more sane initialization than
 * something-or-zero which complicates things
 */
static int tcp_skb_seglen(const struct sk_buff *skb)
{
	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}

/* Shifting pages past head area doesn't work */
static int skb_can_shift(const struct sk_buff *skb)
{
	return !skb_headlen(skb) && skb_is_nonlinear(skb);
}

/* Try collapsing SACK blocks spanning across multiple skbs to a single
 * skb.
 */
static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
					  struct tcp_sacktag_state *state,
					  u32 start_seq, u32 end_seq,
					  bool dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev;
	int mss;
	int pcount = 0;
	int len;
	int in_sack;

	if (!sk_can_gso(sk))
		goto fallback;

	/* Normally R but no L won't result in plain S */
	if (!dup_sack &&
	    (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
		goto fallback;
	if (!skb_can_shift(skb))
		goto fallback;
	/* This frame is about to be dropped (was ACKed). */
	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
		goto fallback;
	/* Can only happen with delayed DSACK + discard craziness */
	prev = skb_rb_prev(skb);
	if (!prev)
		goto fallback;

	if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
		goto fallback;

	if (!tcp_skb_can_collapse_to(prev))
		goto fallback;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (in_sack) {
		len = skb->len;
		pcount = tcp_skb_pcount(skb);
		mss = tcp_skb_seglen(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;
	} else {
		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
			goto noop;
		/* CHECKME: This is non-MSS split case only?, this will
		 * cause skipped skbs due to advancing loop btw, original
		 * has that feature too
		 */
		if (tcp_skb_pcount(skb) <= 1)
			goto noop;

		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
		if (!in_sack) {
			/* TODO: head merge to next could be attempted here
			 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
			 * though it might not be worth the additional hassle
			 *
			 * ...we can probably just fallback to what was done
			 * previously. We could try merging non-SACKed ones
			 * as well but it probably isn't going to pay off
			 * because later SACKs might again split them, and
			 * it would make skb timestamp tracking considerably
			 * harder problem.
			 */
			goto fallback;
		}

		len = end_seq - TCP_SKB_CB(skb)->seq;
		BUG_ON(len < 0);
		BUG_ON(len > skb->len);

		/* MSS boundaries should be honoured or else pcount will
		 * severely break even though it makes things a bit trickier.
		 * Optimize common case to avoid most of the divides
		 */
		mss = tcp_skb_mss(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;

		if (len == mss) {
			pcount = 1;
		} else if (len < mss) {
			goto noop;
		} else {
			pcount = len / mss;
			len = pcount * mss;
		}
	}

	/* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
		goto fallback;

	if (!skb_shift(prev, skb, len))
		goto fallback;
	if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
		goto out;

	/* Hole filled allows collapsing with the next as well, this is very
	 * useful when a hole-on-every-nth-skb pattern occurs
	 */
	skb = skb_rb_next(prev);
	if (!skb)
		goto out;

	if (!skb_can_shift(skb) ||
	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
	    (mss != tcp_skb_seglen(skb)))
		goto out;

	len = skb->len;
	if (skb_shift(prev, skb, len)) {
		pcount += tcp_skb_pcount(skb);
		tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb),
				len, mss, 0);
	}

out:
	return prev;

noop:
	return skb;

fallback:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
	return NULL;
}
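/* Shifting math, with illustrative numbers: suppose a SACK block ends at
 * end_seq = seq + 3000 inside a GSO skb with mss = 1448. Then len = 3000,
 * pcount = 3000 / 1448 = 2 and len is truncated to 2 * 1448 = 2896, so
 * exactly two full segments move to the previous already-SACKed skb and
 * the 104-byte remainder stays behind, keeping tcp_skb_pcount() consistent
 * with MSS-sized segments on both skbs.
 */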
static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
					struct tcp_sack_block *next_dup,
					struct tcp_sacktag_state *state,
					u32 start_seq, u32 end_seq,
					bool dup_sack_in)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *tmp;

	skb_rbtree_walk_from(skb) {
		int in_sack = 0;
		bool dup_sack = dup_sack_in;

		/* queue is in-order => we can short-circuit the walk early */
		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
			break;

		if (next_dup &&
		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
			in_sack = tcp_match_skb_to_sack(sk, skb,
							next_dup->start_seq,
							next_dup->end_seq);
			if (in_sack > 0)
				dup_sack = true;
		}

		/* skb reference here is a bit tricky to get right, since
		 * shifting can eat and free both this skb and the next,
		 * so not even _safe variant of the loop is enough.
		 */
		if (in_sack <= 0) {
			tmp = tcp_shift_skb_data(sk, skb, state,
						 start_seq, end_seq, dup_sack);
			if (tmp) {
				if (tmp != skb) {
					skb = tmp;
					continue;
				}

				in_sack = 0;
			} else {
				in_sack = tcp_match_skb_to_sack(sk, skb,
								start_seq,
								end_seq);
			}
		}

		if (unlikely(in_sack < 0))
			break;

		if (in_sack) {
			TCP_SKB_CB(skb)->sacked =
				tcp_sacktag_one(sk,
						state,
						TCP_SKB_CB(skb)->sacked,
						TCP_SKB_CB(skb)->seq,
						TCP_SKB_CB(skb)->end_seq,
						dup_sack,
						tcp_skb_pcount(skb),
						skb->skb_mstamp);
			tcp_rate_skb_delivered(sk, skb, state->rate);
			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
				list_del_init(&skb->tcp_tsorted_anchor);

			if (!before(TCP_SKB_CB(skb)->seq,
				    tcp_highest_sack_seq(tp)))
				tcp_advance_highest_sack(sk, skb);
		}
	}
	return skb;
}

static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk,
					   struct tcp_sacktag_state *state,
					   u32 seq)
{
	struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node;
	struct sk_buff *skb;

	while (*p) {
		parent = *p;
		skb = rb_to_skb(parent);
		if (before(seq, TCP_SKB_CB(skb)->seq)) {
			p = &parent->rb_left;
			continue;
		}
		if (!before(seq, TCP_SKB_CB(skb)->end_seq)) {
			p = &parent->rb_right;
			continue;
		}
		return skb;
	}
	return NULL;
}

static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
					struct tcp_sacktag_state *state,
					u32 skip_to_seq)
{
	if (skb && after(TCP_SKB_CB(skb)->seq, skip_to_seq))
		return skb;

	return tcp_sacktag_bsearch(sk, state, skip_to_seq);
}

static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
						struct sock *sk,
						struct tcp_sack_block *next_dup,
						struct tcp_sacktag_state *state,
						u32 skip_to_seq)
{
	if (!next_dup)
		return skb;

	if (before(next_dup->start_seq, skip_to_seq)) {
		skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
		skb = tcp_sacktag_walk(skb, sk, NULL, state,
				       next_dup->start_seq, next_dup->end_seq,
				       1);
	}

	return skb;
}

static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
{
	return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
}

static int
tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
			u32 prior_snd_una, struct tcp_sacktag_state *state)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const unsigned char *ptr = (skb_transport_header(ack_skb) +
				    TCP_SKB_CB(ack_skb)->sacked);
	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
	struct tcp_sack_block sp[TCP_NUM_SACKS];
	struct tcp_sack_block *cache;
	struct sk_buff *skb;
	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
	int used_sacks;
	bool found_dup_sack = false;
	int i, j;
	int first_sack_index;

	state->flag = 0;
	state->reord = tp->snd_nxt;

	if (!tp->sacked_out)
		tcp_highest_sack_reset(sk);

	found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
					 num_sacks, prior_snd_una);
	if (found_dup_sack) {
		state->flag |= FLAG_DSACKING_ACK;
		tp->delivered++; /* A spurious retransmission is delivered */
	}

	/* Eliminate too old ACKs, but take into
	 * account more or less fresh ones, they can
	 * contain valid SACK info.
	 */
	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
		return 0;

	if (!tp->packets_out)
		goto out;

	used_sacks = 0;
	first_sack_index = 0;
	for (i = 0; i < num_sacks; i++) {
		bool dup_sack = !i && found_dup_sack;

		sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
		sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);

		if (!tcp_is_sackblock_valid(tp, dup_sack,
					    sp[used_sacks].start_seq,
					    sp[used_sacks].end_seq)) {
			int mib_idx;

			if (dup_sack) {
				if (!tp->undo_marker)
					mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
				else
					mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
			} else {
				/* Don't count olds caused by ACK reordering */
				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
				    !after(sp[used_sacks].end_seq, tp->snd_una))
					continue;
				mib_idx = LINUX_MIB_TCPSACKDISCARD;
			}

			NET_INC_STATS(sock_net(sk), mib_idx);
			if (i == 0)
				first_sack_index = -1;
			continue;
		}

		/* Ignore very old stuff early */
		if (!after(sp[used_sacks].end_seq, prior_snd_una))
			continue;

		used_sacks++;
	}

	/* order SACK blocks to allow in order walk of the retrans queue */
	for (i = used_sacks - 1; i > 0; i--) {
		for (j = 0; j < i; j++) {
			if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
				swap(sp[j], sp[j + 1]);

				/* Track where the first SACK block goes to */
				if (j == first_sack_index)
					first_sack_index = j + 1;
			}
		}
	}

	state->mss_now = tcp_current_mss(sk);
	skb = NULL;
	i = 0;

	if (!tp->sacked_out) {
		/* It's already past, so skip checking against it */
		cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
	} else {
		cache = tp->recv_sack_cache;
		/* Skip empty blocks at the head of the cache */
		while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
		       !cache->end_seq)
			cache++;
	}

	while (i < used_sacks) {
		u32 start_seq = sp[i].start_seq;
		u32 end_seq = sp[i].end_seq;
		bool dup_sack = (found_dup_sack && (i == first_sack_index));
		struct tcp_sack_block *next_dup = NULL;

		if (found_dup_sack && ((i + 1) == first_sack_index))
			next_dup = &sp[i + 1];

		/* Skip too early cached blocks */
		while (tcp_sack_cache_ok(tp, cache) &&
		       !before(start_seq, cache->end_seq))
			cache++;

		/* Can skip some work by looking at recv_sack_cache? */
		if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
		    after(end_seq, cache->start_seq)) {

			/* Head todo? */
			if (before(start_seq, cache->start_seq)) {
				skb = tcp_sacktag_skip(skb, sk, state,
						       start_seq);
				skb = tcp_sacktag_walk(skb, sk, next_dup,
						       state,
						       start_seq,
						       cache->start_seq,
						       dup_sack);
			}

			/* Rest of the block already fully processed? */
			if (!after(end_seq, cache->end_seq))
				goto advance_sp;

			skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
						       state,
						       cache->end_seq);

			/* ...tail remains todo... */
			if (tcp_highest_sack_seq(tp) == cache->end_seq) {
				/* ...but better entrypoint exists! */
*/ 1769 skb = tcp_highest_sack(sk); 1770 if (!skb) 1771 break; 1772 cache++; 1773 goto walk; 1774 } 1775 1776 skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq); 1777 /* Check overlap against next cached too (past this one already) */ 1778 cache++; 1779 continue; 1780 } 1781 1782 if (!before(start_seq, tcp_highest_sack_seq(tp))) { 1783 skb = tcp_highest_sack(sk); 1784 if (!skb) 1785 break; 1786 } 1787 skb = tcp_sacktag_skip(skb, sk, state, start_seq); 1788 1789 walk: 1790 skb = tcp_sacktag_walk(skb, sk, next_dup, state, 1791 start_seq, end_seq, dup_sack); 1792 1793 advance_sp: 1794 i++; 1795 } 1796 1797 /* Clear the head of the cache sack blocks so we can skip it next time */ 1798 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { 1799 tp->recv_sack_cache[i].start_seq = 0; 1800 tp->recv_sack_cache[i].end_seq = 0; 1801 } 1802 for (j = 0; j < used_sacks; j++) 1803 tp->recv_sack_cache[i++] = sp[j]; 1804 1805 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker) 1806 tcp_check_sack_reordering(sk, state->reord, 0); 1807 1808 tcp_verify_left_out(tp); 1809 out: 1810 1811 #if FASTRETRANS_DEBUG > 0 1812 WARN_ON((int)tp->sacked_out < 0); 1813 WARN_ON((int)tp->lost_out < 0); 1814 WARN_ON((int)tp->retrans_out < 0); 1815 WARN_ON((int)tcp_packets_in_flight(tp) < 0); 1816 #endif 1817 return state->flag; 1818 } 1819 1820 /* Limits sacked_out so that sum with lost_out isn't ever larger than 1821 * packets_out. Returns false if sacked_out adjustement wasn't necessary. 1822 */ 1823 static bool tcp_limit_reno_sacked(struct tcp_sock *tp) 1824 { 1825 u32 holes; 1826 1827 holes = max(tp->lost_out, 1U); 1828 holes = min(holes, tp->packets_out); 1829 1830 if ((tp->sacked_out + holes) > tp->packets_out) { 1831 tp->sacked_out = tp->packets_out - holes; 1832 return true; 1833 } 1834 return false; 1835 } 1836 1837 /* If we receive more dupacks than we expected counting segments 1838 * in assumption of absent reordering, interpret this as reordering. 1839 * The only another reason could be bug in receiver TCP. 1840 */ 1841 static void tcp_check_reno_reordering(struct sock *sk, const int addend) 1842 { 1843 struct tcp_sock *tp = tcp_sk(sk); 1844 1845 if (!tcp_limit_reno_sacked(tp)) 1846 return; 1847 1848 tp->reordering = min_t(u32, tp->packets_out + addend, 1849 sock_net(sk)->ipv4.sysctl_tcp_max_reordering); 1850 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER); 1851 } 1852 1853 /* Emulate SACKs for SACKless connection: account for a new dupack. */ 1854 1855 static void tcp_add_reno_sack(struct sock *sk) 1856 { 1857 struct tcp_sock *tp = tcp_sk(sk); 1858 u32 prior_sacked = tp->sacked_out; 1859 1860 tp->sacked_out++; 1861 tcp_check_reno_reordering(sk, 0); 1862 if (tp->sacked_out > prior_sacked) 1863 tp->delivered++; /* Some out-of-order packet is delivered */ 1864 tcp_verify_left_out(tp); 1865 } 1866 1867 /* Account for ACK, ACKing some data in Reno Recovery phase. */ 1868 1869 static void tcp_remove_reno_sacks(struct sock *sk, int acked) 1870 { 1871 struct tcp_sock *tp = tcp_sk(sk); 1872 1873 if (acked > 0) { 1874 /* One ACK acked hole. The rest eat duplicate ACKs. 
*/ 1875 tp->delivered += max_t(int, acked - tp->sacked_out, 1); 1876 if (acked - 1 >= tp->sacked_out) 1877 tp->sacked_out = 0; 1878 else 1879 tp->sacked_out -= acked - 1; 1880 } 1881 tcp_check_reno_reordering(sk, acked); 1882 tcp_verify_left_out(tp); 1883 } 1884 1885 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) 1886 { 1887 tp->sacked_out = 0; 1888 } 1889 1890 void tcp_clear_retrans(struct tcp_sock *tp) 1891 { 1892 tp->retrans_out = 0; 1893 tp->lost_out = 0; 1894 tp->undo_marker = 0; 1895 tp->undo_retrans = -1; 1896 tp->sacked_out = 0; 1897 } 1898 1899 static inline void tcp_init_undo(struct tcp_sock *tp) 1900 { 1901 tp->undo_marker = tp->snd_una; 1902 /* Retransmission still in flight may cause DSACKs later. */ 1903 tp->undo_retrans = tp->retrans_out ? : -1; 1904 } 1905 1906 /* Enter Loss state. If we detect SACK reneging, forget all SACK information 1907 * and reset tags completely, otherwise preserve SACKs. If receiver 1908 * dropped its ofo queue, we will know this due to reneging detection. 1909 */ 1910 void tcp_enter_loss(struct sock *sk) 1911 { 1912 const struct inet_connection_sock *icsk = inet_csk(sk); 1913 struct tcp_sock *tp = tcp_sk(sk); 1914 struct net *net = sock_net(sk); 1915 struct sk_buff *skb; 1916 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; 1917 bool is_reneg; /* is receiver reneging on SACKs? */ 1918 bool mark_lost; 1919 1920 /* Reduce ssthresh if it has not yet been made inside this window. */ 1921 if (icsk->icsk_ca_state <= TCP_CA_Disorder || 1922 !after(tp->high_seq, tp->snd_una) || 1923 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1924 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1925 tp->prior_cwnd = tp->snd_cwnd; 1926 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1927 tcp_ca_event(sk, CA_EVENT_LOSS); 1928 tcp_init_undo(tp); 1929 } 1930 tp->snd_cwnd = 1; 1931 tp->snd_cwnd_cnt = 0; 1932 tp->snd_cwnd_stamp = tcp_jiffies32; 1933 1934 tp->retrans_out = 0; 1935 tp->lost_out = 0; 1936 1937 if (tcp_is_reno(tp)) 1938 tcp_reset_reno_sack(tp); 1939 1940 skb = tcp_rtx_queue_head(sk); 1941 is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); 1942 if (is_reneg) { 1943 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); 1944 tp->sacked_out = 0; 1945 /* Mark SACK reneging until we recover from this loss event. */ 1946 tp->is_sack_reneg = 1; 1947 } 1948 tcp_clear_all_retrans_hints(tp); 1949 1950 skb_rbtree_walk_from(skb) { 1951 mark_lost = (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) || 1952 is_reneg); 1953 if (mark_lost) 1954 tcp_sum_lost(tp, skb); 1955 TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; 1956 if (mark_lost) { 1957 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; 1958 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1959 tp->lost_out += tcp_skb_pcount(skb); 1960 } 1961 } 1962 tcp_verify_left_out(tp); 1963 1964 /* Timeout in disordered state after receiving substantial DUPACKs 1965 * suggests that the degree of reordering is over-estimated. 1966 */ 1967 if (icsk->icsk_ca_state <= TCP_CA_Disorder && 1968 tp->sacked_out >= net->ipv4.sysctl_tcp_reordering) 1969 tp->reordering = min_t(unsigned int, tp->reordering, 1970 net->ipv4.sysctl_tcp_reordering); 1971 tcp_set_ca_state(sk, TCP_CA_Loss); 1972 tp->high_seq = tp->snd_nxt; 1973 tcp_ecn_queue_cwr(tp); 1974 1975 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous 1976 * loss recovery is underway except recurring timeout(s) on 1977 * the same SND.UNA (sec 3.2). 
Disable F-RTO on path MTU probing.
	 *
	 * In theory F-RTO can be used repeatedly during loss recovery.
	 * In practice this interacts badly with broken middle-boxes that
	 * falsely raise the receive window, which results in repeated
	 * timeouts and stop-and-go behavior.
	 */
	tp->frto = net->ipv4.sysctl_tcp_frto &&
		   (new_recovery || icsk->icsk_retransmits) &&
		   !inet_csk(sk)->icsk_mtup.probe_size;
}

/* If ACK arrived pointing to a remembered SACK, it means that our
 * remembered SACKs do not reflect real state of receiver i.e.
 * receiver _host_ is heavily congested (or buggy).
 *
 * To avoid big spurious retransmission bursts due to transient SACK
 * scoreboard oddities that look like reneging, we give the receiver a
 * little time (max(RTT/2, 10ms)) to send us some more ACKs that will
 * restore sanity to the SACK scoreboard. If the apparent reneging
 * persists until this RTO then we'll clear the SACK scoreboard.
 */
static bool tcp_check_sack_reneging(struct sock *sk, int flag)
{
	if (flag & FLAG_SACK_RENEGING) {
		struct tcp_sock *tp = tcp_sk(sk);
		unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
					  msecs_to_jiffies(10));

		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  delay, TCP_RTO_MAX);
		return true;
	}
	return false;
}

/* Heuristics to calculate number of duplicate ACKs. There's no dupACKs
 * counter when SACK is enabled (without SACK, sacked_out is used for
 * that purpose).
 *
 * With reordering, holes may still be in flight, so RFC3517 recovery
 * uses pure sacked_out (total number of SACKed segments) even though
 * it violates the RFC, which counts duplicate ACKs. Often these are
 * equal, but when e.g. out-of-window ACKs or packet duplication occurs,
 * they differ. Since neither occurs due to loss, TCP should really
 * ignore them.
 */
static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
{
	return tp->sacked_out + 1;
}
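/* Worked example for tcp_check_sack_reneging() above (illustrative
 * only; numbers are hypothetical, not from this file): srtt_us stores
 * 8 * srtt, so a 10 ms smoothed RTT gives srtt_us == 80000 and
 * srtt_us >> 4 == 5000 us, i.e. RTT/2. The retransmit timer is then
 * armed for max(5 ms, 10 ms) == 10 ms, so the receiver gets at least
 * 10 ms to repair an apparently reneged SACK scoreboard before the
 * RTO fires and the scoreboard is cleared.
 */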
/* Linux NewReno/SACK/ECN state machine.
 * --------------------------------------
 *
 * "Open"	Normal state, no dubious events, fast path.
 * "Disorder"	In all the respects it is "Open",
 *		but requires a bit more attention. It is entered when
 *		we see some SACKs or dupacks. It is split off from "Open"
 *		mainly to move some processing from fast path to slow one.
 * "CWR"	CWND was reduced due to some Congestion Notification event.
 *		It can be ECN, ICMP source quench, local device congestion.
 * "Recovery"	CWND was reduced, we are fast-retransmitting.
 * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
 *
 * tcp_fastretrans_alert() is entered:
 * - each incoming ACK, if state is not "Open"
 * - when arrived ACK is unusual, namely:
 *	* SACK
 *	* Duplicate ACK.
 *	* ECN ECE.
 *
 * Counting packets in flight is pretty simple.
 *
 *	in_flight = packets_out - left_out + retrans_out
 *
 *	packets_out is SND.NXT-SND.UNA counted in packets.
 *
 *	retrans_out is number of retransmitted segments.
 *
 *	left_out is number of segments that left the network, but are
 *	not ACKed yet.
 *
 *		left_out = sacked_out + lost_out
 *
 *	sacked_out: Packets which arrived at the receiver out of order
 *		   and hence were not ACKed. With SACKs this number is
 *		   simply the amount of SACKed data. Even without SACKs
 *		   it is easy to give a pretty reliable estimate of this
 *		   number, by counting duplicate ACKs.
 *
 *	lost_out: Packets lost by the network. TCP has no explicit
 *		   "loss notification" feedback from the network (for now).
 *		   It means that this number can be only _guessed_.
 *		   Actually, it is the heuristic used to predict losses
 *		   that distinguishes different algorithms.
 *
 *	E.g. after RTO, when all the queue is considered as lost,
 *	lost_out = packets_out and in_flight = retrans_out.
 *
 *	Essentially, we have now a few algorithms detecting
 *	lost packets.
 *
 *	If the receiver supports SACK:
 *
 *	RFC6675/3517: It is the conventional algorithm. A packet is
 *	considered lost if the number of higher sequence packets
 *	SACKed is greater than or equal to the DUPACK threshold
 *	(reordering). This is implemented in tcp_mark_head_lost and
 *	tcp_update_scoreboard.
 *
 *	RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm
 *	(2017-) that checks timing instead of counting DUPACKs.
 *	Essentially a packet is considered lost if it's not S/ACKed
 *	after RTT + reordering_window, where both metrics are
 *	dynamically measured and adjusted. This is implemented in
 *	tcp_rack_mark_lost.
 *
 *	If the receiver does not support SACK:
 *
 *	NewReno (RFC6582): in Recovery we assume that one segment
 *	is lost (classic Reno). While we are in Recovery and
 *	a partial ACK arrives, we assume that one more packet
 *	is lost (NewReno). These heuristics are the same in NewReno
 *	and SACK.
 *
 * The really tricky (and carefully tuned) part of the algorithm
 * is hidden in tcp_time_to_recover() and tcp_xmit_retransmit_queue().
 * The first determines the moment _when_ we should reduce CWND and,
 * hence, slow down forward transmission. In fact, it determines the moment
 * when we decide that a hole is caused by loss, rather than by reordering.
 *
 * tcp_xmit_retransmit_queue() decides _what_ we should retransmit to fill
 * holes caused by lost packets.
 *
 * And the most logically complicated part of the algorithm is the undo
 * heuristics. We detect false retransmits due to both too early
 * fast retransmit (reordering) and underestimated RTO, analyzing
 * timestamps and D-SACKs. When we detect that some segments were
 * retransmitted by mistake and the CWND reduction was wrong, we undo
 * the window reduction and abort the recovery phase. This logic is
 * hidden inside several functions named tcp_try_undo_<something>.
 */

/* This function decides when we should leave Disordered state
 * and enter Recovery phase, reducing the congestion window.
 *
 * Main question: may we further continue forward transmission
 * with the same cwnd?
 */
static bool tcp_time_to_recover(struct sock *sk, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Trick#1: The loss is proven. */
	if (tp->lost_out)
		return true;

	/* Not-A-Trick#2 : Classic rule... */
	if (tcp_dupack_heuristics(tp) > tp->reordering)
		return true;

	return false;
}
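/* Worked example (illustrative only): with SACK enabled and
 * tp->reordering == 3, tcp_dupack_heuristics() returns sacked_out + 1,
 * so once three segments above a hole have been SACKed the value is
 * 4 > 3 and tcp_time_to_recover() asks to enter Recovery, matching the
 * classic three-duplicate-ACK threshold of RFC 6675.
 */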
/* Detect loss in event "A" above by marking the head of the queue as lost.
 * For non-SACK(Reno) senders, the first "packets" number of segments
 * are considered lost. For RFC3517 SACK, a segment is considered lost if it
 * has at least tp->reordering SACKed segments above it; "packets" refers to
 * the maximum SACKed segments to pass before reaching this limit.
 */
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt, oldcnt, lost;
	unsigned int mss;
	/* Use SACK to deduce losses of new sequences sent during recovery */
	const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;

	WARN_ON(packets > tp->packets_out);
	skb = tp->lost_skb_hint;
	if (skb) {
		/* Head already handled? */
		if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una))
			return;
		cnt = tp->lost_cnt_hint;
	} else {
		skb = tcp_rtx_queue_head(sk);
		cnt = 0;
	}

	skb_rbtree_walk_from(skb) {
		/* TODO: do this better; this is not the most efficient way. */
		tp->lost_skb_hint = skb;
		tp->lost_cnt_hint = cnt;

		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
			break;

		oldcnt = cnt;
		if (tcp_is_reno(tp) ||
		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
			cnt += tcp_skb_pcount(skb);

		if (cnt > packets) {
			if (tcp_is_sack(tp) ||
			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
			    (oldcnt >= packets))
				break;

			mss = tcp_skb_mss(skb);
			/* If needed, chop off the prefix to mark as lost. */
			lost = (packets - oldcnt) * mss;
			if (lost < skb->len &&
			    tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
					 lost, mss, GFP_ATOMIC) < 0)
				break;
			cnt = packets;
		}

		tcp_skb_mark_lost(tp, skb);

		if (mark_head)
			break;
	}
	tcp_verify_left_out(tp);
}

/* Account newly detected lost packet(s) */

static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_is_reno(tp)) {
		tcp_mark_head_lost(sk, 1, 1);
	} else {
		int sacked_upto = tp->sacked_out - tp->reordering;
		if (sacked_upto >= 0)
			tcp_mark_head_lost(sk, sacked_upto, 0);
		else if (fast_rexmit)
			tcp_mark_head_lost(sk, 1, 1);
	}
}

static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
{
	return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
	       before(tp->rx_opt.rcv_tsecr, when);
}

/* skb is spuriously retransmitted if the returned timestamp echo
 * reply is prior to the skb transmission time
 */
static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
				     const struct sk_buff *skb)
{
	return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
	       tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
}

/* Nothing was retransmitted or returned timestamp is less
 * than timestamp of the first retransmission.
 */
static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
	return !tp->retrans_stamp ||
	       tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
}
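/* Sketch of the timestamp test above (assuming a peer that echoes
 * tsval per RFC 7323; the scenario is illustrative): tp->retrans_stamp
 * holds the tsval of the first retransmission. If an incoming ACK
 * echoes an rcv_tsecr earlier than that stamp, the ACK must have been
 * triggered by the original transmission, so the retransmission was
 * spurious and the undo machinery below may revert the cwnd reduction.
 */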
/* Undo procedures. */

/* We can clear retrans_stamp when there are no retransmissions in the
 * window. It would seem that it is trivially available for us in
 * tp->retrans_out, however, that kind of assumption doesn't consider
 * what will happen if errors occur when sending retransmission for the
 * second time. ...It could be that such a segment has only
 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
 * the head skb is enough except for some reneging corner cases that
 * are not worth the effort.
 *
 * Main reason for all this complexity is the fact that connection dying
 * time now depends on the validity of the retrans_stamp, in particular,
 * that successive retransmissions of a segment must not advance
 * retrans_stamp under any conditions.
 */
static bool tcp_any_retrans_done(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (tp->retrans_out)
		return true;

	skb = tcp_rtx_queue_head(sk);
	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
		return true;

	return false;
}

static void DBGUNDO(struct sock *sk, const char *msg)
{
#if FASTRETRANS_DEBUG > 1
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);

	if (sk->sk_family == AF_INET) {
		pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
			 msg,
			 &inet->inet_daddr, ntohs(inet->inet_dport),
			 tp->snd_cwnd, tcp_left_out(tp),
			 tp->snd_ssthresh, tp->prior_ssthresh,
			 tp->packets_out);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
			 msg,
			 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
			 tp->snd_cwnd, tcp_left_out(tp),
			 tp->snd_ssthresh, tp->prior_ssthresh,
			 tp->packets_out);
	}
#endif
#endif
}

static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (unmark_loss) {
		struct sk_buff *skb;

		skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
		}
		tp->lost_out = 0;
		tcp_clear_all_retrans_hints(tp);
	}

	if (tp->prior_ssthresh) {
		const struct inet_connection_sock *icsk = inet_csk(sk);

		tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);

		if (tp->prior_ssthresh > tp->snd_ssthresh) {
			tp->snd_ssthresh = tp->prior_ssthresh;
			tcp_ecn_withdraw_cwr(tp);
		}
	}
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->undo_marker = 0;
	tp->rack.advanced = 1; /* Force RACK to re-examine losses */
}

static inline bool tcp_may_undo(const struct tcp_sock *tp)
{
	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}

/* People celebrate: "We love our President!" */
static bool tcp_try_undo_recovery(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_may_undo(tp)) {
		int mib_idx;

		/* Happy end! We did not retransmit anything
		 * or our original transmission succeeded.
		 */
		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
		tcp_undo_cwnd_reduction(sk, false);
		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
			mib_idx = LINUX_MIB_TCPLOSSUNDO;
		else
			mib_idx = LINUX_MIB_TCPFULLUNDO;

		NET_INC_STATS(sock_net(sk), mib_idx);
	} else if (tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_persist--;
	}
	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
		/* Hold old state until something *above* high_seq
		 * is ACKed. For Reno it is a MUST to prevent false
		 * fast retransmits (RFC2582). SACK TCP is safe.
*/ 2366 if (!tcp_any_retrans_done(sk)) 2367 tp->retrans_stamp = 0; 2368 return true; 2369 } 2370 tcp_set_ca_state(sk, TCP_CA_Open); 2371 tp->is_sack_reneg = 0; 2372 return false; 2373 } 2374 2375 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ 2376 static bool tcp_try_undo_dsack(struct sock *sk) 2377 { 2378 struct tcp_sock *tp = tcp_sk(sk); 2379 2380 if (tp->undo_marker && !tp->undo_retrans) { 2381 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH, 2382 tp->rack.reo_wnd_persist + 1); 2383 DBGUNDO(sk, "D-SACK"); 2384 tcp_undo_cwnd_reduction(sk, false); 2385 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); 2386 return true; 2387 } 2388 return false; 2389 } 2390 2391 /* Undo during loss recovery after partial ACK or using F-RTO. */ 2392 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) 2393 { 2394 struct tcp_sock *tp = tcp_sk(sk); 2395 2396 if (frto_undo || tcp_may_undo(tp)) { 2397 tcp_undo_cwnd_reduction(sk, true); 2398 2399 DBGUNDO(sk, "partial loss"); 2400 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); 2401 if (frto_undo) 2402 NET_INC_STATS(sock_net(sk), 2403 LINUX_MIB_TCPSPURIOUSRTOS); 2404 inet_csk(sk)->icsk_retransmits = 0; 2405 if (frto_undo || tcp_is_sack(tp)) { 2406 tcp_set_ca_state(sk, TCP_CA_Open); 2407 tp->is_sack_reneg = 0; 2408 } 2409 return true; 2410 } 2411 return false; 2412 } 2413 2414 /* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937. 2415 * It computes the number of packets to send (sndcnt) based on packets newly 2416 * delivered: 2417 * 1) If the packets in flight is larger than ssthresh, PRR spreads the 2418 * cwnd reductions across a full RTT. 2419 * 2) Otherwise PRR uses packet conservation to send as much as delivered. 2420 * But when the retransmits are acked without further losses, PRR 2421 * slow starts cwnd up to ssthresh to speed up the recovery. 2422 */ 2423 static void tcp_init_cwnd_reduction(struct sock *sk) 2424 { 2425 struct tcp_sock *tp = tcp_sk(sk); 2426 2427 tp->high_seq = tp->snd_nxt; 2428 tp->tlp_high_seq = 0; 2429 tp->snd_cwnd_cnt = 0; 2430 tp->prior_cwnd = tp->snd_cwnd; 2431 tp->prr_delivered = 0; 2432 tp->prr_out = 0; 2433 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); 2434 tcp_ecn_queue_cwr(tp); 2435 } 2436 2437 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag) 2438 { 2439 struct tcp_sock *tp = tcp_sk(sk); 2440 int sndcnt = 0; 2441 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); 2442 2443 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) 2444 return; 2445 2446 tp->prr_delivered += newly_acked_sacked; 2447 if (delta < 0) { 2448 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + 2449 tp->prior_cwnd - 1; 2450 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; 2451 } else if ((flag & FLAG_RETRANS_DATA_ACKED) && 2452 !(flag & FLAG_LOST_RETRANS)) { 2453 sndcnt = min_t(int, delta, 2454 max_t(int, tp->prr_delivered - tp->prr_out, 2455 newly_acked_sacked) + 1); 2456 } else { 2457 sndcnt = min(delta, newly_acked_sacked); 2458 } 2459 /* Force a fast retransmit upon entering fast recovery */ 2460 sndcnt = max(sndcnt, (tp->prr_out ? 
0 : 1)); 2461 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; 2462 } 2463 2464 static inline void tcp_end_cwnd_reduction(struct sock *sk) 2465 { 2466 struct tcp_sock *tp = tcp_sk(sk); 2467 2468 if (inet_csk(sk)->icsk_ca_ops->cong_control) 2469 return; 2470 2471 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ 2472 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && 2473 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { 2474 tp->snd_cwnd = tp->snd_ssthresh; 2475 tp->snd_cwnd_stamp = tcp_jiffies32; 2476 } 2477 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2478 } 2479 2480 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */ 2481 void tcp_enter_cwr(struct sock *sk) 2482 { 2483 struct tcp_sock *tp = tcp_sk(sk); 2484 2485 tp->prior_ssthresh = 0; 2486 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2487 tp->undo_marker = 0; 2488 tcp_init_cwnd_reduction(sk); 2489 tcp_set_ca_state(sk, TCP_CA_CWR); 2490 } 2491 } 2492 EXPORT_SYMBOL(tcp_enter_cwr); 2493 2494 static void tcp_try_keep_open(struct sock *sk) 2495 { 2496 struct tcp_sock *tp = tcp_sk(sk); 2497 int state = TCP_CA_Open; 2498 2499 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) 2500 state = TCP_CA_Disorder; 2501 2502 if (inet_csk(sk)->icsk_ca_state != state) { 2503 tcp_set_ca_state(sk, state); 2504 tp->high_seq = tp->snd_nxt; 2505 } 2506 } 2507 2508 static void tcp_try_to_open(struct sock *sk, int flag) 2509 { 2510 struct tcp_sock *tp = tcp_sk(sk); 2511 2512 tcp_verify_left_out(tp); 2513 2514 if (!tcp_any_retrans_done(sk)) 2515 tp->retrans_stamp = 0; 2516 2517 if (flag & FLAG_ECE) 2518 tcp_enter_cwr(sk); 2519 2520 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { 2521 tcp_try_keep_open(sk); 2522 } 2523 } 2524 2525 static void tcp_mtup_probe_failed(struct sock *sk) 2526 { 2527 struct inet_connection_sock *icsk = inet_csk(sk); 2528 2529 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; 2530 icsk->icsk_mtup.probe_size = 0; 2531 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); 2532 } 2533 2534 static void tcp_mtup_probe_success(struct sock *sk) 2535 { 2536 struct tcp_sock *tp = tcp_sk(sk); 2537 struct inet_connection_sock *icsk = inet_csk(sk); 2538 2539 /* FIXME: breaks with very large cwnd */ 2540 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2541 tp->snd_cwnd = tp->snd_cwnd * 2542 tcp_mss_to_mtu(sk, tp->mss_cache) / 2543 icsk->icsk_mtup.probe_size; 2544 tp->snd_cwnd_cnt = 0; 2545 tp->snd_cwnd_stamp = tcp_jiffies32; 2546 tp->snd_ssthresh = tcp_current_ssthresh(sk); 2547 2548 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; 2549 icsk->icsk_mtup.probe_size = 0; 2550 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 2551 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); 2552 } 2553 2554 /* Do a simple retransmit without using the backoff mechanisms in 2555 * tcp_timer. This is used for path mtu discovery. 2556 * The socket is already locked here. 
2557 */ 2558 void tcp_simple_retransmit(struct sock *sk) 2559 { 2560 const struct inet_connection_sock *icsk = inet_csk(sk); 2561 struct tcp_sock *tp = tcp_sk(sk); 2562 struct sk_buff *skb; 2563 unsigned int mss = tcp_current_mss(sk); 2564 2565 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 2566 if (tcp_skb_seglen(skb) > mss && 2567 !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 2568 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2569 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 2570 tp->retrans_out -= tcp_skb_pcount(skb); 2571 } 2572 tcp_skb_mark_lost_uncond_verify(tp, skb); 2573 } 2574 } 2575 2576 tcp_clear_retrans_hints_partial(tp); 2577 2578 if (!tp->lost_out) 2579 return; 2580 2581 if (tcp_is_reno(tp)) 2582 tcp_limit_reno_sacked(tp); 2583 2584 tcp_verify_left_out(tp); 2585 2586 /* Don't muck with the congestion window here. 2587 * Reason is that we do not increase amount of _data_ 2588 * in network, but units changed and effective 2589 * cwnd/ssthresh really reduced now. 2590 */ 2591 if (icsk->icsk_ca_state != TCP_CA_Loss) { 2592 tp->high_seq = tp->snd_nxt; 2593 tp->snd_ssthresh = tcp_current_ssthresh(sk); 2594 tp->prior_ssthresh = 0; 2595 tp->undo_marker = 0; 2596 tcp_set_ca_state(sk, TCP_CA_Loss); 2597 } 2598 tcp_xmit_retransmit_queue(sk); 2599 } 2600 EXPORT_SYMBOL(tcp_simple_retransmit); 2601 2602 void tcp_enter_recovery(struct sock *sk, bool ece_ack) 2603 { 2604 struct tcp_sock *tp = tcp_sk(sk); 2605 int mib_idx; 2606 2607 if (tcp_is_reno(tp)) 2608 mib_idx = LINUX_MIB_TCPRENORECOVERY; 2609 else 2610 mib_idx = LINUX_MIB_TCPSACKRECOVERY; 2611 2612 NET_INC_STATS(sock_net(sk), mib_idx); 2613 2614 tp->prior_ssthresh = 0; 2615 tcp_init_undo(tp); 2616 2617 if (!tcp_in_cwnd_reduction(sk)) { 2618 if (!ece_ack) 2619 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2620 tcp_init_cwnd_reduction(sk); 2621 } 2622 tcp_set_ca_state(sk, TCP_CA_Recovery); 2623 } 2624 2625 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are 2626 * recovered or spurious. Otherwise retransmits more on partial ACKs. 2627 */ 2628 static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack, 2629 int *rexmit) 2630 { 2631 struct tcp_sock *tp = tcp_sk(sk); 2632 bool recovered = !before(tp->snd_una, tp->high_seq); 2633 2634 if ((flag & FLAG_SND_UNA_ADVANCED) && 2635 tcp_try_undo_loss(sk, false)) 2636 return; 2637 2638 /* The ACK (s)acks some never-retransmitted data meaning not all 2639 * the data packets before the timeout were lost. Therefore we 2640 * undo the congestion window and state. This is essentially 2641 * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since 2642 * a retransmitted skb is permantly marked, we can apply such an 2643 * operation even if F-RTO was not used. 2644 */ 2645 if ((flag & FLAG_ORIG_SACK_ACKED) && 2646 tcp_try_undo_loss(sk, tp->undo_marker)) 2647 return; 2648 2649 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ 2650 if (after(tp->snd_nxt, tp->high_seq)) { 2651 if (flag & FLAG_DATA_SACKED || is_dupack) 2652 tp->frto = 0; /* Step 3.a. loss was real */ 2653 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) { 2654 tp->high_seq = tp->snd_nxt; 2655 /* Step 2.b. Try send new data (but deferred until cwnd 2656 * is updated in tcp_ack()). Otherwise fall back to 2657 * the conventional recovery. 
			 */
			if (!tcp_write_queue_empty(sk) &&
			    after(tcp_wnd_end(tp), tp->snd_nxt)) {
				*rexmit = REXMIT_NEW;
				return;
			}
			tp->frto = 0;
		}
	}

	if (recovered) {
		/* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
		tcp_try_undo_recovery(sk);
		return;
	}
	if (tcp_is_reno(tp)) {
		/* A Reno DUPACK means new data in F-RTO step 2.b above are
		 * delivered. Lower inflight to clock out (re)transmissions.
		 */
		if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
			tcp_add_reno_sack(sk);
		else if (flag & FLAG_SND_UNA_ADVANCED)
			tcp_reset_reno_sack(tp);
	}
	*rexmit = REXMIT_LOST;
}

/* Undo during fast recovery after partial ACK. */
static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->undo_marker && tcp_packet_delayed(tp)) {
		/* Plain luck! The hole is filled with a delayed
		 * packet, rather than with a retransmit. Check reordering.
		 */
		tcp_check_sack_reordering(sk, prior_snd_una, 1);

		/* We are getting evidence that the reordering degree is higher
		 * than we realized. If there are no retransmits out then we
		 * can undo. Otherwise we clock out new packets but do not
		 * mark more packets lost or retransmit more.
		 */
		if (tp->retrans_out)
			return true;

		if (!tcp_any_retrans_done(sk))
			tp->retrans_stamp = 0;

		DBGUNDO(sk, "partial recovery");
		tcp_undo_cwnd_reduction(sk, true);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
		tcp_try_keep_open(sk);
		return true;
	}
	return false;
}

static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Use RACK to detect loss */
	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) {
		u32 prior_retrans = tp->retrans_out;

		tcp_rack_mark_lost(sk);
		if (prior_retrans > tp->retrans_out)
			*ack_flag |= FLAG_LOST_RETRANS;
	}
}

static bool tcp_force_fast_retransmit(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	return after(tcp_highest_sack_seq(tp),
		     tp->snd_una + tp->reordering * tp->mss_cache);
}
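/* Illustrative threshold (hypothetical values, not from this file):
 * with snd_una == 1000, tp->reordering == 3 and mss_cache == 1000,
 * tcp_force_fast_retransmit() returns true once the highest SACKed
 * sequence is above 4000, i.e. once more than a reordering window of
 * data past SND.UNA has been SACKed.
 */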
/* Process an event, which can update packets-in-flight not trivially.
 * Main goal of this function is to calculate new estimate for left_out,
 * taking into account both packets sitting in receiver's buffer and
 * packets lost by network.
 *
 * Besides that it updates the congestion state when packet loss or ECN
 * is detected. But it does not reduce the cwnd; that is done by the
 * congestion control later.
 *
 * It does _not_ decide what to send; that is done in
 * tcp_xmit_retransmit_queue().
 */
static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
				  bool is_dupack, int *ack_flag, int *rexmit)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int fast_rexmit = 0, flag = *ack_flag;
	bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
				     tcp_force_fast_retransmit(sk));

	if (!tp->packets_out && tp->sacked_out)
		tp->sacked_out = 0;

	/* Now state machine starts.
	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
	if (flag & FLAG_ECE)
		tp->prior_ssthresh = 0;

	/* B. In all the states check for reneging SACKs. */
	if (tcp_check_sack_reneging(sk, flag))
		return;

	/* C. Check consistency of the current state. */
	tcp_verify_left_out(tp);

	/* D. Check state exit conditions. State can be terminated
	 * when high_seq is ACKed. */
	if (icsk->icsk_ca_state == TCP_CA_Open) {
		WARN_ON(tp->retrans_out != 0);
		tp->retrans_stamp = 0;
	} else if (!before(tp->snd_una, tp->high_seq)) {
		switch (icsk->icsk_ca_state) {
		case TCP_CA_CWR:
			/* CWR is to be held until something *above* high_seq
			 * is ACKed, for the CWR bit to reach the receiver. */
			if (tp->snd_una != tp->high_seq) {
				tcp_end_cwnd_reduction(sk);
				tcp_set_ca_state(sk, TCP_CA_Open);
			}
			break;

		case TCP_CA_Recovery:
			if (tcp_is_reno(tp))
				tcp_reset_reno_sack(tp);
			if (tcp_try_undo_recovery(sk))
				return;
			tcp_end_cwnd_reduction(sk);
			break;
		}
	}

	/* E. Process state. */
	switch (icsk->icsk_ca_state) {
	case TCP_CA_Recovery:
		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
			if (tcp_is_reno(tp) && is_dupack)
				tcp_add_reno_sack(sk);
		} else {
			if (tcp_try_undo_partial(sk, prior_snd_una))
				return;
			/* Partial ACK arrived. Force fast retransmit. */
			do_lost = tcp_is_reno(tp) ||
				  tcp_force_fast_retransmit(sk);
		}
		if (tcp_try_undo_dsack(sk)) {
			tcp_try_keep_open(sk);
			return;
		}
		tcp_rack_identify_loss(sk, ack_flag);
		break;
	case TCP_CA_Loss:
		tcp_process_loss(sk, flag, is_dupack, rexmit);
		tcp_rack_identify_loss(sk, ack_flag);
		if (!(icsk->icsk_ca_state == TCP_CA_Open ||
		      (*ack_flag & FLAG_LOST_RETRANS)))
			return;
		/* Change state if cwnd is undone or retransmits are lost */
		/* fall through */
	default:
		if (tcp_is_reno(tp)) {
			if (flag & FLAG_SND_UNA_ADVANCED)
				tcp_reset_reno_sack(tp);
			if (is_dupack)
				tcp_add_reno_sack(sk);
		}

		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
			tcp_try_undo_dsack(sk);

		tcp_rack_identify_loss(sk, ack_flag);
		if (!tcp_time_to_recover(sk, flag)) {
			tcp_try_to_open(sk, flag);
			return;
		}

		/* MTU probe failure: don't reduce cwnd */
		if (icsk->icsk_ca_state < TCP_CA_CWR &&
		    icsk->icsk_mtup.probe_size &&
		    tp->snd_una == tp->mtu_probe.probe_seq_start) {
			tcp_mtup_probe_failed(sk);
			/* Restores the reduction we did in tcp_mtup_probe() */
			tp->snd_cwnd++;
			tcp_simple_retransmit(sk);
			return;
		}

		/* Otherwise enter Recovery state */
		tcp_enter_recovery(sk, (flag & FLAG_ECE));
		fast_rexmit = 1;
	}

	if (do_lost)
		tcp_update_scoreboard(sk, fast_rexmit);
	*rexmit = REXMIT_LOST;
}

static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
{
	u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
	struct tcp_sock *tp = tcp_sk(sk);

	minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
			   rtt_us ? : jiffies_to_usecs(1));
}

static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
			       long seq_rtt_us, long sack_rtt_us,
			       long ca_rtt_us, struct rate_sample *rs)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Prefer RTT measured from ACK's timing to TS-ECR. This is because
	 * broken middle-boxes or peers may corrupt TS-ECR fields. But
	 * Karn's algorithm forbids taking RTT if some retransmitted data
	 * is acked (RFC6298).
2884 */ 2885 if (seq_rtt_us < 0) 2886 seq_rtt_us = sack_rtt_us; 2887 2888 /* RTTM Rule: A TSecr value received in a segment is used to 2889 * update the averaged RTT measurement only if the segment 2890 * acknowledges some new data, i.e., only if it advances the 2891 * left edge of the send window. 2892 * See draft-ietf-tcplw-high-performance-00, section 3.3. 2893 */ 2894 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2895 flag & FLAG_ACKED) { 2896 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; 2897 u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); 2898 2899 seq_rtt_us = ca_rtt_us = delta_us; 2900 } 2901 rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */ 2902 if (seq_rtt_us < 0) 2903 return false; 2904 2905 /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is 2906 * always taken together with ACK, SACK, or TS-opts. Any negative 2907 * values will be skipped with the seq_rtt_us < 0 check above. 2908 */ 2909 tcp_update_rtt_min(sk, ca_rtt_us); 2910 tcp_rtt_estimator(sk, seq_rtt_us); 2911 tcp_set_rto(sk); 2912 2913 /* RFC6298: only reset backoff on valid RTT measurement. */ 2914 inet_csk(sk)->icsk_backoff = 0; 2915 return true; 2916 } 2917 2918 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */ 2919 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) 2920 { 2921 struct rate_sample rs; 2922 long rtt_us = -1L; 2923 2924 if (req && !req->num_retrans && tcp_rsk(req)->snt_synack) 2925 rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack); 2926 2927 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs); 2928 } 2929 2930 2931 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) 2932 { 2933 const struct inet_connection_sock *icsk = inet_csk(sk); 2934 2935 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); 2936 tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32; 2937 } 2938 2939 /* Restart timer after forward progress on connection. 2940 * RFC2988 recommends to restart timer to now+rto. 2941 */ 2942 void tcp_rearm_rto(struct sock *sk) 2943 { 2944 const struct inet_connection_sock *icsk = inet_csk(sk); 2945 struct tcp_sock *tp = tcp_sk(sk); 2946 2947 /* If the retrans timer is currently being used by Fast Open 2948 * for SYN-ACK retrans purpose, stay put. 2949 */ 2950 if (tp->fastopen_rsk) 2951 return; 2952 2953 if (!tp->packets_out) { 2954 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 2955 } else { 2956 u32 rto = inet_csk(sk)->icsk_rto; 2957 /* Offset the time elapsed after installing regular RTO */ 2958 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || 2959 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { 2960 s64 delta_us = tcp_rto_delta_us(sk); 2961 /* delta_us may not be positive if the socket is locked 2962 * when the retrans timer fires and is rescheduled. 2963 */ 2964 rto = usecs_to_jiffies(max_t(int, delta_us, 1)); 2965 } 2966 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, 2967 TCP_RTO_MAX); 2968 } 2969 } 2970 2971 /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */ 2972 static void tcp_set_xmit_timer(struct sock *sk) 2973 { 2974 if (!tcp_schedule_loss_probe(sk, true)) 2975 tcp_rearm_rto(sk); 2976 } 2977 2978 /* If we get here, the whole TSO packet has not been acked. 
*/ 2979 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) 2980 { 2981 struct tcp_sock *tp = tcp_sk(sk); 2982 u32 packets_acked; 2983 2984 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); 2985 2986 packets_acked = tcp_skb_pcount(skb); 2987 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 2988 return 0; 2989 packets_acked -= tcp_skb_pcount(skb); 2990 2991 if (packets_acked) { 2992 BUG_ON(tcp_skb_pcount(skb) == 0); 2993 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); 2994 } 2995 2996 return packets_acked; 2997 } 2998 2999 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, 3000 u32 prior_snd_una) 3001 { 3002 const struct skb_shared_info *shinfo; 3003 3004 /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */ 3005 if (likely(!TCP_SKB_CB(skb)->txstamp_ack)) 3006 return; 3007 3008 shinfo = skb_shinfo(skb); 3009 if (!before(shinfo->tskey, prior_snd_una) && 3010 before(shinfo->tskey, tcp_sk(sk)->snd_una)) { 3011 tcp_skb_tsorted_save(skb) { 3012 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); 3013 } tcp_skb_tsorted_restore(skb); 3014 } 3015 } 3016 3017 /* Remove acknowledged frames from the retransmission queue. If our packet 3018 * is before the ack sequence we can discard it as it's confirmed to have 3019 * arrived at the other end. 3020 */ 3021 static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, 3022 u32 prior_snd_una, 3023 struct tcp_sacktag_state *sack) 3024 { 3025 const struct inet_connection_sock *icsk = inet_csk(sk); 3026 u64 first_ackt, last_ackt; 3027 struct tcp_sock *tp = tcp_sk(sk); 3028 u32 prior_sacked = tp->sacked_out; 3029 u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */ 3030 struct sk_buff *skb, *next; 3031 bool fully_acked = true; 3032 long sack_rtt_us = -1L; 3033 long seq_rtt_us = -1L; 3034 long ca_rtt_us = -1L; 3035 u32 pkts_acked = 0; 3036 u32 last_in_flight = 0; 3037 bool rtt_update; 3038 int flag = 0; 3039 3040 first_ackt = 0; 3041 3042 for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) { 3043 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 3044 const u32 start_seq = scb->seq; 3045 u8 sacked = scb->sacked; 3046 u32 acked_pcount; 3047 3048 tcp_ack_tstamp(sk, skb, prior_snd_una); 3049 3050 /* Determine how many packets and what bytes were acked, tso and else */ 3051 if (after(scb->end_seq, tp->snd_una)) { 3052 if (tcp_skb_pcount(skb) == 1 || 3053 !after(tp->snd_una, scb->seq)) 3054 break; 3055 3056 acked_pcount = tcp_tso_acked(sk, skb); 3057 if (!acked_pcount) 3058 break; 3059 fully_acked = false; 3060 } else { 3061 acked_pcount = tcp_skb_pcount(skb); 3062 } 3063 3064 if (unlikely(sacked & TCPCB_RETRANS)) { 3065 if (sacked & TCPCB_SACKED_RETRANS) 3066 tp->retrans_out -= acked_pcount; 3067 flag |= FLAG_RETRANS_DATA_ACKED; 3068 } else if (!(sacked & TCPCB_SACKED_ACKED)) { 3069 last_ackt = skb->skb_mstamp; 3070 WARN_ON_ONCE(last_ackt == 0); 3071 if (!first_ackt) 3072 first_ackt = last_ackt; 3073 3074 last_in_flight = TCP_SKB_CB(skb)->tx.in_flight; 3075 if (before(start_seq, reord)) 3076 reord = start_seq; 3077 if (!after(scb->end_seq, tp->high_seq)) 3078 flag |= FLAG_ORIG_SACK_ACKED; 3079 } 3080 3081 if (sacked & TCPCB_SACKED_ACKED) { 3082 tp->sacked_out -= acked_pcount; 3083 } else if (tcp_is_sack(tp)) { 3084 tp->delivered += acked_pcount; 3085 if (!tcp_skb_spurious_retrans(tp, skb)) 3086 tcp_rack_advance(tp, sacked, scb->end_seq, 3087 skb->skb_mstamp); 3088 } 3089 if (sacked & TCPCB_LOST) 3090 tp->lost_out -= acked_pcount; 3091 3092 tp->packets_out -= acked_pcount; 
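		/* Bookkeeping note: at this point the per-flag accounting
		 * for this skb is complete; pkts_acked below feeds the
		 * ->pkts_acked congestion-control hook at the end of this
		 * function, and tcp_rate_skb_delivered() feeds the rate
		 * sampler once the skb is counted as delivered.
		 */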
3093 pkts_acked += acked_pcount; 3094 tcp_rate_skb_delivered(sk, skb, sack->rate); 3095 3096 /* Initial outgoing SYN's get put onto the write_queue 3097 * just like anything else we transmit. It is not 3098 * true data, and if we misinform our callers that 3099 * this ACK acks real data, we will erroneously exit 3100 * connection startup slow start one packet too 3101 * quickly. This is severely frowned upon behavior. 3102 */ 3103 if (likely(!(scb->tcp_flags & TCPHDR_SYN))) { 3104 flag |= FLAG_DATA_ACKED; 3105 } else { 3106 flag |= FLAG_SYN_ACKED; 3107 tp->retrans_stamp = 0; 3108 } 3109 3110 if (!fully_acked) 3111 break; 3112 3113 next = skb_rb_next(skb); 3114 if (unlikely(skb == tp->retransmit_skb_hint)) 3115 tp->retransmit_skb_hint = NULL; 3116 if (unlikely(skb == tp->lost_skb_hint)) 3117 tp->lost_skb_hint = NULL; 3118 tcp_rtx_queue_unlink_and_free(skb, sk); 3119 } 3120 3121 if (!skb) 3122 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 3123 3124 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) 3125 tp->snd_up = tp->snd_una; 3126 3127 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 3128 flag |= FLAG_SACK_RENEGING; 3129 3130 if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) { 3131 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); 3132 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt); 3133 } 3134 if (sack->first_sackt) { 3135 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt); 3136 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt); 3137 } 3138 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us, 3139 ca_rtt_us, sack->rate); 3140 3141 if (flag & FLAG_ACKED) { 3142 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ 3143 if (unlikely(icsk->icsk_mtup.probe_size && 3144 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { 3145 tcp_mtup_probe_success(sk); 3146 } 3147 3148 if (tcp_is_reno(tp)) { 3149 tcp_remove_reno_sacks(sk, pkts_acked); 3150 } else { 3151 int delta; 3152 3153 /* Non-retransmitted hole got filled? That's reordering */ 3154 if (before(reord, prior_fack)) 3155 tcp_check_sack_reordering(sk, reord, 0); 3156 3157 delta = prior_sacked - tp->sacked_out; 3158 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); 3159 } 3160 } else if (skb && rtt_update && sack_rtt_us >= 0 && 3161 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) { 3162 /* Do not re-arm RTO if the sack RTT is measured from data sent 3163 * after when the head was last (re)transmitted. Otherwise the 3164 * timeout may continue to extend in loss recovery. 
		 */
		flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
	}

	if (icsk->icsk_ca_ops->pkts_acked) {
		struct ack_sample sample = { .pkts_acked = pkts_acked,
					     .rtt_us = sack->rate->rtt_us,
					     .in_flight = last_in_flight };

		icsk->icsk_ca_ops->pkts_acked(sk, &sample);
	}

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	if (!tp->packets_out && tcp_is_sack(tp)) {
		icsk = inet_csk(sk);
		if (tp->lost_out) {
			pr_debug("Leak l=%u %d\n",
				 tp->lost_out, icsk->icsk_ca_state);
			tp->lost_out = 0;
		}
		if (tp->sacked_out) {
			pr_debug("Leak s=%u %d\n",
				 tp->sacked_out, icsk->icsk_ca_state);
			tp->sacked_out = 0;
		}
		if (tp->retrans_out) {
			pr_debug("Leak r=%u %d\n",
				 tp->retrans_out, icsk->icsk_ca_state);
			tp->retrans_out = 0;
		}
	}
#endif
	return flag;
}

static void tcp_ack_probe(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *head = tcp_send_head(sk);
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Did a usable window open? */
	if (!head)
		return;
	if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
		icsk->icsk_backoff = 0;
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
		/* Socket must be woken up by a subsequent
		 * tcp_data_snd_check(). This function is not for random use!
		 */
	} else {
		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);

		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  when, TCP_RTO_MAX);
	}
}

static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
	return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
		inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}

/* Decide whether to run the increase function of congestion control. */
static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
	/* If reordering is high then always grow cwnd whenever data is
	 * delivered regardless of its ordering. Otherwise stay conservative
	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
	 * new SACK or ECE mark may first advance cwnd here and later reduce
	 * cwnd in tcp_fastretrans_alert() based on more states.
	 */
	if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
		return flag & FLAG_FORWARD_PROGRESS;

	return flag & FLAG_DATA_ACKED;
}
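/* Example for tcp_may_raise_cwnd() above (hypothetical values): with
 * sysctl_tcp_reordering == 3 and tp->reordering grown to 300 by heavy
 * reordering, an ACK carrying only new SACKs (FLAG_DATA_SACKED set but
 * not FLAG_DATA_ACKED) still satisfies FLAG_FORWARD_PROGRESS and lets
 * cwnd grow; at the default reordering degree the same ACK would not
 * raise cwnd.
 */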
/* The "ultimate" congestion control function that aims to replace the rigid
 * cwnd increase and decrease control (tcp_cong_avoid,tcp_*cwnd_reduction).
 * It's called toward the end of processing an ACK with precise rate
 * information. All transmissions or retransmissions are delayed afterwards.
 */
static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
			     int flag, const struct rate_sample *rs)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cong_control) {
		icsk->icsk_ca_ops->cong_control(sk, rs);
		return;
	}

	if (tcp_in_cwnd_reduction(sk)) {
		/* Reduce cwnd if state mandates */
		tcp_cwnd_reduction(sk, acked_sacked, flag);
	} else if (tcp_may_raise_cwnd(sk, flag)) {
		/* Advance cwnd if state allows */
		tcp_cong_avoid(sk, ack, acked_sacked);
	}
	tcp_update_pacing_rate(sk);
}

/* Check that the window update is acceptable.
 * The function assumes that snd_una<=ack<=snd_nxt.
 */
static inline bool tcp_may_update_window(const struct tcp_sock *tp,
					 const u32 ack, const u32 ack_seq,
					 const u32 nwin)
{
	return after(ack, tp->snd_una) ||
		after(ack_seq, tp->snd_wl1) ||
		(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
}

/* If we update tp->snd_una, also update tp->bytes_acked */
static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
{
	u32 delta = ack - tp->snd_una;

	sock_owned_by_me((struct sock *)tp);
	tp->bytes_acked += delta;
	tp->snd_una = ack;
}

/* If we update tp->rcv_nxt, also update tp->bytes_received */
static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
{
	u32 delta = seq - tp->rcv_nxt;

	sock_owned_by_me((struct sock *)tp);
	tp->bytes_received += delta;
	tp->rcv_nxt = seq;
}

/* Update our send window.
 *
 * The window update algorithm described in RFC793/RFC1122 (used in
 * linux-2.2 and in FreeBSD; NetBSD's is even worse) is wrong.
 */
static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
				 u32 ack_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int flag = 0;
	u32 nwin = ntohs(tcp_hdr(skb)->window);

	if (likely(!tcp_hdr(skb)->syn))
		nwin <<= tp->rx_opt.snd_wscale;

	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
		flag |= FLAG_WIN_UPDATE;
		tcp_update_wl(tp, ack_seq);

		if (tp->snd_wnd != nwin) {
			tp->snd_wnd = nwin;

			/* Note, this is the only place where the fast path
			 * is recovered for sending TCP.
			 */
			tp->pred_flags = 0;
			tcp_fast_path_check(sk);

			if (!tcp_write_queue_empty(sk))
				tcp_slow_start_after_idle_check(sk);

			if (nwin > tp->max_window) {
				tp->max_window = nwin;
				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
			}
		}
	}

	tcp_snd_una_update(tp, ack);

	return flag;
}

static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
				   u32 *last_oow_ack_time)
{
	if (*last_oow_ack_time) {
		s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);

		if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
			NET_INC_STATS(net, mib_idx);
			return true;	/* rate-limited: don't send yet! */
		}
	}

	*last_oow_ack_time = tcp_jiffies32;

	return false;	/* not rate-limited: go ahead, send dupack now! */
}

/* Return true if we're currently rate-limiting out-of-window ACKs and
 * thus shouldn't send a dupack right now. We rate-limit dupacks in
 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
 * attacks that send repeated SYNs or ACKs for the same connection.
To 3368 * do this, we do not send a duplicate SYNACK or ACK if the remote 3369 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate. 3370 */ 3371 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, 3372 int mib_idx, u32 *last_oow_ack_time) 3373 { 3374 /* Data packets without SYNs are not likely part of an ACK loop. */ 3375 if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && 3376 !tcp_hdr(skb)->syn) 3377 return false; 3378 3379 return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time); 3380 } 3381 3382 /* RFC 5961 7 [ACK Throttling] */ 3383 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) 3384 { 3385 /* unprotected vars, we dont care of overwrites */ 3386 static u32 challenge_timestamp; 3387 static unsigned int challenge_count; 3388 struct tcp_sock *tp = tcp_sk(sk); 3389 struct net *net = sock_net(sk); 3390 u32 count, now; 3391 3392 /* First check our per-socket dupack rate limit. */ 3393 if (__tcp_oow_rate_limited(net, 3394 LINUX_MIB_TCPACKSKIPPEDCHALLENGE, 3395 &tp->last_oow_ack_time)) 3396 return; 3397 3398 /* Then check host-wide RFC 5961 rate limit. */ 3399 now = jiffies / HZ; 3400 if (now != challenge_timestamp) { 3401 u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit; 3402 u32 half = (ack_limit + 1) >> 1; 3403 3404 challenge_timestamp = now; 3405 WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit)); 3406 } 3407 count = READ_ONCE(challenge_count); 3408 if (count > 0) { 3409 WRITE_ONCE(challenge_count, count - 1); 3410 NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK); 3411 tcp_send_ack(sk); 3412 } 3413 } 3414 3415 static void tcp_store_ts_recent(struct tcp_sock *tp) 3416 { 3417 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 3418 tp->rx_opt.ts_recent_stamp = get_seconds(); 3419 } 3420 3421 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) 3422 { 3423 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { 3424 /* PAWS bug workaround wrt. ACK frames, the PAWS discard 3425 * extra check below makes sure this can only happen 3426 * for pure ACK frames. -DaveM 3427 * 3428 * Not only, also it occurs for expired timestamps. 3429 */ 3430 3431 if (tcp_paws_check(&tp->rx_opt, 0)) 3432 tcp_store_ts_recent(tp); 3433 } 3434 } 3435 3436 /* This routine deals with acks during a TLP episode. 3437 * We mark the end of a TLP episode on receiving TLP dupack or when 3438 * ack is after tlp_high_seq. 3439 * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. 3440 */ 3441 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) 3442 { 3443 struct tcp_sock *tp = tcp_sk(sk); 3444 3445 if (before(ack, tp->tlp_high_seq)) 3446 return; 3447 3448 if (flag & FLAG_DSACKING_ACK) { 3449 /* This DSACK means original and TLP probe arrived; no loss */ 3450 tp->tlp_high_seq = 0; 3451 } else if (after(ack, tp->tlp_high_seq)) { 3452 /* ACK advances: there was a loss, so reduce cwnd. 
Reset 3453 * tlp_high_seq in tcp_init_cwnd_reduction() 3454 */ 3455 tcp_init_cwnd_reduction(sk); 3456 tcp_set_ca_state(sk, TCP_CA_CWR); 3457 tcp_end_cwnd_reduction(sk); 3458 tcp_try_keep_open(sk); 3459 NET_INC_STATS(sock_net(sk), 3460 LINUX_MIB_TCPLOSSPROBERECOVERY); 3461 } else if (!(flag & (FLAG_SND_UNA_ADVANCED | 3462 FLAG_NOT_DUP | FLAG_DATA_SACKED))) { 3463 /* Pure dupack: original and TLP probe arrived; no loss */ 3464 tp->tlp_high_seq = 0; 3465 } 3466 } 3467 3468 static inline void tcp_in_ack_event(struct sock *sk, u32 flags) 3469 { 3470 const struct inet_connection_sock *icsk = inet_csk(sk); 3471 3472 if (icsk->icsk_ca_ops->in_ack_event) 3473 icsk->icsk_ca_ops->in_ack_event(sk, flags); 3474 } 3475 3476 /* Congestion control has updated the cwnd already. So if we're in 3477 * loss recovery then now we do any new sends (for FRTO) or 3478 * retransmits (for CA_Loss or CA_recovery) that make sense. 3479 */ 3480 static void tcp_xmit_recovery(struct sock *sk, int rexmit) 3481 { 3482 struct tcp_sock *tp = tcp_sk(sk); 3483 3484 if (rexmit == REXMIT_NONE) 3485 return; 3486 3487 if (unlikely(rexmit == 2)) { 3488 __tcp_push_pending_frames(sk, tcp_current_mss(sk), 3489 TCP_NAGLE_OFF); 3490 if (after(tp->snd_nxt, tp->high_seq)) 3491 return; 3492 tp->frto = 0; 3493 } 3494 tcp_xmit_retransmit_queue(sk); 3495 } 3496 3497 /* This routine deals with incoming acks, but not outgoing ones. */ 3498 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) 3499 { 3500 struct inet_connection_sock *icsk = inet_csk(sk); 3501 struct tcp_sock *tp = tcp_sk(sk); 3502 struct tcp_sacktag_state sack_state; 3503 struct rate_sample rs = { .prior_delivered = 0 }; 3504 u32 prior_snd_una = tp->snd_una; 3505 bool is_sack_reneg = tp->is_sack_reneg; 3506 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3507 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3508 bool is_dupack = false; 3509 int prior_packets = tp->packets_out; 3510 u32 delivered = tp->delivered; 3511 u32 lost = tp->lost; 3512 int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */ 3513 u32 prior_fack; 3514 3515 sack_state.first_sackt = 0; 3516 sack_state.rate = &rs; 3517 3518 /* We very likely will need to access rtx queue. */ 3519 prefetch(sk->tcp_rtx_queue.rb_node); 3520 3521 /* If the ack is older than previous acks 3522 * then we can probably ignore it. 3523 */ 3524 if (before(ack, prior_snd_una)) { 3525 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ 3526 if (before(ack, prior_snd_una - tp->max_window)) { 3527 if (!(flag & FLAG_NO_CHALLENGE_ACK)) 3528 tcp_send_challenge_ack(sk, skb); 3529 return -1; 3530 } 3531 goto old_ack; 3532 } 3533 3534 /* If the ack includes data we haven't sent yet, discard 3535 * this segment (RFC793 Section 3.9). 3536 */ 3537 if (after(ack, tp->snd_nxt)) 3538 goto invalid_ack; 3539 3540 if (after(ack, prior_snd_una)) { 3541 flag |= FLAG_SND_UNA_ADVANCED; 3542 icsk->icsk_retransmits = 0; 3543 } 3544 3545 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una; 3546 rs.prior_in_flight = tcp_packets_in_flight(tp); 3547 3548 /* ts_recent update must be made after we are sure that the packet 3549 * is in window. 3550 */ 3551 if (flag & FLAG_UPDATE_TS_RECENT) 3552 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 3553 3554 if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 3555 /* Window is constant, pure forward advance. 3556 * No more checks are required. 3557 * Note, we use the fact that SND.UNA>=SND.WL2. 
3558 */ 3559 tcp_update_wl(tp, ack_seq); 3560 tcp_snd_una_update(tp, ack); 3561 flag |= FLAG_WIN_UPDATE; 3562 3563 tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); 3564 3565 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); 3566 } else { 3567 u32 ack_ev_flags = CA_ACK_SLOWPATH; 3568 3569 if (ack_seq != TCP_SKB_CB(skb)->end_seq) 3570 flag |= FLAG_DATA; 3571 else 3572 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); 3573 3574 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); 3575 3576 if (TCP_SKB_CB(skb)->sacked) 3577 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, 3578 &sack_state); 3579 3580 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { 3581 flag |= FLAG_ECE; 3582 ack_ev_flags |= CA_ACK_ECE; 3583 } 3584 3585 if (flag & FLAG_WIN_UPDATE) 3586 ack_ev_flags |= CA_ACK_WIN_UPDATE; 3587 3588 tcp_in_ack_event(sk, ack_ev_flags); 3589 } 3590 3591 /* We passed data and got it acked, remove any soft error 3592 * log. Something worked... 3593 */ 3594 sk->sk_err_soft = 0; 3595 icsk->icsk_probes_out = 0; 3596 tp->rcv_tstamp = tcp_jiffies32; 3597 if (!prior_packets) 3598 goto no_queue; 3599 3600 /* See if we can take anything off of the retransmit queue. */ 3601 flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state); 3602 3603 tcp_rack_update_reo_wnd(sk, &rs); 3604 3605 if (tp->tlp_high_seq) 3606 tcp_process_tlp_ack(sk, ack, flag); 3607 /* If needed, reset TLP/RTO timer; RACK may later override this. */ 3608 if (flag & FLAG_SET_XMIT_TIMER) 3609 tcp_set_xmit_timer(sk); 3610 3611 if (tcp_ack_is_dubious(sk, flag)) { 3612 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3613 tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag, 3614 &rexmit); 3615 } 3616 3617 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3618 sk_dst_confirm(sk); 3619 3620 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ 3621 lost = tp->lost - lost; /* freshly marked lost */ 3622 tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate); 3623 tcp_cong_control(sk, ack, delivered, flag, sack_state.rate); 3624 tcp_xmit_recovery(sk, rexmit); 3625 return 1; 3626 3627 no_queue: 3628 /* If data was DSACKed, see if we can undo a cwnd reduction. */ 3629 if (flag & FLAG_DSACKING_ACK) 3630 tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag, 3631 &rexmit); 3632 /* If this ack opens up a zero window, clear backoff. It was 3633 * being used to time the probes, and is probably far higher than 3634 * it needs to be for normal retransmission. 3635 */ 3636 tcp_ack_probe(sk); 3637 3638 if (tp->tlp_high_seq) 3639 tcp_process_tlp_ack(sk, ack, flag); 3640 return 1; 3641 3642 invalid_ack: 3643 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3644 return -1; 3645 3646 old_ack: 3647 /* If data was SACKed, tag it and see if we should send more data. 3648 * If data was DSACKed, see if we can undo a cwnd reduction. 3649 */ 3650 if (TCP_SKB_CB(skb)->sacked) { 3651 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, 3652 &sack_state); 3653 tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag, 3654 &rexmit); 3655 tcp_xmit_recovery(sk, rexmit); 3656 } 3657 3658 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3659 return 0; 3660 } 3661 3662 static void tcp_parse_fastopen_option(int len, const unsigned char *cookie, 3663 bool syn, struct tcp_fastopen_cookie *foc, 3664 bool exp_opt) 3665 { 3666 /* Valid only in SYN or SYN-ACK with an even length. 
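	 * A valid cookie is TCP_FASTOPEN_COOKIE_MIN..TCP_FASTOPEN_COOKIE_MAX
	 * bytes; any other nonzero length is recorded as len == -1 below, so
	 * callers can tell a bad cookie from a plain cookie request (len 0).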
*/ 3667 if (!foc || !syn || len < 0 || (len & 1)) 3668 return; 3669 3670 if (len >= TCP_FASTOPEN_COOKIE_MIN && 3671 len <= TCP_FASTOPEN_COOKIE_MAX) 3672 memcpy(foc->val, cookie, len); 3673 else if (len != 0) 3674 len = -1; 3675 foc->len = len; 3676 foc->exp = exp_opt; 3677 } 3678 3679 static void smc_parse_options(const struct tcphdr *th, 3680 struct tcp_options_received *opt_rx, 3681 const unsigned char *ptr, 3682 int opsize) 3683 { 3684 #if IS_ENABLED(CONFIG_SMC) 3685 if (static_branch_unlikely(&tcp_have_smc)) { 3686 if (th->syn && !(opsize & 1) && 3687 opsize >= TCPOLEN_EXP_SMC_BASE && 3688 get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC) 3689 opt_rx->smc_ok = 1; 3690 } 3691 #endif 3692 } 3693 3694 /* Look for tcp options. Normally only called on SYN and SYNACK packets. 3695 * But, this can also be called on packets in the established flow when 3696 * the fast version below fails. 3697 */ 3698 void tcp_parse_options(const struct net *net, 3699 const struct sk_buff *skb, 3700 struct tcp_options_received *opt_rx, int estab, 3701 struct tcp_fastopen_cookie *foc) 3702 { 3703 const unsigned char *ptr; 3704 const struct tcphdr *th = tcp_hdr(skb); 3705 int length = (th->doff * 4) - sizeof(struct tcphdr); 3706 3707 ptr = (const unsigned char *)(th + 1); 3708 opt_rx->saw_tstamp = 0; 3709 3710 while (length > 0) { 3711 int opcode = *ptr++; 3712 int opsize; 3713 3714 switch (opcode) { 3715 case TCPOPT_EOL: 3716 return; 3717 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 3718 length--; 3719 continue; 3720 default: 3721 opsize = *ptr++; 3722 if (opsize < 2) /* "silly options" */ 3723 return; 3724 if (opsize > length) 3725 return; /* don't parse partial options */ 3726 switch (opcode) { 3727 case TCPOPT_MSS: 3728 if (opsize == TCPOLEN_MSS && th->syn && !estab) { 3729 u16 in_mss = get_unaligned_be16(ptr); 3730 if (in_mss) { 3731 if (opt_rx->user_mss && 3732 opt_rx->user_mss < in_mss) 3733 in_mss = opt_rx->user_mss; 3734 opt_rx->mss_clamp = in_mss; 3735 } 3736 } 3737 break; 3738 case TCPOPT_WINDOW: 3739 if (opsize == TCPOLEN_WINDOW && th->syn && 3740 !estab && net->ipv4.sysctl_tcp_window_scaling) { 3741 __u8 snd_wscale = *(__u8 *)ptr; 3742 opt_rx->wscale_ok = 1; 3743 if (snd_wscale > TCP_MAX_WSCALE) { 3744 net_info_ratelimited("%s: Illegal window scaling value %d > %u received\n", 3745 __func__, 3746 snd_wscale, 3747 TCP_MAX_WSCALE); 3748 snd_wscale = TCP_MAX_WSCALE; 3749 } 3750 opt_rx->snd_wscale = snd_wscale; 3751 } 3752 break; 3753 case TCPOPT_TIMESTAMP: 3754 if ((opsize == TCPOLEN_TIMESTAMP) && 3755 ((estab && opt_rx->tstamp_ok) || 3756 (!estab && net->ipv4.sysctl_tcp_timestamps))) { 3757 opt_rx->saw_tstamp = 1; 3758 opt_rx->rcv_tsval = get_unaligned_be32(ptr); 3759 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); 3760 } 3761 break; 3762 case TCPOPT_SACK_PERM: 3763 if (opsize == TCPOLEN_SACK_PERM && th->syn && 3764 !estab && net->ipv4.sysctl_tcp_sack) { 3765 opt_rx->sack_ok = TCP_SACK_SEEN; 3766 tcp_sack_reset(opt_rx); 3767 } 3768 break; 3769 3770 case TCPOPT_SACK: 3771 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && 3772 !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && 3773 opt_rx->sack_ok) { 3774 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; 3775 } 3776 break; 3777 #ifdef CONFIG_TCP_MD5SIG 3778 case TCPOPT_MD5SIG: 3779 /* 3780 * The MD5 Hash has already been 3781 * checked (see tcp_v{4,6}_do_rcv()). 
3782 */ 3783 break; 3784 #endif 3785 case TCPOPT_FASTOPEN: 3786 tcp_parse_fastopen_option( 3787 opsize - TCPOLEN_FASTOPEN_BASE, 3788 ptr, th->syn, foc, false); 3789 break; 3790 3791 case TCPOPT_EXP: 3792 /* Fast Open option shares code 254 using a 3793 * 16 bits magic number. 3794 */ 3795 if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE && 3796 get_unaligned_be16(ptr) == 3797 TCPOPT_FASTOPEN_MAGIC) 3798 tcp_parse_fastopen_option(opsize - 3799 TCPOLEN_EXP_FASTOPEN_BASE, 3800 ptr + 2, th->syn, foc, true); 3801 else 3802 smc_parse_options(th, opt_rx, ptr, 3803 opsize); 3804 break; 3805 3806 } 3807 ptr += opsize-2; 3808 length -= opsize; 3809 } 3810 } 3811 } 3812 EXPORT_SYMBOL(tcp_parse_options); 3813 3814 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) 3815 { 3816 const __be32 *ptr = (const __be32 *)(th + 1); 3817 3818 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 3819 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 3820 tp->rx_opt.saw_tstamp = 1; 3821 ++ptr; 3822 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3823 ++ptr; 3824 if (*ptr) 3825 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; 3826 else 3827 tp->rx_opt.rcv_tsecr = 0; 3828 return true; 3829 } 3830 return false; 3831 } 3832 3833 /* Fast parse options. This hopes to only see timestamps. 3834 * If it is wrong it falls back on tcp_parse_options(). 3835 */ 3836 static bool tcp_fast_parse_options(const struct net *net, 3837 const struct sk_buff *skb, 3838 const struct tcphdr *th, struct tcp_sock *tp) 3839 { 3840 /* In the spirit of fast parsing, compare doff directly to constant 3841 * values. Because equality is used, short doff can be ignored here. 3842 */ 3843 if (th->doff == (sizeof(*th) / 4)) { 3844 tp->rx_opt.saw_tstamp = 0; 3845 return false; 3846 } else if (tp->rx_opt.tstamp_ok && 3847 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { 3848 if (tcp_parse_aligned_timestamp(tp, th)) 3849 return true; 3850 } 3851 3852 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL); 3853 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 3854 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 3855 3856 return true; 3857 } 3858 3859 #ifdef CONFIG_TCP_MD5SIG 3860 /* 3861 * Parse MD5 Signature option 3862 */ 3863 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) 3864 { 3865 int length = (th->doff << 2) - sizeof(*th); 3866 const u8 *ptr = (const u8 *)(th + 1); 3867 3868 /* If the TCP option is too short, we can short cut */ 3869 if (length < TCPOLEN_MD5SIG) 3870 return NULL; 3871 3872 while (length > 0) { 3873 int opcode = *ptr++; 3874 int opsize; 3875 3876 switch (opcode) { 3877 case TCPOPT_EOL: 3878 return NULL; 3879 case TCPOPT_NOP: 3880 length--; 3881 continue; 3882 default: 3883 opsize = *ptr++; 3884 if (opsize < 2 || opsize > length) 3885 return NULL; 3886 if (opcode == TCPOPT_MD5SIG) 3887 return opsize == TCPOLEN_MD5SIG ? ptr : NULL; 3888 } 3889 ptr += opsize - 2; 3890 length -= opsize; 3891 } 3892 return NULL; 3893 } 3894 EXPORT_SYMBOL(tcp_parse_md5sig_option); 3895 #endif 3896 3897 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM 3898 * 3899 * It is not fatal. If this ACK does _not_ change critical state (seqs, window) 3900 * it can pass through stack. So, the following predicate verifies that 3901 * this segment is not used for anything but congestion avoidance or 3902 * fast retransmit. Moreover, we even are able to eliminate most of such 3903 * second order effects, if we apply some small "replay" window (~RTO) 3904 * to timestamp space. 
3905 * 3906 * All these measures still do not guarantee that we reject wrapped ACKs 3907 * on networks with high bandwidth, when sequence space is recycled fastly, 3908 * but it guarantees that such events will be very rare and do not affect 3909 * connection seriously. This doesn't look nice, but alas, PAWS is really 3910 * buggy extension. 3911 * 3912 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC 3913 * states that events when retransmit arrives after original data are rare. 3914 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is 3915 * the biggest problem on large power networks even with minor reordering. 3916 * OK, let's give it small replay window. If peer clock is even 1hz, it is safe 3917 * up to bandwidth of 18Gigabit/sec. 8) ] 3918 */ 3919 3920 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 3921 { 3922 const struct tcp_sock *tp = tcp_sk(sk); 3923 const struct tcphdr *th = tcp_hdr(skb); 3924 u32 seq = TCP_SKB_CB(skb)->seq; 3925 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3926 3927 return (/* 1. Pure ACK with correct sequence number. */ 3928 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && 3929 3930 /* 2. ... and duplicate ACK. */ 3931 ack == tp->snd_una && 3932 3933 /* 3. ... and does not update window. */ 3934 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 3935 3936 /* 4. ... and sits in replay window. */ 3937 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); 3938 } 3939 3940 static inline bool tcp_paws_discard(const struct sock *sk, 3941 const struct sk_buff *skb) 3942 { 3943 const struct tcp_sock *tp = tcp_sk(sk); 3944 3945 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && 3946 !tcp_disordered_ack(sk, skb); 3947 } 3948 3949 /* Check segment sequence number for validity. 3950 * 3951 * Segment controls are considered valid, if the segment 3952 * fits to the window after truncation to the window. Acceptability 3953 * of data (and SYN, FIN, of course) is checked separately. 3954 * See tcp_data_queue(), for example. 3955 * 3956 * Also, controls (RST is main one) are accepted using RCV.WUP instead 3957 * of RCV.NXT. Peer still did not advance his SND.UNA when we 3958 * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. 3959 * (borrowed from freebsd) 3960 */ 3961 3962 static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) 3963 { 3964 return !before(end_seq, tp->rcv_wup) && 3965 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); 3966 } 3967 3968 /* When we get a reset we do this. */ 3969 void tcp_reset(struct sock *sk) 3970 { 3971 trace_tcp_receive_reset(sk); 3972 3973 /* We want the right error as BSD sees it (and indeed as we do). */ 3974 switch (sk->sk_state) { 3975 case TCP_SYN_SENT: 3976 sk->sk_err = ECONNREFUSED; 3977 break; 3978 case TCP_CLOSE_WAIT: 3979 sk->sk_err = EPIPE; 3980 break; 3981 case TCP_CLOSE: 3982 return; 3983 default: 3984 sk->sk_err = ECONNRESET; 3985 } 3986 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 3987 smp_wmb(); 3988 3989 tcp_done(sk); 3990 3991 if (!sock_flag(sk, SOCK_DEAD)) 3992 sk->sk_error_report(sk); 3993 } 3994 3995 /* 3996 * Process the FIN bit. This now behaves as it is supposed to work 3997 * and the FIN takes effect when it is validly part of sequence 3998 * space. Not before when we get holes. 
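 * (An out-of-order FIN waits in the out_of_order_queue; tcp_ofo_queue()
 * invokes tcp_fin() only once the data in front of it has arrived.)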
3999 * 4000 * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT 4001 * (and thence onto LAST-ACK and finally, CLOSE, we never enter 4002 * TIME-WAIT) 4003 * 4004 * If we are in FINWAIT-1, a received FIN indicates simultaneous 4005 * close and we go into CLOSING (and later onto TIME-WAIT) 4006 * 4007 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. 4008 */ 4009 void tcp_fin(struct sock *sk) 4010 { 4011 struct tcp_sock *tp = tcp_sk(sk); 4012 4013 inet_csk_schedule_ack(sk); 4014 4015 sk->sk_shutdown |= RCV_SHUTDOWN; 4016 sock_set_flag(sk, SOCK_DONE); 4017 4018 switch (sk->sk_state) { 4019 case TCP_SYN_RECV: 4020 case TCP_ESTABLISHED: 4021 /* Move to CLOSE_WAIT */ 4022 tcp_set_state(sk, TCP_CLOSE_WAIT); 4023 inet_csk(sk)->icsk_ack.pingpong = 1; 4024 break; 4025 4026 case TCP_CLOSE_WAIT: 4027 case TCP_CLOSING: 4028 /* Received a retransmission of the FIN, do 4029 * nothing. 4030 */ 4031 break; 4032 case TCP_LAST_ACK: 4033 /* RFC793: Remain in the LAST-ACK state. */ 4034 break; 4035 4036 case TCP_FIN_WAIT1: 4037 /* This case occurs when a simultaneous close 4038 * happens, we must ack the received FIN and 4039 * enter the CLOSING state. 4040 */ 4041 tcp_send_ack(sk); 4042 tcp_set_state(sk, TCP_CLOSING); 4043 break; 4044 case TCP_FIN_WAIT2: 4045 /* Received a FIN -- send ACK and enter TIME_WAIT. */ 4046 tcp_send_ack(sk); 4047 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 4048 break; 4049 default: 4050 /* Only TCP_LISTEN and TCP_CLOSE are left, in these 4051 * cases we should never reach this piece of code. 4052 */ 4053 pr_err("%s: Impossible, sk->sk_state=%d\n", 4054 __func__, sk->sk_state); 4055 break; 4056 } 4057 4058 /* It _is_ possible, that we have something out-of-order _after_ FIN. 4059 * Probably, we should reset in this case. For now drop them. 4060 */ 4061 skb_rbtree_purge(&tp->out_of_order_queue); 4062 if (tcp_is_sack(tp)) 4063 tcp_sack_reset(&tp->rx_opt); 4064 sk_mem_reclaim(sk); 4065 4066 if (!sock_flag(sk, SOCK_DEAD)) { 4067 sk->sk_state_change(sk); 4068 4069 /* Do not send POLL_HUP for half duplex close. 
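		 * POLL_HUP is reserved for the case where both directions
		 * are shut down (SHUTDOWN_MASK below); after a plain FIN we
		 * may still transmit, so readers are only woken with POLL_IN.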
*/ 4070 if (sk->sk_shutdown == SHUTDOWN_MASK || 4071 sk->sk_state == TCP_CLOSE) 4072 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 4073 else 4074 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 4075 } 4076 } 4077 4078 static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, 4079 u32 end_seq) 4080 { 4081 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { 4082 if (before(seq, sp->start_seq)) 4083 sp->start_seq = seq; 4084 if (after(end_seq, sp->end_seq)) 4085 sp->end_seq = end_seq; 4086 return true; 4087 } 4088 return false; 4089 } 4090 4091 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 4092 { 4093 struct tcp_sock *tp = tcp_sk(sk); 4094 4095 if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { 4096 int mib_idx; 4097 4098 if (before(seq, tp->rcv_nxt)) 4099 mib_idx = LINUX_MIB_TCPDSACKOLDSENT; 4100 else 4101 mib_idx = LINUX_MIB_TCPDSACKOFOSENT; 4102 4103 NET_INC_STATS(sock_net(sk), mib_idx); 4104 4105 tp->rx_opt.dsack = 1; 4106 tp->duplicate_sack[0].start_seq = seq; 4107 tp->duplicate_sack[0].end_seq = end_seq; 4108 } 4109 } 4110 4111 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) 4112 { 4113 struct tcp_sock *tp = tcp_sk(sk); 4114 4115 if (!tp->rx_opt.dsack) 4116 tcp_dsack_set(sk, seq, end_seq); 4117 else 4118 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 4119 } 4120 4121 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) 4122 { 4123 struct tcp_sock *tp = tcp_sk(sk); 4124 4125 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 4126 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4127 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4128 tcp_enter_quickack_mode(sk); 4129 4130 if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { 4131 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4132 4133 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 4134 end_seq = tp->rcv_nxt; 4135 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); 4136 } 4137 } 4138 4139 tcp_send_ack(sk); 4140 } 4141 4142 /* These routines update the SACK block as out-of-order packets arrive or 4143 * in-order packets close up the sequence space. 4144 */ 4145 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) 4146 { 4147 int this_sack; 4148 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4149 struct tcp_sack_block *swalk = sp + 1; 4150 4151 /* See if the recent change to the first SACK eats into 4152 * or hits the sequence space of other SACK blocks, if so coalesce. 4153 */ 4154 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { 4155 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { 4156 int i; 4157 4158 /* Zap SWALK, by moving every further SACK up by one slot. 4159 * Decrease num_sacks. 4160 */ 4161 tp->rx_opt.num_sacks--; 4162 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) 4163 sp[i] = sp[i + 1]; 4164 continue; 4165 } 4166 this_sack++, swalk++; 4167 } 4168 } 4169 4170 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) 4171 { 4172 struct tcp_sock *tp = tcp_sk(sk); 4173 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4174 int cur_sacks = tp->rx_opt.num_sacks; 4175 int this_sack; 4176 4177 if (!cur_sacks) 4178 goto new_sack; 4179 4180 for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { 4181 if (tcp_sack_extend(sp, seq, end_seq)) { 4182 /* Rotate this_sack to the first one. 
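			 * RFC 2018 wants the first SACK block to report the
			 * most recently received segment, so the block we
			 * just extended moves to the head of the array.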
			 */
			for (; this_sack > 0; this_sack--, sp--)
				swap(*sp, *(sp - 1));
			if (cur_sacks > 1)
				tcp_sack_maybe_coalesce(tp);
			return;
		}
	}

	/* Could not find an adjacent existing SACK, build a new one,
	 * put it at the front, and shift everyone else down.  We
	 * always know there is at least one SACK present already here.
	 *
	 * If the sack array is full, forget about the last one.
	 */
	if (this_sack >= TCP_NUM_SACKS) {
		this_sack--;
		tp->rx_opt.num_sacks--;
		sp--;
	}
	for (; this_sack > 0; this_sack--, sp--)
		*sp = *(sp - 1);

new_sack:
	/* Build the new head SACK, and we're done. */
	sp->start_seq = seq;
	sp->end_seq = end_seq;
	tp->rx_opt.num_sacks++;
}

/* RCV.NXT advances, some SACKs should be eaten. */

static void tcp_sack_remove(struct tcp_sock *tp)
{
	struct tcp_sack_block *sp = &tp->selective_acks[0];
	int num_sacks = tp->rx_opt.num_sacks;
	int this_sack;

	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
	if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
		tp->rx_opt.num_sacks = 0;
		return;
	}

	for (this_sack = 0; this_sack < num_sacks;) {
		/* Check if the start of the sack is covered by RCV.NXT. */
		if (!before(tp->rcv_nxt, sp->start_seq)) {
			int i;

			/* RCV.NXT must cover all the block! */
			WARN_ON(before(tp->rcv_nxt, sp->end_seq));

			/* Zap this SACK, by moving forward any other SACKS. */
			for (i = this_sack+1; i < num_sacks; i++)
				tp->selective_acks[i-1] = tp->selective_acks[i];
			num_sacks--;
			continue;
		}
		this_sack++;
		sp++;
	}
	tp->rx_opt.num_sacks = num_sacks;
}

/**
 * tcp_try_coalesce - try to merge skb to prior one
 * @sk: socket
 * @to: prior buffer
 * @from: buffer to add in queue
 * @fragstolen: pointer to boolean
 *
 * Before queueing skb @from after @to, try to merge them
 * to reduce overall memory use and queue lengths, if cost is small.
 * Packets in ofo or receive queues can stay a long time.
 * Better try to coalesce them right now to avoid future collapses.
 * Returns true if caller should free @from instead of queueing it
 */
static bool tcp_try_coalesce(struct sock *sk,
			     struct sk_buff *to,
			     struct sk_buff *from,
			     bool *fragstolen)
{
	int delta;

	*fragstolen = false;

	/* It's possible this segment overlaps with prior segment in queue */
	if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
		return false;

	if (!skb_try_coalesce(to, from, fragstolen, &delta))
		return false;

	atomic_add(delta, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, delta);
	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
	TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;

	if (TCP_SKB_CB(from)->has_rxtstamp) {
		TCP_SKB_CB(to)->has_rxtstamp = true;
		to->tstamp = from->tstamp;
	}

	return true;
}

static void tcp_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	__kfree_skb(skb);
}

/* This one checks to see if we can put data from the
 * out_of_order queue into the receive_queue.
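 * In-sequence skbs are moved (or coalesced into the tail of the
 * receive queue); any overlap with data we already accepted is
 * reported back to the sender as a D-SACK via tcp_dsack_extend().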
4299 */ 4300 static void tcp_ofo_queue(struct sock *sk) 4301 { 4302 struct tcp_sock *tp = tcp_sk(sk); 4303 __u32 dsack_high = tp->rcv_nxt; 4304 bool fin, fragstolen, eaten; 4305 struct sk_buff *skb, *tail; 4306 struct rb_node *p; 4307 4308 p = rb_first(&tp->out_of_order_queue); 4309 while (p) { 4310 skb = rb_to_skb(p); 4311 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 4312 break; 4313 4314 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { 4315 __u32 dsack = dsack_high; 4316 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) 4317 dsack_high = TCP_SKB_CB(skb)->end_seq; 4318 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); 4319 } 4320 p = rb_next(p); 4321 rb_erase(&skb->rbnode, &tp->out_of_order_queue); 4322 4323 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { 4324 SOCK_DEBUG(sk, "ofo packet was already received\n"); 4325 tcp_drop(sk, skb); 4326 continue; 4327 } 4328 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", 4329 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4330 TCP_SKB_CB(skb)->end_seq); 4331 4332 tail = skb_peek_tail(&sk->sk_receive_queue); 4333 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); 4334 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); 4335 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; 4336 if (!eaten) 4337 __skb_queue_tail(&sk->sk_receive_queue, skb); 4338 else 4339 kfree_skb_partial(skb, fragstolen); 4340 4341 if (unlikely(fin)) { 4342 tcp_fin(sk); 4343 /* tcp_fin() purges tp->out_of_order_queue, 4344 * so we must end this loop right now. 4345 */ 4346 break; 4347 } 4348 } 4349 } 4350 4351 static bool tcp_prune_ofo_queue(struct sock *sk); 4352 static int tcp_prune_queue(struct sock *sk); 4353 4354 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, 4355 unsigned int size) 4356 { 4357 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 4358 !sk_rmem_schedule(sk, skb, size)) { 4359 4360 if (tcp_prune_queue(sk) < 0) 4361 return -1; 4362 4363 while (!sk_rmem_schedule(sk, skb, size)) { 4364 if (!tcp_prune_ofo_queue(sk)) 4365 return -1; 4366 } 4367 } 4368 return 0; 4369 } 4370 4371 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) 4372 { 4373 struct tcp_sock *tp = tcp_sk(sk); 4374 struct rb_node **p, *parent; 4375 struct sk_buff *skb1; 4376 u32 seq, end_seq; 4377 bool fragstolen; 4378 4379 tcp_ecn_check_ce(tp, skb); 4380 4381 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { 4382 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); 4383 tcp_drop(sk, skb); 4384 return; 4385 } 4386 4387 /* Disable header prediction. */ 4388 tp->pred_flags = 0; 4389 inet_csk_schedule_ack(sk); 4390 4391 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); 4392 seq = TCP_SKB_CB(skb)->seq; 4393 end_seq = TCP_SKB_CB(skb)->end_seq; 4394 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 4395 tp->rcv_nxt, seq, end_seq); 4396 4397 p = &tp->out_of_order_queue.rb_node; 4398 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { 4399 /* Initial out of order segment, build 1 SACK. */ 4400 if (tcp_is_sack(tp)) { 4401 tp->rx_opt.num_sacks = 1; 4402 tp->selective_acks[0].start_seq = seq; 4403 tp->selective_acks[0].end_seq = end_seq; 4404 } 4405 rb_link_node(&skb->rbnode, NULL, p); 4406 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); 4407 tp->ooo_last_skb = skb; 4408 goto end; 4409 } 4410 4411 /* In the typical case, we are adding an skb to the end of the list. 4412 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. 
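	 * (Losing a single segment and then receiving the rest of the
	 * flight in order produces exactly this append pattern, so the
	 * tail shortcut is presumably the common case.)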
4413 */ 4414 if (tcp_try_coalesce(sk, tp->ooo_last_skb, 4415 skb, &fragstolen)) { 4416 coalesce_done: 4417 tcp_grow_window(sk, skb); 4418 kfree_skb_partial(skb, fragstolen); 4419 skb = NULL; 4420 goto add_sack; 4421 } 4422 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */ 4423 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) { 4424 parent = &tp->ooo_last_skb->rbnode; 4425 p = &parent->rb_right; 4426 goto insert; 4427 } 4428 4429 /* Find place to insert this segment. Handle overlaps on the way. */ 4430 parent = NULL; 4431 while (*p) { 4432 parent = *p; 4433 skb1 = rb_to_skb(parent); 4434 if (before(seq, TCP_SKB_CB(skb1)->seq)) { 4435 p = &parent->rb_left; 4436 continue; 4437 } 4438 if (before(seq, TCP_SKB_CB(skb1)->end_seq)) { 4439 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4440 /* All the bits are present. Drop. */ 4441 NET_INC_STATS(sock_net(sk), 4442 LINUX_MIB_TCPOFOMERGE); 4443 __kfree_skb(skb); 4444 skb = NULL; 4445 tcp_dsack_set(sk, seq, end_seq); 4446 goto add_sack; 4447 } 4448 if (after(seq, TCP_SKB_CB(skb1)->seq)) { 4449 /* Partial overlap. */ 4450 tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq); 4451 } else { 4452 /* skb's seq == skb1's seq and skb covers skb1. 4453 * Replace skb1 with skb. 4454 */ 4455 rb_replace_node(&skb1->rbnode, &skb->rbnode, 4456 &tp->out_of_order_queue); 4457 tcp_dsack_extend(sk, 4458 TCP_SKB_CB(skb1)->seq, 4459 TCP_SKB_CB(skb1)->end_seq); 4460 NET_INC_STATS(sock_net(sk), 4461 LINUX_MIB_TCPOFOMERGE); 4462 __kfree_skb(skb1); 4463 goto merge_right; 4464 } 4465 } else if (tcp_try_coalesce(sk, skb1, 4466 skb, &fragstolen)) { 4467 goto coalesce_done; 4468 } 4469 p = &parent->rb_right; 4470 } 4471 insert: 4472 /* Insert segment into RB tree. */ 4473 rb_link_node(&skb->rbnode, parent, p); 4474 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); 4475 4476 merge_right: 4477 /* Remove other segments covered by skb. */ 4478 while ((skb1 = skb_rb_next(skb)) != NULL) { 4479 if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) 4480 break; 4481 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4482 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4483 end_seq); 4484 break; 4485 } 4486 rb_erase(&skb1->rbnode, &tp->out_of_order_queue); 4487 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4488 TCP_SKB_CB(skb1)->end_seq); 4489 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); 4490 tcp_drop(sk, skb1); 4491 } 4492 /* If there is no skb after us, we are the last_skb ! */ 4493 if (!skb1) 4494 tp->ooo_last_skb = skb; 4495 4496 add_sack: 4497 if (tcp_is_sack(tp)) 4498 tcp_sack_new_ofo_skb(sk, seq, end_seq); 4499 end: 4500 if (skb) { 4501 tcp_grow_window(sk, skb); 4502 skb_condense(skb); 4503 skb_set_owner_r(skb, sk); 4504 } 4505 } 4506 4507 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, 4508 bool *fragstolen) 4509 { 4510 int eaten; 4511 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); 4512 4513 __skb_pull(skb, hdrlen); 4514 eaten = (tail && 4515 tcp_try_coalesce(sk, tail, 4516 skb, fragstolen)) ? 
1 : 0; 4517 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); 4518 if (!eaten) { 4519 __skb_queue_tail(&sk->sk_receive_queue, skb); 4520 skb_set_owner_r(skb, sk); 4521 } 4522 return eaten; 4523 } 4524 4525 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) 4526 { 4527 struct sk_buff *skb; 4528 int err = -ENOMEM; 4529 int data_len = 0; 4530 bool fragstolen; 4531 4532 if (size == 0) 4533 return 0; 4534 4535 if (size > PAGE_SIZE) { 4536 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); 4537 4538 data_len = npages << PAGE_SHIFT; 4539 size = data_len + (size & ~PAGE_MASK); 4540 } 4541 skb = alloc_skb_with_frags(size - data_len, data_len, 4542 PAGE_ALLOC_COSTLY_ORDER, 4543 &err, sk->sk_allocation); 4544 if (!skb) 4545 goto err; 4546 4547 skb_put(skb, size - data_len); 4548 skb->data_len = data_len; 4549 skb->len = size; 4550 4551 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) 4552 goto err_free; 4553 4554 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); 4555 if (err) 4556 goto err_free; 4557 4558 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; 4559 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; 4560 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; 4561 4562 if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { 4563 WARN_ON_ONCE(fragstolen); /* should not happen */ 4564 __kfree_skb(skb); 4565 } 4566 return size; 4567 4568 err_free: 4569 kfree_skb(skb); 4570 err: 4571 return err; 4572 4573 } 4574 4575 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4576 { 4577 struct tcp_sock *tp = tcp_sk(sk); 4578 bool fragstolen; 4579 int eaten; 4580 4581 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { 4582 __kfree_skb(skb); 4583 return; 4584 } 4585 skb_dst_drop(skb); 4586 __skb_pull(skb, tcp_hdr(skb)->doff * 4); 4587 4588 tcp_ecn_accept_cwr(tp, skb); 4589 4590 tp->rx_opt.dsack = 0; 4591 4592 /* Queue data for delivery to the user. 4593 * Packets in sequence go to the receive queue. 4594 * Out of sequence packets to the out_of_order_queue. 4595 */ 4596 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 4597 if (tcp_receive_window(tp) == 0) 4598 goto out_of_window; 4599 4600 /* Ok. In sequence. In window. */ 4601 queue_and_out: 4602 if (skb_queue_len(&sk->sk_receive_queue) == 0) 4603 sk_forced_mem_schedule(sk, skb->truesize); 4604 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) 4605 goto drop; 4606 4607 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); 4608 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); 4609 if (skb->len) 4610 tcp_event_data_recv(sk, skb); 4611 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 4612 tcp_fin(sk); 4613 4614 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) { 4615 tcp_ofo_queue(sk); 4616 4617 /* RFC2581. 4.2. SHOULD send immediate ACK, when 4618 * gap in queue is filled. 4619 */ 4620 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) 4621 inet_csk(sk)->icsk_ack.pingpong = 0; 4622 } 4623 4624 if (tp->rx_opt.num_sacks) 4625 tcp_sack_remove(tp); 4626 4627 tcp_fast_path_check(sk); 4628 4629 if (eaten > 0) 4630 kfree_skb_partial(skb, fragstolen); 4631 if (!sock_flag(sk, SOCK_DEAD)) 4632 sk->sk_data_ready(sk); 4633 return; 4634 } 4635 4636 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4637 /* A retransmit, 2nd most common case. Force an immediate ack. 
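		 * The D-SACK below tells the peer exactly which bytes
		 * arrived twice, letting it distinguish a lost ACK from a
		 * spurious retransmit (RFC 2883).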
*/ 4638 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4639 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4640 4641 out_of_window: 4642 tcp_enter_quickack_mode(sk); 4643 inet_csk_schedule_ack(sk); 4644 drop: 4645 tcp_drop(sk, skb); 4646 return; 4647 } 4648 4649 /* Out of window. F.e. zero window probe. */ 4650 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 4651 goto out_of_window; 4652 4653 tcp_enter_quickack_mode(sk); 4654 4655 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4656 /* Partial packet, seq < rcv_next < end_seq */ 4657 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", 4658 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4659 TCP_SKB_CB(skb)->end_seq); 4660 4661 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 4662 4663 /* If window is closed, drop tail of packet. But after 4664 * remembering D-SACK for its head made in previous line. 4665 */ 4666 if (!tcp_receive_window(tp)) 4667 goto out_of_window; 4668 goto queue_and_out; 4669 } 4670 4671 tcp_data_queue_ofo(sk, skb); 4672 } 4673 4674 static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list) 4675 { 4676 if (list) 4677 return !skb_queue_is_last(list, skb) ? skb->next : NULL; 4678 4679 return skb_rb_next(skb); 4680 } 4681 4682 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 4683 struct sk_buff_head *list, 4684 struct rb_root *root) 4685 { 4686 struct sk_buff *next = tcp_skb_next(skb, list); 4687 4688 if (list) 4689 __skb_unlink(skb, list); 4690 else 4691 rb_erase(&skb->rbnode, root); 4692 4693 __kfree_skb(skb); 4694 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); 4695 4696 return next; 4697 } 4698 4699 /* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */ 4700 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb) 4701 { 4702 struct rb_node **p = &root->rb_node; 4703 struct rb_node *parent = NULL; 4704 struct sk_buff *skb1; 4705 4706 while (*p) { 4707 parent = *p; 4708 skb1 = rb_to_skb(parent); 4709 if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq)) 4710 p = &parent->rb_left; 4711 else 4712 p = &parent->rb_right; 4713 } 4714 rb_link_node(&skb->rbnode, parent, p); 4715 rb_insert_color(&skb->rbnode, root); 4716 } 4717 4718 /* Collapse contiguous sequence of skbs head..tail with 4719 * sequence numbers start..end. 4720 * 4721 * If tail is NULL, this means until the end of the queue. 4722 * 4723 * Segments with FIN/SYN are not collapsed (only because this 4724 * simplifies code) 4725 */ 4726 static void 4727 tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root, 4728 struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end) 4729 { 4730 struct sk_buff *skb = head, *n; 4731 struct sk_buff_head tmp; 4732 bool end_of_skbs; 4733 4734 /* First, check that queue is collapsible and find 4735 * the point where collapsing can be useful. 4736 */ 4737 restart: 4738 for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) { 4739 n = tcp_skb_next(skb, list); 4740 4741 /* No new bits? It is possible on ofo queue. */ 4742 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4743 skb = tcp_collapse_one(sk, skb, list, root); 4744 if (!skb) 4745 break; 4746 goto restart; 4747 } 4748 4749 /* The first skb to collapse is: 4750 * - not SYN/FIN and 4751 * - bloated or contains data before "start" or 4752 * overlaps to the next one. 
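	 * ("Bloated" means the skb charges clearly more receive space
	 * than the payload it carries: tcp_win_from_space() of its
	 * truesize exceeds skb->len.)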
		 */
		if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) &&
		    (tcp_win_from_space(sk, skb->truesize) > skb->len ||
		     before(TCP_SKB_CB(skb)->seq, start))) {
			end_of_skbs = false;
			break;
		}

		if (n && n != tail &&
		    TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
			end_of_skbs = false;
			break;
		}

		/* Decided to skip this, advance start seq. */
		start = TCP_SKB_CB(skb)->end_seq;
	}
	if (end_of_skbs ||
	    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
		return;

	__skb_queue_head_init(&tmp);

	while (before(start, end)) {
		int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
		struct sk_buff *nskb;

		nskb = alloc_skb(copy, GFP_ATOMIC);
		if (!nskb)
			break;

		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
		if (list)
			__skb_queue_before(list, skb, nskb);
		else
			__skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
		skb_set_owner_r(nskb, sk);

		/* Copy data, releasing collapsed skbs. */
		while (copy > 0) {
			int offset = start - TCP_SKB_CB(skb)->seq;
			int size = TCP_SKB_CB(skb)->end_seq - start;

			BUG_ON(offset < 0);
			if (size > 0) {
				size = min(copy, size);
				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
					BUG();
				TCP_SKB_CB(nskb)->end_seq += size;
				copy -= size;
				start += size;
			}
			if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
				skb = tcp_collapse_one(sk, skb, list, root);
				if (!skb ||
				    skb == tail ||
				    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
					goto end;
			}
		}
	}
end:
	skb_queue_walk_safe(&tmp, skb, n)
		tcp_rbtree_insert(root, skb);
}

/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
 * and tcp_collapse() them until all the queue is collapsed.
 */
static void tcp_collapse_ofo_queue(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *head;
	u32 start, end;

	skb = skb_rb_first(&tp->out_of_order_queue);
new_range:
	if (!skb) {
		tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
		return;
	}
	start = TCP_SKB_CB(skb)->seq;
	end = TCP_SKB_CB(skb)->end_seq;

	for (head = skb;;) {
		skb = skb_rb_next(skb);

		/* Range is terminated when we see a gap or when
		 * we are at the queue end.
		 */
		if (!skb ||
		    after(TCP_SKB_CB(skb)->seq, end) ||
		    before(TCP_SKB_CB(skb)->end_seq, start)) {
			tcp_collapse(sk, NULL, &tp->out_of_order_queue,
				     head, skb, start, end);
			goto new_range;
		}

		if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
			start = TCP_SKB_CB(skb)->seq;
		if (after(TCP_SKB_CB(skb)->end_seq, end))
			end = TCP_SKB_CB(skb)->end_seq;
	}
}

/*
 * Clean the out-of-order queue to make room.
 * We drop packets with the highest sequences in order to:
 * 1) give holes a chance to be filled;
 * 2) avoid adding large latencies if thousands of packets sit there.
 *    (But if the application shrinks SO_RCVBUF, we could still end up
 *     freeing the whole queue here.)
 *
 * Return true if queue has shrunk.
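 * Dropping starts at ooo_last_skb and walks toward lower sequences,
 * so the segments least likely to become usable soon go first.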
4868 */ 4869 static bool tcp_prune_ofo_queue(struct sock *sk) 4870 { 4871 struct tcp_sock *tp = tcp_sk(sk); 4872 struct rb_node *node, *prev; 4873 4874 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) 4875 return false; 4876 4877 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); 4878 node = &tp->ooo_last_skb->rbnode; 4879 do { 4880 prev = rb_prev(node); 4881 rb_erase(node, &tp->out_of_order_queue); 4882 tcp_drop(sk, rb_to_skb(node)); 4883 sk_mem_reclaim(sk); 4884 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 4885 !tcp_under_memory_pressure(sk)) 4886 break; 4887 node = prev; 4888 } while (node); 4889 tp->ooo_last_skb = rb_to_skb(prev); 4890 4891 /* Reset SACK state. A conforming SACK implementation will 4892 * do the same at a timeout based retransmit. When a connection 4893 * is in a sad state like this, we care only about integrity 4894 * of the connection not performance. 4895 */ 4896 if (tp->rx_opt.sack_ok) 4897 tcp_sack_reset(&tp->rx_opt); 4898 return true; 4899 } 4900 4901 /* Reduce allocated memory if we can, trying to get 4902 * the socket within its memory limits again. 4903 * 4904 * Return less than zero if we should start dropping frames 4905 * until the socket owning process reads some of the data 4906 * to stabilize the situation. 4907 */ 4908 static int tcp_prune_queue(struct sock *sk) 4909 { 4910 struct tcp_sock *tp = tcp_sk(sk); 4911 4912 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); 4913 4914 NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); 4915 4916 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 4917 tcp_clamp_window(sk); 4918 else if (tcp_under_memory_pressure(sk)) 4919 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 4920 4921 tcp_collapse_ofo_queue(sk); 4922 if (!skb_queue_empty(&sk->sk_receive_queue)) 4923 tcp_collapse(sk, &sk->sk_receive_queue, NULL, 4924 skb_peek(&sk->sk_receive_queue), 4925 NULL, 4926 tp->copied_seq, tp->rcv_nxt); 4927 sk_mem_reclaim(sk); 4928 4929 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4930 return 0; 4931 4932 /* Collapsing did not help, destructive actions follow. 4933 * This must not ever occur. */ 4934 4935 tcp_prune_ofo_queue(sk); 4936 4937 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4938 return 0; 4939 4940 /* If we are really being abused, tell the caller to silently 4941 * drop receive data on the floor. It will get retransmitted 4942 * and hopefully then we'll have sufficient space. 4943 */ 4944 NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED); 4945 4946 /* Massive buffer overcommit. */ 4947 tp->pred_flags = 0; 4948 return -1; 4949 } 4950 4951 static bool tcp_should_expand_sndbuf(const struct sock *sk) 4952 { 4953 const struct tcp_sock *tp = tcp_sk(sk); 4954 4955 /* If the user specified a specific send buffer setting, do 4956 * not modify it. 4957 */ 4958 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) 4959 return false; 4960 4961 /* If we are under global TCP memory pressure, do not expand. */ 4962 if (tcp_under_memory_pressure(sk)) 4963 return false; 4964 4965 /* If we are under soft global TCP memory pressure, do not expand. */ 4966 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) 4967 return false; 4968 4969 /* If we filled the congestion window, do not expand. */ 4970 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 4971 return false; 4972 4973 return true; 4974 } 4975 4976 /* When incoming ACK allowed to free some skb from write_queue, 4977 * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket 4978 * on the exit from tcp input handler. 
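 * (tcp_check_space() below is that exit hook; it only wakes the task
 * when it had already run into SOCK_NOSPACE.)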
 *
 * PROBLEM: sndbuf expansion does not work well with largesend.
 */
static void tcp_new_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_should_expand_sndbuf(sk)) {
		tcp_sndbuf_expand(sk);
		tp->snd_cwnd_stamp = tcp_jiffies32;
	}

	sk->sk_write_space(sk);
}

static void tcp_check_space(struct sock *sk)
{
	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
		/* pairs with tcp_poll() */
		smp_mb();
		if (sk->sk_socket &&
		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
			tcp_new_space(sk);
			if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
				tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
		}
	}
}

static inline void tcp_data_snd_check(struct sock *sk)
{
	tcp_push_pending_frames(sk);
	tcp_check_space(sk);
}

/*
 * Check if sending an ack is needed.
 */
static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
{
	struct tcp_sock *tp = tcp_sk(sk);

	    /* More than one full frame received... */
	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
	     /* ... and right edge of window advances far enough.
	      * (tcp_recvmsg() will send ACK otherwise). Or...
	      */
	     __tcp_select_window(sk) >= tp->rcv_wnd) ||
	    /* We ACK each frame or... */
	    tcp_in_quickack_mode(sk) ||
	    /* We have out of order data. */
	    (ofo_possible && !RB_EMPTY_ROOT(&tp->out_of_order_queue))) {
		/* Then ack it now */
		tcp_send_ack(sk);
	} else {
		/* Else, send delayed ack. */
		tcp_send_delayed_ack(sk);
	}
}

static inline void tcp_ack_snd_check(struct sock *sk)
{
	if (!inet_csk_ack_scheduled(sk)) {
		/* We sent a data segment already. */
		return;
	}
	__tcp_ack_snd_check(sk, 1);
}

/*
 * This routine is only called when we have urgent data
 * signaled. It's the 'slow' part of tcp_urg. It could be
 * moved inline now as tcp_urg is only called from one
 * place. We handle URGent data wrong. We have to - as
 * BSD still doesn't use the correction from RFC961.
 * For 1003.1g we should support a new option TCP_STDURG to permit
 * either form (or just set the sysctl tcp_stdurg).
 */
static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 ptr = ntohs(th->urg_ptr);

	if (ptr && !sock_net(sk)->ipv4.sysctl_tcp_stdurg)
		ptr--;
	ptr += ntohl(th->seq);

	/* Ignore urgent data that we've already seen and read. */
	if (after(tp->copied_seq, ptr))
		return;

	/* Do not replay urg ptr.
	 *
	 * NOTE: interesting situation not covered by specs.
	 * Misbehaving sender may send urg ptr, pointing to segment,
	 * which we already have in ofo queue. We are not able to fetch
	 * such data and will stay in TCP_URG_NOTYET until it is eaten
	 * by recvmsg(). Seems, we are not obliged to handle such wicked
	 * situations. But it is worth thinking about the possibility of
	 * some DoSes using some hypothetical application level deadlock.
	 */
	if (before(ptr, tp->rcv_nxt))
		return;

	/* Do we already have a newer (or duplicate) urgent pointer? */
	if (tp->urg_data && !after(ptr, tp->urg_seq))
		return;

	/* Tell the world about our new urgent pointer.
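	 * sk_send_sigurg() raises SIGURG for the socket owner and wakes
	 * POLL_PRI waiters, the usual out-of-band notification.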
	 */
	sk_send_sigurg(sk);

	/* We may be adding urgent data when the last byte read was
	 * urgent. To do this requires some care. We cannot just ignore
	 * tp->copied_seq since we would read the last urgent byte again
	 * as data, nor can we alter copied_seq until this data arrives
	 * or we break the semantics of SIOCATMARK (and thus sockatmark())
	 *
	 * NOTE. Double Dutch. Rendering to plain English: author of comment
	 * above did something like	send("A", MSG_OOB); send("B", MSG_OOB);
	 * and expected both A and B to disappear from the stream. This is
	 * _wrong_. Though this happens in BSD with high probability, it is
	 * not guaranteed. Any application relying on this is buggy. Note
	 * also, that fix "works" only in this artificial test. Insert some
	 * normal data between A and B and we will diverge from BSD again.
	 * Verdict: it is better to remove this behaviour, so that buggy
	 * users are trapped.
	 */
	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
	    !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
		tp->copied_seq++;
		if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			__kfree_skb(skb);
		}
	}

	tp->urg_data = TCP_URG_NOTYET;
	tp->urg_seq = ptr;

	/* Disable header prediction. */
	tp->pred_flags = 0;
}

/* This is the 'fast' part of urgent handling. */
static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check if we get a new urgent pointer - normally not. */
	if (th->urg)
		tcp_check_urg(sk, th);

	/* Do we wait for any urgent data? - normally not... */
	if (tp->urg_data == TCP_URG_NOTYET) {
		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
			  th->syn;

		/* Is the urgent pointer pointing into this packet? */
		if (ptr < skb->len) {
			u8 tmp;
			if (skb_copy_bits(skb, ptr, &tmp, 1))
				BUG();
			tp->urg_data = TCP_URG_VALID | tmp;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_data_ready(sk);
		}
	}
}

/* Accept RST for rcv_nxt - 1 after a FIN.
 * When tcp connections are abruptly terminated from Mac OSX (via ^C), a
 * FIN is sent followed by a RST packet. The RST is sent with the same
 * sequence number as the FIN, and thus according to RFC 5961 a challenge
 * ACK should be sent. However, Mac OSX rate limits replies to challenge
 * ACKs on the closed socket. In addition middleboxes can drop either the
 * challenge ACK or a subsequent RST.
 */
static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
			(1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
					       TCPF_CLOSING));
}

/* Does PAWS and seqno based validation of an incoming segment, flags will
 * play significant role here.
 */
static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
				  const struct tcphdr *th, int syn_inerr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool rst_seq_match = false;

	/* RFC1323: H1. Apply PAWS check first.
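	 * A segment whose timestamp is older than ts_recent is presumed
	 * to be an old duplicate and is dropped, unless it carries a RST
	 * or tcp_disordered_ack() judges it to be mere reordering.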
*/ 5177 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) && 5178 tp->rx_opt.saw_tstamp && 5179 tcp_paws_discard(sk, skb)) { 5180 if (!th->rst) { 5181 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); 5182 if (!tcp_oow_rate_limited(sock_net(sk), skb, 5183 LINUX_MIB_TCPACKSKIPPEDPAWS, 5184 &tp->last_oow_ack_time)) 5185 tcp_send_dupack(sk, skb); 5186 goto discard; 5187 } 5188 /* Reset is accepted even if it did not pass PAWS. */ 5189 } 5190 5191 /* Step 1: check sequence number */ 5192 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 5193 /* RFC793, page 37: "In all states except SYN-SENT, all reset 5194 * (RST) segments are validated by checking their SEQ-fields." 5195 * And page 69: "If an incoming segment is not acceptable, 5196 * an acknowledgment should be sent in reply (unless the RST 5197 * bit is set, if so drop the segment and return)". 5198 */ 5199 if (!th->rst) { 5200 if (th->syn) 5201 goto syn_challenge; 5202 if (!tcp_oow_rate_limited(sock_net(sk), skb, 5203 LINUX_MIB_TCPACKSKIPPEDSEQ, 5204 &tp->last_oow_ack_time)) 5205 tcp_send_dupack(sk, skb); 5206 } else if (tcp_reset_check(sk, skb)) { 5207 tcp_reset(sk); 5208 } 5209 goto discard; 5210 } 5211 5212 /* Step 2: check RST bit */ 5213 if (th->rst) { 5214 /* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a 5215 * FIN and SACK too if available): 5216 * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or 5217 * the right-most SACK block, 5218 * then 5219 * RESET the connection 5220 * else 5221 * Send a challenge ACK 5222 */ 5223 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt || 5224 tcp_reset_check(sk, skb)) { 5225 rst_seq_match = true; 5226 } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { 5227 struct tcp_sack_block *sp = &tp->selective_acks[0]; 5228 int max_sack = sp[0].end_seq; 5229 int this_sack; 5230 5231 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; 5232 ++this_sack) { 5233 max_sack = after(sp[this_sack].end_seq, 5234 max_sack) ? 5235 sp[this_sack].end_seq : max_sack; 5236 } 5237 5238 if (TCP_SKB_CB(skb)->seq == max_sack) 5239 rst_seq_match = true; 5240 } 5241 5242 if (rst_seq_match) 5243 tcp_reset(sk); 5244 else { 5245 /* Disable TFO if RST is out-of-order 5246 * and no data has been received 5247 * for current active TFO socket 5248 */ 5249 if (tp->syn_fastopen && !tp->data_segs_in && 5250 sk->sk_state == TCP_ESTABLISHED) 5251 tcp_fastopen_active_disable(sk); 5252 tcp_send_challenge_ack(sk, skb); 5253 } 5254 goto discard; 5255 } 5256 5257 /* step 3: check security and precedence [ignored] */ 5258 5259 /* step 4: Check for a SYN 5260 * RFC 5961 4.2 : Send a challenge ack 5261 */ 5262 if (th->syn) { 5263 syn_challenge: 5264 if (syn_inerr) 5265 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 5266 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); 5267 tcp_send_challenge_ack(sk, skb); 5268 goto discard; 5269 } 5270 5271 return true; 5272 5273 discard: 5274 tcp_drop(sk, skb); 5275 return false; 5276 } 5277 5278 /* 5279 * TCP receive function for the ESTABLISHED state. 5280 * 5281 * It is split into a fast path and a slow path. The fast path is 5282 * disabled when: 5283 * - A zero window was announced from us - zero window probing 5284 * is only handled properly in the slow path. 5285 * - Out of order segments arrived. 5286 * - Urgent data is expected. 5287 * - There is no buffer space left 5288 * - Unexpected TCP flags/window values/header lengths are received 5289 * (detected by checking the TCP header against pred_flags) 5290 * - Data is sent in both directions. 
Fast path only supports pure senders 5291 * or pure receivers (this means either the sequence number or the ack 5292 * value must stay constant) 5293 * - Unexpected TCP option. 5294 * 5295 * When these conditions are not satisfied it drops into a standard 5296 * receive procedure patterned after RFC793 to handle all cases. 5297 * The first three cases are guaranteed by proper pred_flags setting, 5298 * the rest is checked inline. Fast processing is turned on in 5299 * tcp_data_queue when everything is OK. 5300 */ 5301 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 5302 const struct tcphdr *th) 5303 { 5304 unsigned int len = skb->len; 5305 struct tcp_sock *tp = tcp_sk(sk); 5306 5307 tcp_mstamp_refresh(tp); 5308 if (unlikely(!sk->sk_rx_dst)) 5309 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); 5310 /* 5311 * Header prediction. 5312 * The code loosely follows the one in the famous 5313 * "30 instruction TCP receive" Van Jacobson mail. 5314 * 5315 * Van's trick is to deposit buffers into socket queue 5316 * on a device interrupt, to call tcp_recv function 5317 * on the receive process context and checksum and copy 5318 * the buffer to user space. smart... 5319 * 5320 * Our current scheme is not silly either but we take the 5321 * extra cost of the net_bh soft interrupt processing... 5322 * We do checksum and copy also but from device to kernel. 5323 */ 5324 5325 tp->rx_opt.saw_tstamp = 0; 5326 5327 /* pred_flags is 0xS?10 << 16 + snd_wnd 5328 * if header_prediction is to be made 5329 * 'S' will always be tp->tcp_header_len >> 2 5330 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to 5331 * turn it off (when there are holes in the receive 5332 * space for instance) 5333 * PSH flag is ignored. 5334 */ 5335 5336 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && 5337 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && 5338 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { 5339 int tcp_header_len = tp->tcp_header_len; 5340 5341 /* Timestamp header prediction: tcp_header_len 5342 * is automatically equal to th->doff*4 due to pred_flags 5343 * match. 5344 */ 5345 5346 /* Check timestamp */ 5347 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 5348 /* No? Slow path! */ 5349 if (!tcp_parse_aligned_timestamp(tp, th)) 5350 goto slow_path; 5351 5352 /* If PAWS failed, check it more carefully in slow path */ 5353 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 5354 goto slow_path; 5355 5356 /* DO NOT update ts_recent here, if checksum fails 5357 * and timestamp was corrupted part, it will result 5358 * in a hung connection since we will drop all 5359 * future packets due to the PAWS test. 5360 */ 5361 } 5362 5363 if (len <= tcp_header_len) { 5364 /* Bulk data transfer: sender */ 5365 if (len == tcp_header_len) { 5366 /* Predicted packet is in window by definition. 5367 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5368 * Hence, check seq<=rcv_wup reduces to: 5369 */ 5370 if (tcp_header_len == 5371 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 5372 tp->rcv_nxt == tp->rcv_wup) 5373 tcp_store_ts_recent(tp); 5374 5375 /* We know that such packets are checksummed 5376 * on entry. 
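				 * This is the pure-ACK fast path: nothing to
				 * queue, so only ACK processing and a chance
				 * to push pending frames remain.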
5377 */ 5378 tcp_ack(sk, skb, 0); 5379 __kfree_skb(skb); 5380 tcp_data_snd_check(sk); 5381 return; 5382 } else { /* Header too small */ 5383 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 5384 goto discard; 5385 } 5386 } else { 5387 int eaten = 0; 5388 bool fragstolen = false; 5389 5390 if (tcp_checksum_complete(skb)) 5391 goto csum_error; 5392 5393 if ((int)skb->truesize > sk->sk_forward_alloc) 5394 goto step5; 5395 5396 /* Predicted packet is in window by definition. 5397 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5398 * Hence, check seq<=rcv_wup reduces to: 5399 */ 5400 if (tcp_header_len == 5401 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 5402 tp->rcv_nxt == tp->rcv_wup) 5403 tcp_store_ts_recent(tp); 5404 5405 tcp_rcv_rtt_measure_ts(sk, skb); 5406 5407 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); 5408 5409 /* Bulk data transfer: receiver */ 5410 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, 5411 &fragstolen); 5412 5413 tcp_event_data_recv(sk, skb); 5414 5415 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { 5416 /* Well, only one small jumplet in fast path... */ 5417 tcp_ack(sk, skb, FLAG_DATA); 5418 tcp_data_snd_check(sk); 5419 if (!inet_csk_ack_scheduled(sk)) 5420 goto no_ack; 5421 } 5422 5423 __tcp_ack_snd_check(sk, 0); 5424 no_ack: 5425 if (eaten) 5426 kfree_skb_partial(skb, fragstolen); 5427 sk->sk_data_ready(sk); 5428 return; 5429 } 5430 } 5431 5432 slow_path: 5433 if (len < (th->doff << 2) || tcp_checksum_complete(skb)) 5434 goto csum_error; 5435 5436 if (!th->ack && !th->rst && !th->syn) 5437 goto discard; 5438 5439 /* 5440 * Standard slow path. 5441 */ 5442 5443 if (!tcp_validate_incoming(sk, skb, th, 1)) 5444 return; 5445 5446 step5: 5447 if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) 5448 goto discard; 5449 5450 tcp_rcv_rtt_measure_ts(sk, skb); 5451 5452 /* Process urgent data. */ 5453 tcp_urg(sk, skb, th); 5454 5455 /* step 7: process the segment text */ 5456 tcp_data_queue(sk, skb); 5457 5458 tcp_data_snd_check(sk); 5459 tcp_ack_snd_check(sk); 5460 return; 5461 5462 csum_error: 5463 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); 5464 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 5465 5466 discard: 5467 tcp_drop(sk, skb); 5468 } 5469 EXPORT_SYMBOL(tcp_rcv_established); 5470 5471 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) 5472 { 5473 struct tcp_sock *tp = tcp_sk(sk); 5474 struct inet_connection_sock *icsk = inet_csk(sk); 5475 5476 tcp_set_state(sk, TCP_ESTABLISHED); 5477 icsk->icsk_ack.lrcvtime = tcp_jiffies32; 5478 5479 if (skb) { 5480 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); 5481 security_inet_conn_established(sk, skb); 5482 } 5483 5484 tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB); 5485 5486 /* Prevent spurious tcp_cwnd_restart() on first data 5487 * packet. 5488 */ 5489 tp->lsndtime = tcp_jiffies32; 5490 5491 if (sock_flag(sk, SOCK_KEEPOPEN)) 5492 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); 5493 5494 if (!tp->rx_opt.snd_wscale) 5495 __tcp_fast_path_on(tp, tp->snd_wnd); 5496 else 5497 tp->pred_flags = 0; 5498 } 5499 5500 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, 5501 struct tcp_fastopen_cookie *cookie) 5502 { 5503 struct tcp_sock *tp = tcp_sk(sk); 5504 struct sk_buff *data = tp->syn_data ? 
static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
				    struct tcp_fastopen_cookie *cookie)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL;
	u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
	bool syn_drop = false;

	if (mss == tp->rx_opt.user_mss) {
		struct tcp_options_received opt;

		/* Get original SYNACK MSS value if user MSS sets mss_clamp */
		tcp_clear_options(&opt);
		opt.user_mss = opt.mss_clamp = 0;
		tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL);
		mss = opt.mss_clamp;
	}

	if (!tp->syn_fastopen) {
		/* Ignore an unsolicited cookie */
		cookie->len = -1;
	} else if (tp->total_retrans) {
		/* SYN timed out and the SYN-ACK neither has a cookie nor
		 * acknowledges data. Presumably the remote received only
		 * the retransmitted (regular) SYNs: either the original
		 * SYN-data or the corresponding SYN-ACK was dropped.
		 */
		syn_drop = (cookie->len < 0 && data);
	} else if (cookie->len < 0 && !tp->syn_data) {
		/* We requested a cookie but didn't get it. If we did not use
		 * the (old) exp opt format then try it next time (try_exp=1).
		 * Otherwise we go back to using the RFC7413 opt (try_exp=2).
		 */
		try_exp = tp->syn_fastopen_exp ? 2 : 1;
	}

	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);

	if (data) { /* Retransmit unacked data in SYN */
		skb_rbtree_walk_from(data) {
			if (__tcp_retransmit_skb(sk, data, 1))
				break;
		}
		tcp_rearm_rto(sk);
		NET_INC_STATS(sock_net(sk),
				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		return true;
	}
	tp->syn_data_acked = tp->syn_data;
	if (tp->syn_data_acked)
		NET_INC_STATS(sock_net(sk),
				LINUX_MIB_TCPFASTOPENACTIVE);

	tcp_fastopen_add_skb(sk, synack);

	return false;
}

static void smc_check_reset_syn(struct tcp_sock *tp)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc && !tp->rx_opt.smc_ok)
			tp->syn_smc = 0;
	}
#endif
}

static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
					 const struct tcphdr *th)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_fastopen_cookie foc = { .len = -1 };
	int saved_clamp = tp->rx_opt.mss_clamp;
	bool fastopen_fail;

	tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
		tp->rx_opt.rcv_tsecr -= tp->tsoffset;

	if (th->ack) {
		/* rfc793:
		 * "If the state is SYN-SENT then
		 *    first check the ACK bit
		 *      If the ACK bit is set
		 *        If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
		 *        a reset (unless the RST bit is set, if so drop
		 *        the segment and return)"
		 */
		if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
		    after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
			goto reset_and_undo;

		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
			     tcp_time_stamp(tp))) {
			NET_INC_STATS(sock_net(sk),
					LINUX_MIB_PAWSACTIVEREJECTED);
			goto reset_and_undo;
		}
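		/* The echoed timestamp must fall between the moment we sent
		 * our SYN (retrans_stamp) and the current clock. E.g. with
		 * retrans_stamp == 1000 and the clock now at 1050, a tsecr
		 * of 900 or 2000 cannot echo any SYN of ours and is rejected
		 * above as a PAWS failure.
		 */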
5608 */ 5609 5610 if (th->rst) { 5611 tcp_reset(sk); 5612 goto discard; 5613 } 5614 5615 /* rfc793: 5616 * "fifth, if neither of the SYN or RST bits is set then 5617 * drop the segment and return." 5618 * 5619 * See note below! 5620 * --ANK(990513) 5621 */ 5622 if (!th->syn) 5623 goto discard_and_undo; 5624 5625 /* rfc793: 5626 * "If the SYN bit is on ... 5627 * are acceptable then ... 5628 * (our SYN has been ACKed), change the connection 5629 * state to ESTABLISHED..." 5630 */ 5631 5632 tcp_ecn_rcv_synack(tp, th); 5633 5634 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); 5635 tcp_ack(sk, skb, FLAG_SLOWPATH); 5636 5637 /* Ok.. it's good. Set up sequence numbers and 5638 * move to established. 5639 */ 5640 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 5641 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 5642 5643 /* RFC1323: The window in SYN & SYN/ACK segments is 5644 * never scaled. 5645 */ 5646 tp->snd_wnd = ntohs(th->window); 5647 5648 if (!tp->rx_opt.wscale_ok) { 5649 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; 5650 tp->window_clamp = min(tp->window_clamp, 65535U); 5651 } 5652 5653 if (tp->rx_opt.saw_tstamp) { 5654 tp->rx_opt.tstamp_ok = 1; 5655 tp->tcp_header_len = 5656 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 5657 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 5658 tcp_store_ts_recent(tp); 5659 } else { 5660 tp->tcp_header_len = sizeof(struct tcphdr); 5661 } 5662 5663 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 5664 tcp_initialize_rcv_mss(sk); 5665 5666 /* Remember, tcp_poll() does not lock socket! 5667 * Change state from SYN-SENT only after copied_seq 5668 * is initialized. */ 5669 tp->copied_seq = tp->rcv_nxt; 5670 5671 smc_check_reset_syn(tp); 5672 5673 smp_mb(); 5674 5675 tcp_finish_connect(sk, skb); 5676 5677 fastopen_fail = (tp->syn_fastopen || tp->syn_data) && 5678 tcp_rcv_fastopen_synack(sk, skb, &foc); 5679 5680 if (!sock_flag(sk, SOCK_DEAD)) { 5681 sk->sk_state_change(sk); 5682 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); 5683 } 5684 if (fastopen_fail) 5685 return -1; 5686 if (sk->sk_write_pending || 5687 icsk->icsk_accept_queue.rskq_defer_accept || 5688 icsk->icsk_ack.pingpong) { 5689 /* Save one ACK. Data will be ready after 5690 * several ticks, if write_pending is set. 5691 * 5692 * It may be deleted, but with this feature tcpdumps 5693 * look so _wonderfully_ clever, that I was not able 5694 * to stand against the temptation 8) --ANK 5695 */ 5696 inet_csk_schedule_ack(sk); 5697 tcp_enter_quickack_mode(sk); 5698 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5699 TCP_DELACK_MAX, TCP_RTO_MAX); 5700 5701 discard: 5702 tcp_drop(sk, skb); 5703 return 0; 5704 } else { 5705 tcp_send_ack(sk); 5706 } 5707 return -1; 5708 } 5709 5710 /* No ACK in the segment */ 5711 5712 if (th->rst) { 5713 /* rfc793: 5714 * "If the RST bit is set 5715 * 5716 * Otherwise (no ACK) drop the segment and return." 5717 */ 5718 5719 goto discard_and_undo; 5720 } 5721 5722 /* PAWS check. */ 5723 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && 5724 tcp_paws_reject(&tp->rx_opt, 0)) 5725 goto discard_and_undo; 5726 5727 if (th->syn) { 5728 /* We see SYN without ACK. It is attempt of 5729 * simultaneous connect with crossed SYNs. 5730 * Particularly, it can be connect to self. 
5731 */ 5732 tcp_set_state(sk, TCP_SYN_RECV); 5733 5734 if (tp->rx_opt.saw_tstamp) { 5735 tp->rx_opt.tstamp_ok = 1; 5736 tcp_store_ts_recent(tp); 5737 tp->tcp_header_len = 5738 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 5739 } else { 5740 tp->tcp_header_len = sizeof(struct tcphdr); 5741 } 5742 5743 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 5744 tp->copied_seq = tp->rcv_nxt; 5745 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 5746 5747 /* RFC1323: The window in SYN & SYN/ACK segments is 5748 * never scaled. 5749 */ 5750 tp->snd_wnd = ntohs(th->window); 5751 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 5752 tp->max_window = tp->snd_wnd; 5753 5754 tcp_ecn_rcv_syn(tp, th); 5755 5756 tcp_mtup_init(sk); 5757 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 5758 tcp_initialize_rcv_mss(sk); 5759 5760 tcp_send_synack(sk); 5761 #if 0 5762 /* Note, we could accept data and URG from this segment. 5763 * There are no obstacles to make this (except that we must 5764 * either change tcp_recvmsg() to prevent it from returning data 5765 * before 3WHS completes per RFC793, or employ TCP Fast Open). 5766 * 5767 * However, if we ignore data in ACKless segments sometimes, 5768 * we have no reasons to accept it sometimes. 5769 * Also, seems the code doing it in step6 of tcp_rcv_state_process 5770 * is not flawless. So, discard packet for sanity. 5771 * Uncomment this return to process the data. 5772 */ 5773 return -1; 5774 #else 5775 goto discard; 5776 #endif 5777 } 5778 /* "fifth, if neither of the SYN or RST bits is set then 5779 * drop the segment and return." 5780 */ 5781 5782 discard_and_undo: 5783 tcp_clear_options(&tp->rx_opt); 5784 tp->rx_opt.mss_clamp = saved_clamp; 5785 goto discard; 5786 5787 reset_and_undo: 5788 tcp_clear_options(&tp->rx_opt); 5789 tp->rx_opt.mss_clamp = saved_clamp; 5790 return 1; 5791 } 5792 5793 /* 5794 * This function implements the receiving procedure of RFC 793 for 5795 * all states except ESTABLISHED and TIME_WAIT. 5796 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be 5797 * address independent. 5798 */ 5799 5800 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) 5801 { 5802 struct tcp_sock *tp = tcp_sk(sk); 5803 struct inet_connection_sock *icsk = inet_csk(sk); 5804 const struct tcphdr *th = tcp_hdr(skb); 5805 struct request_sock *req; 5806 int queued = 0; 5807 bool acceptable; 5808 5809 switch (sk->sk_state) { 5810 case TCP_CLOSE: 5811 goto discard; 5812 5813 case TCP_LISTEN: 5814 if (th->ack) 5815 return 1; 5816 5817 if (th->rst) 5818 goto discard; 5819 5820 if (th->syn) { 5821 if (th->fin) 5822 goto discard; 5823 /* It is possible that we process SYN packets from backlog, 5824 * so we need to make sure to disable BH right there. 5825 */ 5826 local_bh_disable(); 5827 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0; 5828 local_bh_enable(); 5829 5830 if (!acceptable) 5831 return 1; 5832 consume_skb(skb); 5833 return 0; 5834 } 5835 goto discard; 5836 5837 case TCP_SYN_SENT: 5838 tp->rx_opt.saw_tstamp = 0; 5839 tcp_mstamp_refresh(tp); 5840 queued = tcp_rcv_synsent_state_process(sk, skb, th); 5841 if (queued >= 0) 5842 return queued; 5843 5844 /* Do step6 onward by hand. 
/*
 *	This function implements the receiving procedure of RFC 793 for
 *	all states except ESTABLISHED and TIME_WAIT.
 *	It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
 *	address independent.
 */

int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcphdr *th = tcp_hdr(skb);
	struct request_sock *req;
	int queued = 0;
	bool acceptable;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		goto discard;

	case TCP_LISTEN:
		if (th->ack)
			return 1;

		if (th->rst)
			goto discard;

		if (th->syn) {
			if (th->fin)
				goto discard;
			/* It is possible that we process SYN packets from backlog,
			 * so we need to make sure to disable BH right there.
			 */
			local_bh_disable();
			acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
			local_bh_enable();

			if (!acceptable)
				return 1;
			consume_skb(skb);
			return 0;
		}
		goto discard;

	case TCP_SYN_SENT:
		tp->rx_opt.saw_tstamp = 0;
		tcp_mstamp_refresh(tp);
		queued = tcp_rcv_synsent_state_process(sk, skb, th);
		if (queued >= 0)
			return queued;

		/* Do step 6 onward by hand. */
		tcp_urg(sk, skb, th);
		__kfree_skb(skb);
		tcp_data_snd_check(sk);
		return 0;
	}

	tcp_mstamp_refresh(tp);
	tp->rx_opt.saw_tstamp = 0;
	req = tp->fastopen_rsk;
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);

		if (!tcp_check_req(sk, skb, req, true))
			goto discard;
	}

	if (!th->ack && !th->rst && !th->syn)
		goto discard;

	if (!tcp_validate_incoming(sk, skb, th, 0))
		return 0;

	/* step 5: check the ACK field */
	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
				      FLAG_UPDATE_TS_RECENT |
				      FLAG_NO_CHALLENGE_ACK) > 0;

	if (!acceptable) {
		if (sk->sk_state == TCP_SYN_RECV)
			return 1;	/* send one RST */
		tcp_send_challenge_ack(sk, skb);
		goto discard;
	}
	switch (sk->sk_state) {
	case TCP_SYN_RECV:
		if (!tp->srtt_us)
			tcp_synack_rtt_meas(sk, req);

		/* Once we leave TCP_SYN_RECV, we no longer need req
		 * so release it.
		 */
		if (req) {
			inet_csk(sk)->icsk_retransmits = 0;
			reqsk_fastopen_remove(sk, req, false);
			/* Re-arm the timer because data may have been sent out.
			 * This is similar to the regular data transmission case
			 * when new data has just been ack'ed.
			 *
			 * (TFO) - we could try to be more aggressive and
			 * retransmit any data sooner based on when it
			 * was sent out.
			 */
			tcp_rearm_rto(sk);
		} else {
			tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
			tp->copied_seq = tp->rcv_nxt;
		}
		smp_mb();
		tcp_set_state(sk, TCP_ESTABLISHED);
		sk->sk_state_change(sk);

		/* Note that this wakeup is only for the marginal crossed SYN
		 * case. Passively open sockets are not woken up, because
		 * sk->sk_sleep == NULL and sk->sk_socket == NULL.
		 */
		if (sk->sk_socket)
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);

		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
		tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);

		if (tp->rx_opt.tstamp_ok)
			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;

		if (!inet_csk(sk)->icsk_ca_ops->cong_control)
			tcp_update_pacing_rate(sk);

		/* Prevent spurious tcp_cwnd_restart() on first data packet */
		tp->lsndtime = tcp_jiffies32;

		tcp_initialize_rcv_mss(sk);
		tcp_fast_path_on(tp);
		break;
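	/* Our FIN is acknowledged once snd_una has advanced over every
	 * byte we ever queued, the FIN included, i.e. once
	 * snd_una == write_seq; only then may FIN-WAIT-1 be left.
	 */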
	case TCP_FIN_WAIT1: {
		int tmo;

		/* If we enter the TCP_FIN_WAIT1 state and we are a
		 * Fast Open socket and this is the first acceptable
		 * ACK we have received, this would have acknowledged
		 * our SYNACK so stop the SYNACK timer.
		 */
		if (req) {
			/* We no longer need the request sock. */
			reqsk_fastopen_remove(sk, req, false);
			tcp_rearm_rto(sk);
		}
		if (tp->snd_una != tp->write_seq)
			break;

		tcp_set_state(sk, TCP_FIN_WAIT2);
		sk->sk_shutdown |= SEND_SHUTDOWN;

		sk_dst_confirm(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			/* Wake up lingering close() */
			sk->sk_state_change(sk);
			break;
		}

		if (tp->linger2 < 0) {
			tcp_done(sk);
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
			return 1;
		}
		if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
		    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
			/* Receive out of order FIN after close() */
			if (tp->syn_fastopen && th->fin)
				tcp_fastopen_active_disable(sk);
			tcp_done(sk);
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
			return 1;
		}

		tmo = tcp_fin_time(sk);
		if (tmo > TCP_TIMEWAIT_LEN) {
			inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
		} else if (th->fin || sock_owned_by_user(sk)) {
			/* Bad case. We could lose such a FIN otherwise.
			 * It is not a big problem, but it looks confusing
			 * and is not so rare an event. We can still lose it
			 * now, if it spins in bh_lock_sock(), but that is a
			 * really marginal case.
			 */
			inet_csk_reset_keepalive_timer(sk, tmo);
		} else {
			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
			goto discard;
		}
		break;
	}

	case TCP_CLOSING:
		if (tp->snd_una == tp->write_seq) {
			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
			goto discard;
		}
		break;

	case TCP_LAST_ACK:
		if (tp->snd_una == tp->write_seq) {
			tcp_update_metrics(sk);
			tcp_done(sk);
			goto discard;
		}
		break;
	}

	/* step 6: check the URG bit */
	tcp_urg(sk, skb, th);

	/* step 7: process the segment text */
	switch (sk->sk_state) {
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
	case TCP_LAST_ACK:
		if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
			break;
		/* fall through */
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		/* RFC 793 says to queue data in these states,
		 * RFC 1122 says we MUST send a reset.
		 * BSD 4.4 also does reset.
		 */
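		/* Only a segment carrying data we have not seen yet, i.e.
		 * one whose end sequence (net of the FIN flag) lies beyond
		 * rcv_nxt, aborts the connection here; a bare FIN or a
		 * retransmission of old data falls through and is queued.
		 */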
6023 */ 6024 if (sk->sk_shutdown & RCV_SHUTDOWN) { 6025 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 6026 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { 6027 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 6028 tcp_reset(sk); 6029 return 1; 6030 } 6031 } 6032 /* Fall through */ 6033 case TCP_ESTABLISHED: 6034 tcp_data_queue(sk, skb); 6035 queued = 1; 6036 break; 6037 } 6038 6039 /* tcp_data could move socket to TIME-WAIT */ 6040 if (sk->sk_state != TCP_CLOSE) { 6041 tcp_data_snd_check(sk); 6042 tcp_ack_snd_check(sk); 6043 } 6044 6045 if (!queued) { 6046 discard: 6047 tcp_drop(sk, skb); 6048 } 6049 return 0; 6050 } 6051 EXPORT_SYMBOL(tcp_rcv_state_process); 6052 6053 static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) 6054 { 6055 struct inet_request_sock *ireq = inet_rsk(req); 6056 6057 if (family == AF_INET) 6058 net_dbg_ratelimited("drop open request from %pI4/%u\n", 6059 &ireq->ir_rmt_addr, port); 6060 #if IS_ENABLED(CONFIG_IPV6) 6061 else if (family == AF_INET6) 6062 net_dbg_ratelimited("drop open request from %pI6/%u\n", 6063 &ireq->ir_v6_rmt_addr, port); 6064 #endif 6065 } 6066 6067 /* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set 6068 * 6069 * If we receive a SYN packet with these bits set, it means a 6070 * network is playing bad games with TOS bits. In order to 6071 * avoid possible false congestion notifications, we disable 6072 * TCP ECN negotiation. 6073 * 6074 * Exception: tcp_ca wants ECN. This is required for DCTCP 6075 * congestion control: Linux DCTCP asserts ECT on all packets, 6076 * including SYN, which is most optimal solution; however, 6077 * others, such as FreeBSD do not. 6078 */ 6079 static void tcp_ecn_create_request(struct request_sock *req, 6080 const struct sk_buff *skb, 6081 const struct sock *listen_sk, 6082 const struct dst_entry *dst) 6083 { 6084 const struct tcphdr *th = tcp_hdr(skb); 6085 const struct net *net = sock_net(listen_sk); 6086 bool th_ecn = th->ece && th->cwr; 6087 bool ect, ecn_ok; 6088 u32 ecn_ok_dst; 6089 6090 if (!th_ecn) 6091 return; 6092 6093 ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield); 6094 ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK); 6095 ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst; 6096 6097 if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk) || 6098 (ecn_ok_dst & DST_FEATURE_ECN_CA) || 6099 tcp_bpf_ca_needs_ecn((struct sock *)req)) 6100 inet_rsk(req)->ecn_ok = 1; 6101 } 6102 6103 static void tcp_openreq_init(struct request_sock *req, 6104 const struct tcp_options_received *rx_opt, 6105 struct sk_buff *skb, const struct sock *sk) 6106 { 6107 struct inet_request_sock *ireq = inet_rsk(req); 6108 6109 req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */ 6110 req->cookie_ts = 0; 6111 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; 6112 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 6113 tcp_rsk(req)->snt_synack = tcp_clock_us(); 6114 tcp_rsk(req)->last_oow_ack_time = 0; 6115 req->mss = rx_opt->mss_clamp; 6116 req->ts_recent = rx_opt->saw_tstamp ? 
static void tcp_openreq_init(struct request_sock *req,
			     const struct tcp_options_received *rx_opt,
			     struct sk_buff *skb, const struct sock *sk)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rsk_rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
	tcp_rsk(req)->snt_synack = tcp_clock_us();
	tcp_rsk(req)->last_oow_ack_time = 0;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->ir_rmt_port = tcp_hdr(skb)->source;
	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
	ireq->ir_mark = inet_request_mark(sk, skb);
#if IS_ENABLED(CONFIG_SMC)
	ireq->smc_ok = rx_opt->smc_ok;
#endif
}

struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
				      struct sock *sk_listener,
				      bool attach_listener)
{
	struct request_sock *req = reqsk_alloc(ops, sk_listener,
					       attach_listener);

	if (req) {
		struct inet_request_sock *ireq = inet_rsk(req);

		ireq->ireq_opt = NULL;
#if IS_ENABLED(CONFIG_IPV6)
		ireq->pktopts = NULL;
#endif
		atomic64_set(&ireq->ir_cookie, 0);
		ireq->ireq_state = TCP_NEW_SYN_RECV;
		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
		ireq->ireq_family = sk_listener->sk_family;
	}

	return req;
}
EXPORT_SYMBOL(inet_reqsk_alloc);

/*
 * Return true if a syncookie should be sent
 */
static bool tcp_syn_flood_action(const struct sock *sk,
				 const struct sk_buff *skb,
				 const char *proto)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct net *net = sock_net(sk);

#ifdef CONFIG_SYN_COOKIES
	if (net->ipv4.sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	if (!queue->synflood_warned &&
	    net->ipv4.sysctl_tcp_syncookies != 2 &&
	    xchg(&queue->synflood_warned, 1) == 0)
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);

	return want_cookie;
}

static void tcp_reqsk_record_syn(const struct sock *sk,
				 struct request_sock *req,
				 const struct sk_buff *skb)
{
	if (tcp_sk(sk)->save_syn) {
		u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
		u32 *copy;

		copy = kmalloc(len + sizeof(u32), GFP_ATOMIC);
		if (copy) {
			copy[0] = len;
			memcpy(&copy[1], skb_network_header(skb), len);
			req->saved_syn = copy;
		}
	}
}

int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb)
{
	struct tcp_fastopen_cookie foc = { .len = -1 };
	__u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct sock *fastopen_sk = NULL;
	struct request_sock *req;
	bool want_cookie = false;
	struct dst_entry *dst;
	struct flowi fl;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and the peer is
	 * evidently a real one.
	 */
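	/* sysctl_tcp_syncookies == 2 means cookies are sent out
	 * unconditionally, == 1 only once the request queue has filled
	 * up, and == 0 never (excess SYNs are dropped instead).
	 */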
6220 */ 6221 if ((net->ipv4.sysctl_tcp_syncookies == 2 || 6222 inet_csk_reqsk_queue_is_full(sk)) && !isn) { 6223 want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); 6224 if (!want_cookie) 6225 goto drop; 6226 } 6227 6228 if (sk_acceptq_is_full(sk)) { 6229 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); 6230 goto drop; 6231 } 6232 6233 req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie); 6234 if (!req) 6235 goto drop; 6236 6237 tcp_rsk(req)->af_specific = af_ops; 6238 tcp_rsk(req)->ts_off = 0; 6239 6240 tcp_clear_options(&tmp_opt); 6241 tmp_opt.mss_clamp = af_ops->mss_clamp; 6242 tmp_opt.user_mss = tp->rx_opt.user_mss; 6243 tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, 6244 want_cookie ? NULL : &foc); 6245 6246 if (want_cookie && !tmp_opt.saw_tstamp) 6247 tcp_clear_options(&tmp_opt); 6248 6249 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 6250 tcp_openreq_init(req, &tmp_opt, skb, sk); 6251 inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent; 6252 6253 /* Note: tcp_v6_init_req() might override ir_iif for link locals */ 6254 inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb); 6255 6256 af_ops->init_req(req, sk, skb); 6257 6258 if (security_inet_conn_request(sk, skb, req)) 6259 goto drop_and_free; 6260 6261 if (tmp_opt.tstamp_ok) 6262 tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb); 6263 6264 dst = af_ops->route_req(sk, &fl, req); 6265 if (!dst) 6266 goto drop_and_free; 6267 6268 if (!want_cookie && !isn) { 6269 /* Kill the following clause, if you dislike this way. */ 6270 if (!net->ipv4.sysctl_tcp_syncookies && 6271 (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < 6272 (net->ipv4.sysctl_max_syn_backlog >> 2)) && 6273 !tcp_peer_is_proven(req, dst)) { 6274 /* Without syncookies last quarter of 6275 * backlog is filled with destinations, 6276 * proven to be alive. 6277 * It means that we continue to communicate 6278 * to destinations, already remembered 6279 * to the moment of synflood. 6280 */ 6281 pr_drop_req(req, ntohs(tcp_hdr(skb)->source), 6282 rsk_ops->family); 6283 goto drop_and_release; 6284 } 6285 6286 isn = af_ops->init_seq(skb); 6287 } 6288 6289 tcp_ecn_create_request(req, skb, sk, dst); 6290 6291 if (want_cookie) { 6292 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); 6293 req->cookie_ts = tmp_opt.tstamp_ok; 6294 if (!tmp_opt.tstamp_ok) 6295 inet_rsk(req)->ecn_ok = 0; 6296 } 6297 6298 tcp_rsk(req)->snt_isn = isn; 6299 tcp_rsk(req)->txhash = net_tx_rndhash(); 6300 tcp_openreq_init_rwin(req, sk, dst); 6301 if (!want_cookie) { 6302 tcp_reqsk_record_syn(sk, req, skb); 6303 fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); 6304 } 6305 if (fastopen_sk) { 6306 af_ops->send_synack(fastopen_sk, dst, &fl, req, 6307 &foc, TCP_SYNACK_FASTOPEN); 6308 /* Add the child socket directly into the accept queue */ 6309 inet_csk_reqsk_queue_add(sk, req, fastopen_sk); 6310 sk->sk_data_ready(sk); 6311 bh_unlock_sock(fastopen_sk); 6312 sock_put(fastopen_sk); 6313 } else { 6314 tcp_rsk(req)->tfo_listener = false; 6315 if (!want_cookie) 6316 inet_csk_reqsk_queue_hash_add(sk, req, 6317 tcp_timeout_init((struct sock *)req)); 6318 af_ops->send_synack(sk, dst, &fl, req, &foc, 6319 !want_cookie ? TCP_SYNACK_NORMAL : 6320 TCP_SYNACK_COOKIE); 6321 if (want_cookie) { 6322 reqsk_free(req); 6323 return 0; 6324 } 6325 } 6326 reqsk_put(req); 6327 return 0; 6328 6329 drop_and_release: 6330 dst_release(dst); 6331 drop_and_free: 6332 reqsk_free(req); 6333 drop: 6334 tcp_listendrop(sk); 6335 return 0; 6336 } 6337 EXPORT_SYMBOL(tcp_conn_request); 6338