1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Implementation of the Transmission Control Protocol(TCP). 8 * 9 * Authors: Ross Biro 10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Mark Evans, <evansmp@uhura.aston.ac.uk> 12 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Florian La Roche, <flla@stud.uni-sb.de> 14 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 15 * Linus Torvalds, <torvalds@cs.helsinki.fi> 16 * Alan Cox, <gw4pts@gw4pts.ampr.org> 17 * Matthew Dillon, <dillon@apollo.west.oic.com> 18 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 19 * Jorge Cwik, <jorge@laser.satlink.net> 20 */ 21 22 /* 23 * Changes: 24 * Pedro Roque : Fast Retransmit/Recovery. 25 * Two receive queues. 26 * Retransmit queue handled by TCP. 27 * Better retransmit timer handling. 28 * New congestion avoidance. 29 * Header prediction. 30 * Variable renaming. 31 * 32 * Eric : Fast Retransmit. 33 * Randy Scott : MSS option defines. 34 * Eric Schenk : Fixes to slow start algorithm. 35 * Eric Schenk : Yet another double ACK bug. 36 * Eric Schenk : Delayed ACK bug fixes. 37 * Eric Schenk : Floyd style fast retrans war avoidance. 38 * David S. Miller : Don't allow zero congestion window. 39 * Eric Schenk : Fix retransmitter so that it sends 40 * next packet on ack of previous packet. 41 * Andi Kleen : Moved open_request checking here 42 * and process RSTs for open_requests. 43 * Andi Kleen : Better prune_queue, and other fixes. 44 * Andrey Savochkin: Fix RTT measurements in the presence of 45 * timestamps. 46 * Andrey Savochkin: Check sequence numbers correctly when 47 * removing SACKs due to in sequence incoming 48 * data segments. 49 * Andi Kleen: Make sure we never ack data there is not 50 * enough room for. Also make this condition 51 * a fatal error if it might still happen. 52 * Andi Kleen: Add tcp_measure_rcv_mss to make 53 * connections with MSS<min(MTU,ann. MSS) 54 * work without delayed acks. 55 * Andi Kleen: Process packets with PSH set in the 56 * fast path. 57 * J Hadi Salim: ECN support 58 * Andrei Gurtov, 59 * Pasi Sarolahti, 60 * Panu Kuhlberg: Experimental audit of TCP (re)transmission 61 * engine. Lots of bugs are found. 62 * Pasi Sarolahti: F-RTO for dealing with spurious RTOs 63 */ 64 65 #define pr_fmt(fmt) "TCP: " fmt 66 67 #include <linux/mm.h> 68 #include <linux/slab.h> 69 #include <linux/module.h> 70 #include <linux/sysctl.h> 71 #include <linux/kernel.h> 72 #include <linux/prefetch.h> 73 #include <net/dst.h> 74 #include <net/tcp.h> 75 #include <net/inet_common.h> 76 #include <linux/ipsec.h> 77 #include <asm/unaligned.h> 78 #include <linux/errqueue.h> 79 #include <trace/events/tcp.h> 80 #include <linux/static_key.h> 81 82 int sysctl_tcp_max_orphans __read_mostly = NR_FILE; 83 84 #define FLAG_DATA 0x01 /* Incoming frame contained data. */ 85 #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ 86 #define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */ 87 #define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */ 88 #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */ 89 #define FLAG_DATA_SACKED 0x20 /* New SACK. 
*/ 90 #define FLAG_ECE 0x40 /* ECE in this ACK */ 91 #define FLAG_LOST_RETRANS 0x80 /* This ACK marks some retransmission lost */ 92 #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ 93 #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ 94 #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ 95 #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ 96 #define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */ 97 #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ 98 #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ 99 #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ 100 101 #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) 102 #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) 103 #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK) 104 #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) 105 106 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) 107 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) 108 109 #define REXMIT_NONE 0 /* no loss recovery to do */ 110 #define REXMIT_LOST 1 /* retransmit packets marked lost */ 111 #define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */ 112 113 static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb, 114 unsigned int len) 115 { 116 static bool __once __read_mostly; 117 118 if (!__once) { 119 struct net_device *dev; 120 121 __once = true; 122 123 rcu_read_lock(); 124 dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif); 125 if (!dev || len >= dev->mtu) 126 pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n", 127 dev ? dev->name : "Unknown driver"); 128 rcu_read_unlock(); 129 } 130 } 131 132 /* Adapt the MSS value used to make delayed ack decision to the 133 * real world. 134 */ 135 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) 136 { 137 struct inet_connection_sock *icsk = inet_csk(sk); 138 const unsigned int lss = icsk->icsk_ack.last_seg_size; 139 unsigned int len; 140 141 icsk->icsk_ack.last_seg_size = 0; 142 143 /* skb->len may jitter because of SACKs, even if peer 144 * sends good full-sized frames. 145 */ 146 len = skb_shinfo(skb)->gso_size ? : skb->len; 147 if (len >= icsk->icsk_ack.rcv_mss) { 148 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, 149 tcp_sk(sk)->advmss); 150 /* Account for possibly-removed options */ 151 if (unlikely(len > icsk->icsk_ack.rcv_mss + 152 MAX_TCP_OPTION_SPACE)) 153 tcp_gro_dev_warn(sk, skb, len); 154 } else { 155 /* Otherwise, we make more careful check taking into account, 156 * that SACKs block is variable. 157 * 158 * "len" is invariant segment length, including TCP header. 159 */ 160 len += skb->data - skb_transport_header(skb); 161 if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) || 162 /* If PSH is not set, packet should be 163 * full sized, provided peer TCP is not badly broken. 164 * This observation (if it is correct 8)) allows 165 * to handle super-low mtu links fairly. 166 */ 167 (len >= TCP_MIN_MSS + sizeof(struct tcphdr) && 168 !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { 169 /* Subtract also invariant (if peer is RFC compliant), 170 * tcp header plus fixed timestamp option length. 171 * Resulting "len" is MSS free of SACK jitter. 
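 *
 * A worked example with illustrative numbers: assuming timestamps were
 * negotiated, tp->tcp_header_len is 20 + 12 = 32 bytes.  A segment with
 * 1448 bytes of payload and a 32-byte header gives len = 1480 here, and
 * 1480 - 32 = 1448 below.  That value is cached in last_seg_size and is
 * promoted to rcv_mss only when two consecutive segments agree (len == lss).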
172 */ 173 len -= tcp_sk(sk)->tcp_header_len; 174 icsk->icsk_ack.last_seg_size = len; 175 if (len == lss) { 176 icsk->icsk_ack.rcv_mss = len; 177 return; 178 } 179 } 180 if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) 181 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; 182 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; 183 } 184 } 185 186 static void tcp_incr_quickack(struct sock *sk) 187 { 188 struct inet_connection_sock *icsk = inet_csk(sk); 189 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); 190 191 if (quickacks == 0) 192 quickacks = 2; 193 if (quickacks > icsk->icsk_ack.quick) 194 icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); 195 } 196 197 static void tcp_enter_quickack_mode(struct sock *sk) 198 { 199 struct inet_connection_sock *icsk = inet_csk(sk); 200 tcp_incr_quickack(sk); 201 icsk->icsk_ack.pingpong = 0; 202 icsk->icsk_ack.ato = TCP_ATO_MIN; 203 } 204 205 /* Send ACKs quickly, if "quick" count is not exhausted 206 * and the session is not interactive. 207 */ 208 209 static bool tcp_in_quickack_mode(struct sock *sk) 210 { 211 const struct inet_connection_sock *icsk = inet_csk(sk); 212 const struct dst_entry *dst = __sk_dst_get(sk); 213 214 return (dst && dst_metric(dst, RTAX_QUICKACK)) || 215 (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong); 216 } 217 218 static void tcp_ecn_queue_cwr(struct tcp_sock *tp) 219 { 220 if (tp->ecn_flags & TCP_ECN_OK) 221 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; 222 } 223 224 static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) 225 { 226 if (tcp_hdr(skb)->cwr) 227 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 228 } 229 230 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) 231 { 232 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 233 } 234 235 static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) 236 { 237 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { 238 case INET_ECN_NOT_ECT: 239 /* Funny extension: if ECT is not set on a segment, 240 * and we already seen ECT on a previous segment, 241 * it is probably a retransmit. 242 */ 243 if (tp->ecn_flags & TCP_ECN_SEEN) 244 tcp_enter_quickack_mode((struct sock *)tp); 245 break; 246 case INET_ECN_CE: 247 if (tcp_ca_needs_ecn((struct sock *)tp)) 248 tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); 249 250 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { 251 /* Better not delay acks, sender can have a very low cwnd */ 252 tcp_enter_quickack_mode((struct sock *)tp); 253 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; 254 } 255 tp->ecn_flags |= TCP_ECN_SEEN; 256 break; 257 default: 258 if (tcp_ca_needs_ecn((struct sock *)tp)) 259 tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); 260 tp->ecn_flags |= TCP_ECN_SEEN; 261 break; 262 } 263 } 264 265 static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) 266 { 267 if (tp->ecn_flags & TCP_ECN_OK) 268 __tcp_ecn_check_ce(tp, skb); 269 } 270 271 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) 272 { 273 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) 274 tp->ecn_flags &= ~TCP_ECN_OK; 275 } 276 277 static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) 278 { 279 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) 280 tp->ecn_flags &= ~TCP_ECN_OK; 281 } 282 283 static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) 284 { 285 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) 286 return true; 287 return false; 288 } 289 290 /* Buffer size and advertised window tuning. 291 * 292 * 1. 
Tuning sk->sk_sndbuf, when connection enters established state. 293 */ 294 295 static void tcp_sndbuf_expand(struct sock *sk) 296 { 297 const struct tcp_sock *tp = tcp_sk(sk); 298 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 299 int sndmem, per_mss; 300 u32 nr_segs; 301 302 /* Worst case is non GSO/TSO : each frame consumes one skb 303 * and skb->head is kmalloced using power of two area of memory 304 */ 305 per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + 306 MAX_TCP_HEADER + 307 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 308 309 per_mss = roundup_pow_of_two(per_mss) + 310 SKB_DATA_ALIGN(sizeof(struct sk_buff)); 311 312 nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd); 313 nr_segs = max_t(u32, nr_segs, tp->reordering + 1); 314 315 /* Fast Recovery (RFC 5681 3.2) : 316 * Cubic needs 1.7 factor, rounded to 2 to include 317 * extra cushion (application might react slowly to POLLOUT) 318 */ 319 sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2; 320 sndmem *= nr_segs * per_mss; 321 322 if (sk->sk_sndbuf < sndmem) 323 sk->sk_sndbuf = min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]); 324 } 325 326 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh) 327 * 328 * All tcp_full_space() is split to two parts: "network" buffer, allocated 329 * forward and advertised in receiver window (tp->rcv_wnd) and 330 * "application buffer", required to isolate scheduling/application 331 * latencies from network. 332 * window_clamp is maximal advertised window. It can be less than 333 * tcp_full_space(), in this case tcp_full_space() - window_clamp 334 * is reserved for "application" buffer. The less window_clamp is 335 * the smoother our behaviour from viewpoint of network, but the lower 336 * throughput and the higher sensitivity of the connection to losses. 8) 337 * 338 * rcv_ssthresh is more strict window_clamp used at "slow start" 339 * phase to predict further behaviour of this connection. 340 * It is used for two goals: 341 * - to enforce header prediction at sender, even when application 342 * requires some significant "application buffer". It is check #1. 343 * - to prevent pruning of receive queue because of misprediction 344 * of receiver window. Check #2. 345 * 346 * The scheme does not work when sender sends good segments opening 347 * window and then starts to feed us spaghetti. But it should work 348 * in common situations. Otherwise, we have to rely on queue collapsing. 349 */ 350 351 /* Slow part of check#2. */ 352 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) 353 { 354 struct tcp_sock *tp = tcp_sk(sk); 355 /* Optimize this! */ 356 int truesize = tcp_win_from_space(sk, skb->truesize) >> 1; 357 int window = tcp_win_from_space(sk, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; 358 359 while (tp->rcv_ssthresh <= window) { 360 if (truesize <= skb->len) 361 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; 362 363 truesize >>= 1; 364 window >>= 1; 365 } 366 return 0; 367 } 368 369 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) 370 { 371 struct tcp_sock *tp = tcp_sk(sk); 372 373 /* Check #1 */ 374 if (tp->rcv_ssthresh < tp->window_clamp && 375 (int)tp->rcv_ssthresh < tcp_space(sk) && 376 !tcp_under_memory_pressure(sk)) { 377 int incr; 378 379 /* Check #2. Increase window, if skb with such overhead 380 * will fit to rcvbuf in future. 
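 *
 * Rough sketch of the decision that follows (illustrative, not exact):
 * when the skb's memory overhead is modest, i.e. tcp_win_from_space()
 * applied to skb->truesize does not exceed skb->len, rcv_ssthresh may
 * grow by 2 * advmss at once; otherwise __tcp_grow_window() halves the
 * scaled truesize and the tcp_rmem[2] based window in lock step and
 * grants 2 * rcv_mss only if the overhead fits before the shrinking
 * window falls below the current rcv_ssthresh.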
381 */ 382 if (tcp_win_from_space(sk, skb->truesize) <= skb->len) 383 incr = 2 * tp->advmss; 384 else 385 incr = __tcp_grow_window(sk, skb); 386 387 if (incr) { 388 incr = max_t(int, incr, 2 * skb->len); 389 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, 390 tp->window_clamp); 391 inet_csk(sk)->icsk_ack.quick |= 1; 392 } 393 } 394 } 395 396 /* 3. Tuning rcvbuf, when connection enters established state. */ 397 static void tcp_fixup_rcvbuf(struct sock *sk) 398 { 399 u32 mss = tcp_sk(sk)->advmss; 400 int rcvmem; 401 402 rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) * 403 tcp_default_init_rwnd(mss); 404 405 /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency 406 * Allow enough cushion so that sender is not limited by our window 407 */ 408 if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) 409 rcvmem <<= 2; 410 411 if (sk->sk_rcvbuf < rcvmem) 412 sk->sk_rcvbuf = min(rcvmem, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); 413 } 414 415 /* 4. Try to fixup all. It is made immediately after connection enters 416 * established state. 417 */ 418 void tcp_init_buffer_space(struct sock *sk) 419 { 420 int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win; 421 struct tcp_sock *tp = tcp_sk(sk); 422 int maxwin; 423 424 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) 425 tcp_fixup_rcvbuf(sk); 426 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) 427 tcp_sndbuf_expand(sk); 428 429 tp->rcvq_space.space = tp->rcv_wnd; 430 tcp_mstamp_refresh(tp); 431 tp->rcvq_space.time = tp->tcp_mstamp; 432 tp->rcvq_space.seq = tp->copied_seq; 433 434 maxwin = tcp_full_space(sk); 435 436 if (tp->window_clamp >= maxwin) { 437 tp->window_clamp = maxwin; 438 439 if (tcp_app_win && maxwin > 4 * tp->advmss) 440 tp->window_clamp = max(maxwin - 441 (maxwin >> tcp_app_win), 442 4 * tp->advmss); 443 } 444 445 /* Force reservation of one segment. */ 446 if (tcp_app_win && 447 tp->window_clamp > 2 * tp->advmss && 448 tp->window_clamp + tp->advmss > maxwin) 449 tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); 450 451 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); 452 tp->snd_cwnd_stamp = tcp_jiffies32; 453 } 454 455 /* 5. Recalculate window clamp after socket hit its memory bounds. */ 456 static void tcp_clamp_window(struct sock *sk) 457 { 458 struct tcp_sock *tp = tcp_sk(sk); 459 struct inet_connection_sock *icsk = inet_csk(sk); 460 struct net *net = sock_net(sk); 461 462 icsk->icsk_ack.quick = 0; 463 464 if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] && 465 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && 466 !tcp_under_memory_pressure(sk) && 467 sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) { 468 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), 469 net->ipv4.sysctl_tcp_rmem[2]); 470 } 471 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) 472 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); 473 } 474 475 /* Initialize RCV_MSS value. 476 * RCV_MSS is an our guess about MSS used by the peer. 477 * We haven't any direct information about the MSS. 478 * It's better to underestimate the RCV_MSS rather than overestimate. 479 * Overestimations make us ACKing less frequently than needed. 480 * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss(). 
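 *
 * A made-up example of the clamping done below: with advmss = 1460,
 * mss_cache = 536 and rcv_wnd = 8192, the guess starts at
 * min(1460, 536) = 536, stays at 536 after the rcv_wnd / 2 = 4096 and
 * TCP_MSS_DEFAULT (536) clamps, and the final max() against TCP_MIN_MSS
 * leaves 536.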
481 */ 482 void tcp_initialize_rcv_mss(struct sock *sk) 483 { 484 const struct tcp_sock *tp = tcp_sk(sk); 485 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); 486 487 hint = min(hint, tp->rcv_wnd / 2); 488 hint = min(hint, TCP_MSS_DEFAULT); 489 hint = max(hint, TCP_MIN_MSS); 490 491 inet_csk(sk)->icsk_ack.rcv_mss = hint; 492 } 493 EXPORT_SYMBOL(tcp_initialize_rcv_mss); 494 495 /* Receiver "autotuning" code. 496 * 497 * The algorithm for RTT estimation w/o timestamps is based on 498 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL. 499 * <http://public.lanl.gov/radiant/pubs.html#DRS> 500 * 501 * More detail on this code can be found at 502 * <http://staff.psc.edu/jheffner/>, 503 * though this reference is out of date. A new paper 504 * is pending. 505 */ 506 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) 507 { 508 u32 new_sample = tp->rcv_rtt_est.rtt_us; 509 long m = sample; 510 511 if (new_sample != 0) { 512 /* If we sample in larger samples in the non-timestamp 513 * case, we could grossly overestimate the RTT especially 514 * with chatty applications or bulk transfer apps which 515 * are stalled on filesystem I/O. 516 * 517 * Also, since we are only going for a minimum in the 518 * non-timestamp case, we do not smooth things out 519 * else with timestamps disabled convergence takes too 520 * long. 521 */ 522 if (!win_dep) { 523 m -= (new_sample >> 3); 524 new_sample += m; 525 } else { 526 m <<= 3; 527 if (m < new_sample) 528 new_sample = m; 529 } 530 } else { 531 /* No previous measure. */ 532 new_sample = m << 3; 533 } 534 535 tp->rcv_rtt_est.rtt_us = new_sample; 536 } 537 538 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) 539 { 540 u32 delta_us; 541 542 if (tp->rcv_rtt_est.time == 0) 543 goto new_measure; 544 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) 545 return; 546 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); 547 if (!delta_us) 548 delta_us = 1; 549 tcp_rcv_rtt_update(tp, delta_us, 1); 550 551 new_measure: 552 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; 553 tp->rcv_rtt_est.time = tp->tcp_mstamp; 554 } 555 556 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, 557 const struct sk_buff *skb) 558 { 559 struct tcp_sock *tp = tcp_sk(sk); 560 561 if (tp->rx_opt.rcv_tsecr && 562 (TCP_SKB_CB(skb)->end_seq - 563 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { 564 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; 565 u32 delta_us; 566 567 if (!delta) 568 delta = 1; 569 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); 570 tcp_rcv_rtt_update(tp, delta_us, 0); 571 } 572 } 573 574 /* 575 * This function should be called every time data is copied to user space. 576 * It calculates the appropriate TCP receive buffer space. 
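 *
 * Condensed view of the sizing done below (a sketch of the code, not a
 * normative formula): with copied = bytes copied to the user in the last
 * RTT and space = the previous estimate,
 *
 *	rcvwin  = 2 * copied + 16 * advmss
 *	rcvwin += 2 * rcvwin * (copied - space) / space
 *	rcvbuf  = min((rcvwin / advmss) * truesize_of_one_advmss_skb,
 *		      tcp_rmem[2])
 *
 * and sk_rcvbuf is only ever raised, never lowered, by this path.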
577 */ 578 void tcp_rcv_space_adjust(struct sock *sk) 579 { 580 struct tcp_sock *tp = tcp_sk(sk); 581 u32 copied; 582 int time; 583 584 tcp_mstamp_refresh(tp); 585 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); 586 if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) 587 return; 588 589 /* Number of bytes copied to user in last RTT */ 590 copied = tp->copied_seq - tp->rcvq_space.seq; 591 if (copied <= tp->rcvq_space.space) 592 goto new_measure; 593 594 /* A bit of theory : 595 * copied = bytes received in previous RTT, our base window 596 * To cope with packet losses, we need a 2x factor 597 * To cope with slow start, and sender growing its cwin by 100 % 598 * every RTT, we need a 4x factor, because the ACK we are sending 599 * now is for the next RTT, not the current one : 600 * <prev RTT . ><current RTT .. ><next RTT .... > 601 */ 602 603 if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf && 604 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 605 int rcvmem, rcvbuf; 606 u64 rcvwin, grow; 607 608 /* minimal window to cope with packet losses, assuming 609 * steady state. Add some cushion because of small variations. 610 */ 611 rcvwin = ((u64)copied << 1) + 16 * tp->advmss; 612 613 /* Accommodate for sender rate increase (eg. slow start) */ 614 grow = rcvwin * (copied - tp->rcvq_space.space); 615 do_div(grow, tp->rcvq_space.space); 616 rcvwin += (grow << 1); 617 618 rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); 619 while (tcp_win_from_space(sk, rcvmem) < tp->advmss) 620 rcvmem += 128; 621 622 do_div(rcvwin, tp->advmss); 623 rcvbuf = min_t(u64, rcvwin * rcvmem, 624 sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); 625 if (rcvbuf > sk->sk_rcvbuf) { 626 sk->sk_rcvbuf = rcvbuf; 627 628 /* Make the window clamp follow along. */ 629 tp->window_clamp = tcp_win_from_space(sk, rcvbuf); 630 } 631 } 632 tp->rcvq_space.space = copied; 633 634 new_measure: 635 tp->rcvq_space.seq = tp->copied_seq; 636 tp->rcvq_space.time = tp->tcp_mstamp; 637 } 638 639 /* There is something which you must keep in mind when you analyze the 640 * behavior of the tp->ato delayed ack timeout interval. When a 641 * connection starts up, we want to ack as quickly as possible. The 642 * problem is that "good" TCP's do slow start at the beginning of data 643 * transmission. The means that until we send the first few ACK's the 644 * sender will sit on his end and only queue most of his data, because 645 * he can only send snd_cwnd unacked packets at any given time. For 646 * each ACK we send, he increments snd_cwnd and transmits more of his 647 * queue. -DaveM 648 */ 649 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) 650 { 651 struct tcp_sock *tp = tcp_sk(sk); 652 struct inet_connection_sock *icsk = inet_csk(sk); 653 u32 now; 654 655 inet_csk_schedule_ack(sk); 656 657 tcp_measure_rcv_mss(sk, skb); 658 659 tcp_rcv_rtt_measure(tp); 660 661 now = tcp_jiffies32; 662 663 if (!icsk->icsk_ack.ato) { 664 /* The _first_ data packet received, initialize 665 * delayed ACK engine. 666 */ 667 tcp_incr_quickack(sk); 668 icsk->icsk_ack.ato = TCP_ATO_MIN; 669 } else { 670 int m = now - icsk->icsk_ack.lrcvtime; 671 672 if (m <= TCP_ATO_MIN / 2) { 673 /* The fastest case is the first. */ 674 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; 675 } else if (m < icsk->icsk_ack.ato) { 676 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; 677 if (icsk->icsk_ack.ato > icsk->icsk_rto) 678 icsk->icsk_ack.ato = icsk->icsk_rto; 679 } else if (m > icsk->icsk_rto) { 680 /* Too long gap. 
Apparently sender failed to 681 * restart window, so that we send ACKs quickly. 682 */ 683 tcp_incr_quickack(sk); 684 sk_mem_reclaim(sk); 685 } 686 } 687 icsk->icsk_ack.lrcvtime = now; 688 689 tcp_ecn_check_ce(tp, skb); 690 691 if (skb->len >= 128) 692 tcp_grow_window(sk, skb); 693 } 694 695 /* Called to compute a smoothed rtt estimate. The data fed to this 696 * routine either comes from timestamps, or from segments that were 697 * known _not_ to have been retransmitted [see Karn/Partridge 698 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88 699 * piece by Van Jacobson. 700 * NOTE: the next three routines used to be one big routine. 701 * To save cycles in the RFC 1323 implementation it was better to break 702 * it up into three procedures. -- erics 703 */ 704 static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) 705 { 706 struct tcp_sock *tp = tcp_sk(sk); 707 long m = mrtt_us; /* RTT */ 708 u32 srtt = tp->srtt_us; 709 710 /* The following amusing code comes from Jacobson's 711 * article in SIGCOMM '88. Note that rtt and mdev 712 * are scaled versions of rtt and mean deviation. 713 * This is designed to be as fast as possible 714 * m stands for "measurement". 715 * 716 * On a 1990 paper the rto value is changed to: 717 * RTO = rtt + 4 * mdev 718 * 719 * Funny. This algorithm seems to be very broken. 720 * These formulae increase RTO, when it should be decreased, increase 721 * too slowly, when it should be increased quickly, decrease too quickly 722 * etc. I guess in BSD RTO takes ONE value, so that it is absolutely 723 * does not matter how to _calculate_ it. Seems, it was trap 724 * that VJ failed to avoid. 8) 725 */ 726 if (srtt != 0) { 727 m -= (srtt >> 3); /* m is now error in rtt est */ 728 srtt += m; /* rtt = 7/8 rtt + 1/8 new */ 729 if (m < 0) { 730 m = -m; /* m is now abs(error) */ 731 m -= (tp->mdev_us >> 2); /* similar update on mdev */ 732 /* This is similar to one of Eifel findings. 733 * Eifel blocks mdev updates when rtt decreases. 734 * This solution is a bit different: we use finer gain 735 * for mdev in this case (alpha*beta). 736 * Like Eifel it also prevents growth of rto, 737 * but also it limits too fast rto decreases, 738 * happening in pure Eifel. 739 */ 740 if (m > 0) 741 m >>= 3; 742 } else { 743 m -= (tp->mdev_us >> 2); /* similar update on mdev */ 744 } 745 tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ 746 if (tp->mdev_us > tp->mdev_max_us) { 747 tp->mdev_max_us = tp->mdev_us; 748 if (tp->mdev_max_us > tp->rttvar_us) 749 tp->rttvar_us = tp->mdev_max_us; 750 } 751 if (after(tp->snd_una, tp->rtt_seq)) { 752 if (tp->mdev_max_us < tp->rttvar_us) 753 tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2; 754 tp->rtt_seq = tp->snd_nxt; 755 tp->mdev_max_us = tcp_rto_min_us(sk); 756 } 757 } else { 758 /* no previous measure. */ 759 srtt = m << 3; /* take the measured time to be rtt */ 760 tp->mdev_us = m << 1; /* make sure rto = 3*rtt */ 761 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); 762 tp->mdev_max_us = tp->rttvar_us; 763 tp->rtt_seq = tp->snd_nxt; 764 } 765 tp->srtt_us = max(1U, srtt); 766 } 767 768 static void tcp_update_pacing_rate(struct sock *sk) 769 { 770 const struct tcp_sock *tp = tcp_sk(sk); 771 u64 rate; 772 773 /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */ 774 rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3); 775 776 /* current rate is (cwnd * mss) / srtt 777 * In Slow Start [1], set sk_pacing_rate to 200 % the current rate. 
778 * In Congestion Avoidance phase, set it to 120 % the current rate. 779 * 780 * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh) 781 * If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching 782 * end of slow start and should slow down. 783 */ 784 if (tp->snd_cwnd < tp->snd_ssthresh / 2) 785 rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio; 786 else 787 rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio; 788 789 rate *= max(tp->snd_cwnd, tp->packets_out); 790 791 if (likely(tp->srtt_us)) 792 do_div(rate, tp->srtt_us); 793 794 /* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate 795 * without any lock. We want to make sure compiler wont store 796 * intermediate values in this location. 797 */ 798 WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate, 799 sk->sk_max_pacing_rate)); 800 } 801 802 /* Calculate rto without backoff. This is the second half of Van Jacobson's 803 * routine referred to above. 804 */ 805 static void tcp_set_rto(struct sock *sk) 806 { 807 const struct tcp_sock *tp = tcp_sk(sk); 808 /* Old crap is replaced with new one. 8) 809 * 810 * More seriously: 811 * 1. If rtt variance happened to be less 50msec, it is hallucination. 812 * It cannot be less due to utterly erratic ACK generation made 813 * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ 814 * to do with delayed acks, because at cwnd>2 true delack timeout 815 * is invisible. Actually, Linux-2.4 also generates erratic 816 * ACKs in some circumstances. 817 */ 818 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); 819 820 /* 2. Fixups made earlier cannot be right. 821 * If we do not estimate RTO correctly without them, 822 * all the algo is pure shit and should be replaced 823 * with correct one. It is exactly, which we pretend to do. 824 */ 825 826 /* NOTE: clamping at TCP_RTO_MIN is not required, current algo 827 * guarantees that rto is higher. 828 */ 829 tcp_bound_rto(sk); 830 } 831 832 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) 833 { 834 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); 835 836 if (!cwnd) 837 cwnd = TCP_INIT_CWND; 838 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); 839 } 840 841 /* Take a notice that peer is sending D-SACKs */ 842 static void tcp_dsack_seen(struct tcp_sock *tp) 843 { 844 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; 845 tp->rack.dsack_seen = 1; 846 } 847 848 /* It's reordering when higher sequence was delivered (i.e. sacked) before 849 * some lower never-retransmitted sequence ("low_seq"). The maximum reordering 850 * distance is approximated in full-mss packet distance ("reordering"). 851 */ 852 static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq, 853 const int ts) 854 { 855 struct tcp_sock *tp = tcp_sk(sk); 856 const u32 mss = tp->mss_cache; 857 u32 fack, metric; 858 859 fack = tcp_highest_sack_seq(tp); 860 if (!before(low_seq, fack)) 861 return; 862 863 metric = fack - low_seq; 864 if ((metric > tp->reordering * mss) && mss) { 865 #if FASTRETRANS_DEBUG > 1 866 pr_debug("Disorder%d %d %u f%u s%u rr%d\n", 867 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, 868 tp->reordering, 869 0, 870 tp->sacked_out, 871 tp->undo_marker ? tp->undo_retrans : 0); 872 #endif 873 tp->reordering = min_t(u32, (metric + mss - 1) / mss, 874 sock_net(sk)->ipv4.sysctl_tcp_max_reordering); 875 } 876 877 tp->rack.reord = 1; 878 /* This exciting event is worth to be remembered. 8) */ 879 NET_INC_STATS(sock_net(sk), 880 ts ? 
LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER); 881 } 882 883 /* This must be called before lost_out is incremented */ 884 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) 885 { 886 if (!tp->retransmit_skb_hint || 887 before(TCP_SKB_CB(skb)->seq, 888 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) 889 tp->retransmit_skb_hint = skb; 890 } 891 892 /* Sum the number of packets on the wire we have marked as lost. 893 * There are two cases we care about here: 894 * a) Packet hasn't been marked lost (nor retransmitted), 895 * and this is the first loss. 896 * b) Packet has been marked both lost and retransmitted, 897 * and this means we think it was lost again. 898 */ 899 static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb) 900 { 901 __u8 sacked = TCP_SKB_CB(skb)->sacked; 902 903 if (!(sacked & TCPCB_LOST) || 904 ((sacked & TCPCB_LOST) && (sacked & TCPCB_SACKED_RETRANS))) 905 tp->lost += tcp_skb_pcount(skb); 906 } 907 908 static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) 909 { 910 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { 911 tcp_verify_retransmit_hint(tp, skb); 912 913 tp->lost_out += tcp_skb_pcount(skb); 914 tcp_sum_lost(tp, skb); 915 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 916 } 917 } 918 919 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb) 920 { 921 tcp_verify_retransmit_hint(tp, skb); 922 923 tcp_sum_lost(tp, skb); 924 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { 925 tp->lost_out += tcp_skb_pcount(skb); 926 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 927 } 928 } 929 930 /* This procedure tags the retransmission queue when SACKs arrive. 931 * 932 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). 933 * Packets in queue with these bits set are counted in variables 934 * sacked_out, retrans_out and lost_out, correspondingly. 935 * 936 * Valid combinations are: 937 * Tag InFlight Description 938 * 0 1 - orig segment is in flight. 939 * S 0 - nothing flies, orig reached receiver. 940 * L 0 - nothing flies, orig lost by net. 941 * R 2 - both orig and retransmit are in flight. 942 * L|R 1 - orig is lost, retransmit is in flight. 943 * S|R 1 - orig reached receiver, retrans is still in flight. 944 * (L|S|R is logically valid, it could occur when L|R is sacked, 945 * but it is equivalent to plain S and code short-curcuits it to S. 946 * L|S is logically invalid, it would mean -1 packet in flight 8)) 947 * 948 * These 6 states form finite state machine, controlled by the following events: 949 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue()) 950 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue()) 951 * 3. Loss detection event of two flavors: 952 * A. Scoreboard estimator decided the packet is lost. 953 * A'. Reno "three dupacks" marks head of queue lost. 954 * B. SACK arrives sacking SND.NXT at the moment, when the 955 * segment was retransmitted. 956 * 4. D-SACK added new rule: D-SACK changes any tag to S. 957 * 958 * It is pleasant to note, that state diagram turns out to be commutative, 959 * so that we are allowed not to be bothered by order of our actions, 960 * when multiple events arrive simultaneously. (see the function below). 961 * 962 * Reordering detection. 963 * -------------------- 964 * Reordering metric is maximal distance, which a packet can be displaced 965 * in packet stream. With SACKs we can estimate it: 966 * 967 * 1. 
SACK fills an old hole and the corresponding segment was never
 *    retransmitted -> reordering.  Alas, we cannot use it when the
 *    segment was retransmitted.
 * 2. The last flaw is solved with D-SACK.  A D-SACK arriving for a
 *    retransmitted and already SACKed segment -> reordering.
 * Neither heuristic is used in the Loss state, where we cannot account
 * for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits
 * within the expected sequence limits, i.e., it is between SND.UNA and
 * SND.NXT.  Note that SND.UNA is not included in the range even though
 * a block starting there would be valid, because it means the receiver
 * is rather inconsistent with itself, reporting SACK reneging when it
 * should advance SND.UNA.  Such a SACK block is, however, perfectly
 * valid in light of RFC 2018, which explicitly states that "SACK block
 * MUST reflect the newest segment.  Even if the newest segment is going
 * to be discarded ...", not that it looks very clever in case of the
 * head skb.  Due to potential receiver-driven attacks, we choose to
 * avoid immediately executing a walk of the write queue on reneging and
 * defer the head skb's loss recovery to the standard loss recovery
 * procedure that will eventually trigger (nothing forbids us doing this).
 *
 * This also guards against start_seq wrap-around.  The problem lies in
 * the fact that, though start_seq (s) is before end_seq (i.e., not
 * reversed), there is no guarantee that it will be before snd_nxt (n).
 * The problem happens when start_seq resides between the end_seq wrap
 * (e_w) and the snd_nxt wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                   |<--------...
 *
 * Current code wouldn't be vulnerable, but it's still better to discard
 * such crazy SACK blocks.  Doing this check for start_seq alone closes
 * the somewhat similar case (end_seq after the snd_nxt wrap), as the
 * earlier reversed check in the snd_nxt wrap -> snd_una region then
 * becomes "well defined", i.e., equal to the ideal case (infinite seqno
 * space without wrap-caused issues).
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest.
 * Yet again, a D-SACK block must not cross snd_una (for the same reason
 * as for the normal SACK blocks explained above).  But that is where
 * the simplicity ends: TCP might receive valid D-SACKs below that.  As
 * long as they reside fully below undo_marker they do not affect
 * behavior in any way and can therefore be safely ignored.  In rare
 * cases (which are more or less theoretical ones), the D-SACK will
 * nicely cross that boundary due to skb fragmentation and packet
 * reordering past the skb's retransmission.  To consider them correctly,
 * the acceptable range must be extended even further, though the exact
 * amount is rather hard to quantify.  However, tp->max_window can
 * be used as an exaggerated estimate.
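 *
 * Putting the rules above together (informal summary of the checks in
 * tcp_is_sackblock_valid() below):
 *
 *	plain SACK:  SND.UNA < start_seq < end_seq <= SND.NXT
 *	D-SACK:      start_seq < end_seq <= SND.UNA, and either
 *		     start_seq >= undo_marker, or end_seq > undo_marker
 *		     with start_seq no more than tp->max_window below end_seq.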
1022 */ 1023 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, 1024 u32 start_seq, u32 end_seq) 1025 { 1026 /* Too far in future, or reversed (interpretation is ambiguous) */ 1027 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) 1028 return false; 1029 1030 /* Nasty start_seq wrap-around check (see comments above) */ 1031 if (!before(start_seq, tp->snd_nxt)) 1032 return false; 1033 1034 /* In outstanding window? ...This is valid exit for D-SACKs too. 1035 * start_seq == snd_una is non-sensical (see comments above) 1036 */ 1037 if (after(start_seq, tp->snd_una)) 1038 return true; 1039 1040 if (!is_dsack || !tp->undo_marker) 1041 return false; 1042 1043 /* ...Then it's D-SACK, and must reside below snd_una completely */ 1044 if (after(end_seq, tp->snd_una)) 1045 return false; 1046 1047 if (!before(start_seq, tp->undo_marker)) 1048 return true; 1049 1050 /* Too old */ 1051 if (!after(end_seq, tp->undo_marker)) 1052 return false; 1053 1054 /* Undo_marker boundary crossing (overestimates a lot). Known already: 1055 * start_seq < undo_marker and end_seq >= undo_marker. 1056 */ 1057 return !before(start_seq, end_seq - tp->max_window); 1058 } 1059 1060 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, 1061 struct tcp_sack_block_wire *sp, int num_sacks, 1062 u32 prior_snd_una) 1063 { 1064 struct tcp_sock *tp = tcp_sk(sk); 1065 u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); 1066 u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); 1067 bool dup_sack = false; 1068 1069 if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { 1070 dup_sack = true; 1071 tcp_dsack_seen(tp); 1072 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV); 1073 } else if (num_sacks > 1) { 1074 u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); 1075 u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); 1076 1077 if (!after(end_seq_0, end_seq_1) && 1078 !before(start_seq_0, start_seq_1)) { 1079 dup_sack = true; 1080 tcp_dsack_seen(tp); 1081 NET_INC_STATS(sock_net(sk), 1082 LINUX_MIB_TCPDSACKOFORECV); 1083 } 1084 } 1085 1086 /* D-SACK for already forgotten data... Do dumb counting. */ 1087 if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 && 1088 !after(end_seq_0, prior_snd_una) && 1089 after(end_seq_0, tp->undo_marker)) 1090 tp->undo_retrans--; 1091 1092 return dup_sack; 1093 } 1094 1095 struct tcp_sacktag_state { 1096 u32 reord; 1097 /* Timestamps for earliest and latest never-retransmitted segment 1098 * that was SACKed. RTO needs the earliest RTT to stay conservative, 1099 * but congestion control should still get an accurate delay signal. 1100 */ 1101 u64 first_sackt; 1102 u64 last_sackt; 1103 struct rate_sample *rate; 1104 int flag; 1105 unsigned int mss_now; 1106 }; 1107 1108 /* Check if skb is fully within the SACK block. In presence of GSO skbs, 1109 * the incoming SACK may not exactly match but we can find smaller MSS 1110 * aligned portion of it that matches. Therefore we might need to fragment 1111 * which may fail and creates some hassle (caller must handle error case 1112 * returns). 
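 *
 * For instance (illustrative numbers): a two-segment GSO skb covering
 * [1000, 3896) with mss = 1448, hit by a SACK block [1000, 2448), is not
 * fully covered, so it is fragmented at 2448 and only the first
 * 1448-byte piece is tagged as SACKed; the remainder stays in the queue
 * untouched.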
1113 * 1114 * FIXME: this could be merged to shift decision code 1115 */ 1116 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, 1117 u32 start_seq, u32 end_seq) 1118 { 1119 int err; 1120 bool in_sack; 1121 unsigned int pkt_len; 1122 unsigned int mss; 1123 1124 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && 1125 !before(end_seq, TCP_SKB_CB(skb)->end_seq); 1126 1127 if (tcp_skb_pcount(skb) > 1 && !in_sack && 1128 after(TCP_SKB_CB(skb)->end_seq, start_seq)) { 1129 mss = tcp_skb_mss(skb); 1130 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); 1131 1132 if (!in_sack) { 1133 pkt_len = start_seq - TCP_SKB_CB(skb)->seq; 1134 if (pkt_len < mss) 1135 pkt_len = mss; 1136 } else { 1137 pkt_len = end_seq - TCP_SKB_CB(skb)->seq; 1138 if (pkt_len < mss) 1139 return -EINVAL; 1140 } 1141 1142 /* Round if necessary so that SACKs cover only full MSSes 1143 * and/or the remaining small portion (if present) 1144 */ 1145 if (pkt_len > mss) { 1146 unsigned int new_len = (pkt_len / mss) * mss; 1147 if (!in_sack && new_len < pkt_len) 1148 new_len += mss; 1149 pkt_len = new_len; 1150 } 1151 1152 if (pkt_len >= skb->len && !in_sack) 1153 return 0; 1154 1155 err = tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, 1156 pkt_len, mss, GFP_ATOMIC); 1157 if (err < 0) 1158 return err; 1159 } 1160 1161 return in_sack; 1162 } 1163 1164 /* Mark the given newly-SACKed range as such, adjusting counters and hints. */ 1165 static u8 tcp_sacktag_one(struct sock *sk, 1166 struct tcp_sacktag_state *state, u8 sacked, 1167 u32 start_seq, u32 end_seq, 1168 int dup_sack, int pcount, 1169 u64 xmit_time) 1170 { 1171 struct tcp_sock *tp = tcp_sk(sk); 1172 1173 /* Account D-SACK for retransmitted packet. */ 1174 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1175 if (tp->undo_marker && tp->undo_retrans > 0 && 1176 after(end_seq, tp->undo_marker)) 1177 tp->undo_retrans--; 1178 if ((sacked & TCPCB_SACKED_ACKED) && 1179 before(start_seq, state->reord)) 1180 state->reord = start_seq; 1181 } 1182 1183 /* Nothing to do; acked frame is about to be dropped (was ACKed). */ 1184 if (!after(end_seq, tp->snd_una)) 1185 return sacked; 1186 1187 if (!(sacked & TCPCB_SACKED_ACKED)) { 1188 tcp_rack_advance(tp, sacked, end_seq, xmit_time); 1189 1190 if (sacked & TCPCB_SACKED_RETRANS) { 1191 /* If the segment is not tagged as lost, 1192 * we do not clear RETRANS, believing 1193 * that retransmission is still in flight. 1194 */ 1195 if (sacked & TCPCB_LOST) { 1196 sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); 1197 tp->lost_out -= pcount; 1198 tp->retrans_out -= pcount; 1199 } 1200 } else { 1201 if (!(sacked & TCPCB_RETRANS)) { 1202 /* New sack for not retransmitted frame, 1203 * which was in hole. It is reordering. 1204 */ 1205 if (before(start_seq, 1206 tcp_highest_sack_seq(tp)) && 1207 before(start_seq, state->reord)) 1208 state->reord = start_seq; 1209 1210 if (!after(end_seq, tp->high_seq)) 1211 state->flag |= FLAG_ORIG_SACK_ACKED; 1212 if (state->first_sackt == 0) 1213 state->first_sackt = xmit_time; 1214 state->last_sackt = xmit_time; 1215 } 1216 1217 if (sacked & TCPCB_LOST) { 1218 sacked &= ~TCPCB_LOST; 1219 tp->lost_out -= pcount; 1220 } 1221 } 1222 1223 sacked |= TCPCB_SACKED_ACKED; 1224 state->flag |= FLAG_DATA_SACKED; 1225 tp->sacked_out += pcount; 1226 tp->delivered += pcount; /* Out-of-order packets delivered */ 1227 1228 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ 1229 if (tp->lost_skb_hint && 1230 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) 1231 tp->lost_cnt_hint += pcount; 1232 } 1233 1234 /* D-SACK. 
We can detect redundant retransmission in S|R and plain R 1235 * frames and clear it. undo_retrans is decreased above, L|R frames 1236 * are accounted above as well. 1237 */ 1238 if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) { 1239 sacked &= ~TCPCB_SACKED_RETRANS; 1240 tp->retrans_out -= pcount; 1241 } 1242 1243 return sacked; 1244 } 1245 1246 /* Shift newly-SACKed bytes from this skb to the immediately previous 1247 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. 1248 */ 1249 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev, 1250 struct sk_buff *skb, 1251 struct tcp_sacktag_state *state, 1252 unsigned int pcount, int shifted, int mss, 1253 bool dup_sack) 1254 { 1255 struct tcp_sock *tp = tcp_sk(sk); 1256 u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */ 1257 u32 end_seq = start_seq + shifted; /* end of newly-SACKed */ 1258 1259 BUG_ON(!pcount); 1260 1261 /* Adjust counters and hints for the newly sacked sequence 1262 * range but discard the return value since prev is already 1263 * marked. We must tag the range first because the seq 1264 * advancement below implicitly advances 1265 * tcp_highest_sack_seq() when skb is highest_sack. 1266 */ 1267 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, 1268 start_seq, end_seq, dup_sack, pcount, 1269 skb->skb_mstamp); 1270 tcp_rate_skb_delivered(sk, skb, state->rate); 1271 1272 if (skb == tp->lost_skb_hint) 1273 tp->lost_cnt_hint += pcount; 1274 1275 TCP_SKB_CB(prev)->end_seq += shifted; 1276 TCP_SKB_CB(skb)->seq += shifted; 1277 1278 tcp_skb_pcount_add(prev, pcount); 1279 BUG_ON(tcp_skb_pcount(skb) < pcount); 1280 tcp_skb_pcount_add(skb, -pcount); 1281 1282 /* When we're adding to gso_segs == 1, gso_size will be zero, 1283 * in theory this shouldn't be necessary but as long as DSACK 1284 * code can come after this skb later on it's better to keep 1285 * setting gso_size to something. 1286 */ 1287 if (!TCP_SKB_CB(prev)->tcp_gso_size) 1288 TCP_SKB_CB(prev)->tcp_gso_size = mss; 1289 1290 /* CHECKME: To clear or not to clear? Mimics normal skb currently */ 1291 if (tcp_skb_pcount(skb) <= 1) 1292 TCP_SKB_CB(skb)->tcp_gso_size = 0; 1293 1294 /* Difference in this won't matter, both ACKed by the same cumul. ACK */ 1295 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); 1296 1297 if (skb->len > 0) { 1298 BUG_ON(!tcp_skb_pcount(skb)); 1299 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED); 1300 return false; 1301 } 1302 1303 /* Whole SKB was eaten :-) */ 1304 1305 if (skb == tp->retransmit_skb_hint) 1306 tp->retransmit_skb_hint = prev; 1307 if (skb == tp->lost_skb_hint) { 1308 tp->lost_skb_hint = prev; 1309 tp->lost_cnt_hint -= tcp_skb_pcount(prev); 1310 } 1311 1312 TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 1313 TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor; 1314 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 1315 TCP_SKB_CB(prev)->end_seq++; 1316 1317 if (skb == tcp_highest_sack(sk)) 1318 tcp_advance_highest_sack(sk, skb); 1319 1320 tcp_skb_collapse_tstamp(prev, skb); 1321 if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp)) 1322 TCP_SKB_CB(prev)->tx.delivered_mstamp = 0; 1323 1324 tcp_rtx_queue_unlink_and_free(skb, sk); 1325 1326 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED); 1327 1328 return true; 1329 } 1330 1331 /* I wish gso_size would have a bit more sane initialization than 1332 * something-or-zero which complicates things 1333 */ 1334 static int tcp_skb_seglen(const struct sk_buff *skb) 1335 { 1336 return tcp_skb_pcount(skb) == 1 ? 
skb->len : tcp_skb_mss(skb); 1337 } 1338 1339 /* Shifting pages past head area doesn't work */ 1340 static int skb_can_shift(const struct sk_buff *skb) 1341 { 1342 return !skb_headlen(skb) && skb_is_nonlinear(skb); 1343 } 1344 1345 /* Try collapsing SACK blocks spanning across multiple skbs to a single 1346 * skb. 1347 */ 1348 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, 1349 struct tcp_sacktag_state *state, 1350 u32 start_seq, u32 end_seq, 1351 bool dup_sack) 1352 { 1353 struct tcp_sock *tp = tcp_sk(sk); 1354 struct sk_buff *prev; 1355 int mss; 1356 int pcount = 0; 1357 int len; 1358 int in_sack; 1359 1360 if (!sk_can_gso(sk)) 1361 goto fallback; 1362 1363 /* Normally R but no L won't result in plain S */ 1364 if (!dup_sack && 1365 (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) 1366 goto fallback; 1367 if (!skb_can_shift(skb)) 1368 goto fallback; 1369 /* This frame is about to be dropped (was ACKed). */ 1370 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1371 goto fallback; 1372 1373 /* Can only happen with delayed DSACK + discard craziness */ 1374 prev = skb_rb_prev(skb); 1375 if (!prev) 1376 goto fallback; 1377 1378 if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) 1379 goto fallback; 1380 1381 if (!tcp_skb_can_collapse_to(prev)) 1382 goto fallback; 1383 1384 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && 1385 !before(end_seq, TCP_SKB_CB(skb)->end_seq); 1386 1387 if (in_sack) { 1388 len = skb->len; 1389 pcount = tcp_skb_pcount(skb); 1390 mss = tcp_skb_seglen(skb); 1391 1392 /* TODO: Fix DSACKs to not fragment already SACKed and we can 1393 * drop this restriction as unnecessary 1394 */ 1395 if (mss != tcp_skb_seglen(prev)) 1396 goto fallback; 1397 } else { 1398 if (!after(TCP_SKB_CB(skb)->end_seq, start_seq)) 1399 goto noop; 1400 /* CHECKME: This is non-MSS split case only?, this will 1401 * cause skipped skbs due to advancing loop btw, original 1402 * has that feature too 1403 */ 1404 if (tcp_skb_pcount(skb) <= 1) 1405 goto noop; 1406 1407 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); 1408 if (!in_sack) { 1409 /* TODO: head merge to next could be attempted here 1410 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)), 1411 * though it might not be worth of the additional hassle 1412 * 1413 * ...we can probably just fallback to what was done 1414 * previously. We could try merging non-SACKed ones 1415 * as well but it probably isn't going to buy off 1416 * because later SACKs might again split them, and 1417 * it would make skb timestamp tracking considerably 1418 * harder problem. 1419 */ 1420 goto fallback; 1421 } 1422 1423 len = end_seq - TCP_SKB_CB(skb)->seq; 1424 BUG_ON(len < 0); 1425 BUG_ON(len > skb->len); 1426 1427 /* MSS boundaries should be honoured or else pcount will 1428 * severely break even though it makes things bit trickier. 
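 *
 * For example (made-up numbers): with TCP_SKB_CB(skb)->seq = 1000,
 * end_seq of the SACK block = 4000 and mss = 1448, the raw length is
 * 3000; it is rounded down to pcount = 2, len = 2896, so only whole
 * MSS-sized pieces are shifted into the previous skb.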
1429 * Optimize common case to avoid most of the divides 1430 */ 1431 mss = tcp_skb_mss(skb); 1432 1433 /* TODO: Fix DSACKs to not fragment already SACKed and we can 1434 * drop this restriction as unnecessary 1435 */ 1436 if (mss != tcp_skb_seglen(prev)) 1437 goto fallback; 1438 1439 if (len == mss) { 1440 pcount = 1; 1441 } else if (len < mss) { 1442 goto noop; 1443 } else { 1444 pcount = len / mss; 1445 len = pcount * mss; 1446 } 1447 } 1448 1449 /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */ 1450 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) 1451 goto fallback; 1452 1453 if (!skb_shift(prev, skb, len)) 1454 goto fallback; 1455 if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack)) 1456 goto out; 1457 1458 /* Hole filled allows collapsing with the next as well, this is very 1459 * useful when hole on every nth skb pattern happens 1460 */ 1461 skb = skb_rb_next(prev); 1462 if (!skb) 1463 goto out; 1464 1465 if (!skb_can_shift(skb) || 1466 ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) || 1467 (mss != tcp_skb_seglen(skb))) 1468 goto out; 1469 1470 len = skb->len; 1471 if (skb_shift(prev, skb, len)) { 1472 pcount += tcp_skb_pcount(skb); 1473 tcp_shifted_skb(sk, prev, skb, state, tcp_skb_pcount(skb), 1474 len, mss, 0); 1475 } 1476 1477 out: 1478 return prev; 1479 1480 noop: 1481 return skb; 1482 1483 fallback: 1484 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); 1485 return NULL; 1486 } 1487 1488 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, 1489 struct tcp_sack_block *next_dup, 1490 struct tcp_sacktag_state *state, 1491 u32 start_seq, u32 end_seq, 1492 bool dup_sack_in) 1493 { 1494 struct tcp_sock *tp = tcp_sk(sk); 1495 struct sk_buff *tmp; 1496 1497 skb_rbtree_walk_from(skb) { 1498 int in_sack = 0; 1499 bool dup_sack = dup_sack_in; 1500 1501 /* queue is in-order => we can short-circuit the walk early */ 1502 if (!before(TCP_SKB_CB(skb)->seq, end_seq)) 1503 break; 1504 1505 if (next_dup && 1506 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { 1507 in_sack = tcp_match_skb_to_sack(sk, skb, 1508 next_dup->start_seq, 1509 next_dup->end_seq); 1510 if (in_sack > 0) 1511 dup_sack = true; 1512 } 1513 1514 /* skb reference here is a bit tricky to get right, since 1515 * shifting can eat and free both this skb and the next, 1516 * so not even _safe variant of the loop is enough. 
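 *
 * The convention relied on below: tcp_shift_skb_data() returns NULL when
 * nothing could be shifted (fall back to exact matching), the original
 * skb when it chose not to act (treat as not SACKed), or the previous
 * skb when data was merged into it, in which case the walk restarts
 * from that skb.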
1517 */ 1518 if (in_sack <= 0) { 1519 tmp = tcp_shift_skb_data(sk, skb, state, 1520 start_seq, end_seq, dup_sack); 1521 if (tmp) { 1522 if (tmp != skb) { 1523 skb = tmp; 1524 continue; 1525 } 1526 1527 in_sack = 0; 1528 } else { 1529 in_sack = tcp_match_skb_to_sack(sk, skb, 1530 start_seq, 1531 end_seq); 1532 } 1533 } 1534 1535 if (unlikely(in_sack < 0)) 1536 break; 1537 1538 if (in_sack) { 1539 TCP_SKB_CB(skb)->sacked = 1540 tcp_sacktag_one(sk, 1541 state, 1542 TCP_SKB_CB(skb)->sacked, 1543 TCP_SKB_CB(skb)->seq, 1544 TCP_SKB_CB(skb)->end_seq, 1545 dup_sack, 1546 tcp_skb_pcount(skb), 1547 skb->skb_mstamp); 1548 tcp_rate_skb_delivered(sk, skb, state->rate); 1549 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1550 list_del_init(&skb->tcp_tsorted_anchor); 1551 1552 if (!before(TCP_SKB_CB(skb)->seq, 1553 tcp_highest_sack_seq(tp))) 1554 tcp_advance_highest_sack(sk, skb); 1555 } 1556 } 1557 return skb; 1558 } 1559 1560 static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk, 1561 struct tcp_sacktag_state *state, 1562 u32 seq) 1563 { 1564 struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node; 1565 struct sk_buff *skb; 1566 1567 while (*p) { 1568 parent = *p; 1569 skb = rb_to_skb(parent); 1570 if (before(seq, TCP_SKB_CB(skb)->seq)) { 1571 p = &parent->rb_left; 1572 continue; 1573 } 1574 if (!before(seq, TCP_SKB_CB(skb)->end_seq)) { 1575 p = &parent->rb_right; 1576 continue; 1577 } 1578 return skb; 1579 } 1580 return NULL; 1581 } 1582 1583 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, 1584 struct tcp_sacktag_state *state, 1585 u32 skip_to_seq) 1586 { 1587 if (skb && after(TCP_SKB_CB(skb)->seq, skip_to_seq)) 1588 return skb; 1589 1590 return tcp_sacktag_bsearch(sk, state, skip_to_seq); 1591 } 1592 1593 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, 1594 struct sock *sk, 1595 struct tcp_sack_block *next_dup, 1596 struct tcp_sacktag_state *state, 1597 u32 skip_to_seq) 1598 { 1599 if (!next_dup) 1600 return skb; 1601 1602 if (before(next_dup->start_seq, skip_to_seq)) { 1603 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); 1604 skb = tcp_sacktag_walk(skb, sk, NULL, state, 1605 next_dup->start_seq, next_dup->end_seq, 1606 1); 1607 } 1608 1609 return skb; 1610 } 1611 1612 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) 1613 { 1614 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1615 } 1616 1617 static int 1618 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, 1619 u32 prior_snd_una, struct tcp_sacktag_state *state) 1620 { 1621 struct tcp_sock *tp = tcp_sk(sk); 1622 const unsigned char *ptr = (skb_transport_header(ack_skb) + 1623 TCP_SKB_CB(ack_skb)->sacked); 1624 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); 1625 struct tcp_sack_block sp[TCP_NUM_SACKS]; 1626 struct tcp_sack_block *cache; 1627 struct sk_buff *skb; 1628 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); 1629 int used_sacks; 1630 bool found_dup_sack = false; 1631 int i, j; 1632 int first_sack_index; 1633 1634 state->flag = 0; 1635 state->reord = tp->snd_nxt; 1636 1637 if (!tp->sacked_out) 1638 tcp_highest_sack_reset(sk); 1639 1640 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, 1641 num_sacks, prior_snd_una); 1642 if (found_dup_sack) { 1643 state->flag |= FLAG_DSACKING_ACK; 1644 tp->delivered++; /* A spurious retransmission is delivered */ 1645 } 1646 1647 /* Eliminate too old ACKs, but take into 1648 * account more or less 
fresh ones, they can 1649 * contain valid SACK info. 1650 */ 1651 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) 1652 return 0; 1653 1654 if (!tp->packets_out) 1655 goto out; 1656 1657 used_sacks = 0; 1658 first_sack_index = 0; 1659 for (i = 0; i < num_sacks; i++) { 1660 bool dup_sack = !i && found_dup_sack; 1661 1662 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); 1663 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); 1664 1665 if (!tcp_is_sackblock_valid(tp, dup_sack, 1666 sp[used_sacks].start_seq, 1667 sp[used_sacks].end_seq)) { 1668 int mib_idx; 1669 1670 if (dup_sack) { 1671 if (!tp->undo_marker) 1672 mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; 1673 else 1674 mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; 1675 } else { 1676 /* Don't count olds caused by ACK reordering */ 1677 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && 1678 !after(sp[used_sacks].end_seq, tp->snd_una)) 1679 continue; 1680 mib_idx = LINUX_MIB_TCPSACKDISCARD; 1681 } 1682 1683 NET_INC_STATS(sock_net(sk), mib_idx); 1684 if (i == 0) 1685 first_sack_index = -1; 1686 continue; 1687 } 1688 1689 /* Ignore very old stuff early */ 1690 if (!after(sp[used_sacks].end_seq, prior_snd_una)) 1691 continue; 1692 1693 used_sacks++; 1694 } 1695 1696 /* order SACK blocks to allow in order walk of the retrans queue */ 1697 for (i = used_sacks - 1; i > 0; i--) { 1698 for (j = 0; j < i; j++) { 1699 if (after(sp[j].start_seq, sp[j + 1].start_seq)) { 1700 swap(sp[j], sp[j + 1]); 1701 1702 /* Track where the first SACK block goes to */ 1703 if (j == first_sack_index) 1704 first_sack_index = j + 1; 1705 } 1706 } 1707 } 1708 1709 state->mss_now = tcp_current_mss(sk); 1710 skb = NULL; 1711 i = 0; 1712 1713 if (!tp->sacked_out) { 1714 /* It's already past, so skip checking against it */ 1715 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1716 } else { 1717 cache = tp->recv_sack_cache; 1718 /* Skip empty blocks in at head of the cache */ 1719 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && 1720 !cache->end_seq) 1721 cache++; 1722 } 1723 1724 while (i < used_sacks) { 1725 u32 start_seq = sp[i].start_seq; 1726 u32 end_seq = sp[i].end_seq; 1727 bool dup_sack = (found_dup_sack && (i == first_sack_index)); 1728 struct tcp_sack_block *next_dup = NULL; 1729 1730 if (found_dup_sack && ((i + 1) == first_sack_index)) 1731 next_dup = &sp[i + 1]; 1732 1733 /* Skip too early cached blocks */ 1734 while (tcp_sack_cache_ok(tp, cache) && 1735 !before(start_seq, cache->end_seq)) 1736 cache++; 1737 1738 /* Can skip some work by looking recv_sack_cache? */ 1739 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && 1740 after(end_seq, cache->start_seq)) { 1741 1742 /* Head todo? */ 1743 if (before(start_seq, cache->start_seq)) { 1744 skb = tcp_sacktag_skip(skb, sk, state, 1745 start_seq); 1746 skb = tcp_sacktag_walk(skb, sk, next_dup, 1747 state, 1748 start_seq, 1749 cache->start_seq, 1750 dup_sack); 1751 } 1752 1753 /* Rest of the block already fully processed? */ 1754 if (!after(end_seq, cache->end_seq)) 1755 goto advance_sp; 1756 1757 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, 1758 state, 1759 cache->end_seq); 1760 1761 /* ...tail remains todo... */ 1762 if (tcp_highest_sack_seq(tp) == cache->end_seq) { 1763 /* ...but better entrypoint exists! 
*/
1764 skb = tcp_highest_sack(sk);
1765 if (!skb)
1766 break;
1767 cache++;
1768 goto walk;
1769 }
1770
1771 skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq);
1772 /* Check overlap against next cached too (past this one already) */
1773 cache++;
1774 continue;
1775 }
1776
1777 if (!before(start_seq, tcp_highest_sack_seq(tp))) {
1778 skb = tcp_highest_sack(sk);
1779 if (!skb)
1780 break;
1781 }
1782 skb = tcp_sacktag_skip(skb, sk, state, start_seq);
1783
1784 walk:
1785 skb = tcp_sacktag_walk(skb, sk, next_dup, state,
1786 start_seq, end_seq, dup_sack);
1787
1788 advance_sp:
1789 i++;
1790 }
1791
1792 /* Clear the head of the cache sack blocks so we can skip it next time */
1793 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
1794 tp->recv_sack_cache[i].start_seq = 0;
1795 tp->recv_sack_cache[i].end_seq = 0;
1796 }
1797 for (j = 0; j < used_sacks; j++)
1798 tp->recv_sack_cache[i++] = sp[j];
1799
1800 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker)
1801 tcp_check_sack_reordering(sk, state->reord, 0);
1802
1803 tcp_verify_left_out(tp);
1804 out:
1805
1806 #if FASTRETRANS_DEBUG > 0
1807 WARN_ON((int)tp->sacked_out < 0);
1808 WARN_ON((int)tp->lost_out < 0);
1809 WARN_ON((int)tp->retrans_out < 0);
1810 WARN_ON((int)tcp_packets_in_flight(tp) < 0);
1811 #endif
1812 return state->flag;
1813 }
1814
1815 /* Limits sacked_out so that the sum with lost_out isn't ever larger than
1816 * packets_out. Returns false if sacked_out adjustment wasn't necessary.
1817 */
1818 static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
1819 {
1820 u32 holes;
1821
1822 holes = max(tp->lost_out, 1U);
1823 holes = min(holes, tp->packets_out);
1824
1825 if ((tp->sacked_out + holes) > tp->packets_out) {
1826 tp->sacked_out = tp->packets_out - holes;
1827 return true;
1828 }
1829 return false;
1830 }
1831
1832 /* If we receive more dupacks than we expected while counting segments
1833 * on the assumption that there is no reordering, interpret this as reordering.
1834 * The only other reason could be a bug in the receiver's TCP.
1835 */
1836 static void tcp_check_reno_reordering(struct sock *sk, const int addend)
1837 {
1838 struct tcp_sock *tp = tcp_sk(sk);
1839
1840 if (!tcp_limit_reno_sacked(tp))
1841 return;
1842
1843 tp->reordering = min_t(u32, tp->packets_out + addend,
1844 sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
1845 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
1846 }
1847
1848 /* Emulate SACKs for a SACKless connection: account for a new dupack. */
1849
1850 static void tcp_add_reno_sack(struct sock *sk)
1851 {
1852 struct tcp_sock *tp = tcp_sk(sk);
1853 u32 prior_sacked = tp->sacked_out;
1854
1855 tp->sacked_out++;
1856 tcp_check_reno_reordering(sk, 0);
1857 if (tp->sacked_out > prior_sacked)
1858 tp->delivered++; /* Some out-of-order packet is delivered */
1859 tcp_verify_left_out(tp);
1860 }
1861
1862 /* Account for an ACK that ACKs some data in the Reno Recovery phase. */
1863
1864 static void tcp_remove_reno_sacks(struct sock *sk, int acked)
1865 {
1866 struct tcp_sock *tp = tcp_sk(sk);
1867
1868 if (acked > 0) {
1869 /* One ACK acked hole. The rest eat duplicate ACKs.
*/ 1870 tp->delivered += max_t(int, acked - tp->sacked_out, 1); 1871 if (acked - 1 >= tp->sacked_out) 1872 tp->sacked_out = 0; 1873 else 1874 tp->sacked_out -= acked - 1; 1875 } 1876 tcp_check_reno_reordering(sk, acked); 1877 tcp_verify_left_out(tp); 1878 } 1879 1880 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) 1881 { 1882 tp->sacked_out = 0; 1883 } 1884 1885 void tcp_clear_retrans(struct tcp_sock *tp) 1886 { 1887 tp->retrans_out = 0; 1888 tp->lost_out = 0; 1889 tp->undo_marker = 0; 1890 tp->undo_retrans = -1; 1891 tp->sacked_out = 0; 1892 } 1893 1894 static inline void tcp_init_undo(struct tcp_sock *tp) 1895 { 1896 tp->undo_marker = tp->snd_una; 1897 /* Retransmission still in flight may cause DSACKs later. */ 1898 tp->undo_retrans = tp->retrans_out ? : -1; 1899 } 1900 1901 /* Enter Loss state. If we detect SACK reneging, forget all SACK information 1902 * and reset tags completely, otherwise preserve SACKs. If receiver 1903 * dropped its ofo queue, we will know this due to reneging detection. 1904 */ 1905 void tcp_enter_loss(struct sock *sk) 1906 { 1907 const struct inet_connection_sock *icsk = inet_csk(sk); 1908 struct tcp_sock *tp = tcp_sk(sk); 1909 struct net *net = sock_net(sk); 1910 struct sk_buff *skb; 1911 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; 1912 bool is_reneg; /* is receiver reneging on SACKs? */ 1913 bool mark_lost; 1914 1915 /* Reduce ssthresh if it has not yet been made inside this window. */ 1916 if (icsk->icsk_ca_state <= TCP_CA_Disorder || 1917 !after(tp->high_seq, tp->snd_una) || 1918 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1919 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1920 tp->prior_cwnd = tp->snd_cwnd; 1921 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1922 tcp_ca_event(sk, CA_EVENT_LOSS); 1923 tcp_init_undo(tp); 1924 } 1925 tp->snd_cwnd = 1; 1926 tp->snd_cwnd_cnt = 0; 1927 tp->snd_cwnd_stamp = tcp_jiffies32; 1928 1929 tp->retrans_out = 0; 1930 tp->lost_out = 0; 1931 1932 if (tcp_is_reno(tp)) 1933 tcp_reset_reno_sack(tp); 1934 1935 skb = tcp_rtx_queue_head(sk); 1936 is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); 1937 if (is_reneg) { 1938 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); 1939 tp->sacked_out = 0; 1940 /* Mark SACK reneging until we recover from this loss event. */ 1941 tp->is_sack_reneg = 1; 1942 } 1943 tcp_clear_all_retrans_hints(tp); 1944 1945 skb_rbtree_walk_from(skb) { 1946 mark_lost = (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) || 1947 is_reneg); 1948 if (mark_lost) 1949 tcp_sum_lost(tp, skb); 1950 TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; 1951 if (mark_lost) { 1952 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; 1953 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1954 tp->lost_out += tcp_skb_pcount(skb); 1955 } 1956 } 1957 tcp_verify_left_out(tp); 1958 1959 /* Timeout in disordered state after receiving substantial DUPACKs 1960 * suggests that the degree of reordering is over-estimated. 1961 */ 1962 if (icsk->icsk_ca_state <= TCP_CA_Disorder && 1963 tp->sacked_out >= net->ipv4.sysctl_tcp_reordering) 1964 tp->reordering = min_t(unsigned int, tp->reordering, 1965 net->ipv4.sysctl_tcp_reordering); 1966 tcp_set_ca_state(sk, TCP_CA_Loss); 1967 tp->high_seq = tp->snd_nxt; 1968 tcp_ecn_queue_cwr(tp); 1969 1970 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous 1971 * loss recovery is underway except recurring timeout(s) on 1972 * the same SND.UNA (sec 3.2). 
Disable F-RTO on path MTU probing
1973 *
1974 * In theory F-RTO can be used repeatedly during loss recovery.
1975 * In practice this interacts badly with broken middle-boxes that
1976 * falsely raise the receive window, which results in repeated
1977 * timeouts and stop-and-go behavior.
1978 */
1979 tp->frto = net->ipv4.sysctl_tcp_frto &&
1980 (new_recovery || icsk->icsk_retransmits) &&
1981 !inet_csk(sk)->icsk_mtup.probe_size;
1982 }
1983
1984 /* If an ACK arrived pointing to a remembered SACK, it means that our
1985 * remembered SACKs do not reflect the real state of the receiver, i.e.
1986 * the receiver _host_ is heavily congested (or buggy).
1987 *
1988 * To avoid big spurious retransmission bursts due to transient SACK
1989 * scoreboard oddities that look like reneging, we give the receiver a
1990 * little time (max(RTT/2, 10ms)) to send us some more ACKs that will
1991 * restore sanity to the SACK scoreboard. If the apparent reneging
1992 * persists until this RTO then we'll clear the SACK scoreboard.
1993 */
1994 static bool tcp_check_sack_reneging(struct sock *sk, int flag)
1995 {
1996 if (flag & FLAG_SACK_RENEGING) {
1997 struct tcp_sock *tp = tcp_sk(sk);
1998 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
1999 msecs_to_jiffies(10));
2000
2001 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2002 delay, TCP_RTO_MAX);
2003 return true;
2004 }
2005 return false;
2006 }
2007
2008 /* Heuristics to calculate the number of duplicate ACKs. There's no dupACK
2009 * counter when SACK is enabled (without SACK, sacked_out is used for
2010 * that purpose).
2011 *
2012 * With reordering, holes may still be in flight, so RFC3517 recovery
2013 * uses pure sacked_out (total number of SACKed segments) even though
2014 * it violates the RFC that uses duplicate ACKs. Often these are equal,
2015 * but when e.g. out-of-window ACKs or packet duplication occurs,
2016 * they differ. Since neither occurs due to loss, TCP should really
2017 * ignore them.
2018 */
2019 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
2020 {
2021 return tp->sacked_out + 1;
2022 }
2023
2024 /* Linux NewReno/SACK/ECN state machine.
2025 * --------------------------------------
2026 *
2027 * "Open" Normal state, no dubious events, fast path.
2028 * "Disorder" In all respects it is "Open",
2029 * but requires a bit more attention. It is entered when
2030 * we see some SACKs or dupacks. It is split off from "Open"
2031 * mainly to move some processing from the fast path to the slow one.
2032 * "CWR" CWND was reduced due to some Congestion Notification event.
2033 * It can be ECN, ICMP source quench, local device congestion.
2034 * "Recovery" CWND was reduced, we are fast-retransmitting.
2035 * "Loss" CWND was reduced due to RTO timeout or SACK reneging.
2036 *
2037 * tcp_fastretrans_alert() is entered:
2038 * - each incoming ACK, if state is not "Open"
2039 * - when the arriving ACK is unusual, namely:
2040 * * SACK
2041 * * Duplicate ACK.
2042 * * ECN ECE.
2043 *
2044 * Counting packets in flight is pretty simple.
2045 *
2046 * in_flight = packets_out - left_out + retrans_out
2047 *
2048 * packets_out is SND.NXT-SND.UNA counted in packets.
2049 *
2050 * retrans_out is the number of retransmitted segments.
2051 *
2052 * left_out is the number of segments that have left the network but are not ACKed yet.
2053 *
2054 * left_out = sacked_out + lost_out
2055 *
2056 * sacked_out: Packets which arrived at the receiver out of order
2057 * and hence were not ACKed. With SACKs this number is simply the
2058 * amount of SACKed data. Even without SACKs
2059 * it is easy to give a pretty reliable estimate of this number by
2060 * counting duplicate ACKs.
2061 *
2062 * lost_out: Packets lost by the network. TCP has no explicit
2063 * "loss notification" feedback from the network (for now).
2064 * It means that this number can only be _guessed_.
2065 * Actually, it is the heuristics used to predict losses that
2066 * distinguish the different algorithms.
2067 *
2068 * E.g. after an RTO, when the whole queue is considered lost,
2069 * lost_out = packets_out and in_flight = retrans_out.
2070 *
2071 * Essentially, we now have a few algorithms for detecting
2072 * lost packets.
2073 *
2074 * If the receiver supports SACK:
2075 *
2076 * RFC6675/3517: It is the conventional algorithm. A packet is
2077 * considered lost if the number of higher sequence packets
2078 * SACKed is greater than or equal to the DUPACK threshold
2079 * (reordering). This is implemented in tcp_mark_head_lost and
2080 * tcp_update_scoreboard.
2081 *
2082 * RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm
2083 * (2017-) that checks timing instead of counting DUPACKs.
2084 * Essentially a packet is considered lost if it's not S/ACKed
2085 * after RTT + reordering_window, where both metrics are
2086 * dynamically measured and adjusted. This is implemented in
2087 * tcp_rack_mark_lost.
2088 *
2089 * If the receiver does not support SACK:
2090 *
2091 * NewReno (RFC6582): in Recovery we assume that one segment
2092 * is lost (classic Reno). While we are in Recovery and
2093 * a partial ACK arrives, we assume that one more packet
2094 * is lost (NewReno). These heuristics are the same for NewReno
2095 * and SACK.
2096 *
2097 * The really tricky (and requiring careful tuning) part of the algorithm
2098 * is hidden in the functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
2099 * The first determines the moment _when_ we should reduce CWND and,
2100 * hence, slow down forward transmission. In fact, it determines the moment
2101 * when we decide that a hole is caused by loss, rather than by reordering.
2102 *
2103 * tcp_xmit_retransmit_queue() decides _what_ we should retransmit to fill
2104 * the holes caused by lost packets.
2105 *
2106 * And the most logically complicated part of the algorithm is the undo
2107 * heuristics. We detect false retransmits due to both too-early
2108 * fast retransmit (reordering) and underestimated RTO, by analyzing
2109 * timestamps and D-SACKs. When we detect that some segments were
2110 * retransmitted by mistake and the CWND reduction was wrong, we undo the
2111 * window reduction and abort the recovery phase. This logic is hidden
2112 * inside several functions named tcp_try_undo_<something>.
2113 */
2114
2115 /* This function decides when we should leave the Disorder state
2116 * and enter the Recovery phase, reducing the congestion window.
2117 *
2118 * Main question: may we further continue forward transmission
2119 * with the same cwnd?
2120 */
2121 static bool tcp_time_to_recover(struct sock *sk, int flag)
2122 {
2123 struct tcp_sock *tp = tcp_sk(sk);
2124
2125 /* Trick#1: The loss is proven. */
2126 if (tp->lost_out)
2127 return true;
2128
2129 /* Not-A-Trick#2 : Classic rule... */
2130 if (tcp_dupack_heuristics(tp) > tp->reordering)
2131 return true;
2132
2133 return false;
2134 }
2135
2136 /* Detect loss in event "A" above by marking the head of the queue as lost.
2137 * For non-SACK(Reno) senders, the first "packets" number of segments
2138 * are considered lost. For RFC3517 SACK, a segment is considered lost if it
2139 * has at least tp->reordering SACKed segments above it; "packets" refers to
2140 * the maximum SACKed segments to pass before reaching this limit.
2141 */
2142 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2143 {
2144 struct tcp_sock *tp = tcp_sk(sk);
2145 struct sk_buff *skb;
2146 int cnt, oldcnt, lost;
2147 unsigned int mss;
2148 /* Use SACK to deduce losses of new sequences sent during recovery */
2149 const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
2150
2151 WARN_ON(packets > tp->packets_out);
2152 skb = tp->lost_skb_hint;
2153 if (skb) {
2154 /* Head already handled? */
2155 if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una))
2156 return;
2157 cnt = tp->lost_cnt_hint;
2158 } else {
2159 skb = tcp_rtx_queue_head(sk);
2160 cnt = 0;
2161 }
2162
2163 skb_rbtree_walk_from(skb) {
2164 /* TODO: do this better */
2165 /* this is not the most efficient way to do this... */
2166 tp->lost_skb_hint = skb;
2167 tp->lost_cnt_hint = cnt;
2168
2169 if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
2170 break;
2171
2172 oldcnt = cnt;
2173 if (tcp_is_reno(tp) ||
2174 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
2175 cnt += tcp_skb_pcount(skb);
2176
2177 if (cnt > packets) {
2178 if (tcp_is_sack(tp) ||
2179 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
2180 (oldcnt >= packets))
2181 break;
2182
2183 mss = tcp_skb_mss(skb);
2184 /* If needed, chop off the prefix to mark as lost. */
2185 lost = (packets - oldcnt) * mss;
2186 if (lost < skb->len &&
2187 tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
2188 lost, mss, GFP_ATOMIC) < 0)
2189 break;
2190 cnt = packets;
2191 }
2192
2193 tcp_skb_mark_lost(tp, skb);
2194
2195 if (mark_head)
2196 break;
2197 }
2198 tcp_verify_left_out(tp);
2199 }
2200
2201 /* Account newly detected lost packet(s) */
2202
2203 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2204 {
2205 struct tcp_sock *tp = tcp_sk(sk);
2206
2207 if (tcp_is_reno(tp)) {
2208 tcp_mark_head_lost(sk, 1, 1);
2209 } else {
2210 int sacked_upto = tp->sacked_out - tp->reordering;
2211 if (sacked_upto >= 0)
2212 tcp_mark_head_lost(sk, sacked_upto, 0);
2213 else if (fast_rexmit)
2214 tcp_mark_head_lost(sk, 1, 1);
2215 }
2216 }
2217
2218 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
2219 {
2220 return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2221 before(tp->rx_opt.rcv_tsecr, when);
2222 }
2223
2224 /* skb is spuriously retransmitted if the returned timestamp echo
2225 * reply is prior to the skb transmission time
2226 */
2227 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
2228 const struct sk_buff *skb)
2229 {
2230 return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
2231 tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
2232 }
2233
2234 /* Nothing was retransmitted or the returned timestamp is less
2235 * than the timestamp of the first retransmission.
2236 */
2237 static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
2238 {
2239 return !tp->retrans_stamp ||
2240 tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
2241 }
2242
2243 /* Undo procedures. */
2244
2245 /* We can clear retrans_stamp when there are no retransmissions in the
2246 * window. It would seem that it is trivially available for us in
2247 * tp->retrans_out, however, that kind of assumption doesn't consider
2248 * what will happen if errors occur when sending the retransmission for the
2249 * second time.
...It could the that such segment has only 2250 * TCPCB_EVER_RETRANS set at the present time. It seems that checking 2251 * the head skb is enough except for some reneging corner cases that 2252 * are not worth the effort. 2253 * 2254 * Main reason for all this complexity is the fact that connection dying 2255 * time now depends on the validity of the retrans_stamp, in particular, 2256 * that successive retransmissions of a segment must not advance 2257 * retrans_stamp under any conditions. 2258 */ 2259 static bool tcp_any_retrans_done(const struct sock *sk) 2260 { 2261 const struct tcp_sock *tp = tcp_sk(sk); 2262 struct sk_buff *skb; 2263 2264 if (tp->retrans_out) 2265 return true; 2266 2267 skb = tcp_rtx_queue_head(sk); 2268 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) 2269 return true; 2270 2271 return false; 2272 } 2273 2274 static void DBGUNDO(struct sock *sk, const char *msg) 2275 { 2276 #if FASTRETRANS_DEBUG > 1 2277 struct tcp_sock *tp = tcp_sk(sk); 2278 struct inet_sock *inet = inet_sk(sk); 2279 2280 if (sk->sk_family == AF_INET) { 2281 pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", 2282 msg, 2283 &inet->inet_daddr, ntohs(inet->inet_dport), 2284 tp->snd_cwnd, tcp_left_out(tp), 2285 tp->snd_ssthresh, tp->prior_ssthresh, 2286 tp->packets_out); 2287 } 2288 #if IS_ENABLED(CONFIG_IPV6) 2289 else if (sk->sk_family == AF_INET6) { 2290 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", 2291 msg, 2292 &sk->sk_v6_daddr, ntohs(inet->inet_dport), 2293 tp->snd_cwnd, tcp_left_out(tp), 2294 tp->snd_ssthresh, tp->prior_ssthresh, 2295 tp->packets_out); 2296 } 2297 #endif 2298 #endif 2299 } 2300 2301 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) 2302 { 2303 struct tcp_sock *tp = tcp_sk(sk); 2304 2305 if (unmark_loss) { 2306 struct sk_buff *skb; 2307 2308 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 2309 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 2310 } 2311 tp->lost_out = 0; 2312 tcp_clear_all_retrans_hints(tp); 2313 } 2314 2315 if (tp->prior_ssthresh) { 2316 const struct inet_connection_sock *icsk = inet_csk(sk); 2317 2318 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); 2319 2320 if (tp->prior_ssthresh > tp->snd_ssthresh) { 2321 tp->snd_ssthresh = tp->prior_ssthresh; 2322 tcp_ecn_withdraw_cwr(tp); 2323 } 2324 } 2325 tp->snd_cwnd_stamp = tcp_jiffies32; 2326 tp->undo_marker = 0; 2327 tp->rack.advanced = 1; /* Force RACK to re-exam losses */ 2328 } 2329 2330 static inline bool tcp_may_undo(const struct tcp_sock *tp) 2331 { 2332 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); 2333 } 2334 2335 /* People celebrate: "We love our President!" */ 2336 static bool tcp_try_undo_recovery(struct sock *sk) 2337 { 2338 struct tcp_sock *tp = tcp_sk(sk); 2339 2340 if (tcp_may_undo(tp)) { 2341 int mib_idx; 2342 2343 /* Happy end! We did not retransmit anything 2344 * or our original transmission succeeded. 2345 */ 2346 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); 2347 tcp_undo_cwnd_reduction(sk, false); 2348 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) 2349 mib_idx = LINUX_MIB_TCPLOSSUNDO; 2350 else 2351 mib_idx = LINUX_MIB_TCPFULLUNDO; 2352 2353 NET_INC_STATS(sock_net(sk), mib_idx); 2354 } else if (tp->rack.reo_wnd_persist) { 2355 tp->rack.reo_wnd_persist--; 2356 } 2357 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { 2358 /* Hold old state until something *above* high_seq 2359 * is ACKed. For Reno it is MUST to prevent false 2360 * fast retransmits (RFC2582). SACK TCP is safe. 
*/ 2361 if (!tcp_any_retrans_done(sk)) 2362 tp->retrans_stamp = 0; 2363 return true; 2364 } 2365 tcp_set_ca_state(sk, TCP_CA_Open); 2366 tp->is_sack_reneg = 0; 2367 return false; 2368 } 2369 2370 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ 2371 static bool tcp_try_undo_dsack(struct sock *sk) 2372 { 2373 struct tcp_sock *tp = tcp_sk(sk); 2374 2375 if (tp->undo_marker && !tp->undo_retrans) { 2376 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH, 2377 tp->rack.reo_wnd_persist + 1); 2378 DBGUNDO(sk, "D-SACK"); 2379 tcp_undo_cwnd_reduction(sk, false); 2380 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); 2381 return true; 2382 } 2383 return false; 2384 } 2385 2386 /* Undo during loss recovery after partial ACK or using F-RTO. */ 2387 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) 2388 { 2389 struct tcp_sock *tp = tcp_sk(sk); 2390 2391 if (frto_undo || tcp_may_undo(tp)) { 2392 tcp_undo_cwnd_reduction(sk, true); 2393 2394 DBGUNDO(sk, "partial loss"); 2395 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); 2396 if (frto_undo) 2397 NET_INC_STATS(sock_net(sk), 2398 LINUX_MIB_TCPSPURIOUSRTOS); 2399 inet_csk(sk)->icsk_retransmits = 0; 2400 if (frto_undo || tcp_is_sack(tp)) { 2401 tcp_set_ca_state(sk, TCP_CA_Open); 2402 tp->is_sack_reneg = 0; 2403 } 2404 return true; 2405 } 2406 return false; 2407 } 2408 2409 /* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937. 2410 * It computes the number of packets to send (sndcnt) based on packets newly 2411 * delivered: 2412 * 1) If the packets in flight is larger than ssthresh, PRR spreads the 2413 * cwnd reductions across a full RTT. 2414 * 2) Otherwise PRR uses packet conservation to send as much as delivered. 2415 * But when the retransmits are acked without further losses, PRR 2416 * slow starts cwnd up to ssthresh to speed up the recovery. 2417 */ 2418 static void tcp_init_cwnd_reduction(struct sock *sk) 2419 { 2420 struct tcp_sock *tp = tcp_sk(sk); 2421 2422 tp->high_seq = tp->snd_nxt; 2423 tp->tlp_high_seq = 0; 2424 tp->snd_cwnd_cnt = 0; 2425 tp->prior_cwnd = tp->snd_cwnd; 2426 tp->prr_delivered = 0; 2427 tp->prr_out = 0; 2428 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); 2429 tcp_ecn_queue_cwr(tp); 2430 } 2431 2432 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag) 2433 { 2434 struct tcp_sock *tp = tcp_sk(sk); 2435 int sndcnt = 0; 2436 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); 2437 2438 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) 2439 return; 2440 2441 tp->prr_delivered += newly_acked_sacked; 2442 if (delta < 0) { 2443 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + 2444 tp->prior_cwnd - 1; 2445 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; 2446 } else if ((flag & FLAG_RETRANS_DATA_ACKED) && 2447 !(flag & FLAG_LOST_RETRANS)) { 2448 sndcnt = min_t(int, delta, 2449 max_t(int, tp->prr_delivered - tp->prr_out, 2450 newly_acked_sacked) + 1); 2451 } else { 2452 sndcnt = min(delta, newly_acked_sacked); 2453 } 2454 /* Force a fast retransmit upon entering fast recovery */ 2455 sndcnt = max(sndcnt, (tp->prr_out ? 
0 : 1)); 2456 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; 2457 } 2458 2459 static inline void tcp_end_cwnd_reduction(struct sock *sk) 2460 { 2461 struct tcp_sock *tp = tcp_sk(sk); 2462 2463 if (inet_csk(sk)->icsk_ca_ops->cong_control) 2464 return; 2465 2466 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ 2467 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && 2468 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { 2469 tp->snd_cwnd = tp->snd_ssthresh; 2470 tp->snd_cwnd_stamp = tcp_jiffies32; 2471 } 2472 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2473 } 2474 2475 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */ 2476 void tcp_enter_cwr(struct sock *sk) 2477 { 2478 struct tcp_sock *tp = tcp_sk(sk); 2479 2480 tp->prior_ssthresh = 0; 2481 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2482 tp->undo_marker = 0; 2483 tcp_init_cwnd_reduction(sk); 2484 tcp_set_ca_state(sk, TCP_CA_CWR); 2485 } 2486 } 2487 EXPORT_SYMBOL(tcp_enter_cwr); 2488 2489 static void tcp_try_keep_open(struct sock *sk) 2490 { 2491 struct tcp_sock *tp = tcp_sk(sk); 2492 int state = TCP_CA_Open; 2493 2494 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) 2495 state = TCP_CA_Disorder; 2496 2497 if (inet_csk(sk)->icsk_ca_state != state) { 2498 tcp_set_ca_state(sk, state); 2499 tp->high_seq = tp->snd_nxt; 2500 } 2501 } 2502 2503 static void tcp_try_to_open(struct sock *sk, int flag) 2504 { 2505 struct tcp_sock *tp = tcp_sk(sk); 2506 2507 tcp_verify_left_out(tp); 2508 2509 if (!tcp_any_retrans_done(sk)) 2510 tp->retrans_stamp = 0; 2511 2512 if (flag & FLAG_ECE) 2513 tcp_enter_cwr(sk); 2514 2515 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { 2516 tcp_try_keep_open(sk); 2517 } 2518 } 2519 2520 static void tcp_mtup_probe_failed(struct sock *sk) 2521 { 2522 struct inet_connection_sock *icsk = inet_csk(sk); 2523 2524 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; 2525 icsk->icsk_mtup.probe_size = 0; 2526 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); 2527 } 2528 2529 static void tcp_mtup_probe_success(struct sock *sk) 2530 { 2531 struct tcp_sock *tp = tcp_sk(sk); 2532 struct inet_connection_sock *icsk = inet_csk(sk); 2533 2534 /* FIXME: breaks with very large cwnd */ 2535 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2536 tp->snd_cwnd = tp->snd_cwnd * 2537 tcp_mss_to_mtu(sk, tp->mss_cache) / 2538 icsk->icsk_mtup.probe_size; 2539 tp->snd_cwnd_cnt = 0; 2540 tp->snd_cwnd_stamp = tcp_jiffies32; 2541 tp->snd_ssthresh = tcp_current_ssthresh(sk); 2542 2543 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; 2544 icsk->icsk_mtup.probe_size = 0; 2545 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 2546 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); 2547 } 2548 2549 /* Do a simple retransmit without using the backoff mechanisms in 2550 * tcp_timer. This is used for path mtu discovery. 2551 * The socket is already locked here. 
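 * Segments larger than the current MSS that were never SACKed are marked
 * lost below, and the regular retransmit path resends them.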
2552 */ 2553 void tcp_simple_retransmit(struct sock *sk) 2554 { 2555 const struct inet_connection_sock *icsk = inet_csk(sk); 2556 struct tcp_sock *tp = tcp_sk(sk); 2557 struct sk_buff *skb; 2558 unsigned int mss = tcp_current_mss(sk); 2559 2560 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 2561 if (tcp_skb_seglen(skb) > mss && 2562 !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 2563 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2564 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 2565 tp->retrans_out -= tcp_skb_pcount(skb); 2566 } 2567 tcp_skb_mark_lost_uncond_verify(tp, skb); 2568 } 2569 } 2570 2571 tcp_clear_retrans_hints_partial(tp); 2572 2573 if (!tp->lost_out) 2574 return; 2575 2576 if (tcp_is_reno(tp)) 2577 tcp_limit_reno_sacked(tp); 2578 2579 tcp_verify_left_out(tp); 2580 2581 /* Don't muck with the congestion window here. 2582 * Reason is that we do not increase amount of _data_ 2583 * in network, but units changed and effective 2584 * cwnd/ssthresh really reduced now. 2585 */ 2586 if (icsk->icsk_ca_state != TCP_CA_Loss) { 2587 tp->high_seq = tp->snd_nxt; 2588 tp->snd_ssthresh = tcp_current_ssthresh(sk); 2589 tp->prior_ssthresh = 0; 2590 tp->undo_marker = 0; 2591 tcp_set_ca_state(sk, TCP_CA_Loss); 2592 } 2593 tcp_xmit_retransmit_queue(sk); 2594 } 2595 EXPORT_SYMBOL(tcp_simple_retransmit); 2596 2597 void tcp_enter_recovery(struct sock *sk, bool ece_ack) 2598 { 2599 struct tcp_sock *tp = tcp_sk(sk); 2600 int mib_idx; 2601 2602 if (tcp_is_reno(tp)) 2603 mib_idx = LINUX_MIB_TCPRENORECOVERY; 2604 else 2605 mib_idx = LINUX_MIB_TCPSACKRECOVERY; 2606 2607 NET_INC_STATS(sock_net(sk), mib_idx); 2608 2609 tp->prior_ssthresh = 0; 2610 tcp_init_undo(tp); 2611 2612 if (!tcp_in_cwnd_reduction(sk)) { 2613 if (!ece_ack) 2614 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2615 tcp_init_cwnd_reduction(sk); 2616 } 2617 tcp_set_ca_state(sk, TCP_CA_Recovery); 2618 } 2619 2620 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are 2621 * recovered or spurious. Otherwise retransmits more on partial ACKs. 2622 */ 2623 static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack, 2624 int *rexmit) 2625 { 2626 struct tcp_sock *tp = tcp_sk(sk); 2627 bool recovered = !before(tp->snd_una, tp->high_seq); 2628 2629 if ((flag & FLAG_SND_UNA_ADVANCED) && 2630 tcp_try_undo_loss(sk, false)) 2631 return; 2632 2633 /* The ACK (s)acks some never-retransmitted data meaning not all 2634 * the data packets before the timeout were lost. Therefore we 2635 * undo the congestion window and state. This is essentially 2636 * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since 2637 * a retransmitted skb is permantly marked, we can apply such an 2638 * operation even if F-RTO was not used. 2639 */ 2640 if ((flag & FLAG_ORIG_SACK_ACKED) && 2641 tcp_try_undo_loss(sk, tp->undo_marker)) 2642 return; 2643 2644 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ 2645 if (after(tp->snd_nxt, tp->high_seq)) { 2646 if (flag & FLAG_DATA_SACKED || is_dupack) 2647 tp->frto = 0; /* Step 3.a. loss was real */ 2648 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) { 2649 tp->high_seq = tp->snd_nxt; 2650 /* Step 2.b. Try send new data (but deferred until cwnd 2651 * is updated in tcp_ack()). Otherwise fall back to 2652 * the conventional recovery. 
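 * (REXMIT_NEW makes tcp_xmit_recovery() push pending new data instead of
 * retransmitting.)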
2653 */
2654 if (!tcp_write_queue_empty(sk) &&
2655 after(tcp_wnd_end(tp), tp->snd_nxt)) {
2656 *rexmit = REXMIT_NEW;
2657 return;
2658 }
2659 tp->frto = 0;
2660 }
2661 }
2662
2663 if (recovered) {
2664 /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
2665 tcp_try_undo_recovery(sk);
2666 return;
2667 }
2668 if (tcp_is_reno(tp)) {
2669 /* A Reno DUPACK means new data in F-RTO step 2.b above are
2670 * delivered. Lower the inflight to clock out (re)transmissions.
2671 */
2672 if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
2673 tcp_add_reno_sack(sk);
2674 else if (flag & FLAG_SND_UNA_ADVANCED)
2675 tcp_reset_reno_sack(tp);
2676 }
2677 *rexmit = REXMIT_LOST;
2678 }
2679
2680 /* Undo during fast recovery after partial ACK. */
2681 static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
2682 {
2683 struct tcp_sock *tp = tcp_sk(sk);
2684
2685 if (tp->undo_marker && tcp_packet_delayed(tp)) {
2686 /* Plain luck! Hole is filled with a delayed
2687 * packet, rather than with a retransmit. Check reordering.
2688 */
2689 tcp_check_sack_reordering(sk, prior_snd_una, 1);
2690
2691 /* We are getting evidence that the reordering degree is higher
2692 * than we realized. If there are no retransmits out then we
2693 * can undo. Otherwise we clock out new packets but do not
2694 * mark more packets lost or retransmit more.
2695 */
2696 if (tp->retrans_out)
2697 return true;
2698
2699 if (!tcp_any_retrans_done(sk))
2700 tp->retrans_stamp = 0;
2701
2702 DBGUNDO(sk, "partial recovery");
2703 tcp_undo_cwnd_reduction(sk, true);
2704 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
2705 tcp_try_keep_open(sk);
2706 return true;
2707 }
2708 return false;
2709 }
2710
2711 static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag)
2712 {
2713 struct tcp_sock *tp = tcp_sk(sk);
2714
2715 /* Use RACK to detect loss */
2716 if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) {
2717 u32 prior_retrans = tp->retrans_out;
2718
2719 tcp_rack_mark_lost(sk);
2720 if (prior_retrans > tp->retrans_out)
2721 *ack_flag |= FLAG_LOST_RETRANS;
2722 }
2723 }
2724
2725 static bool tcp_force_fast_retransmit(struct sock *sk)
2726 {
2727 struct tcp_sock *tp = tcp_sk(sk);
2728
2729 return after(tcp_highest_sack_seq(tp),
2730 tp->snd_una + tp->reordering * tp->mss_cache);
2731 }
2732
2733 /* Process an event which can update packets-in-flight non-trivially.
2734 * The main goal of this function is to calculate a new estimate for left_out,
2735 * taking into account both packets sitting in the receiver's buffer and
2736 * packets lost by the network.
2737 *
2738 * Besides that, it updates the congestion state when packet loss or ECN
2739 * is detected. But it does not reduce the cwnd; that is done by the
2740 * congestion control later.
2741 *
2742 * It does _not_ decide what to send; that is done in
2743 * tcp_xmit_retransmit_queue().
2744 */
2745 static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
2746 bool is_dupack, int *ack_flag, int *rexmit)
2747 {
2748 struct inet_connection_sock *icsk = inet_csk(sk);
2749 struct tcp_sock *tp = tcp_sk(sk);
2750 int fast_rexmit = 0, flag = *ack_flag;
2751 bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
2752 tcp_force_fast_retransmit(sk));
2753
2754 if (!tp->packets_out && tp->sacked_out)
2755 tp->sacked_out = 0;
2756
2757 /* Now state machine starts.
2758 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
2759 if (flag & FLAG_ECE)
2760 tp->prior_ssthresh = 0;
2761
2762 /* B.
In all the states check for reneging SACKs. */ 2763 if (tcp_check_sack_reneging(sk, flag)) 2764 return; 2765 2766 /* C. Check consistency of the current state. */ 2767 tcp_verify_left_out(tp); 2768 2769 /* D. Check state exit conditions. State can be terminated 2770 * when high_seq is ACKed. */ 2771 if (icsk->icsk_ca_state == TCP_CA_Open) { 2772 WARN_ON(tp->retrans_out != 0); 2773 tp->retrans_stamp = 0; 2774 } else if (!before(tp->snd_una, tp->high_seq)) { 2775 switch (icsk->icsk_ca_state) { 2776 case TCP_CA_CWR: 2777 /* CWR is to be held something *above* high_seq 2778 * is ACKed for CWR bit to reach receiver. */ 2779 if (tp->snd_una != tp->high_seq) { 2780 tcp_end_cwnd_reduction(sk); 2781 tcp_set_ca_state(sk, TCP_CA_Open); 2782 } 2783 break; 2784 2785 case TCP_CA_Recovery: 2786 if (tcp_is_reno(tp)) 2787 tcp_reset_reno_sack(tp); 2788 if (tcp_try_undo_recovery(sk)) 2789 return; 2790 tcp_end_cwnd_reduction(sk); 2791 break; 2792 } 2793 } 2794 2795 /* E. Process state. */ 2796 switch (icsk->icsk_ca_state) { 2797 case TCP_CA_Recovery: 2798 if (!(flag & FLAG_SND_UNA_ADVANCED)) { 2799 if (tcp_is_reno(tp) && is_dupack) 2800 tcp_add_reno_sack(sk); 2801 } else { 2802 if (tcp_try_undo_partial(sk, prior_snd_una)) 2803 return; 2804 /* Partial ACK arrived. Force fast retransmit. */ 2805 do_lost = tcp_is_reno(tp) || 2806 tcp_force_fast_retransmit(sk); 2807 } 2808 if (tcp_try_undo_dsack(sk)) { 2809 tcp_try_keep_open(sk); 2810 return; 2811 } 2812 tcp_rack_identify_loss(sk, ack_flag); 2813 break; 2814 case TCP_CA_Loss: 2815 tcp_process_loss(sk, flag, is_dupack, rexmit); 2816 tcp_rack_identify_loss(sk, ack_flag); 2817 if (!(icsk->icsk_ca_state == TCP_CA_Open || 2818 (*ack_flag & FLAG_LOST_RETRANS))) 2819 return; 2820 /* Change state if cwnd is undone or retransmits are lost */ 2821 /* fall through */ 2822 default: 2823 if (tcp_is_reno(tp)) { 2824 if (flag & FLAG_SND_UNA_ADVANCED) 2825 tcp_reset_reno_sack(tp); 2826 if (is_dupack) 2827 tcp_add_reno_sack(sk); 2828 } 2829 2830 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 2831 tcp_try_undo_dsack(sk); 2832 2833 tcp_rack_identify_loss(sk, ack_flag); 2834 if (!tcp_time_to_recover(sk, flag)) { 2835 tcp_try_to_open(sk, flag); 2836 return; 2837 } 2838 2839 /* MTU probe failure: don't reduce cwnd */ 2840 if (icsk->icsk_ca_state < TCP_CA_CWR && 2841 icsk->icsk_mtup.probe_size && 2842 tp->snd_una == tp->mtu_probe.probe_seq_start) { 2843 tcp_mtup_probe_failed(sk); 2844 /* Restores the reduction we did in tcp_mtup_probe() */ 2845 tp->snd_cwnd++; 2846 tcp_simple_retransmit(sk); 2847 return; 2848 } 2849 2850 /* Otherwise enter Recovery state */ 2851 tcp_enter_recovery(sk, (flag & FLAG_ECE)); 2852 fast_rexmit = 1; 2853 } 2854 2855 if (do_lost) 2856 tcp_update_scoreboard(sk, fast_rexmit); 2857 *rexmit = REXMIT_LOST; 2858 } 2859 2860 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us) 2861 { 2862 u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ; 2863 struct tcp_sock *tp = tcp_sk(sk); 2864 2865 minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32, 2866 rtt_us ? : jiffies_to_usecs(1)); 2867 } 2868 2869 static bool tcp_ack_update_rtt(struct sock *sk, const int flag, 2870 long seq_rtt_us, long sack_rtt_us, 2871 long ca_rtt_us, struct rate_sample *rs) 2872 { 2873 const struct tcp_sock *tp = tcp_sk(sk); 2874 2875 /* Prefer RTT measured from ACK's timing to TS-ECR. This is because 2876 * broken middle-boxes or peers may corrupt TS-ECR fields. But 2877 * Karn's algorithm forbids taking RTT if some retransmitted data 2878 * is acked (RFC6298). 
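 * When no ACK-based sample is available we fall back to the SACK-based
 * sample, and finally to the TS-ECR delta computed below.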
2879 */ 2880 if (seq_rtt_us < 0) 2881 seq_rtt_us = sack_rtt_us; 2882 2883 /* RTTM Rule: A TSecr value received in a segment is used to 2884 * update the averaged RTT measurement only if the segment 2885 * acknowledges some new data, i.e., only if it advances the 2886 * left edge of the send window. 2887 * See draft-ietf-tcplw-high-performance-00, section 3.3. 2888 */ 2889 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2890 flag & FLAG_ACKED) { 2891 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; 2892 u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); 2893 2894 seq_rtt_us = ca_rtt_us = delta_us; 2895 } 2896 rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */ 2897 if (seq_rtt_us < 0) 2898 return false; 2899 2900 /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is 2901 * always taken together with ACK, SACK, or TS-opts. Any negative 2902 * values will be skipped with the seq_rtt_us < 0 check above. 2903 */ 2904 tcp_update_rtt_min(sk, ca_rtt_us); 2905 tcp_rtt_estimator(sk, seq_rtt_us); 2906 tcp_set_rto(sk); 2907 2908 /* RFC6298: only reset backoff on valid RTT measurement. */ 2909 inet_csk(sk)->icsk_backoff = 0; 2910 return true; 2911 } 2912 2913 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */ 2914 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) 2915 { 2916 struct rate_sample rs; 2917 long rtt_us = -1L; 2918 2919 if (req && !req->num_retrans && tcp_rsk(req)->snt_synack) 2920 rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack); 2921 2922 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs); 2923 } 2924 2925 2926 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) 2927 { 2928 const struct inet_connection_sock *icsk = inet_csk(sk); 2929 2930 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); 2931 tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32; 2932 } 2933 2934 /* Restart timer after forward progress on connection. 2935 * RFC2988 recommends to restart timer to now+rto. 2936 */ 2937 void tcp_rearm_rto(struct sock *sk) 2938 { 2939 const struct inet_connection_sock *icsk = inet_csk(sk); 2940 struct tcp_sock *tp = tcp_sk(sk); 2941 2942 /* If the retrans timer is currently being used by Fast Open 2943 * for SYN-ACK retrans purpose, stay put. 2944 */ 2945 if (tp->fastopen_rsk) 2946 return; 2947 2948 if (!tp->packets_out) { 2949 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 2950 } else { 2951 u32 rto = inet_csk(sk)->icsk_rto; 2952 /* Offset the time elapsed after installing regular RTO */ 2953 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || 2954 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { 2955 s64 delta_us = tcp_rto_delta_us(sk); 2956 /* delta_us may not be positive if the socket is locked 2957 * when the retrans timer fires and is rescheduled. 2958 */ 2959 rto = usecs_to_jiffies(max_t(int, delta_us, 1)); 2960 } 2961 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, 2962 TCP_RTO_MAX); 2963 } 2964 } 2965 2966 /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */ 2967 static void tcp_set_xmit_timer(struct sock *sk) 2968 { 2969 if (!tcp_schedule_loss_probe(sk, true)) 2970 tcp_rearm_rto(sk); 2971 } 2972 2973 /* If we get here, the whole TSO packet has not been acked. 
*/ 2974 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) 2975 { 2976 struct tcp_sock *tp = tcp_sk(sk); 2977 u32 packets_acked; 2978 2979 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); 2980 2981 packets_acked = tcp_skb_pcount(skb); 2982 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 2983 return 0; 2984 packets_acked -= tcp_skb_pcount(skb); 2985 2986 if (packets_acked) { 2987 BUG_ON(tcp_skb_pcount(skb) == 0); 2988 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); 2989 } 2990 2991 return packets_acked; 2992 } 2993 2994 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, 2995 u32 prior_snd_una) 2996 { 2997 const struct skb_shared_info *shinfo; 2998 2999 /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */ 3000 if (likely(!TCP_SKB_CB(skb)->txstamp_ack)) 3001 return; 3002 3003 shinfo = skb_shinfo(skb); 3004 if (!before(shinfo->tskey, prior_snd_una) && 3005 before(shinfo->tskey, tcp_sk(sk)->snd_una)) { 3006 tcp_skb_tsorted_save(skb) { 3007 __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); 3008 } tcp_skb_tsorted_restore(skb); 3009 } 3010 } 3011 3012 /* Remove acknowledged frames from the retransmission queue. If our packet 3013 * is before the ack sequence we can discard it as it's confirmed to have 3014 * arrived at the other end. 3015 */ 3016 static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, 3017 u32 prior_snd_una, 3018 struct tcp_sacktag_state *sack) 3019 { 3020 const struct inet_connection_sock *icsk = inet_csk(sk); 3021 u64 first_ackt, last_ackt; 3022 struct tcp_sock *tp = tcp_sk(sk); 3023 u32 prior_sacked = tp->sacked_out; 3024 u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */ 3025 struct sk_buff *skb, *next; 3026 bool fully_acked = true; 3027 long sack_rtt_us = -1L; 3028 long seq_rtt_us = -1L; 3029 long ca_rtt_us = -1L; 3030 u32 pkts_acked = 0; 3031 u32 last_in_flight = 0; 3032 bool rtt_update; 3033 int flag = 0; 3034 3035 first_ackt = 0; 3036 3037 for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) { 3038 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 3039 const u32 start_seq = scb->seq; 3040 u8 sacked = scb->sacked; 3041 u32 acked_pcount; 3042 3043 tcp_ack_tstamp(sk, skb, prior_snd_una); 3044 3045 /* Determine how many packets and what bytes were acked, tso and else */ 3046 if (after(scb->end_seq, tp->snd_una)) { 3047 if (tcp_skb_pcount(skb) == 1 || 3048 !after(tp->snd_una, scb->seq)) 3049 break; 3050 3051 acked_pcount = tcp_tso_acked(sk, skb); 3052 if (!acked_pcount) 3053 break; 3054 fully_acked = false; 3055 } else { 3056 acked_pcount = tcp_skb_pcount(skb); 3057 } 3058 3059 if (unlikely(sacked & TCPCB_RETRANS)) { 3060 if (sacked & TCPCB_SACKED_RETRANS) 3061 tp->retrans_out -= acked_pcount; 3062 flag |= FLAG_RETRANS_DATA_ACKED; 3063 } else if (!(sacked & TCPCB_SACKED_ACKED)) { 3064 last_ackt = skb->skb_mstamp; 3065 WARN_ON_ONCE(last_ackt == 0); 3066 if (!first_ackt) 3067 first_ackt = last_ackt; 3068 3069 last_in_flight = TCP_SKB_CB(skb)->tx.in_flight; 3070 if (before(start_seq, reord)) 3071 reord = start_seq; 3072 if (!after(scb->end_seq, tp->high_seq)) 3073 flag |= FLAG_ORIG_SACK_ACKED; 3074 } 3075 3076 if (sacked & TCPCB_SACKED_ACKED) { 3077 tp->sacked_out -= acked_pcount; 3078 } else if (tcp_is_sack(tp)) { 3079 tp->delivered += acked_pcount; 3080 if (!tcp_skb_spurious_retrans(tp, skb)) 3081 tcp_rack_advance(tp, sacked, scb->end_seq, 3082 skb->skb_mstamp); 3083 } 3084 if (sacked & TCPCB_LOST) 3085 tp->lost_out -= acked_pcount; 3086 3087 tp->packets_out -= acked_pcount; 
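/* This skb (or the acked prefix of a TSO skb) has left the network;
 * account it towards the delivery and rate sampling for this ACK.
 */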
3088 pkts_acked += acked_pcount; 3089 tcp_rate_skb_delivered(sk, skb, sack->rate); 3090 3091 /* Initial outgoing SYN's get put onto the write_queue 3092 * just like anything else we transmit. It is not 3093 * true data, and if we misinform our callers that 3094 * this ACK acks real data, we will erroneously exit 3095 * connection startup slow start one packet too 3096 * quickly. This is severely frowned upon behavior. 3097 */ 3098 if (likely(!(scb->tcp_flags & TCPHDR_SYN))) { 3099 flag |= FLAG_DATA_ACKED; 3100 } else { 3101 flag |= FLAG_SYN_ACKED; 3102 tp->retrans_stamp = 0; 3103 } 3104 3105 if (!fully_acked) 3106 break; 3107 3108 next = skb_rb_next(skb); 3109 if (unlikely(skb == tp->retransmit_skb_hint)) 3110 tp->retransmit_skb_hint = NULL; 3111 if (unlikely(skb == tp->lost_skb_hint)) 3112 tp->lost_skb_hint = NULL; 3113 tcp_rtx_queue_unlink_and_free(skb, sk); 3114 } 3115 3116 if (!skb) 3117 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 3118 3119 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) 3120 tp->snd_up = tp->snd_una; 3121 3122 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 3123 flag |= FLAG_SACK_RENEGING; 3124 3125 if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) { 3126 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); 3127 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt); 3128 } 3129 if (sack->first_sackt) { 3130 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt); 3131 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt); 3132 } 3133 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us, 3134 ca_rtt_us, sack->rate); 3135 3136 if (flag & FLAG_ACKED) { 3137 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ 3138 if (unlikely(icsk->icsk_mtup.probe_size && 3139 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { 3140 tcp_mtup_probe_success(sk); 3141 } 3142 3143 if (tcp_is_reno(tp)) { 3144 tcp_remove_reno_sacks(sk, pkts_acked); 3145 } else { 3146 int delta; 3147 3148 /* Non-retransmitted hole got filled? That's reordering */ 3149 if (before(reord, prior_fack)) 3150 tcp_check_sack_reordering(sk, reord, 0); 3151 3152 delta = prior_sacked - tp->sacked_out; 3153 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); 3154 } 3155 } else if (skb && rtt_update && sack_rtt_us >= 0 && 3156 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) { 3157 /* Do not re-arm RTO if the sack RTT is measured from data sent 3158 * after when the head was last (re)transmitted. Otherwise the 3159 * timeout may continue to extend in loss recovery. 
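 * (That is, we only re-arm here when the SACKed data was sent before the
 * head's last (re)transmission.)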
3160 */
3161 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
3162 }
3163
3164 if (icsk->icsk_ca_ops->pkts_acked) {
3165 struct ack_sample sample = { .pkts_acked = pkts_acked,
3166 .rtt_us = sack->rate->rtt_us,
3167 .in_flight = last_in_flight };
3168
3169 icsk->icsk_ca_ops->pkts_acked(sk, &sample);
3170 }
3171
3172 #if FASTRETRANS_DEBUG > 0
3173 WARN_ON((int)tp->sacked_out < 0);
3174 WARN_ON((int)tp->lost_out < 0);
3175 WARN_ON((int)tp->retrans_out < 0);
3176 if (!tp->packets_out && tcp_is_sack(tp)) {
3177 icsk = inet_csk(sk);
3178 if (tp->lost_out) {
3179 pr_debug("Leak l=%u %d\n",
3180 tp->lost_out, icsk->icsk_ca_state);
3181 tp->lost_out = 0;
3182 }
3183 if (tp->sacked_out) {
3184 pr_debug("Leak s=%u %d\n",
3185 tp->sacked_out, icsk->icsk_ca_state);
3186 tp->sacked_out = 0;
3187 }
3188 if (tp->retrans_out) {
3189 pr_debug("Leak r=%u %d\n",
3190 tp->retrans_out, icsk->icsk_ca_state);
3191 tp->retrans_out = 0;
3192 }
3193 }
3194 #endif
3195 return flag;
3196 }
3197
3198 static void tcp_ack_probe(struct sock *sk)
3199 {
3200 struct inet_connection_sock *icsk = inet_csk(sk);
3201 struct sk_buff *head = tcp_send_head(sk);
3202 const struct tcp_sock *tp = tcp_sk(sk);
3203
3204 /* Was it a usable window open? */
3205 if (!head)
3206 return;
3207 if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
3208 icsk->icsk_backoff = 0;
3209 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
3210 /* Socket must be woken up by a subsequent tcp_data_snd_check().
3211 * This function is not for random use!
3212 */
3213 } else {
3214 unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
3215
3216 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3217 when, TCP_RTO_MAX);
3218 }
3219 }
3220
3221 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
3222 {
3223 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3224 inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
3225 }
3226
3227 /* Decide whether to run the increase function of congestion control. */
3228 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
3229 {
3230 /* If reordering is high then always grow cwnd whenever data is
3231 * delivered regardless of its ordering. Otherwise stay conservative
3232 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
3233 * new SACK or ECE mark may first advance cwnd here and later reduce
3234 * cwnd in tcp_fastretrans_alert() based on more states.
3235 */
3236 if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
3237 return flag & FLAG_FORWARD_PROGRESS;
3238
3239 return flag & FLAG_DATA_ACKED;
3240 }
3241
3242 /* The "ultimate" congestion control function that aims to replace the rigid
3243 * cwnd increase and decrease control (tcp_cong_avoid, tcp_*cwnd_reduction).
3244 * It's called toward the end of processing an ACK with precise rate
3245 * information. All transmissions and retransmissions are delayed until afterwards.
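 * A congestion control module that provides the cong_control hook takes over
 * cwnd management entirely; otherwise PRR drives the reduction and
 * cong_avoid() drives the growth.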
3246 */ 3247 static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked, 3248 int flag, const struct rate_sample *rs) 3249 { 3250 const struct inet_connection_sock *icsk = inet_csk(sk); 3251 3252 if (icsk->icsk_ca_ops->cong_control) { 3253 icsk->icsk_ca_ops->cong_control(sk, rs); 3254 return; 3255 } 3256 3257 if (tcp_in_cwnd_reduction(sk)) { 3258 /* Reduce cwnd if state mandates */ 3259 tcp_cwnd_reduction(sk, acked_sacked, flag); 3260 } else if (tcp_may_raise_cwnd(sk, flag)) { 3261 /* Advance cwnd if state allows */ 3262 tcp_cong_avoid(sk, ack, acked_sacked); 3263 } 3264 tcp_update_pacing_rate(sk); 3265 } 3266 3267 /* Check that window update is acceptable. 3268 * The function assumes that snd_una<=ack<=snd_next. 3269 */ 3270 static inline bool tcp_may_update_window(const struct tcp_sock *tp, 3271 const u32 ack, const u32 ack_seq, 3272 const u32 nwin) 3273 { 3274 return after(ack, tp->snd_una) || 3275 after(ack_seq, tp->snd_wl1) || 3276 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); 3277 } 3278 3279 /* If we update tp->snd_una, also update tp->bytes_acked */ 3280 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) 3281 { 3282 u32 delta = ack - tp->snd_una; 3283 3284 sock_owned_by_me((struct sock *)tp); 3285 tp->bytes_acked += delta; 3286 tp->snd_una = ack; 3287 } 3288 3289 /* If we update tp->rcv_nxt, also update tp->bytes_received */ 3290 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) 3291 { 3292 u32 delta = seq - tp->rcv_nxt; 3293 3294 sock_owned_by_me((struct sock *)tp); 3295 tp->bytes_received += delta; 3296 tp->rcv_nxt = seq; 3297 } 3298 3299 /* Update our send window. 3300 * 3301 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 3302 * and in FreeBSD. NetBSD's one is even worse.) is wrong. 3303 */ 3304 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, 3305 u32 ack_seq) 3306 { 3307 struct tcp_sock *tp = tcp_sk(sk); 3308 int flag = 0; 3309 u32 nwin = ntohs(tcp_hdr(skb)->window); 3310 3311 if (likely(!tcp_hdr(skb)->syn)) 3312 nwin <<= tp->rx_opt.snd_wscale; 3313 3314 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { 3315 flag |= FLAG_WIN_UPDATE; 3316 tcp_update_wl(tp, ack_seq); 3317 3318 if (tp->snd_wnd != nwin) { 3319 tp->snd_wnd = nwin; 3320 3321 /* Note, it is the only place, where 3322 * fast path is recovered for sending TCP. 3323 */ 3324 tp->pred_flags = 0; 3325 tcp_fast_path_check(sk); 3326 3327 if (!tcp_write_queue_empty(sk)) 3328 tcp_slow_start_after_idle_check(sk); 3329 3330 if (nwin > tp->max_window) { 3331 tp->max_window = nwin; 3332 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); 3333 } 3334 } 3335 } 3336 3337 tcp_snd_una_update(tp, ack); 3338 3339 return flag; 3340 } 3341 3342 static bool __tcp_oow_rate_limited(struct net *net, int mib_idx, 3343 u32 *last_oow_ack_time) 3344 { 3345 if (*last_oow_ack_time) { 3346 s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time); 3347 3348 if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) { 3349 NET_INC_STATS(net, mib_idx); 3350 return true; /* rate-limited: don't send yet! */ 3351 } 3352 } 3353 3354 *last_oow_ack_time = tcp_jiffies32; 3355 3356 return false; /* not rate-limited: go ahead, send dupack now! */ 3357 } 3358 3359 /* Return true if we're currently rate-limiting out-of-window ACKs and 3360 * thus shouldn't send a dupack right now. We rate-limit dupacks in 3361 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS 3362 * attacks that send repeated SYNs or ACKs for the same connection. 
To 3363 * do this, we do not send a duplicate SYNACK or ACK if the remote 3364 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate. 3365 */ 3366 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, 3367 int mib_idx, u32 *last_oow_ack_time) 3368 { 3369 /* Data packets without SYNs are not likely part of an ACK loop. */ 3370 if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && 3371 !tcp_hdr(skb)->syn) 3372 return false; 3373 3374 return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time); 3375 } 3376 3377 /* RFC 5961 7 [ACK Throttling] */ 3378 static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) 3379 { 3380 /* unprotected vars, we dont care of overwrites */ 3381 static u32 challenge_timestamp; 3382 static unsigned int challenge_count; 3383 struct tcp_sock *tp = tcp_sk(sk); 3384 struct net *net = sock_net(sk); 3385 u32 count, now; 3386 3387 /* First check our per-socket dupack rate limit. */ 3388 if (__tcp_oow_rate_limited(net, 3389 LINUX_MIB_TCPACKSKIPPEDCHALLENGE, 3390 &tp->last_oow_ack_time)) 3391 return; 3392 3393 /* Then check host-wide RFC 5961 rate limit. */ 3394 now = jiffies / HZ; 3395 if (now != challenge_timestamp) { 3396 u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit; 3397 u32 half = (ack_limit + 1) >> 1; 3398 3399 challenge_timestamp = now; 3400 WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit)); 3401 } 3402 count = READ_ONCE(challenge_count); 3403 if (count > 0) { 3404 WRITE_ONCE(challenge_count, count - 1); 3405 NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK); 3406 tcp_send_ack(sk); 3407 } 3408 } 3409 3410 static void tcp_store_ts_recent(struct tcp_sock *tp) 3411 { 3412 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 3413 tp->rx_opt.ts_recent_stamp = get_seconds(); 3414 } 3415 3416 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) 3417 { 3418 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { 3419 /* PAWS bug workaround wrt. ACK frames, the PAWS discard 3420 * extra check below makes sure this can only happen 3421 * for pure ACK frames. -DaveM 3422 * 3423 * Not only, also it occurs for expired timestamps. 3424 */ 3425 3426 if (tcp_paws_check(&tp->rx_opt, 0)) 3427 tcp_store_ts_recent(tp); 3428 } 3429 } 3430 3431 /* This routine deals with acks during a TLP episode. 3432 * We mark the end of a TLP episode on receiving TLP dupack or when 3433 * ack is after tlp_high_seq. 3434 * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe. 3435 */ 3436 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) 3437 { 3438 struct tcp_sock *tp = tcp_sk(sk); 3439 3440 if (before(ack, tp->tlp_high_seq)) 3441 return; 3442 3443 if (flag & FLAG_DSACKING_ACK) { 3444 /* This DSACK means original and TLP probe arrived; no loss */ 3445 tp->tlp_high_seq = 0; 3446 } else if (after(ack, tp->tlp_high_seq)) { 3447 /* ACK advances: there was a loss, so reduce cwnd. 
Reset 3448 * tlp_high_seq in tcp_init_cwnd_reduction() 3449 */ 3450 tcp_init_cwnd_reduction(sk); 3451 tcp_set_ca_state(sk, TCP_CA_CWR); 3452 tcp_end_cwnd_reduction(sk); 3453 tcp_try_keep_open(sk); 3454 NET_INC_STATS(sock_net(sk), 3455 LINUX_MIB_TCPLOSSPROBERECOVERY); 3456 } else if (!(flag & (FLAG_SND_UNA_ADVANCED | 3457 FLAG_NOT_DUP | FLAG_DATA_SACKED))) { 3458 /* Pure dupack: original and TLP probe arrived; no loss */ 3459 tp->tlp_high_seq = 0; 3460 } 3461 } 3462 3463 static inline void tcp_in_ack_event(struct sock *sk, u32 flags) 3464 { 3465 const struct inet_connection_sock *icsk = inet_csk(sk); 3466 3467 if (icsk->icsk_ca_ops->in_ack_event) 3468 icsk->icsk_ca_ops->in_ack_event(sk, flags); 3469 } 3470 3471 /* Congestion control has updated the cwnd already. So if we're in 3472 * loss recovery then now we do any new sends (for FRTO) or 3473 * retransmits (for CA_Loss or CA_recovery) that make sense. 3474 */ 3475 static void tcp_xmit_recovery(struct sock *sk, int rexmit) 3476 { 3477 struct tcp_sock *tp = tcp_sk(sk); 3478 3479 if (rexmit == REXMIT_NONE) 3480 return; 3481 3482 if (unlikely(rexmit == 2)) { 3483 __tcp_push_pending_frames(sk, tcp_current_mss(sk), 3484 TCP_NAGLE_OFF); 3485 if (after(tp->snd_nxt, tp->high_seq)) 3486 return; 3487 tp->frto = 0; 3488 } 3489 tcp_xmit_retransmit_queue(sk); 3490 } 3491 3492 /* This routine deals with incoming acks, but not outgoing ones. */ 3493 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) 3494 { 3495 struct inet_connection_sock *icsk = inet_csk(sk); 3496 struct tcp_sock *tp = tcp_sk(sk); 3497 struct tcp_sacktag_state sack_state; 3498 struct rate_sample rs = { .prior_delivered = 0 }; 3499 u32 prior_snd_una = tp->snd_una; 3500 bool is_sack_reneg = tp->is_sack_reneg; 3501 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3502 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3503 bool is_dupack = false; 3504 int prior_packets = tp->packets_out; 3505 u32 delivered = tp->delivered; 3506 u32 lost = tp->lost; 3507 int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */ 3508 u32 prior_fack; 3509 3510 sack_state.first_sackt = 0; 3511 sack_state.rate = &rs; 3512 3513 /* We very likely will need to access rtx queue. */ 3514 prefetch(sk->tcp_rtx_queue.rb_node); 3515 3516 /* If the ack is older than previous acks 3517 * then we can probably ignore it. 3518 */ 3519 if (before(ack, prior_snd_una)) { 3520 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ 3521 if (before(ack, prior_snd_una - tp->max_window)) { 3522 if (!(flag & FLAG_NO_CHALLENGE_ACK)) 3523 tcp_send_challenge_ack(sk, skb); 3524 return -1; 3525 } 3526 goto old_ack; 3527 } 3528 3529 /* If the ack includes data we haven't sent yet, discard 3530 * this segment (RFC793 Section 3.9). 3531 */ 3532 if (after(ack, tp->snd_nxt)) 3533 goto invalid_ack; 3534 3535 if (after(ack, prior_snd_una)) { 3536 flag |= FLAG_SND_UNA_ADVANCED; 3537 icsk->icsk_retransmits = 0; 3538 } 3539 3540 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una; 3541 rs.prior_in_flight = tcp_packets_in_flight(tp); 3542 3543 /* ts_recent update must be made after we are sure that the packet 3544 * is in window. 3545 */ 3546 if (flag & FLAG_UPDATE_TS_RECENT) 3547 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 3548 3549 if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 3550 /* Window is constant, pure forward advance. 3551 * No more checks are required. 3552 * Note, we use the fact that SND.UNA>=SND.WL2. 
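 * (The slow path in the else branch below goes through
 * tcp_ack_update_window() and the full RFC checks instead.)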
3553 */ 3554 tcp_update_wl(tp, ack_seq); 3555 tcp_snd_una_update(tp, ack); 3556 flag |= FLAG_WIN_UPDATE; 3557 3558 tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); 3559 3560 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); 3561 } else { 3562 u32 ack_ev_flags = CA_ACK_SLOWPATH; 3563 3564 if (ack_seq != TCP_SKB_CB(skb)->end_seq) 3565 flag |= FLAG_DATA; 3566 else 3567 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); 3568 3569 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); 3570 3571 if (TCP_SKB_CB(skb)->sacked) 3572 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, 3573 &sack_state); 3574 3575 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { 3576 flag |= FLAG_ECE; 3577 ack_ev_flags |= CA_ACK_ECE; 3578 } 3579 3580 if (flag & FLAG_WIN_UPDATE) 3581 ack_ev_flags |= CA_ACK_WIN_UPDATE; 3582 3583 tcp_in_ack_event(sk, ack_ev_flags); 3584 } 3585 3586 /* We passed data and got it acked, remove any soft error 3587 * log. Something worked... 3588 */ 3589 sk->sk_err_soft = 0; 3590 icsk->icsk_probes_out = 0; 3591 tp->rcv_tstamp = tcp_jiffies32; 3592 if (!prior_packets) 3593 goto no_queue; 3594 3595 /* See if we can take anything off of the retransmit queue. */ 3596 flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state); 3597 3598 tcp_rack_update_reo_wnd(sk, &rs); 3599 3600 if (tp->tlp_high_seq) 3601 tcp_process_tlp_ack(sk, ack, flag); 3602 /* If needed, reset TLP/RTO timer; RACK may later override this. */ 3603 if (flag & FLAG_SET_XMIT_TIMER) 3604 tcp_set_xmit_timer(sk); 3605 3606 if (tcp_ack_is_dubious(sk, flag)) { 3607 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3608 tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag, 3609 &rexmit); 3610 } 3611 3612 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3613 sk_dst_confirm(sk); 3614 3615 delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */ 3616 lost = tp->lost - lost; /* freshly marked lost */ 3617 tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate); 3618 tcp_cong_control(sk, ack, delivered, flag, sack_state.rate); 3619 tcp_xmit_recovery(sk, rexmit); 3620 return 1; 3621 3622 no_queue: 3623 /* If data was DSACKed, see if we can undo a cwnd reduction. */ 3624 if (flag & FLAG_DSACKING_ACK) 3625 tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag, 3626 &rexmit); 3627 /* If this ack opens up a zero window, clear backoff. It was 3628 * being used to time the probes, and is probably far higher than 3629 * it needs to be for normal retransmission. 3630 */ 3631 tcp_ack_probe(sk); 3632 3633 if (tp->tlp_high_seq) 3634 tcp_process_tlp_ack(sk, ack, flag); 3635 return 1; 3636 3637 invalid_ack: 3638 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3639 return -1; 3640 3641 old_ack: 3642 /* If data was SACKed, tag it and see if we should send more data. 3643 * If data was DSACKed, see if we can undo a cwnd reduction. 3644 */ 3645 if (TCP_SKB_CB(skb)->sacked) { 3646 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, 3647 &sack_state); 3648 tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag, 3649 &rexmit); 3650 tcp_xmit_recovery(sk, rexmit); 3651 } 3652 3653 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3654 return 0; 3655 } 3656 3657 static void tcp_parse_fastopen_option(int len, const unsigned char *cookie, 3658 bool syn, struct tcp_fastopen_cookie *foc, 3659 bool exp_opt) 3660 { 3661 /* Valid only in SYN or SYN-ACK with an even length. 
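 * A cookie length outside [TCP_FASTOPEN_COOKIE_MIN,
 * TCP_FASTOPEN_COOKIE_MAX] is stored as len = -1 so callers treat it
 * as invalid; a zero length is left as-is (typically a bare cookie
 * request in a SYN).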
*/ 3662 if (!foc || !syn || len < 0 || (len & 1)) 3663 return; 3664 3665 if (len >= TCP_FASTOPEN_COOKIE_MIN && 3666 len <= TCP_FASTOPEN_COOKIE_MAX) 3667 memcpy(foc->val, cookie, len); 3668 else if (len != 0) 3669 len = -1; 3670 foc->len = len; 3671 foc->exp = exp_opt; 3672 } 3673 3674 static void smc_parse_options(const struct tcphdr *th, 3675 struct tcp_options_received *opt_rx, 3676 const unsigned char *ptr, 3677 int opsize) 3678 { 3679 #if IS_ENABLED(CONFIG_SMC) 3680 if (static_branch_unlikely(&tcp_have_smc)) { 3681 if (th->syn && !(opsize & 1) && 3682 opsize >= TCPOLEN_EXP_SMC_BASE && 3683 get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC) 3684 opt_rx->smc_ok = 1; 3685 } 3686 #endif 3687 } 3688 3689 /* Look for tcp options. Normally only called on SYN and SYNACK packets. 3690 * But, this can also be called on packets in the established flow when 3691 * the fast version below fails. 3692 */ 3693 void tcp_parse_options(const struct net *net, 3694 const struct sk_buff *skb, 3695 struct tcp_options_received *opt_rx, int estab, 3696 struct tcp_fastopen_cookie *foc) 3697 { 3698 const unsigned char *ptr; 3699 const struct tcphdr *th = tcp_hdr(skb); 3700 int length = (th->doff * 4) - sizeof(struct tcphdr); 3701 3702 ptr = (const unsigned char *)(th + 1); 3703 opt_rx->saw_tstamp = 0; 3704 3705 while (length > 0) { 3706 int opcode = *ptr++; 3707 int opsize; 3708 3709 switch (opcode) { 3710 case TCPOPT_EOL: 3711 return; 3712 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 3713 length--; 3714 continue; 3715 default: 3716 opsize = *ptr++; 3717 if (opsize < 2) /* "silly options" */ 3718 return; 3719 if (opsize > length) 3720 return; /* don't parse partial options */ 3721 switch (opcode) { 3722 case TCPOPT_MSS: 3723 if (opsize == TCPOLEN_MSS && th->syn && !estab) { 3724 u16 in_mss = get_unaligned_be16(ptr); 3725 if (in_mss) { 3726 if (opt_rx->user_mss && 3727 opt_rx->user_mss < in_mss) 3728 in_mss = opt_rx->user_mss; 3729 opt_rx->mss_clamp = in_mss; 3730 } 3731 } 3732 break; 3733 case TCPOPT_WINDOW: 3734 if (opsize == TCPOLEN_WINDOW && th->syn && 3735 !estab && net->ipv4.sysctl_tcp_window_scaling) { 3736 __u8 snd_wscale = *(__u8 *)ptr; 3737 opt_rx->wscale_ok = 1; 3738 if (snd_wscale > TCP_MAX_WSCALE) { 3739 net_info_ratelimited("%s: Illegal window scaling value %d > %u received\n", 3740 __func__, 3741 snd_wscale, 3742 TCP_MAX_WSCALE); 3743 snd_wscale = TCP_MAX_WSCALE; 3744 } 3745 opt_rx->snd_wscale = snd_wscale; 3746 } 3747 break; 3748 case TCPOPT_TIMESTAMP: 3749 if ((opsize == TCPOLEN_TIMESTAMP) && 3750 ((estab && opt_rx->tstamp_ok) || 3751 (!estab && net->ipv4.sysctl_tcp_timestamps))) { 3752 opt_rx->saw_tstamp = 1; 3753 opt_rx->rcv_tsval = get_unaligned_be32(ptr); 3754 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); 3755 } 3756 break; 3757 case TCPOPT_SACK_PERM: 3758 if (opsize == TCPOLEN_SACK_PERM && th->syn && 3759 !estab && net->ipv4.sysctl_tcp_sack) { 3760 opt_rx->sack_ok = TCP_SACK_SEEN; 3761 tcp_sack_reset(opt_rx); 3762 } 3763 break; 3764 3765 case TCPOPT_SACK: 3766 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && 3767 !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && 3768 opt_rx->sack_ok) { 3769 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; 3770 } 3771 break; 3772 #ifdef CONFIG_TCP_MD5SIG 3773 case TCPOPT_MD5SIG: 3774 /* 3775 * The MD5 Hash has already been 3776 * checked (see tcp_v{4,6}_do_rcv()). 
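 * Nothing is left to do here but skip over the option.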
3777 */ 3778 break; 3779 #endif 3780 case TCPOPT_FASTOPEN: 3781 tcp_parse_fastopen_option( 3782 opsize - TCPOLEN_FASTOPEN_BASE, 3783 ptr, th->syn, foc, false); 3784 break; 3785 3786 case TCPOPT_EXP: 3787 /* Fast Open option shares code 254 using a 3788 * 16 bits magic number. 3789 */ 3790 if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE && 3791 get_unaligned_be16(ptr) == 3792 TCPOPT_FASTOPEN_MAGIC) 3793 tcp_parse_fastopen_option(opsize - 3794 TCPOLEN_EXP_FASTOPEN_BASE, 3795 ptr + 2, th->syn, foc, true); 3796 else 3797 smc_parse_options(th, opt_rx, ptr, 3798 opsize); 3799 break; 3800 3801 } 3802 ptr += opsize-2; 3803 length -= opsize; 3804 } 3805 } 3806 } 3807 EXPORT_SYMBOL(tcp_parse_options); 3808 3809 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) 3810 { 3811 const __be32 *ptr = (const __be32 *)(th + 1); 3812 3813 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 3814 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 3815 tp->rx_opt.saw_tstamp = 1; 3816 ++ptr; 3817 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3818 ++ptr; 3819 if (*ptr) 3820 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; 3821 else 3822 tp->rx_opt.rcv_tsecr = 0; 3823 return true; 3824 } 3825 return false; 3826 } 3827 3828 /* Fast parse options. This hopes to only see timestamps. 3829 * If it is wrong it falls back on tcp_parse_options(). 3830 */ 3831 static bool tcp_fast_parse_options(const struct net *net, 3832 const struct sk_buff *skb, 3833 const struct tcphdr *th, struct tcp_sock *tp) 3834 { 3835 /* In the spirit of fast parsing, compare doff directly to constant 3836 * values. Because equality is used, short doff can be ignored here. 3837 */ 3838 if (th->doff == (sizeof(*th) / 4)) { 3839 tp->rx_opt.saw_tstamp = 0; 3840 return false; 3841 } else if (tp->rx_opt.tstamp_ok && 3842 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { 3843 if (tcp_parse_aligned_timestamp(tp, th)) 3844 return true; 3845 } 3846 3847 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL); 3848 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 3849 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 3850 3851 return true; 3852 } 3853 3854 #ifdef CONFIG_TCP_MD5SIG 3855 /* 3856 * Parse MD5 Signature option 3857 */ 3858 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) 3859 { 3860 int length = (th->doff << 2) - sizeof(*th); 3861 const u8 *ptr = (const u8 *)(th + 1); 3862 3863 /* If the TCP option is too short, we can short cut */ 3864 if (length < TCPOLEN_MD5SIG) 3865 return NULL; 3866 3867 while (length > 0) { 3868 int opcode = *ptr++; 3869 int opsize; 3870 3871 switch (opcode) { 3872 case TCPOPT_EOL: 3873 return NULL; 3874 case TCPOPT_NOP: 3875 length--; 3876 continue; 3877 default: 3878 opsize = *ptr++; 3879 if (opsize < 2 || opsize > length) 3880 return NULL; 3881 if (opcode == TCPOPT_MD5SIG) 3882 return opsize == TCPOLEN_MD5SIG ? ptr : NULL; 3883 } 3884 ptr += opsize - 2; 3885 length -= opsize; 3886 } 3887 return NULL; 3888 } 3889 EXPORT_SYMBOL(tcp_parse_md5sig_option); 3890 #endif 3891 3892 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM 3893 * 3894 * It is not fatal. If this ACK does _not_ change critical state (seqs, window) 3895 * it can pass through stack. So, the following predicate verifies that 3896 * this segment is not used for anything but congestion avoidance or 3897 * fast retransmit. Moreover, we even are able to eliminate most of such 3898 * second order effects, if we apply some small "replay" window (~RTO) 3899 * to timestamp space. 
3900 * 3901 * All these measures still do not guarantee that we reject wrapped ACKs 3902 * on networks with high bandwidth, when sequence space is recycled fastly, 3903 * but it guarantees that such events will be very rare and do not affect 3904 * connection seriously. This doesn't look nice, but alas, PAWS is really 3905 * buggy extension. 3906 * 3907 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC 3908 * states that events when retransmit arrives after original data are rare. 3909 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is 3910 * the biggest problem on large power networks even with minor reordering. 3911 * OK, let's give it small replay window. If peer clock is even 1hz, it is safe 3912 * up to bandwidth of 18Gigabit/sec. 8) ] 3913 */ 3914 3915 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 3916 { 3917 const struct tcp_sock *tp = tcp_sk(sk); 3918 const struct tcphdr *th = tcp_hdr(skb); 3919 u32 seq = TCP_SKB_CB(skb)->seq; 3920 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3921 3922 return (/* 1. Pure ACK with correct sequence number. */ 3923 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && 3924 3925 /* 2. ... and duplicate ACK. */ 3926 ack == tp->snd_una && 3927 3928 /* 3. ... and does not update window. */ 3929 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 3930 3931 /* 4. ... and sits in replay window. */ 3932 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); 3933 } 3934 3935 static inline bool tcp_paws_discard(const struct sock *sk, 3936 const struct sk_buff *skb) 3937 { 3938 const struct tcp_sock *tp = tcp_sk(sk); 3939 3940 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && 3941 !tcp_disordered_ack(sk, skb); 3942 } 3943 3944 /* Check segment sequence number for validity. 3945 * 3946 * Segment controls are considered valid, if the segment 3947 * fits to the window after truncation to the window. Acceptability 3948 * of data (and SYN, FIN, of course) is checked separately. 3949 * See tcp_data_queue(), for example. 3950 * 3951 * Also, controls (RST is main one) are accepted using RCV.WUP instead 3952 * of RCV.NXT. Peer still did not advance his SND.UNA when we 3953 * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. 3954 * (borrowed from freebsd) 3955 */ 3956 3957 static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) 3958 { 3959 return !before(end_seq, tp->rcv_wup) && 3960 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); 3961 } 3962 3963 /* When we get a reset we do this. */ 3964 void tcp_reset(struct sock *sk) 3965 { 3966 trace_tcp_receive_reset(sk); 3967 3968 /* We want the right error as BSD sees it (and indeed as we do). */ 3969 switch (sk->sk_state) { 3970 case TCP_SYN_SENT: 3971 sk->sk_err = ECONNREFUSED; 3972 break; 3973 case TCP_CLOSE_WAIT: 3974 sk->sk_err = EPIPE; 3975 break; 3976 case TCP_CLOSE: 3977 return; 3978 default: 3979 sk->sk_err = ECONNRESET; 3980 } 3981 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 3982 smp_wmb(); 3983 3984 tcp_done(sk); 3985 3986 if (!sock_flag(sk, SOCK_DEAD)) 3987 sk->sk_error_report(sk); 3988 } 3989 3990 /* 3991 * Process the FIN bit. This now behaves as it is supposed to work 3992 * and the FIN takes effect when it is validly part of sequence 3993 * space. Not before when we get holes. 
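 * (An out-of-sequence FIN therefore just waits in the out_of_order
 * queue until the hole in front of it is filled; see tcp_ofo_queue().)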
3994 * 3995 * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT 3996 * (and thence onto LAST-ACK and finally, CLOSE, we never enter 3997 * TIME-WAIT) 3998 * 3999 * If we are in FINWAIT-1, a received FIN indicates simultaneous 4000 * close and we go into CLOSING (and later onto TIME-WAIT) 4001 * 4002 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. 4003 */ 4004 void tcp_fin(struct sock *sk) 4005 { 4006 struct tcp_sock *tp = tcp_sk(sk); 4007 4008 inet_csk_schedule_ack(sk); 4009 4010 sk->sk_shutdown |= RCV_SHUTDOWN; 4011 sock_set_flag(sk, SOCK_DONE); 4012 4013 switch (sk->sk_state) { 4014 case TCP_SYN_RECV: 4015 case TCP_ESTABLISHED: 4016 /* Move to CLOSE_WAIT */ 4017 tcp_set_state(sk, TCP_CLOSE_WAIT); 4018 inet_csk(sk)->icsk_ack.pingpong = 1; 4019 break; 4020 4021 case TCP_CLOSE_WAIT: 4022 case TCP_CLOSING: 4023 /* Received a retransmission of the FIN, do 4024 * nothing. 4025 */ 4026 break; 4027 case TCP_LAST_ACK: 4028 /* RFC793: Remain in the LAST-ACK state. */ 4029 break; 4030 4031 case TCP_FIN_WAIT1: 4032 /* This case occurs when a simultaneous close 4033 * happens, we must ack the received FIN and 4034 * enter the CLOSING state. 4035 */ 4036 tcp_send_ack(sk); 4037 tcp_set_state(sk, TCP_CLOSING); 4038 break; 4039 case TCP_FIN_WAIT2: 4040 /* Received a FIN -- send ACK and enter TIME_WAIT. */ 4041 tcp_send_ack(sk); 4042 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 4043 break; 4044 default: 4045 /* Only TCP_LISTEN and TCP_CLOSE are left, in these 4046 * cases we should never reach this piece of code. 4047 */ 4048 pr_err("%s: Impossible, sk->sk_state=%d\n", 4049 __func__, sk->sk_state); 4050 break; 4051 } 4052 4053 /* It _is_ possible, that we have something out-of-order _after_ FIN. 4054 * Probably, we should reset in this case. For now drop them. 4055 */ 4056 skb_rbtree_purge(&tp->out_of_order_queue); 4057 if (tcp_is_sack(tp)) 4058 tcp_sack_reset(&tp->rx_opt); 4059 sk_mem_reclaim(sk); 4060 4061 if (!sock_flag(sk, SOCK_DEAD)) { 4062 sk->sk_state_change(sk); 4063 4064 /* Do not send POLL_HUP for half duplex close. 
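 * The peer closed only its half of the connection, we may still have
 * data to send, so signal POLL_IN rather than a hangup in that case.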
*/ 4065 if (sk->sk_shutdown == SHUTDOWN_MASK || 4066 sk->sk_state == TCP_CLOSE) 4067 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 4068 else 4069 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 4070 } 4071 } 4072 4073 static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, 4074 u32 end_seq) 4075 { 4076 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { 4077 if (before(seq, sp->start_seq)) 4078 sp->start_seq = seq; 4079 if (after(end_seq, sp->end_seq)) 4080 sp->end_seq = end_seq; 4081 return true; 4082 } 4083 return false; 4084 } 4085 4086 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 4087 { 4088 struct tcp_sock *tp = tcp_sk(sk); 4089 4090 if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { 4091 int mib_idx; 4092 4093 if (before(seq, tp->rcv_nxt)) 4094 mib_idx = LINUX_MIB_TCPDSACKOLDSENT; 4095 else 4096 mib_idx = LINUX_MIB_TCPDSACKOFOSENT; 4097 4098 NET_INC_STATS(sock_net(sk), mib_idx); 4099 4100 tp->rx_opt.dsack = 1; 4101 tp->duplicate_sack[0].start_seq = seq; 4102 tp->duplicate_sack[0].end_seq = end_seq; 4103 } 4104 } 4105 4106 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) 4107 { 4108 struct tcp_sock *tp = tcp_sk(sk); 4109 4110 if (!tp->rx_opt.dsack) 4111 tcp_dsack_set(sk, seq, end_seq); 4112 else 4113 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 4114 } 4115 4116 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) 4117 { 4118 struct tcp_sock *tp = tcp_sk(sk); 4119 4120 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 4121 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4122 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4123 tcp_enter_quickack_mode(sk); 4124 4125 if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) { 4126 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4127 4128 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 4129 end_seq = tp->rcv_nxt; 4130 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); 4131 } 4132 } 4133 4134 tcp_send_ack(sk); 4135 } 4136 4137 /* These routines update the SACK block as out-of-order packets arrive or 4138 * in-order packets close up the sequence space. 4139 */ 4140 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) 4141 { 4142 int this_sack; 4143 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4144 struct tcp_sack_block *swalk = sp + 1; 4145 4146 /* See if the recent change to the first SACK eats into 4147 * or hits the sequence space of other SACK blocks, if so coalesce. 4148 */ 4149 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { 4150 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { 4151 int i; 4152 4153 /* Zap SWALK, by moving every further SACK up by one slot. 4154 * Decrease num_sacks. 4155 */ 4156 tp->rx_opt.num_sacks--; 4157 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) 4158 sp[i] = sp[i + 1]; 4159 continue; 4160 } 4161 this_sack++, swalk++; 4162 } 4163 } 4164 4165 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) 4166 { 4167 struct tcp_sock *tp = tcp_sk(sk); 4168 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4169 int cur_sacks = tp->rx_opt.num_sacks; 4170 int this_sack; 4171 4172 if (!cur_sacks) 4173 goto new_sack; 4174 4175 for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { 4176 if (tcp_sack_extend(sp, seq, end_seq)) { 4177 /* Rotate this_sack to the first one. 
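 * The block that just grew covers the most recently received data and
 * therefore must be advertised first (RFC 2018).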
*/ 4178 for (; this_sack > 0; this_sack--, sp--) 4179 swap(*sp, *(sp - 1)); 4180 if (cur_sacks > 1) 4181 tcp_sack_maybe_coalesce(tp); 4182 return; 4183 } 4184 } 4185 4186 /* Could not find an adjacent existing SACK, build a new one, 4187 * put it at the front, and shift everyone else down. We 4188 * always know there is at least one SACK present already here. 4189 * 4190 * If the sack array is full, forget about the last one. 4191 */ 4192 if (this_sack >= TCP_NUM_SACKS) { 4193 this_sack--; 4194 tp->rx_opt.num_sacks--; 4195 sp--; 4196 } 4197 for (; this_sack > 0; this_sack--, sp--) 4198 *sp = *(sp - 1); 4199 4200 new_sack: 4201 /* Build the new head SACK, and we're done. */ 4202 sp->start_seq = seq; 4203 sp->end_seq = end_seq; 4204 tp->rx_opt.num_sacks++; 4205 } 4206 4207 /* RCV.NXT advances, some SACKs should be eaten. */ 4208 4209 static void tcp_sack_remove(struct tcp_sock *tp) 4210 { 4211 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4212 int num_sacks = tp->rx_opt.num_sacks; 4213 int this_sack; 4214 4215 /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ 4216 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { 4217 tp->rx_opt.num_sacks = 0; 4218 return; 4219 } 4220 4221 for (this_sack = 0; this_sack < num_sacks;) { 4222 /* Check if the start of the sack is covered by RCV.NXT. */ 4223 if (!before(tp->rcv_nxt, sp->start_seq)) { 4224 int i; 4225 4226 /* RCV.NXT must cover all the block! */ 4227 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); 4228 4229 /* Zap this SACK, by moving forward any other SACKS. */ 4230 for (i = this_sack+1; i < num_sacks; i++) 4231 tp->selective_acks[i-1] = tp->selective_acks[i]; 4232 num_sacks--; 4233 continue; 4234 } 4235 this_sack++; 4236 sp++; 4237 } 4238 tp->rx_opt.num_sacks = num_sacks; 4239 } 4240 4241 /** 4242 * tcp_try_coalesce - try to merge skb to prior one 4243 * @sk: socket 4244 * @dest: destination queue 4245 * @to: prior buffer 4246 * @from: buffer to add in queue 4247 * @fragstolen: pointer to boolean 4248 * 4249 * Before queueing skb @from after @to, try to merge them 4250 * to reduce overall memory use and queue lengths, if cost is small. 4251 * Packets in ofo or receive queues can stay a long time. 4252 * Better try to coalesce them right now to avoid future collapses. 4253 * Returns true if caller should free @from instead of queueing it 4254 */ 4255 static bool tcp_try_coalesce(struct sock *sk, 4256 struct sk_buff *to, 4257 struct sk_buff *from, 4258 bool *fragstolen) 4259 { 4260 int delta; 4261 4262 *fragstolen = false; 4263 4264 /* Its possible this segment overlaps with prior segment in queue */ 4265 if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) 4266 return false; 4267 4268 if (!skb_try_coalesce(to, from, fragstolen, &delta)) 4269 return false; 4270 4271 atomic_add(delta, &sk->sk_rmem_alloc); 4272 sk_mem_charge(sk, delta); 4273 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); 4274 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; 4275 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; 4276 TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags; 4277 4278 if (TCP_SKB_CB(from)->has_rxtstamp) { 4279 TCP_SKB_CB(to)->has_rxtstamp = true; 4280 to->tstamp = from->tstamp; 4281 } 4282 4283 return true; 4284 } 4285 4286 static void tcp_drop(struct sock *sk, struct sk_buff *skb) 4287 { 4288 sk_drops_add(sk, skb); 4289 __kfree_skb(skb); 4290 } 4291 4292 /* This one checks to see if we can put data from the 4293 * out_of_order queue into the receive_queue. 
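 * It walks the ofo rbtree in sequence order, moves every skb that has
 * become in-order, and emits D-SACKs for the parts that were already
 * received.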
4294 */ 4295 static void tcp_ofo_queue(struct sock *sk) 4296 { 4297 struct tcp_sock *tp = tcp_sk(sk); 4298 __u32 dsack_high = tp->rcv_nxt; 4299 bool fin, fragstolen, eaten; 4300 struct sk_buff *skb, *tail; 4301 struct rb_node *p; 4302 4303 p = rb_first(&tp->out_of_order_queue); 4304 while (p) { 4305 skb = rb_to_skb(p); 4306 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 4307 break; 4308 4309 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { 4310 __u32 dsack = dsack_high; 4311 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) 4312 dsack_high = TCP_SKB_CB(skb)->end_seq; 4313 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); 4314 } 4315 p = rb_next(p); 4316 rb_erase(&skb->rbnode, &tp->out_of_order_queue); 4317 4318 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { 4319 SOCK_DEBUG(sk, "ofo packet was already received\n"); 4320 tcp_drop(sk, skb); 4321 continue; 4322 } 4323 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", 4324 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4325 TCP_SKB_CB(skb)->end_seq); 4326 4327 tail = skb_peek_tail(&sk->sk_receive_queue); 4328 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); 4329 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); 4330 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; 4331 if (!eaten) 4332 __skb_queue_tail(&sk->sk_receive_queue, skb); 4333 else 4334 kfree_skb_partial(skb, fragstolen); 4335 4336 if (unlikely(fin)) { 4337 tcp_fin(sk); 4338 /* tcp_fin() purges tp->out_of_order_queue, 4339 * so we must end this loop right now. 4340 */ 4341 break; 4342 } 4343 } 4344 } 4345 4346 static bool tcp_prune_ofo_queue(struct sock *sk); 4347 static int tcp_prune_queue(struct sock *sk); 4348 4349 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, 4350 unsigned int size) 4351 { 4352 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 4353 !sk_rmem_schedule(sk, skb, size)) { 4354 4355 if (tcp_prune_queue(sk) < 0) 4356 return -1; 4357 4358 while (!sk_rmem_schedule(sk, skb, size)) { 4359 if (!tcp_prune_ofo_queue(sk)) 4360 return -1; 4361 } 4362 } 4363 return 0; 4364 } 4365 4366 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) 4367 { 4368 struct tcp_sock *tp = tcp_sk(sk); 4369 struct rb_node **p, *parent; 4370 struct sk_buff *skb1; 4371 u32 seq, end_seq; 4372 bool fragstolen; 4373 4374 tcp_ecn_check_ce(tp, skb); 4375 4376 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { 4377 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); 4378 tcp_drop(sk, skb); 4379 return; 4380 } 4381 4382 /* Disable header prediction. */ 4383 tp->pred_flags = 0; 4384 inet_csk_schedule_ack(sk); 4385 4386 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); 4387 seq = TCP_SKB_CB(skb)->seq; 4388 end_seq = TCP_SKB_CB(skb)->end_seq; 4389 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 4390 tp->rcv_nxt, seq, end_seq); 4391 4392 p = &tp->out_of_order_queue.rb_node; 4393 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { 4394 /* Initial out of order segment, build 1 SACK. */ 4395 if (tcp_is_sack(tp)) { 4396 tp->rx_opt.num_sacks = 1; 4397 tp->selective_acks[0].start_seq = seq; 4398 tp->selective_acks[0].end_seq = end_seq; 4399 } 4400 rb_link_node(&skb->rbnode, NULL, p); 4401 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); 4402 tp->ooo_last_skb = skb; 4403 goto end; 4404 } 4405 4406 /* In the typical case, we are adding an skb to the end of the list. 4407 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. 
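 * (ooo_last_skb caches the right-most node, so an append either
 * coalesces into it or becomes a single right-child insertion.)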
4408 */ 4409 if (tcp_try_coalesce(sk, tp->ooo_last_skb, 4410 skb, &fragstolen)) { 4411 coalesce_done: 4412 tcp_grow_window(sk, skb); 4413 kfree_skb_partial(skb, fragstolen); 4414 skb = NULL; 4415 goto add_sack; 4416 } 4417 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */ 4418 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) { 4419 parent = &tp->ooo_last_skb->rbnode; 4420 p = &parent->rb_right; 4421 goto insert; 4422 } 4423 4424 /* Find place to insert this segment. Handle overlaps on the way. */ 4425 parent = NULL; 4426 while (*p) { 4427 parent = *p; 4428 skb1 = rb_to_skb(parent); 4429 if (before(seq, TCP_SKB_CB(skb1)->seq)) { 4430 p = &parent->rb_left; 4431 continue; 4432 } 4433 if (before(seq, TCP_SKB_CB(skb1)->end_seq)) { 4434 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4435 /* All the bits are present. Drop. */ 4436 NET_INC_STATS(sock_net(sk), 4437 LINUX_MIB_TCPOFOMERGE); 4438 __kfree_skb(skb); 4439 skb = NULL; 4440 tcp_dsack_set(sk, seq, end_seq); 4441 goto add_sack; 4442 } 4443 if (after(seq, TCP_SKB_CB(skb1)->seq)) { 4444 /* Partial overlap. */ 4445 tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq); 4446 } else { 4447 /* skb's seq == skb1's seq and skb covers skb1. 4448 * Replace skb1 with skb. 4449 */ 4450 rb_replace_node(&skb1->rbnode, &skb->rbnode, 4451 &tp->out_of_order_queue); 4452 tcp_dsack_extend(sk, 4453 TCP_SKB_CB(skb1)->seq, 4454 TCP_SKB_CB(skb1)->end_seq); 4455 NET_INC_STATS(sock_net(sk), 4456 LINUX_MIB_TCPOFOMERGE); 4457 __kfree_skb(skb1); 4458 goto merge_right; 4459 } 4460 } else if (tcp_try_coalesce(sk, skb1, 4461 skb, &fragstolen)) { 4462 goto coalesce_done; 4463 } 4464 p = &parent->rb_right; 4465 } 4466 insert: 4467 /* Insert segment into RB tree. */ 4468 rb_link_node(&skb->rbnode, parent, p); 4469 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); 4470 4471 merge_right: 4472 /* Remove other segments covered by skb. */ 4473 while ((skb1 = skb_rb_next(skb)) != NULL) { 4474 if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) 4475 break; 4476 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4477 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4478 end_seq); 4479 break; 4480 } 4481 rb_erase(&skb1->rbnode, &tp->out_of_order_queue); 4482 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4483 TCP_SKB_CB(skb1)->end_seq); 4484 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); 4485 tcp_drop(sk, skb1); 4486 } 4487 /* If there is no skb after us, we are the last_skb ! */ 4488 if (!skb1) 4489 tp->ooo_last_skb = skb; 4490 4491 add_sack: 4492 if (tcp_is_sack(tp)) 4493 tcp_sack_new_ofo_skb(sk, seq, end_seq); 4494 end: 4495 if (skb) { 4496 tcp_grow_window(sk, skb); 4497 skb_condense(skb); 4498 skb_set_owner_r(skb, sk); 4499 } 4500 } 4501 4502 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, 4503 bool *fragstolen) 4504 { 4505 int eaten; 4506 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); 4507 4508 __skb_pull(skb, hdrlen); 4509 eaten = (tail && 4510 tcp_try_coalesce(sk, tail, 4511 skb, fragstolen)) ? 
1 : 0; 4512 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); 4513 if (!eaten) { 4514 __skb_queue_tail(&sk->sk_receive_queue, skb); 4515 skb_set_owner_r(skb, sk); 4516 } 4517 return eaten; 4518 } 4519 4520 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) 4521 { 4522 struct sk_buff *skb; 4523 int err = -ENOMEM; 4524 int data_len = 0; 4525 bool fragstolen; 4526 4527 if (size == 0) 4528 return 0; 4529 4530 if (size > PAGE_SIZE) { 4531 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); 4532 4533 data_len = npages << PAGE_SHIFT; 4534 size = data_len + (size & ~PAGE_MASK); 4535 } 4536 skb = alloc_skb_with_frags(size - data_len, data_len, 4537 PAGE_ALLOC_COSTLY_ORDER, 4538 &err, sk->sk_allocation); 4539 if (!skb) 4540 goto err; 4541 4542 skb_put(skb, size - data_len); 4543 skb->data_len = data_len; 4544 skb->len = size; 4545 4546 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) 4547 goto err_free; 4548 4549 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); 4550 if (err) 4551 goto err_free; 4552 4553 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; 4554 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; 4555 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; 4556 4557 if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { 4558 WARN_ON_ONCE(fragstolen); /* should not happen */ 4559 __kfree_skb(skb); 4560 } 4561 return size; 4562 4563 err_free: 4564 kfree_skb(skb); 4565 err: 4566 return err; 4567 4568 } 4569 4570 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4571 { 4572 struct tcp_sock *tp = tcp_sk(sk); 4573 bool fragstolen; 4574 int eaten; 4575 4576 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { 4577 __kfree_skb(skb); 4578 return; 4579 } 4580 skb_dst_drop(skb); 4581 __skb_pull(skb, tcp_hdr(skb)->doff * 4); 4582 4583 tcp_ecn_accept_cwr(tp, skb); 4584 4585 tp->rx_opt.dsack = 0; 4586 4587 /* Queue data for delivery to the user. 4588 * Packets in sequence go to the receive queue. 4589 * Out of sequence packets to the out_of_order_queue. 4590 */ 4591 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 4592 if (tcp_receive_window(tp) == 0) 4593 goto out_of_window; 4594 4595 /* Ok. In sequence. In window. */ 4596 queue_and_out: 4597 if (skb_queue_len(&sk->sk_receive_queue) == 0) 4598 sk_forced_mem_schedule(sk, skb->truesize); 4599 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) 4600 goto drop; 4601 4602 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); 4603 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); 4604 if (skb->len) 4605 tcp_event_data_recv(sk, skb); 4606 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 4607 tcp_fin(sk); 4608 4609 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) { 4610 tcp_ofo_queue(sk); 4611 4612 /* RFC2581. 4.2. SHOULD send immediate ACK, when 4613 * gap in queue is filled. 4614 */ 4615 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) 4616 inet_csk(sk)->icsk_ack.pingpong = 0; 4617 } 4618 4619 if (tp->rx_opt.num_sacks) 4620 tcp_sack_remove(tp); 4621 4622 tcp_fast_path_check(sk); 4623 4624 if (eaten > 0) 4625 kfree_skb_partial(skb, fragstolen); 4626 if (!sock_flag(sk, SOCK_DEAD)) 4627 sk->sk_data_ready(sk); 4628 return; 4629 } 4630 4631 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4632 /* A retransmit, 2nd most common case. Force an immediate ack. 
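 * Also report the duplicated range with a D-SACK (when enabled) so the
 * sender can recognise the retransmission as spurious.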
*/ 4633 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4634 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4635 4636 out_of_window: 4637 tcp_enter_quickack_mode(sk); 4638 inet_csk_schedule_ack(sk); 4639 drop: 4640 tcp_drop(sk, skb); 4641 return; 4642 } 4643 4644 /* Out of window. F.e. zero window probe. */ 4645 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 4646 goto out_of_window; 4647 4648 tcp_enter_quickack_mode(sk); 4649 4650 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4651 /* Partial packet, seq < rcv_next < end_seq */ 4652 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", 4653 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4654 TCP_SKB_CB(skb)->end_seq); 4655 4656 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 4657 4658 /* If window is closed, drop tail of packet. But after 4659 * remembering D-SACK for its head made in previous line. 4660 */ 4661 if (!tcp_receive_window(tp)) 4662 goto out_of_window; 4663 goto queue_and_out; 4664 } 4665 4666 tcp_data_queue_ofo(sk, skb); 4667 } 4668 4669 static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list) 4670 { 4671 if (list) 4672 return !skb_queue_is_last(list, skb) ? skb->next : NULL; 4673 4674 return skb_rb_next(skb); 4675 } 4676 4677 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 4678 struct sk_buff_head *list, 4679 struct rb_root *root) 4680 { 4681 struct sk_buff *next = tcp_skb_next(skb, list); 4682 4683 if (list) 4684 __skb_unlink(skb, list); 4685 else 4686 rb_erase(&skb->rbnode, root); 4687 4688 __kfree_skb(skb); 4689 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); 4690 4691 return next; 4692 } 4693 4694 /* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */ 4695 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb) 4696 { 4697 struct rb_node **p = &root->rb_node; 4698 struct rb_node *parent = NULL; 4699 struct sk_buff *skb1; 4700 4701 while (*p) { 4702 parent = *p; 4703 skb1 = rb_to_skb(parent); 4704 if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq)) 4705 p = &parent->rb_left; 4706 else 4707 p = &parent->rb_right; 4708 } 4709 rb_link_node(&skb->rbnode, parent, p); 4710 rb_insert_color(&skb->rbnode, root); 4711 } 4712 4713 /* Collapse contiguous sequence of skbs head..tail with 4714 * sequence numbers start..end. 4715 * 4716 * If tail is NULL, this means until the end of the queue. 4717 * 4718 * Segments with FIN/SYN are not collapsed (only because this 4719 * simplifies code) 4720 */ 4721 static void 4722 tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root, 4723 struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end) 4724 { 4725 struct sk_buff *skb = head, *n; 4726 struct sk_buff_head tmp; 4727 bool end_of_skbs; 4728 4729 /* First, check that queue is collapsible and find 4730 * the point where collapsing can be useful. 4731 */ 4732 restart: 4733 for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) { 4734 n = tcp_skb_next(skb, list); 4735 4736 /* No new bits? It is possible on ofo queue. */ 4737 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4738 skb = tcp_collapse_one(sk, skb, list, root); 4739 if (!skb) 4740 break; 4741 goto restart; 4742 } 4743 4744 /* The first skb to collapse is: 4745 * - not SYN/FIN and 4746 * - bloated or contains data before "start" or 4747 * overlaps to the next one. 
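 * ("Bloated" means the skb wastes memory, i.e.
 * tcp_win_from_space(sk, skb->truesize) is larger than its payload.)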
4748 */ 4749 if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) && 4750 (tcp_win_from_space(sk, skb->truesize) > skb->len || 4751 before(TCP_SKB_CB(skb)->seq, start))) { 4752 end_of_skbs = false; 4753 break; 4754 } 4755 4756 if (n && n != tail && 4757 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) { 4758 end_of_skbs = false; 4759 break; 4760 } 4761 4762 /* Decided to skip this, advance start seq. */ 4763 start = TCP_SKB_CB(skb)->end_seq; 4764 } 4765 if (end_of_skbs || 4766 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) 4767 return; 4768 4769 __skb_queue_head_init(&tmp); 4770 4771 while (before(start, end)) { 4772 int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start); 4773 struct sk_buff *nskb; 4774 4775 nskb = alloc_skb(copy, GFP_ATOMIC); 4776 if (!nskb) 4777 break; 4778 4779 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 4780 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 4781 if (list) 4782 __skb_queue_before(list, skb, nskb); 4783 else 4784 __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */ 4785 skb_set_owner_r(nskb, sk); 4786 4787 /* Copy data, releasing collapsed skbs. */ 4788 while (copy > 0) { 4789 int offset = start - TCP_SKB_CB(skb)->seq; 4790 int size = TCP_SKB_CB(skb)->end_seq - start; 4791 4792 BUG_ON(offset < 0); 4793 if (size > 0) { 4794 size = min(copy, size); 4795 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) 4796 BUG(); 4797 TCP_SKB_CB(nskb)->end_seq += size; 4798 copy -= size; 4799 start += size; 4800 } 4801 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4802 skb = tcp_collapse_one(sk, skb, list, root); 4803 if (!skb || 4804 skb == tail || 4805 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) 4806 goto end; 4807 } 4808 } 4809 } 4810 end: 4811 skb_queue_walk_safe(&tmp, skb, n) 4812 tcp_rbtree_insert(root, skb); 4813 } 4814 4815 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs 4816 * and tcp_collapse() them until all the queue is collapsed. 4817 */ 4818 static void tcp_collapse_ofo_queue(struct sock *sk) 4819 { 4820 struct tcp_sock *tp = tcp_sk(sk); 4821 struct sk_buff *skb, *head; 4822 u32 start, end; 4823 4824 skb = skb_rb_first(&tp->out_of_order_queue); 4825 new_range: 4826 if (!skb) { 4827 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue); 4828 return; 4829 } 4830 start = TCP_SKB_CB(skb)->seq; 4831 end = TCP_SKB_CB(skb)->end_seq; 4832 4833 for (head = skb;;) { 4834 skb = skb_rb_next(skb); 4835 4836 /* Range is terminated when we see a gap or when 4837 * we are at the queue end. 4838 */ 4839 if (!skb || 4840 after(TCP_SKB_CB(skb)->seq, end) || 4841 before(TCP_SKB_CB(skb)->end_seq, start)) { 4842 tcp_collapse(sk, NULL, &tp->out_of_order_queue, 4843 head, skb, start, end); 4844 goto new_range; 4845 } 4846 4847 if (unlikely(before(TCP_SKB_CB(skb)->seq, start))) 4848 start = TCP_SKB_CB(skb)->seq; 4849 if (after(TCP_SKB_CB(skb)->end_seq, end)) 4850 end = TCP_SKB_CB(skb)->end_seq; 4851 } 4852 } 4853 4854 /* 4855 * Clean the out-of-order queue to make room. 4856 * We drop high sequences packets to : 4857 * 1) Let a chance for holes to be filled. 4858 * 2) not add too big latencies if thousands of packets sit there. 4859 * (But if application shrinks SO_RCVBUF, we could still end up 4860 * freeing whole queue here) 4861 * 4862 * Return true if queue has shrunk. 
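 * Eviction starts at the highest sequences (the right-most rbtree
 * nodes) and stops as soon as memory is back under the limits.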
4863 */ 4864 static bool tcp_prune_ofo_queue(struct sock *sk) 4865 { 4866 struct tcp_sock *tp = tcp_sk(sk); 4867 struct rb_node *node, *prev; 4868 4869 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) 4870 return false; 4871 4872 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); 4873 node = &tp->ooo_last_skb->rbnode; 4874 do { 4875 prev = rb_prev(node); 4876 rb_erase(node, &tp->out_of_order_queue); 4877 tcp_drop(sk, rb_to_skb(node)); 4878 sk_mem_reclaim(sk); 4879 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 4880 !tcp_under_memory_pressure(sk)) 4881 break; 4882 node = prev; 4883 } while (node); 4884 tp->ooo_last_skb = rb_to_skb(prev); 4885 4886 /* Reset SACK state. A conforming SACK implementation will 4887 * do the same at a timeout based retransmit. When a connection 4888 * is in a sad state like this, we care only about integrity 4889 * of the connection not performance. 4890 */ 4891 if (tp->rx_opt.sack_ok) 4892 tcp_sack_reset(&tp->rx_opt); 4893 return true; 4894 } 4895 4896 /* Reduce allocated memory if we can, trying to get 4897 * the socket within its memory limits again. 4898 * 4899 * Return less than zero if we should start dropping frames 4900 * until the socket owning process reads some of the data 4901 * to stabilize the situation. 4902 */ 4903 static int tcp_prune_queue(struct sock *sk) 4904 { 4905 struct tcp_sock *tp = tcp_sk(sk); 4906 4907 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); 4908 4909 NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED); 4910 4911 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 4912 tcp_clamp_window(sk); 4913 else if (tcp_under_memory_pressure(sk)) 4914 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 4915 4916 tcp_collapse_ofo_queue(sk); 4917 if (!skb_queue_empty(&sk->sk_receive_queue)) 4918 tcp_collapse(sk, &sk->sk_receive_queue, NULL, 4919 skb_peek(&sk->sk_receive_queue), 4920 NULL, 4921 tp->copied_seq, tp->rcv_nxt); 4922 sk_mem_reclaim(sk); 4923 4924 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4925 return 0; 4926 4927 /* Collapsing did not help, destructive actions follow. 4928 * This must not ever occur. */ 4929 4930 tcp_prune_ofo_queue(sk); 4931 4932 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4933 return 0; 4934 4935 /* If we are really being abused, tell the caller to silently 4936 * drop receive data on the floor. It will get retransmitted 4937 * and hopefully then we'll have sufficient space. 4938 */ 4939 NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED); 4940 4941 /* Massive buffer overcommit. */ 4942 tp->pred_flags = 0; 4943 return -1; 4944 } 4945 4946 static bool tcp_should_expand_sndbuf(const struct sock *sk) 4947 { 4948 const struct tcp_sock *tp = tcp_sk(sk); 4949 4950 /* If the user specified a specific send buffer setting, do 4951 * not modify it. 4952 */ 4953 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) 4954 return false; 4955 4956 /* If we are under global TCP memory pressure, do not expand. */ 4957 if (tcp_under_memory_pressure(sk)) 4958 return false; 4959 4960 /* If we are under soft global TCP memory pressure, do not expand. */ 4961 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) 4962 return false; 4963 4964 /* If we filled the congestion window, do not expand. */ 4965 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 4966 return false; 4967 4968 return true; 4969 } 4970 4971 /* When incoming ACK allowed to free some skb from write_queue, 4972 * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket 4973 * on the exit from tcp input handler. 
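 * (tcp_check_space() only acts when SOCK_NOSPACE is set, i.e. when a
 * writer is actually blocked waiting for buffer space.)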
4974 * 4975 * PROBLEM: sndbuf expansion does not work well with largesend. 4976 */ 4977 static void tcp_new_space(struct sock *sk) 4978 { 4979 struct tcp_sock *tp = tcp_sk(sk); 4980 4981 if (tcp_should_expand_sndbuf(sk)) { 4982 tcp_sndbuf_expand(sk); 4983 tp->snd_cwnd_stamp = tcp_jiffies32; 4984 } 4985 4986 sk->sk_write_space(sk); 4987 } 4988 4989 static void tcp_check_space(struct sock *sk) 4990 { 4991 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { 4992 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); 4993 /* pairs with tcp_poll() */ 4994 smp_mb(); 4995 if (sk->sk_socket && 4996 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 4997 tcp_new_space(sk); 4998 if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) 4999 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 5000 } 5001 } 5002 } 5003 5004 static inline void tcp_data_snd_check(struct sock *sk) 5005 { 5006 tcp_push_pending_frames(sk); 5007 tcp_check_space(sk); 5008 } 5009 5010 /* 5011 * Check if sending an ack is needed. 5012 */ 5013 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) 5014 { 5015 struct tcp_sock *tp = tcp_sk(sk); 5016 5017 /* More than one full frame received... */ 5018 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && 5019 /* ... and right edge of window advances far enough. 5020 * (tcp_recvmsg() will send ACK otherwise). Or... 5021 */ 5022 __tcp_select_window(sk) >= tp->rcv_wnd) || 5023 /* We ACK each frame or... */ 5024 tcp_in_quickack_mode(sk) || 5025 /* We have out of order data. */ 5026 (ofo_possible && !RB_EMPTY_ROOT(&tp->out_of_order_queue))) { 5027 /* Then ack it now */ 5028 tcp_send_ack(sk); 5029 } else { 5030 /* Else, send delayed ack. */ 5031 tcp_send_delayed_ack(sk); 5032 } 5033 } 5034 5035 static inline void tcp_ack_snd_check(struct sock *sk) 5036 { 5037 if (!inet_csk_ack_scheduled(sk)) { 5038 /* We sent a data segment already. */ 5039 return; 5040 } 5041 __tcp_ack_snd_check(sk, 1); 5042 } 5043 5044 /* 5045 * This routine is only called when we have urgent data 5046 * signaled. Its the 'slow' part of tcp_urg. It could be 5047 * moved inline now as tcp_urg is only called from one 5048 * place. We handle URGent data wrong. We have to - as 5049 * BSD still doesn't use the correction from RFC961. 5050 * For 1003.1g we should support a new option TCP_STDURG to permit 5051 * either form (or just set the sysctl tcp_stdurg). 5052 */ 5053 5054 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) 5055 { 5056 struct tcp_sock *tp = tcp_sk(sk); 5057 u32 ptr = ntohs(th->urg_ptr); 5058 5059 if (ptr && !sock_net(sk)->ipv4.sysctl_tcp_stdurg) 5060 ptr--; 5061 ptr += ntohl(th->seq); 5062 5063 /* Ignore urgent data that we've already seen and read. */ 5064 if (after(tp->copied_seq, ptr)) 5065 return; 5066 5067 /* Do not replay urg ptr. 5068 * 5069 * NOTE: interesting situation not covered by specs. 5070 * Misbehaving sender may send urg ptr, pointing to segment, 5071 * which we already have in ofo queue. We are not able to fetch 5072 * such data and will stay in TCP_URG_NOTYET until will be eaten 5073 * by recvmsg(). Seems, we are not obliged to handle such wicked 5074 * situations. But it is worth to think about possibility of some 5075 * DoSes using some hypothetical application level deadlock. 5076 */ 5077 if (before(ptr, tp->rcv_nxt)) 5078 return; 5079 5080 /* Do we already have a newer (or duplicate) urgent pointer? */ 5081 if (tp->urg_data && !after(ptr, tp->urg_seq)) 5082 return; 5083 5084 /* Tell the world about our new urgent pointer. 
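 * sk_send_sigurg() raises SIGURG for the socket owner and wakes
 * poll()ers with POLL_PRI.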
*/ 5085 sk_send_sigurg(sk); 5086 5087 /* We may be adding urgent data when the last byte read was 5088 * urgent. To do this requires some care. We cannot just ignore 5089 * tp->copied_seq since we would read the last urgent byte again 5090 * as data, nor can we alter copied_seq until this data arrives 5091 * or we break the semantics of SIOCATMARK (and thus sockatmark()) 5092 * 5093 * NOTE. Double Dutch. Rendering to plain English: author of comment 5094 * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB); 5095 * and expect that both A and B disappear from stream. This is _wrong_. 5096 * Though this happens in BSD with high probability, this is occasional. 5097 * Any application relying on this is buggy. Note also, that fix "works" 5098 * only in this artificial test. Insert some normal data between A and B and we will 5099 * decline of BSD again. Verdict: it is better to remove to trap 5100 * buggy users. 5101 */ 5102 if (tp->urg_seq == tp->copied_seq && tp->urg_data && 5103 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { 5104 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 5105 tp->copied_seq++; 5106 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 5107 __skb_unlink(skb, &sk->sk_receive_queue); 5108 __kfree_skb(skb); 5109 } 5110 } 5111 5112 tp->urg_data = TCP_URG_NOTYET; 5113 tp->urg_seq = ptr; 5114 5115 /* Disable header prediction. */ 5116 tp->pred_flags = 0; 5117 } 5118 5119 /* This is the 'fast' part of urgent handling. */ 5120 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) 5121 { 5122 struct tcp_sock *tp = tcp_sk(sk); 5123 5124 /* Check if we get a new urgent pointer - normally not. */ 5125 if (th->urg) 5126 tcp_check_urg(sk, th); 5127 5128 /* Do we wait for any urgent data? - normally not... */ 5129 if (tp->urg_data == TCP_URG_NOTYET) { 5130 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - 5131 th->syn; 5132 5133 /* Is the urgent pointer pointing into this packet? */ 5134 if (ptr < skb->len) { 5135 u8 tmp; 5136 if (skb_copy_bits(skb, ptr, &tmp, 1)) 5137 BUG(); 5138 tp->urg_data = TCP_URG_VALID | tmp; 5139 if (!sock_flag(sk, SOCK_DEAD)) 5140 sk->sk_data_ready(sk); 5141 } 5142 } 5143 } 5144 5145 /* Accept RST for rcv_nxt - 1 after a FIN. 5146 * When tcp connections are abruptly terminated from Mac OSX (via ^C), a 5147 * FIN is sent followed by a RST packet. The RST is sent with the same 5148 * sequence number as the FIN, and thus according to RFC 5961 a challenge 5149 * ACK should be sent. However, Mac OSX rate limits replies to challenge 5150 * ACKs on the closed socket. In addition middleboxes can drop either the 5151 * challenge ACK or a subsequent RST. 5152 */ 5153 static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb) 5154 { 5155 struct tcp_sock *tp = tcp_sk(sk); 5156 5157 return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) && 5158 (1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK | 5159 TCPF_CLOSING)); 5160 } 5161 5162 /* Does PAWS and seqno based validation of an incoming segment, flags will 5163 * play significant role here. 5164 */ 5165 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, 5166 const struct tcphdr *th, int syn_inerr) 5167 { 5168 struct tcp_sock *tp = tcp_sk(sk); 5169 bool rst_seq_match = false; 5170 5171 /* RFC1323: H1. Apply PAWS check first. 
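 * A segment failing PAWS gets a rate-limited duplicate ACK and is
 * dropped; a RST is exempt and falls through to the checks below.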
*/ 5172 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) && 5173 tp->rx_opt.saw_tstamp && 5174 tcp_paws_discard(sk, skb)) { 5175 if (!th->rst) { 5176 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); 5177 if (!tcp_oow_rate_limited(sock_net(sk), skb, 5178 LINUX_MIB_TCPACKSKIPPEDPAWS, 5179 &tp->last_oow_ack_time)) 5180 tcp_send_dupack(sk, skb); 5181 goto discard; 5182 } 5183 /* Reset is accepted even if it did not pass PAWS. */ 5184 } 5185 5186 /* Step 1: check sequence number */ 5187 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 5188 /* RFC793, page 37: "In all states except SYN-SENT, all reset 5189 * (RST) segments are validated by checking their SEQ-fields." 5190 * And page 69: "If an incoming segment is not acceptable, 5191 * an acknowledgment should be sent in reply (unless the RST 5192 * bit is set, if so drop the segment and return)". 5193 */ 5194 if (!th->rst) { 5195 if (th->syn) 5196 goto syn_challenge; 5197 if (!tcp_oow_rate_limited(sock_net(sk), skb, 5198 LINUX_MIB_TCPACKSKIPPEDSEQ, 5199 &tp->last_oow_ack_time)) 5200 tcp_send_dupack(sk, skb); 5201 } else if (tcp_reset_check(sk, skb)) { 5202 tcp_reset(sk); 5203 } 5204 goto discard; 5205 } 5206 5207 /* Step 2: check RST bit */ 5208 if (th->rst) { 5209 /* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a 5210 * FIN and SACK too if available): 5211 * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or 5212 * the right-most SACK block, 5213 * then 5214 * RESET the connection 5215 * else 5216 * Send a challenge ACK 5217 */ 5218 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt || 5219 tcp_reset_check(sk, skb)) { 5220 rst_seq_match = true; 5221 } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { 5222 struct tcp_sack_block *sp = &tp->selective_acks[0]; 5223 int max_sack = sp[0].end_seq; 5224 int this_sack; 5225 5226 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; 5227 ++this_sack) { 5228 max_sack = after(sp[this_sack].end_seq, 5229 max_sack) ? 5230 sp[this_sack].end_seq : max_sack; 5231 } 5232 5233 if (TCP_SKB_CB(skb)->seq == max_sack) 5234 rst_seq_match = true; 5235 } 5236 5237 if (rst_seq_match) 5238 tcp_reset(sk); 5239 else { 5240 /* Disable TFO if RST is out-of-order 5241 * and no data has been received 5242 * for current active TFO socket 5243 */ 5244 if (tp->syn_fastopen && !tp->data_segs_in && 5245 sk->sk_state == TCP_ESTABLISHED) 5246 tcp_fastopen_active_disable(sk); 5247 tcp_send_challenge_ack(sk, skb); 5248 } 5249 goto discard; 5250 } 5251 5252 /* step 3: check security and precedence [ignored] */ 5253 5254 /* step 4: Check for a SYN 5255 * RFC 5961 4.2 : Send a challenge ack 5256 */ 5257 if (th->syn) { 5258 syn_challenge: 5259 if (syn_inerr) 5260 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 5261 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE); 5262 tcp_send_challenge_ack(sk, skb); 5263 goto discard; 5264 } 5265 5266 return true; 5267 5268 discard: 5269 tcp_drop(sk, skb); 5270 return false; 5271 } 5272 5273 /* 5274 * TCP receive function for the ESTABLISHED state. 5275 * 5276 * It is split into a fast path and a slow path. The fast path is 5277 * disabled when: 5278 * - A zero window was announced from us - zero window probing 5279 * is only handled properly in the slow path. 5280 * - Out of order segments arrived. 5281 * - Urgent data is expected. 5282 * - There is no buffer space left 5283 * - Unexpected TCP flags/window values/header lengths are received 5284 * (detected by checking the TCP header against pred_flags) 5285 * - Data is sent in both directions. 
Fast path only supports pure senders 5286 * or pure receivers (this means either the sequence number or the ack 5287 * value must stay constant) 5288 * - Unexpected TCP option. 5289 * 5290 * When these conditions are not satisfied it drops into a standard 5291 * receive procedure patterned after RFC793 to handle all cases. 5292 * The first three cases are guaranteed by proper pred_flags setting, 5293 * the rest is checked inline. Fast processing is turned on in 5294 * tcp_data_queue when everything is OK. 5295 */ 5296 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 5297 const struct tcphdr *th) 5298 { 5299 unsigned int len = skb->len; 5300 struct tcp_sock *tp = tcp_sk(sk); 5301 5302 /* TCP congestion window tracking */ 5303 trace_tcp_probe(sk, skb); 5304 5305 tcp_mstamp_refresh(tp); 5306 if (unlikely(!sk->sk_rx_dst)) 5307 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); 5308 /* 5309 * Header prediction. 5310 * The code loosely follows the one in the famous 5311 * "30 instruction TCP receive" Van Jacobson mail. 5312 * 5313 * Van's trick is to deposit buffers into socket queue 5314 * on a device interrupt, to call tcp_recv function 5315 * on the receive process context and checksum and copy 5316 * the buffer to user space. smart... 5317 * 5318 * Our current scheme is not silly either but we take the 5319 * extra cost of the net_bh soft interrupt processing... 5320 * We do checksum and copy also but from device to kernel. 5321 */ 5322 5323 tp->rx_opt.saw_tstamp = 0; 5324 5325 /* pred_flags is 0xS?10 << 16 + snd_wnd 5326 * if header_prediction is to be made 5327 * 'S' will always be tp->tcp_header_len >> 2 5328 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to 5329 * turn it off (when there are holes in the receive 5330 * space for instance) 5331 * PSH flag is ignored. 5332 */ 5333 5334 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && 5335 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && 5336 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { 5337 int tcp_header_len = tp->tcp_header_len; 5338 5339 /* Timestamp header prediction: tcp_header_len 5340 * is automatically equal to th->doff*4 due to pred_flags 5341 * match. 5342 */ 5343 5344 /* Check timestamp */ 5345 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 5346 /* No? Slow path! */ 5347 if (!tcp_parse_aligned_timestamp(tp, th)) 5348 goto slow_path; 5349 5350 /* If PAWS failed, check it more carefully in slow path */ 5351 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 5352 goto slow_path; 5353 5354 /* DO NOT update ts_recent here, if checksum fails 5355 * and timestamp was corrupted part, it will result 5356 * in a hung connection since we will drop all 5357 * future packets due to the PAWS test. 5358 */ 5359 } 5360 5361 if (len <= tcp_header_len) { 5362 /* Bulk data transfer: sender */ 5363 if (len == tcp_header_len) { 5364 /* Predicted packet is in window by definition. 5365 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5366 * Hence, check seq<=rcv_wup reduces to: 5367 */ 5368 if (tcp_header_len == 5369 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 5370 tp->rcv_nxt == tp->rcv_wup) 5371 tcp_store_ts_recent(tp); 5372 5373 /* We know that such packets are checksummed 5374 * on entry. 
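 * A header-only ACK can therefore go straight to tcp_ack() and be freed.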
5375 */ 5376 tcp_ack(sk, skb, 0); 5377 __kfree_skb(skb); 5378 tcp_data_snd_check(sk); 5379 return; 5380 } else { /* Header too small */ 5381 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 5382 goto discard; 5383 } 5384 } else { 5385 int eaten = 0; 5386 bool fragstolen = false; 5387 5388 if (tcp_checksum_complete(skb)) 5389 goto csum_error; 5390 5391 if ((int)skb->truesize > sk->sk_forward_alloc) 5392 goto step5; 5393 5394 /* Predicted packet is in window by definition. 5395 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5396 * Hence, check seq<=rcv_wup reduces to: 5397 */ 5398 if (tcp_header_len == 5399 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 5400 tp->rcv_nxt == tp->rcv_wup) 5401 tcp_store_ts_recent(tp); 5402 5403 tcp_rcv_rtt_measure_ts(sk, skb); 5404 5405 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); 5406 5407 /* Bulk data transfer: receiver */ 5408 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, 5409 &fragstolen); 5410 5411 tcp_event_data_recv(sk, skb); 5412 5413 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { 5414 /* Well, only one small jumplet in fast path... */ 5415 tcp_ack(sk, skb, FLAG_DATA); 5416 tcp_data_snd_check(sk); 5417 if (!inet_csk_ack_scheduled(sk)) 5418 goto no_ack; 5419 } 5420 5421 __tcp_ack_snd_check(sk, 0); 5422 no_ack: 5423 if (eaten) 5424 kfree_skb_partial(skb, fragstolen); 5425 sk->sk_data_ready(sk); 5426 return; 5427 } 5428 } 5429 5430 slow_path: 5431 if (len < (th->doff << 2) || tcp_checksum_complete(skb)) 5432 goto csum_error; 5433 5434 if (!th->ack && !th->rst && !th->syn) 5435 goto discard; 5436 5437 /* 5438 * Standard slow path. 5439 */ 5440 5441 if (!tcp_validate_incoming(sk, skb, th, 1)) 5442 return; 5443 5444 step5: 5445 if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) 5446 goto discard; 5447 5448 tcp_rcv_rtt_measure_ts(sk, skb); 5449 5450 /* Process urgent data. */ 5451 tcp_urg(sk, skb, th); 5452 5453 /* step 7: process the segment text */ 5454 tcp_data_queue(sk, skb); 5455 5456 tcp_data_snd_check(sk); 5457 tcp_ack_snd_check(sk); 5458 return; 5459 5460 csum_error: 5461 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); 5462 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); 5463 5464 discard: 5465 tcp_drop(sk, skb); 5466 } 5467 EXPORT_SYMBOL(tcp_rcv_established); 5468 5469 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) 5470 { 5471 struct tcp_sock *tp = tcp_sk(sk); 5472 struct inet_connection_sock *icsk = inet_csk(sk); 5473 5474 tcp_set_state(sk, TCP_ESTABLISHED); 5475 icsk->icsk_ack.lrcvtime = tcp_jiffies32; 5476 5477 if (skb) { 5478 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); 5479 security_inet_conn_established(sk, skb); 5480 } 5481 5482 tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB); 5483 5484 /* Prevent spurious tcp_cwnd_restart() on first data 5485 * packet. 5486 */ 5487 tp->lsndtime = tcp_jiffies32; 5488 5489 if (sock_flag(sk, SOCK_KEEPOPEN)) 5490 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); 5491 5492 if (!tp->rx_opt.snd_wscale) 5493 __tcp_fast_path_on(tp, tp->snd_wnd); 5494 else 5495 tp->pred_flags = 0; 5496 } 5497 5498 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, 5499 struct tcp_fastopen_cookie *cookie) 5500 { 5501 struct tcp_sock *tp = tcp_sk(sk); 5502 struct sk_buff *data = tp->syn_data ? 
static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
				    struct tcp_fastopen_cookie *cookie)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL;
	u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
	bool syn_drop = false;

	if (mss == tp->rx_opt.user_mss) {
		struct tcp_options_received opt;

		/* Get original SYNACK MSS value if user MSS sets mss_clamp */
		tcp_clear_options(&opt);
		opt.user_mss = opt.mss_clamp = 0;
		tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL);
		mss = opt.mss_clamp;
	}

	if (!tp->syn_fastopen) {
		/* Ignore an unsolicited cookie */
		cookie->len = -1;
	} else if (tp->total_retrans) {
		/* SYN timed out and the SYN-ACK neither has a cookie nor
		 * acknowledges data. Presumably the remote received only
		 * the retransmitted (regular) SYNs: either the original
		 * SYN-data or the corresponding SYN-ACK was dropped.
		 */
		syn_drop = (cookie->len < 0 && data);
	} else if (cookie->len < 0 && !tp->syn_data) {
		/* We requested a cookie but didn't get it. If we did not use
		 * the (old) exp opt format then try so next time (try_exp=1).
		 * Otherwise we go back to use the RFC7413 opt (try_exp=2).
		 */
		try_exp = tp->syn_fastopen_exp ? 2 : 1;
	}

	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);

	if (data) { /* Retransmit unacked data in SYN */
		skb_rbtree_walk_from(data) {
			if (__tcp_retransmit_skb(sk, data, 1))
				break;
		}
		tcp_rearm_rto(sk);
		NET_INC_STATS(sock_net(sk),
			      LINUX_MIB_TCPFASTOPENACTIVEFAIL);
		return true;
	}
	tp->syn_data_acked = tp->syn_data;
	if (tp->syn_data_acked)
		NET_INC_STATS(sock_net(sk),
			      LINUX_MIB_TCPFASTOPENACTIVE);

	tcp_fastopen_add_skb(sk, synack);

	return false;
}

static void smc_check_reset_syn(struct tcp_sock *tp)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc && !tp->rx_opt.smc_ok)
			tp->syn_smc = 0;
	}
#endif
}

static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
					 const struct tcphdr *th)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_fastopen_cookie foc = { .len = -1 };
	int saved_clamp = tp->rx_opt.mss_clamp;
	bool fastopen_fail;

	tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
		tp->rx_opt.rcv_tsecr -= tp->tsoffset;

	if (th->ack) {
		/* rfc793:
		 * "If the state is SYN-SENT then
		 *    first check the ACK bit
		 *      If the ACK bit is set
		 *        If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
		 *        a reset (unless the RST bit is set, if so drop
		 *        the segment and return)"
		 */
		if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
		    after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
			goto reset_and_undo;

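		/* An acceptable ACK must also echo a timestamp that we
		 * really sent: rcv_tsecr has to fall between the time the
		 * SYN went out (retrans_stamp) and the current clock,
		 * otherwise this SYN-ACK answers a stale SYN.
		 */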
		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
			     tcp_time_stamp(tp))) {
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_PAWSACTIVEREJECTED);
			goto reset_and_undo;
		}

		/* Now ACK is acceptable.
		 *
		 * "If the RST bit is set
		 *    If the ACK was acceptable then signal the user "error:
		 *    connection reset", drop the segment, enter CLOSED state,
		 *    delete TCB, and return."
		 */

		if (th->rst) {
			tcp_reset(sk);
			goto discard;
		}

		/* rfc793:
		 *   "fifth, if neither of the SYN or RST bits is set then
		 *    drop the segment and return."
		 *
		 *    See note below!
		 *                                        --ANK(990513)
		 */
		if (!th->syn)
			goto discard_and_undo;

		/* rfc793:
		 *   "If the SYN bit is on ...
		 *    are acceptable then ...
		 *    (our SYN has been ACKed), change the connection
		 *    state to ESTABLISHED..."
		 */

		tcp_ecn_rcv_synack(tp, th);

		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
		tcp_ack(sk, skb, FLAG_SLOWPATH);

		/* Ok.. it's good. Set up sequence numbers and
		 * move to established.
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;

		/* RFC1323: The window in SYN & SYN/ACK segments is
		 * never scaled.
		 */
		tp->snd_wnd = ntohs(th->window);

		if (!tp->rx_opt.wscale_ok) {
			tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
			tp->window_clamp = min(tp->window_clamp, 65535U);
		}

		if (tp->rx_opt.saw_tstamp) {
			tp->rx_opt.tstamp_ok = 1;
			tp->tcp_header_len =
				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
			tcp_store_ts_recent(tp);
		} else {
			tp->tcp_header_len = sizeof(struct tcphdr);
		}

		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		tcp_initialize_rcv_mss(sk);

		/* Remember, tcp_poll() does not lock socket!
		 * Change state from SYN-SENT only after copied_seq
		 * is initialized. */
		tp->copied_seq = tp->rcv_nxt;

		smc_check_reset_syn(tp);

		smp_mb();

		tcp_finish_connect(sk, skb);

		fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
				tcp_rcv_fastopen_synack(sk, skb, &foc);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
		}
		if (fastopen_fail)
			return -1;
		if (sk->sk_write_pending ||
		    icsk->icsk_accept_queue.rskq_defer_accept ||
		    icsk->icsk_ack.pingpong) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			inet_csk_schedule_ack(sk);
			tcp_enter_quickack_mode(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX, TCP_RTO_MAX);

discard:
			tcp_drop(sk, skb);
			return 0;
		} else {
			tcp_send_ack(sk);
		}
		return -1;
	}

	/* No ACK in the segment */

	if (th->rst) {
		/* rfc793:
		 * "If the RST bit is set
		 *
		 *      Otherwise (no ACK) drop the segment and return."
		 */

		goto discard_and_undo;
	}

	/* PAWS check. */
	if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
	    tcp_paws_reject(&tp->rx_opt, 0))
		goto discard_and_undo;

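	/* No ACK, no RST and PAWS is fine: the only remaining legal case is
	 * a bare SYN, i.e. both ends sent SYNs that crossed in the network.
	 */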
	if (th->syn) {
		/* We see a SYN without an ACK. It is an attempt at a
		 * simultaneous connect with crossed SYNs.
		 * In particular, it can be a connect to self.
		 */
		tcp_set_state(sk, TCP_SYN_RECV);

		if (tp->rx_opt.saw_tstamp) {
			tp->rx_opt.tstamp_ok = 1;
			tcp_store_ts_recent(tp);
			tp->tcp_header_len =
				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			tp->tcp_header_len = sizeof(struct tcphdr);
		}

		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
		tp->copied_seq = tp->rcv_nxt;
		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;

		/* RFC1323: The window in SYN & SYN/ACK segments is
		 * never scaled.
		 */
		tp->snd_wnd    = ntohs(th->window);
		tp->snd_wl1    = TCP_SKB_CB(skb)->seq;
		tp->max_window = tp->snd_wnd;

		tcp_ecn_rcv_syn(tp, th);

		tcp_mtup_init(sk);
		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		tcp_initialize_rcv_mss(sk);

		tcp_send_synack(sk);
#if 0
		/* Note, we could accept data and URG from this segment.
		 * There is no obstacle to making this work (except that we
		 * must either change tcp_recvmsg() to prevent it from
		 * returning data before the 3WHS completes per RFC793, or
		 * employ TCP Fast Open).
		 *
		 * However, since we ignore data carried in ACK-less segments
		 * in some cases, we have no reason to accept it in others.
		 * Also, the code doing this in step6 of tcp_rcv_state_process
		 * is not flawless. So, discard the packet for sanity.
		 * Uncomment this return to process the data.
		 */
		return -1;
#else
		goto discard;
#endif
	}
	/* "fifth, if neither of the SYN or RST bits is set then
	 * drop the segment and return."
	 */

discard_and_undo:
	tcp_clear_options(&tp->rx_opt);
	tp->rx_opt.mss_clamp = saved_clamp;
	goto discard;

reset_and_undo:
	tcp_clear_options(&tp->rx_opt);
	tp->rx_opt.mss_clamp = saved_clamp;
	return 1;
}

/*
 * This function implements the receiving procedure of RFC 793 for
 * all states except ESTABLISHED and TIME_WAIT.
 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
 * address independent.
 */

int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcphdr *th = tcp_hdr(skb);
	struct request_sock *req;
	int queued = 0;
	bool acceptable;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		goto discard;

	case TCP_LISTEN:
		if (th->ack)
			return 1;

		if (th->rst)
			goto discard;

		if (th->syn) {
			if (th->fin)
				goto discard;
			/* It is possible that we process SYN packets from backlog,
			 * so we need to make sure to disable BH right there.
			 */
			local_bh_disable();
			acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
			local_bh_enable();

			if (!acceptable)
				return 1;
			consume_skb(skb);
			return 0;
		}
		goto discard;

	case TCP_SYN_SENT:
		tp->rx_opt.saw_tstamp = 0;
		tcp_mstamp_refresh(tp);
		queued = tcp_rcv_synsent_state_process(sk, skb, th);
		if (queued >= 0)
			return queued;

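		/* A negative return value means the SYN-ACK was accepted and
		 * the socket is now established, but the skb is still ours:
		 * finish the remaining RFC 793 steps for it below.
		 */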
		/* Do step6 onward by hand. */
		tcp_urg(sk, skb, th);
		__kfree_skb(skb);
		tcp_data_snd_check(sk);
		return 0;
	}

	tcp_mstamp_refresh(tp);
	tp->rx_opt.saw_tstamp = 0;
	req = tp->fastopen_rsk;
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);

		if (!tcp_check_req(sk, skb, req, true))
			goto discard;
	}

	if (!th->ack && !th->rst && !th->syn)
		goto discard;

	if (!tcp_validate_incoming(sk, skb, th, 0))
		return 0;

	/* step 5: check the ACK field */
	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
				      FLAG_UPDATE_TS_RECENT |
				      FLAG_NO_CHALLENGE_ACK) > 0;

	if (!acceptable) {
		if (sk->sk_state == TCP_SYN_RECV)
			return 1;	/* send one RST */
		tcp_send_challenge_ack(sk, skb);
		goto discard;
	}
	switch (sk->sk_state) {
	case TCP_SYN_RECV:
		if (!tp->srtt_us)
			tcp_synack_rtt_meas(sk, req);

		/* Once we leave TCP_SYN_RECV, we no longer need req
		 * so release it.
		 */
		if (req) {
			inet_csk(sk)->icsk_retransmits = 0;
			reqsk_fastopen_remove(sk, req, false);
			/* Re-arm the timer because data may have been sent out.
			 * This is similar to the regular data transmission case
			 * when new data has just been ack'ed.
			 *
			 * (TFO) - we could try to be more aggressive and
			 * retransmit any data sooner based on when it
			 * was sent out.
			 */
			tcp_rearm_rto(sk);
		} else {
			tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
			tp->copied_seq = tp->rcv_nxt;
		}
		smp_mb();
		tcp_set_state(sk, TCP_ESTABLISHED);
		sk->sk_state_change(sk);

		/* Note, that this wakeup is only for marginal crossed SYN case.
		 * Passively open sockets are not woken up, because
		 * sk->sk_sleep == NULL and sk->sk_socket == NULL.
		 */
		if (sk->sk_socket)
			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);

		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
		tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);

		if (tp->rx_opt.tstamp_ok)
			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;

		if (!inet_csk(sk)->icsk_ca_ops->cong_control)
			tcp_update_pacing_rate(sk);

		/* Prevent spurious tcp_cwnd_restart() on first data packet */
		tp->lsndtime = tcp_jiffies32;

		tcp_initialize_rcv_mss(sk);
		tcp_fast_path_on(tp);
		break;

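	/* FIN_WAIT1: our FIN is on the wire.  Once it is acknowledged
	 * (snd_una == write_seq) the socket moves to FIN_WAIT2 and, unless
	 * the application still holds it open, on to TIME-WAIT handling.
	 */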
	case TCP_FIN_WAIT1: {
		int tmo;

		/* If we enter the TCP_FIN_WAIT1 state and we are a
		 * Fast Open socket and this is the first acceptable
		 * ACK we have received, this would have acknowledged
		 * our SYNACK so stop the SYNACK timer.
		 */
		if (req) {
			/* We no longer need the request sock. */
			reqsk_fastopen_remove(sk, req, false);
			tcp_rearm_rto(sk);
		}
		if (tp->snd_una != tp->write_seq)
			break;

		tcp_set_state(sk, TCP_FIN_WAIT2);
		sk->sk_shutdown |= SEND_SHUTDOWN;

		sk_dst_confirm(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			/* Wake up lingering close() */
			sk->sk_state_change(sk);
			break;
		}

		if (tp->linger2 < 0) {
			tcp_done(sk);
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
			return 1;
		}
		if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
		    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
			/* Receive out of order FIN after close() */
			if (tp->syn_fastopen && th->fin)
				tcp_fastopen_active_disable(sk);
			tcp_done(sk);
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
			return 1;
		}

		tmo = tcp_fin_time(sk);
		if (tmo > TCP_TIMEWAIT_LEN) {
			inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
		} else if (th->fin || sock_owned_by_user(sk)) {
			/* Bad case. We could lose such a FIN otherwise.
			 * It is not a big problem, but it looks confusing
			 * and is not such a rare event. We still can lose
			 * it now, if it spins in bh_lock_sock(), but that
			 * is really a marginal case.
			 */
			inet_csk_reset_keepalive_timer(sk, tmo);
		} else {
			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
			goto discard;
		}
		break;
	}

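	/* CLOSING and LAST_ACK both just wait for our FIN to be acked:
	 * CLOSING then enters TIME-WAIT, while LAST_ACK tears the socket
	 * down immediately.
	 */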
	case TCP_CLOSING:
		if (tp->snd_una == tp->write_seq) {
			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
			goto discard;
		}
		break;

	case TCP_LAST_ACK:
		if (tp->snd_una == tp->write_seq) {
			tcp_update_metrics(sk);
			tcp_done(sk);
			goto discard;
		}
		break;
	}

	/* step 6: check the URG bit */
	tcp_urg(sk, skb, th);

	/* step 7: process the segment text */
	switch (sk->sk_state) {
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
	case TCP_LAST_ACK:
		if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
			break;
		/* fall through */
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		/* RFC 793 says to queue data in these states,
		 * RFC 1122 says we MUST send a reset.
		 * BSD 4.4 also does reset.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
				NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
				tcp_reset(sk);
				return 1;
			}
		}
		/* Fall through */
	case TCP_ESTABLISHED:
		tcp_data_queue(sk, skb);
		queued = 1;
		break;
	}

	/* tcp_data could move socket to TIME-WAIT */
	if (sk->sk_state != TCP_CLOSE) {
		tcp_data_snd_check(sk);
		tcp_ack_snd_check(sk);
	}

	if (!queued) {
discard:
		tcp_drop(sk, skb);
	}
	return 0;
}
EXPORT_SYMBOL(tcp_rcv_state_process);

static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	if (family == AF_INET)
		net_dbg_ratelimited("drop open request from %pI4/%u\n",
				    &ireq->ir_rmt_addr, port);
#if IS_ENABLED(CONFIG_IPV6)
	else if (family == AF_INET6)
		net_dbg_ratelimited("drop open request from %pI6/%u\n",
				    &ireq->ir_v6_rmt_addr, port);
#endif
}

/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
 *
 * If we receive a SYN packet with these bits set, it means a
 * network is playing bad games with TOS bits. In order to
 * avoid possible false congestion notifications, we disable
 * TCP ECN negotiation.
 *
 * Exception: tcp_ca wants ECN. This is required for DCTCP
 * congestion control: Linux DCTCP asserts ECT on all packets,
 * including SYN, which is the optimal solution; however,
 * others, such as FreeBSD, do not.
 */
static void tcp_ecn_create_request(struct request_sock *req,
				   const struct sk_buff *skb,
				   const struct sock *listen_sk,
				   const struct dst_entry *dst)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const struct net *net = sock_net(listen_sk);
	bool th_ecn = th->ece && th->cwr;
	bool ect, ecn_ok;
	u32 ecn_ok_dst;

	if (!th_ecn)
		return;

	ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
	ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
	ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst;

	if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
	    (ecn_ok_dst & DST_FEATURE_ECN_CA) ||
	    tcp_bpf_ca_needs_ecn((struct sock *)req))
		inet_rsk(req)->ecn_ok = 1;
}

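/* Fill a freshly allocated request sock from the parsed TCP options and the
 * incoming SYN, before it is either queued or answered with a SYN-ACK.
 */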
static void tcp_openreq_init(struct request_sock *req,
			     const struct tcp_options_received *rx_opt,
			     struct sk_buff *skb, const struct sock *sk)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rsk_rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
	tcp_rsk(req)->snt_synack = tcp_clock_us();
	tcp_rsk(req)->last_oow_ack_time = 0;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->ir_rmt_port = tcp_hdr(skb)->source;
	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
	ireq->ir_mark = inet_request_mark(sk, skb);
#if IS_ENABLED(CONFIG_SMC)
	ireq->smc_ok = rx_opt->smc_ok;
#endif
}

struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
				      struct sock *sk_listener,
				      bool attach_listener)
{
	struct request_sock *req = reqsk_alloc(ops, sk_listener,
					       attach_listener);

	if (req) {
		struct inet_request_sock *ireq = inet_rsk(req);

		ireq->ireq_opt = NULL;
#if IS_ENABLED(CONFIG_IPV6)
		ireq->pktopts = NULL;
#endif
		atomic64_set(&ireq->ir_cookie, 0);
		ireq->ireq_state = TCP_NEW_SYN_RECV;
		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
		ireq->ireq_family = sk_listener->sk_family;
	}

	return req;
}
EXPORT_SYMBOL(inet_reqsk_alloc);

/*
 * Return true if a syncookie should be sent
 */
static bool tcp_syn_flood_action(const struct sock *sk,
				 const struct sk_buff *skb,
				 const char *proto)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct net *net = sock_net(sk);

#ifdef CONFIG_SYN_COOKIES
	if (net->ipv4.sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	if (!queue->synflood_warned &&
	    net->ipv4.sysctl_tcp_syncookies != 2 &&
	    xchg(&queue->synflood_warned, 1) == 0)
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);

	return want_cookie;
}

static void tcp_reqsk_record_syn(const struct sock *sk,
				 struct request_sock *req,
				 const struct sk_buff *skb)
{
	if (tcp_sk(sk)->save_syn) {
		u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
		u32 *copy;

		copy = kmalloc(len + sizeof(u32), GFP_ATOMIC);
		if (copy) {
			copy[0] = len;
			memcpy(&copy[1], skb_network_header(skb), len);
			req->saved_syn = copy;
		}
	}
}

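/* Common SYN processing for IPv4 and IPv6 listeners: allocate a request
 * sock, decide between sending a syncookie, creating a Fast Open child or
 * queueing the request, and answer with a SYN-ACK.
 */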
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb)
{
	struct tcp_fastopen_cookie foc = { .len = -1 };
	__u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct sock *fastopen_sk = NULL;
	struct request_sock *req;
	bool want_cookie = false;
	struct dst_entry *dst;
	struct flowi fl;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and the peer is
	 * evidently a real one.
	 */
	if ((net->ipv4.sysctl_tcp_syncookies == 2 ||
	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name);
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
	if (!req)
		goto drop;

	tcp_rsk(req)->af_specific = af_ops;
	tcp_rsk(req)->ts_off = 0;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = af_ops->mss_clamp;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
			  want_cookie ? NULL : &foc);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb, sk);
	inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent;

	/* Note: tcp_v6_init_req() might override ir_iif for link locals */
	inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);

	af_ops->init_req(req, sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (tmp_opt.tstamp_ok)
		tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);

	dst = af_ops->route_req(sk, &fl, req);
	if (!dst)
		goto drop_and_free;

	if (!want_cookie && !isn) {
		/* Kill the following clause, if you dislike this way. */
		if (!net->ipv4.sysctl_tcp_syncookies &&
		    (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
		     (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
		    !tcp_peer_is_proven(req, dst)) {
			/* Without syncookies the last quarter of the
			 * backlog is filled with destinations proven
			 * to be alive. It means that we continue to
			 * communicate with destinations already
			 * remembered at the moment of the synflood.
			 */
			pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
				    rsk_ops->family);
			goto drop_and_release;
		}

		isn = af_ops->init_seq(skb);
	}

	tcp_ecn_create_request(req, skb, sk, dst);

	if (want_cookie) {
		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
		if (!tmp_opt.tstamp_ok)
			inet_rsk(req)->ecn_ok = 0;
	}

	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->txhash = net_tx_rndhash();
	tcp_openreq_init_rwin(req, sk, dst);
	if (!want_cookie) {
		tcp_reqsk_record_syn(sk, req, skb);
		fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
	}
	if (fastopen_sk) {
		af_ops->send_synack(fastopen_sk, dst, &fl, req,
				    &foc, TCP_SYNACK_FASTOPEN);
		/* Add the child socket directly into the accept queue */
		inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
		sk->sk_data_ready(sk);
		bh_unlock_sock(fastopen_sk);
		sock_put(fastopen_sk);
	} else {
		tcp_rsk(req)->tfo_listener = false;
		if (!want_cookie)
			inet_csk_reqsk_queue_hash_add(sk, req,
				tcp_timeout_init((struct sock *)req));
		af_ops->send_synack(sk, dst, &fl, req, &foc,
				    !want_cookie ? TCP_SYNACK_NORMAL :
						   TCP_SYNACK_COOKIE);
		if (want_cookie) {
			reqsk_free(req);
			return 0;
		}
	}
	reqsk_put(req);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_conn_request);