/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim:		ECN support
 *	Andrei Gurtov,
 *	Pasi Sarolahti,
 *	Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *				engine. Lots of bugs are found.
 *	Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <net/netdma.h>

int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
EXPORT_SYMBOL(sysctl_tcp_reordering);
int sysctl_tcp_ecn __read_mostly = 2;
EXPORT_SYMBOL(sysctl_tcp_ecn);
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);

int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_frto_response __read_mostly;
int sysctl_tcp_nometrics_save __read_mostly;

int sysctl_tcp_thin_dupack __read_mostly;

int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;
int sysctl_tcp_early_retrans __read_mostly = 2;

#define FLAG_DATA		0x01	/* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02	/* Incoming ACK was a window update.		*/
#define FLAG_DATA_ACKED		0x04	/* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08	/* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10	/* This ACK acknowledged SYN.			*/
#define FLAG_DATA_SACKED	0x20	/* New SACK.					*/
#define FLAG_ECE		0x40	/* ECE in this ACK				*/
#define FLAG_SLOWPATH		0x100	/* Do not skip RFC checks for window update.	*/
#define FLAG_ONLY_ORIG_SACKED	0x200	/* SACKs only non-rexmit sent before RTO	*/
#define FLAG_SND_UNA_ADVANCED	0x400	/* Snd_una was changed (!= FLAG_DATA_ACKED)	*/
#define FLAG_DSACKING_ACK	0x800	/* SACK blocks contained D-SACK info		*/
#define FLAG_NONHEAD_RETRANS_ACKED	0x1000	/* Non-head rexmitted data was ACKed	*/
#define FLAG_SACK_RENEGING	0x2000	/* snd_una advanced to a sacked seq		*/

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
#define FLAG_ANY_PROGRESS	(FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ? : skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = len;
	} else {
		/* Otherwise, we make more careful check taking into account,
		 * that SACKs block is variable.
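		 * (Illustrative aside, not from the original comment: each
		 * SACK block occupies 8 bytes of TCP option space, so the
		 * header length of otherwise full-sized segments varies from
		 * packet to packet, which is why skb->len alone jitters.)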
		 *
		 * "len" is invariant segment length, including TCP header.
		 */
		len += skb->data - skb_transport_header(skb);
		if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
		    /* If PSH is not set, packet should be
		     * full sized, provided peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows
		     * to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
			/* Subtract also invariant (if peer is RFC compliant),
			 * tcp header plus fixed timestamp option length.
			 * Resulting "len" is MSS free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}

static void tcp_incr_quickack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

static void tcp_enter_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	tcp_incr_quickack(sk);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if "quick" count is not exhausted
 * and the session is not interactive.
 */

static inline int tcp_in_quickack_mode(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}

static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr)
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
	if (!(tp->ecn_flags & TCP_ECN_OK))
		return;

	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
	case INET_ECN_NOT_ECT:
		/* Funny extension: if ECT is not set on a segment,
		 * and we have already seen ECT on a previous segment,
		 * it is probably a retransmit.
		 */
		if (tp->ecn_flags & TCP_ECN_SEEN)
			tcp_enter_quickack_mode((struct sock *)tp);
		break;
	case INET_ECN_CE:
		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		/* fall through */
	default:
		tp->ecn_flags |= TCP_ECN_SEEN;
	}
}

static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return 1;
	return 0;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_fixup_sndbuf(struct sock *sk)
{
	int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);

	sndmem *= TCP_INIT_CWND;
	if (sk->sk_sndbuf < sndmem)
		sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All of tcp_full_space() is split into two parts: the "network" buffer,
 * allocated forward and advertised in the receiver window (tp->rcv_wnd), and
 * the "application buffer", required to isolate scheduling/application
 * latencies from the network.
 * window_clamp is the maximal advertised window. It can be less than
 * tcp_full_space(), in which case tcp_full_space() - window_clamp
 * is reserved for the "application" buffer. The smaller window_clamp is,
 * the smoother our behaviour from the network's viewpoint, but the lower
 * the throughput and the higher the sensitivity of the connection to losses. 8)
 *
 * rcv_ssthresh is a stricter window_clamp used at the "slow start"
 * phase to predict further behaviour of this connection.
 * It is used for two goals:
 * - to enforce header prediction at sender, even when application
 *   requires some significant "application buffer". It is check #1.
 * - to prevent pruning of receive queue because of misprediction
 *   of receiver window. Check #2.
 *
 * The scheme does not work when sender sends good segments opening
 * window and then starts to feed us spaghetti. But it should work
 * in common situations. Otherwise, we have to rely on queue collapsing.
 */

/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Optimize this! */
	int truesize = tcp_win_from_space(skb->truesize) >> 1;
	int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;

	while (tp->rcv_ssthresh <= window) {
		if (truesize <= skb->len)
			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;

		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}

static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !sk_under_memory_pressure(sk)) {
		int incr;

		/* Check #2. Increase window, if skb with such overhead
		 * will fit to rcvbuf in future.
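		 *
		 * Roughly (editorially restating the test below):
		 * tcp_win_from_space(skb->truesize) is the amount of receive
		 * window that the memory charged for this skb may back; if
		 * that does not exceed the payload the skb actually carries,
		 * the per-byte overhead is acceptable and we open the window
		 * by two full segments, otherwise __tcp_grow_window() scales
		 * the increase down (possibly to nothing).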
		 */
		if (tcp_win_from_space(skb->truesize) <= skb->len)
			incr = 2 * tp->advmss;
		else
			incr = __tcp_grow_window(sk, skb);

		if (incr) {
			incr = max_t(int, incr, 2 * skb->len);
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
}

/* 3. Tuning rcvbuf, when connection enters established state. */

static void tcp_fixup_rcvbuf(struct sock *sk)
{
	u32 mss = tcp_sk(sk)->advmss;
	u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
	int rcvmem;

	/* Limit to 10 segments if mss <= 1460,
	 * or 14600/mss segments, with a minimum of two segments.
	 */
	if (mss > 1460)
		icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);

	rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
	while (tcp_win_from_space(rcvmem) < mss)
		rcvmem += 128;

	rcvmem *= icwnd;

	if (sk->sk_rcvbuf < rcvmem)
		sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}

/* 4. Try to fixup all. It is made immediately after connection enters
 *    established state.
 */
static void tcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int maxwin;

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
		tcp_fixup_rcvbuf(sk);
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
		tcp_fixup_sndbuf(sk);

	tp->rcvq_space.space = tp->rcv_wnd;

	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;

		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> sysctl_tcp_app_win),
					       4 * tp->advmss);
	}

	/* Force reservation of one segment. */
	if (sysctl_tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_ack.quick = 0;

	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
	    !sk_under_memory_pressure(sk) &&
	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
				    sysctl_tcp_rmem[2]);
	}
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}

/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about MSS used by the peer.
 * We have no direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
void tcp_initialize_rcv_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd / 2);
	hint = min(hint, TCP_MSS_DEFAULT);
	hint = max(hint, TCP_MIN_MSS);

	inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
EXPORT_SYMBOL(tcp_initialize_rcv_mss);

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://public.lanl.gov/radiant/pubs.html#DRS>
 *
 * More detail on this code can be found at
 * <http://staff.psc.edu/jheffner/>,
 * though this reference is out of date.  A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
	u32 new_sample = tp->rcv_rtt_est.rtt;
	long m = sample;

	if (m == 0)
		m = 1;

	if (new_sample != 0) {
		/* If we take larger samples in the non-timestamp
		 * case, we could grossly overestimate the RTT especially
		 * with chatty applications or bulk transfer apps which
		 * are stalled on filesystem I/O.
		 *
		 * Also, since we are only going for a minimum in the
		 * non-timestamp case, we do not smooth things out
		 * else with timestamps disabled convergence takes too
		 * long.
		 */
		if (!win_dep) {
			m -= (new_sample >> 3);
			new_sample += m;
		} else {
			m <<= 3;
			if (m < new_sample)
				new_sample = m;
		}
	} else {
		/* No previous measure. */
		new_sample = m << 3;
	}

	if (tp->rcv_rtt_est.rtt != new_sample)
		tp->rcv_rtt_est.rtt = new_sample;
}

static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
	if (tp->rcv_rtt_est.time == 0)
		goto new_measure;
	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
		return;
	tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1);

new_measure:
	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
	tp->rcv_rtt_est.time = tcp_time_stamp;
}

static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
					  const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (tp->rx_opt.rcv_tsecr &&
	    (TCP_SKB_CB(skb)->end_seq -
	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
}

/*
 * This function should be called every time data is copied to user space.
 * It calculates the appropriate TCP receive buffer space.
 */
void tcp_rcv_space_adjust(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time;
	int space;

	if (tp->rcvq_space.time == 0)
		goto new_measure;

	time = tcp_time_stamp - tp->rcvq_space.time;
	if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
		return;

	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);

	space = max(tp->rcvq_space.space, space);

	if (tp->rcvq_space.space != space) {
		int rcvmem;

		tp->rcvq_space.space = space;

		if (sysctl_tcp_moderate_rcvbuf &&
		    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
			int new_clamp = space;

			/* Receive space grows, normalize in order to
			 * take into account packet headers and sk_buff
			 * structure overhead.
			 */
			space /= tp->advmss;
			if (!space)
				space = 1;
			rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
			while (tcp_win_from_space(rcvmem) < tp->advmss)
				rcvmem += 128;
			space *= rcvmem;
			space = min(space, sysctl_tcp_rmem[2]);
			if (space > sk->sk_rcvbuf) {
				sk->sk_rcvbuf = space;

				/* Make the window clamp follow along.  */
				tp->window_clamp = new_clamp;
			}
		}
	}

new_measure:
	tp->rcvq_space.seq = tp->copied_seq;
	tp->rcvq_space.time = tcp_time_stamp;
}

/* There is something which you must keep in mind when you analyze the
 * behavior of the tp->ato delayed ack timeout interval.  When a
 * connection starts up, we want to ack as quickly as possible.  The
 * problem is that "good" TCP's do slow start at the beginning of data
 * transmission.  This means that until we send the first few ACK's the
 * sender will sit on his end and only queue most of his data, because
 * he can only send snd_cwnd unacked packets at any given time.  For
 * each ACK we send, he increments snd_cwnd and transmits more of his
 * queue.  -DaveM
 */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now;

	inet_csk_schedule_ack(sk);

	tcp_measure_rcv_mss(sk, skb);

	tcp_rcv_rtt_measure(tp);

	now = tcp_time_stamp;

	if (!icsk->icsk_ack.ato) {
		/* The _first_ data packet received, initialize
		 * delayed ACK engine.
		 */
		tcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long gap. Apparently sender failed to
			 * restart window, so that we send ACKs quickly.
			 */
			tcp_incr_quickack(sk);
			sk_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	TCP_ECN_check_ce(tp, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, skb);
}

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt;	/* RTT */

	/*	The following amusing code comes from Jacobson's
	 *	article in SIGCOMM '88.  Note that rtt and mdev
	 *	are scaled versions of rtt and mean deviation.
	 *	This is designed to be as fast as possible
	 *	m stands for "measurement".
	 *
	 *	In a 1990 paper the rto value is changed to:
	 *	RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO, when it should be decreased, increase
	 * too slowly, when it should be increased quickly, decrease too quickly
	 * etc. I guess in BSD RTO takes ONE value, so that it absolutely
	 * does not matter how to _calculate_ it. Seems it was a trap
	 * that VJ failed to avoid. 8)
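	 *
	 * In unscaled terms the updates below amount to
	 *	srtt = 7/8 srtt + 1/8 m
	 *	mdev = 3/4 mdev + 1/4 |m - srtt|
	 * (tp->srtt holds 8*RTT and tp->mdev holds 4*mdev), and the RTO
	 * later derived in tcp_set_rto() is approximately srtt + 4*mdev.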
8) 657 */ 658 if (m == 0) 659 m = 1; 660 if (tp->srtt != 0) { 661 m -= (tp->srtt >> 3); /* m is now error in rtt est */ 662 tp->srtt += m; /* rtt = 7/8 rtt + 1/8 new */ 663 if (m < 0) { 664 m = -m; /* m is now abs(error) */ 665 m -= (tp->mdev >> 2); /* similar update on mdev */ 666 /* This is similar to one of Eifel findings. 667 * Eifel blocks mdev updates when rtt decreases. 668 * This solution is a bit different: we use finer gain 669 * for mdev in this case (alpha*beta). 670 * Like Eifel it also prevents growth of rto, 671 * but also it limits too fast rto decreases, 672 * happening in pure Eifel. 673 */ 674 if (m > 0) 675 m >>= 3; 676 } else { 677 m -= (tp->mdev >> 2); /* similar update on mdev */ 678 } 679 tp->mdev += m; /* mdev = 3/4 mdev + 1/4 new */ 680 if (tp->mdev > tp->mdev_max) { 681 tp->mdev_max = tp->mdev; 682 if (tp->mdev_max > tp->rttvar) 683 tp->rttvar = tp->mdev_max; 684 } 685 if (after(tp->snd_una, tp->rtt_seq)) { 686 if (tp->mdev_max < tp->rttvar) 687 tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2; 688 tp->rtt_seq = tp->snd_nxt; 689 tp->mdev_max = tcp_rto_min(sk); 690 } 691 } else { 692 /* no previous measure. */ 693 tp->srtt = m << 3; /* take the measured time to be rtt */ 694 tp->mdev = m << 1; /* make sure rto = 3*rtt */ 695 tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk)); 696 tp->rtt_seq = tp->snd_nxt; 697 } 698 } 699 700 /* Calculate rto without backoff. This is the second half of Van Jacobson's 701 * routine referred to above. 702 */ 703 static inline void tcp_set_rto(struct sock *sk) 704 { 705 const struct tcp_sock *tp = tcp_sk(sk); 706 /* Old crap is replaced with new one. 8) 707 * 708 * More seriously: 709 * 1. If rtt variance happened to be less 50msec, it is hallucination. 710 * It cannot be less due to utterly erratic ACK generation made 711 * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ 712 * to do with delayed acks, because at cwnd>2 true delack timeout 713 * is invisible. Actually, Linux-2.4 also generates erratic 714 * ACKs in some circumstances. 715 */ 716 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); 717 718 /* 2. Fixups made earlier cannot be right. 719 * If we do not estimate RTO correctly without them, 720 * all the algo is pure shit and should be replaced 721 * with correct one. It is exactly, which we pretend to do. 722 */ 723 724 /* NOTE: clamping at TCP_RTO_MIN is not required, current algo 725 * guarantees that rto is higher. 726 */ 727 tcp_bound_rto(sk); 728 } 729 730 /* Save metrics learned by this TCP session. 731 This function is called only, when TCP finishes successfully 732 i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE. 733 */ 734 void tcp_update_metrics(struct sock *sk) 735 { 736 struct tcp_sock *tp = tcp_sk(sk); 737 struct dst_entry *dst = __sk_dst_get(sk); 738 739 if (sysctl_tcp_nometrics_save) 740 return; 741 742 dst_confirm(dst); 743 744 if (dst && (dst->flags & DST_HOST)) { 745 const struct inet_connection_sock *icsk = inet_csk(sk); 746 int m; 747 unsigned long rtt; 748 749 if (icsk->icsk_backoff || !tp->srtt) { 750 /* This session failed to estimate rtt. Why? 751 * Probably, no packets returned in time. 752 * Reset our results. 753 */ 754 if (!(dst_metric_locked(dst, RTAX_RTT))) 755 dst_metric_set(dst, RTAX_RTT, 0); 756 return; 757 } 758 759 rtt = dst_metric_rtt(dst, RTAX_RTT); 760 m = rtt - tp->srtt; 761 762 /* If newly calculated rtt larger than stored one, 763 * store new one. Otherwise, use EWMA. Remember, 764 * rtt overestimation is always better than underestimation. 
		 */
		if (!(dst_metric_locked(dst, RTAX_RTT))) {
			if (m <= 0)
				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
			else
				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
		}

		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
			unsigned long var;
			if (m < 0)
				m = -m;

			/* Scale deviation to rttvar fixed point */
			m >>= 1;
			if (m < tp->mdev)
				m = tp->mdev;

			var = dst_metric_rtt(dst, RTAX_RTTVAR);
			if (m >= var)
				var = m;
			else
				var -= (var - m) >> 2;

			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
		}

		if (tcp_in_initial_slowstart(tp)) {
			/* Slow start still did not finish. */
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
			if (!dst_metric_locked(dst, RTAX_CWND) &&
			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
				dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
			   icsk->icsk_ca_state == TCP_CA_Open) {
			/* Cong. avoidance phase, cwnd is reliable. */
			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
				dst_metric_set(dst, RTAX_SSTHRESH,
					       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst_metric_set(dst, RTAX_CWND,
					       (dst_metric(dst, RTAX_CWND) +
						tp->snd_cwnd) >> 1);
		} else {
			/* Else slow start did not finish, cwnd is non-sense,
			 * ssthresh may be also invalid.
			 */
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst_metric_set(dst, RTAX_CWND,
					       (dst_metric(dst, RTAX_CWND) +
						tp->snd_ssthresh) >> 1);
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
		}

		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
		}
	}
}

__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd)
		cwnd = TCP_INIT_CWND;
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}

/* Set slow start threshold and cwnd not falling to slow start */
void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tp->prior_ssthresh = 0;
	tp->bytes_acked = 0;
	if (icsk->icsk_ca_state < TCP_CA_CWR) {
		tp->undo_marker = 0;
		if (set_ssthresh)
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tp->snd_cwnd = min(tp->snd_cwnd,
				   tcp_packets_in_flight(tp) + 1U);
		tp->snd_cwnd_cnt = 0;
		tp->high_seq = tp->snd_nxt;
		tp->snd_cwnd_stamp = tcp_time_stamp;
		TCP_ECN_queue_cwr(tp);

		tcp_set_ca_state(sk, TCP_CA_CWR);
	}
}

/*
 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 * disables it when reordering is detected
 */
static void tcp_disable_fack(struct tcp_sock *tp)
{
	/* RFC3517 uses different metric in lost marker => reset on change */
	if (tcp_is_fack(tp))
		tp->lost_skb_hint = NULL;
	tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED;
}

/* Take note that the peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
}

/* Initialize metrics on socket. */

static void tcp_init_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	if (dst_metric_locked(dst, RTAX_CWND))
		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
	if (dst_metric(dst, RTAX_SSTHRESH)) {
		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	if (dst_metric(dst, RTAX_REORDERING) &&
	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = dst_metric(dst, RTAX_REORDERING);
	}

	if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
		goto reset;

	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is time passed after "normal" sized packet
	 * is sent until it is ACKed. In normal circumstances sending small
	 * packets force peer to delay ACKs and calculation is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If peer tries to make some clever
	 * tricks sort of "quick acks" for time long enough to decrease RTT
	 * to low value, and then abruptly stops doing it and starts to delay
	 * ACKs, wait for troubles.
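	 *
	 * For example (illustrative numbers): if the cached RTT for this
	 * destination is 200ms but the 3WHS produced an srtt of 40ms, the
	 * code below adopts the cached 200ms, and likewise takes the larger
	 * of the cached rttvar and the current mdev.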
	 */
	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
		tp->rtt_seq = tp->snd_nxt;
	}
	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	tcp_set_rto(sk);
reset:
	if (tp->srtt == 0) {
		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
		 * 3WHS. This is most likely due to retransmission,
		 * including a spurious one. Reset the RTO back to 3 secs
		 * from the more aggressive 1 sec to avoid more spurious
		 * retransmission.
		 */
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted. In light of RFC6298 more aggressive 1 sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (metric > tp->reordering) {
		int mib_idx;

		tp->reordering = min(TCP_MAX_REORDERING, metric);

		/* This exciting event is worth remembering. 8) */
		if (ts)
			mib_idx = LINUX_MIB_TCPTSREORDER;
		else if (tcp_is_reno(tp))
			mib_idx = LINUX_MIB_TCPRENOREORDER;
		else if (tcp_is_fack(tp))
			mib_idx = LINUX_MIB_TCPFACKREORDER;
		else
			mib_idx = LINUX_MIB_TCPSACKREORDER;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
		       tp->reordering,
		       tp->fackets_out,
		       tp->sacked_out,
		       tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tcp_disable_fack(tp);
	}

	if (metric > 0)
		tcp_disable_early_retrans(tp);
}

/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
	if ((tp->retransmit_skb_hint == NULL) ||
	    before(TCP_SKB_CB(skb)->seq,
		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
		tp->retransmit_skb_hint = skb;

	if (!tp->lost_out ||
	    after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
		tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}

static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tcp_verify_retransmit_hint(tp, skb);

		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
					    struct sk_buff *skb)
{
	tcp_verify_retransmit_hint(tp, skb);

	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
		tp->lost_out += tcp_skb_pcount(skb);
		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
	}
}

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in queue with these bits set are counted in variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag	InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R	1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form a finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of two flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be commutative,
 * so we are allowed not to be bothered by the order of our actions
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * The reordering metric is the maximal distance which a packet can be
 * displaced in the packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for retransmitted and already SACKed segment -> reordering.
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits to
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included in the range though it would be valid,
 * because it means that the receiver is rather inconsistent with itself,
 * reporting SACK reneging when it should advance SND.UNA. Such a SACK block
 * is, however, perfectly valid in light of RFC2018, which explicitly states
 * that "SACK block MUST reflect the newest segment.  Even if the newest
 * segment is going to be discarded ...", not that it looks very clever
 * in case of head skb. Due to potential receiver-driven attacks, we
 * choose to avoid immediate execution of a walk in write queue due to
 * reneging and defer head skb's loss recovery to standard loss recovery
 * procedure that will eventually trigger (nothing forbids us doing this).
 *
 * Implements also blockage to start_seq wrap-around. Problem lies in the
 * fact that though start_seq (s) is before end_seq (i.e., not reversed),
 * there's no guarantee that it will be before snd_nxt (n).  The problem
 * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
 * wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable but it's better still to discard such
 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
 * equal to the ideal case (infinite seqno space without wrap caused issues).
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, the D-SACK block must not go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But there all simplicity
 * ends, TCP might receive valid D-SACKs below that. As long as they reside
 * fully below undo_marker they do not affect behavior in any way and can
 * therefore be safely ignored. In rare cases (which are more or less
 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
 * fragmentation and packet reordering past skb's retransmission. To consider
 * them correctly, the acceptable range must be extended even more though
 * the exact amount is rather hard to quantify. However, tp->max_window can
 * be used as an exaggerated estimate.
 */
static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
				  u32 start_seq, u32 end_seq)
{
	/* Too far in future, or reversed (interpretation is ambiguous) */
	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
		return 0;

	/* Nasty start_seq wrap-around check (see comments above) */
	if (!before(start_seq, tp->snd_nxt))
		return 0;

	/* In outstanding window? ...This is valid exit for D-SACKs too.
	 * start_seq == snd_una is non-sensical (see comments above)
	 */
	if (after(start_seq, tp->snd_una))
		return 1;

	if (!is_dsack || !tp->undo_marker)
		return 0;

	/* ...Then it's D-SACK, and must reside below snd_una completely */
	if (after(end_seq, tp->snd_una))
		return 0;

	if (!before(start_seq, tp->undo_marker))
		return 1;

	/* Too old */
	if (!after(end_seq, tp->undo_marker))
		return 0;

	/* Undo_marker boundary crossing (overestimates a lot). Known already:
	 * start_seq < undo_marker and end_seq >= undo_marker.
	 */
	return !before(start_seq, end_seq - tp->max_window);
}

/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
 * Event "B". Later note: FACK people cheated me again 8), we have to account
 * for reordering! Ugly, but should help.
 *
 * Search retransmitted skbs from write_queue that were sent when snd_nxt was
 * less than what is now known to be received by the other end (derived from
 * highest SACK block). Also calculate the lowest snd_nxt among the remaining
 * retransmitted skbs to avoid some costly processing per ACK.
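 *
 * In other words (restating the walk below): a segment still marked
 * SACKED_RETRANS whose recorded ack_seq, the snd_nxt value at the time it
 * was retransmitted, is already below the highest SACKed sequence must
 * itself have been lost, since data sent after the retransmission has
 * already arrived; such segments are re-marked lost.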
 */
static void tcp_mark_lost_retrans(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt = 0;
	u32 new_low_seq = tp->snd_nxt;
	u32 received_upto = tcp_highest_sack_seq(tp);

	if (!tcp_is_fack(tp) || !tp->retrans_out ||
	    !after(received_upto, tp->lost_retrans_low) ||
	    icsk->icsk_ca_state != TCP_CA_Recovery)
		return;

	tcp_for_write_queue(skb, sk) {
		u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;

		if (skb == tcp_send_head(sk))
			break;
		if (cnt == tp->retrans_out)
			break;
		if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
			continue;

		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
			continue;

		/* TODO: We would like to get rid of tcp_is_fack(tp) only
		 * constraint here (see above) but figuring out that at
		 * least tp->reordering SACK blocks reside between ack_seq
		 * and received_upto is not an easy task to do cheaply with
		 * the available datastructures.
		 *
		 * Whether FACK should check here for tp->reordering segs
		 * in-between one could argue for either way (it would be
		 * rather simple to implement as we could count fack_count
		 * during the walk and do tp->fackets_out - fack_count).
		 */
		if (after(received_upto, ack_seq)) {
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
			tp->retrans_out -= tcp_skb_pcount(skb);

			tcp_skb_mark_lost_uncond_verify(tp, skb);
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
		} else {
			if (before(ack_seq, new_low_seq))
				new_low_seq = ack_seq;
			cnt += tcp_skb_pcount(skb);
		}
	}

	if (tp->retrans_out)
		tp->lost_retrans_low = new_low_seq;
}

static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
			   struct tcp_sack_block_wire *sp, int num_sacks,
			   u32 prior_snd_una)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
	int dup_sack = 0;

	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
		dup_sack = 1;
		tcp_dsack_seen(tp);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
	} else if (num_sacks > 1) {
		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);

		if (!after(end_seq_0, end_seq_1) &&
		    !before(start_seq_0, start_seq_1)) {
			dup_sack = 1;
			tcp_dsack_seen(tp);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPDSACKOFORECV);
		}
	}

	/* D-SACK for already forgotten data... Do dumb counting. */
	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
	    !after(end_seq_0, prior_snd_una) &&
	    after(end_seq_0, tp->undo_marker))
		tp->undo_retrans--;

	return dup_sack;
}

struct tcp_sacktag_state {
	int reord;
	int fack_count;
	int flag;
};

/* Check if skb is fully within the SACK block. In presence of GSO skbs,
 * the incoming SACK may not exactly match but we can find smaller MSS
 * aligned portion of it that matches. Therefore we might need to fragment
 * which may fail and create some hassle (caller must handle error case
 * returns).
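 *
 * For example (illustrative numbers): a GSO skb covering 2920 bytes (two
 * 1460-byte segments) that is only partially covered by the SACK block is
 * split at the MSS boundary so the covered 1460-byte portion can be tagged.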
 *
 * FIXME: this could be merged to shift decision code
 */
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
				 u32 start_seq, u32 end_seq)
{
	int in_sack, err;
	unsigned int pkt_len;
	unsigned int mss;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
		mss = tcp_skb_mss(skb);
		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);

		if (!in_sack) {
			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				pkt_len = mss;
		} else {
			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
			if (pkt_len < mss)
				return -EINVAL;
		}

		/* Round if necessary so that SACKs cover only full MSSes
		 * and/or the remaining small portion (if present)
		 */
		if (pkt_len > mss) {
			unsigned int new_len = (pkt_len / mss) * mss;
			if (!in_sack && new_len < pkt_len) {
				new_len += mss;
				if (new_len > skb->len)
					return 0;
			}
			pkt_len = new_len;
		}
		err = tcp_fragment(sk, skb, pkt_len, mss);
		if (err < 0)
			return err;
	}

	return in_sack;
}

/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
static u8 tcp_sacktag_one(struct sock *sk,
			  struct tcp_sacktag_state *state, u8 sacked,
			  u32 start_seq, u32 end_seq,
			  int dup_sack, int pcount)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int fack_count = state->fack_count;

	/* Account D-SACK for retransmitted packet. */
	if (dup_sack && (sacked & TCPCB_RETRANS)) {
		if (tp->undo_marker && tp->undo_retrans &&
		    after(end_seq, tp->undo_marker))
			tp->undo_retrans--;
		if (sacked & TCPCB_SACKED_ACKED)
			state->reord = min(fack_count, state->reord);
	}

	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
	if (!after(end_seq, tp->snd_una))
		return sacked;

	if (!(sacked & TCPCB_SACKED_ACKED)) {
		if (sacked & TCPCB_SACKED_RETRANS) {
			/* If the segment is not tagged as lost,
			 * we do not clear RETRANS, believing
			 * that retransmission is still in flight.
			 */
			if (sacked & TCPCB_LOST) {
				sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
				tp->lost_out -= pcount;
				tp->retrans_out -= pcount;
			}
		} else {
			if (!(sacked & TCPCB_RETRANS)) {
				/* New sack for not retransmitted frame,
				 * which was in hole. It is reordering.
				 */
				if (before(start_seq,
					   tcp_highest_sack_seq(tp)))
					state->reord = min(fack_count,
							   state->reord);

				/* SACK enhanced F-RTO (RFC4138; Appendix B) */
				if (!after(end_seq, tp->frto_highmark))
					state->flag |= FLAG_ONLY_ORIG_SACKED;
			}

			if (sacked & TCPCB_LOST) {
				sacked &= ~TCPCB_LOST;
				tp->lost_out -= pcount;
			}
		}

		sacked |= TCPCB_SACKED_ACKED;
		state->flag |= FLAG_DATA_SACKED;
		tp->sacked_out += pcount;

		fack_count += pcount;

		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
		if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
			tp->lost_cnt_hint += pcount;

		if (fack_count > tp->fackets_out)
			tp->fackets_out = fack_count;
	}

	/* D-SACK. We can detect redundant retransmission in S|R and plain R
	 * frames and clear it.  undo_retrans is decreased above, L|R frames
	 * are accounted above as well.
	 */
	if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
		sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= pcount;
	}

	return sacked;
}

/* Shift newly-SACKed bytes from this skb to the immediately previous
 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
 */
static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
			   struct tcp_sacktag_state *state,
			   unsigned int pcount, int shifted, int mss,
			   int dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */

	BUG_ON(!pcount);

	/* Adjust counters and hints for the newly sacked sequence
	 * range but discard the return value since prev is already
	 * marked. We must tag the range first because the seq
	 * advancement below implicitly advances
	 * tcp_highest_sack_seq() when skb is highest_sack.
	 */
	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
			start_seq, end_seq, dup_sack, pcount);

	if (skb == tp->lost_skb_hint)
		tp->lost_cnt_hint += pcount;

	TCP_SKB_CB(prev)->end_seq += shifted;
	TCP_SKB_CB(skb)->seq += shifted;

	skb_shinfo(prev)->gso_segs += pcount;
	BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
	skb_shinfo(skb)->gso_segs -= pcount;

	/* When we're adding to gso_segs == 1, gso_size will be zero,
	 * in theory this shouldn't be necessary but as long as DSACK
	 * code can come after this skb later on it's better to keep
	 * setting gso_size to something.
	 */
	if (!skb_shinfo(prev)->gso_size) {
		skb_shinfo(prev)->gso_size = mss;
		skb_shinfo(prev)->gso_type = sk->sk_gso_type;
	}

	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
	if (skb_shinfo(skb)->gso_segs <= 1) {
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	}

	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);

	if (skb->len > 0) {
		BUG_ON(!tcp_skb_pcount(skb));
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
		return 0;
	}

	/* Whole SKB was eaten :-) */

	if (skb == tp->retransmit_skb_hint)
		tp->retransmit_skb_hint = prev;
	if (skb == tp->scoreboard_skb_hint)
		tp->scoreboard_skb_hint = prev;
	if (skb == tp->lost_skb_hint) {
		tp->lost_skb_hint = prev;
		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
	}

	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
	if (skb == tcp_highest_sack(sk))
		tcp_advance_highest_sack(sk, skb);

	tcp_unlink_write_queue(skb, sk);
	sk_wmem_free_skb(sk, skb);

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);

	return 1;
}

/* I wish gso_size would have a bit more sane initialization than
 * something-or-zero which complicates things
 */
static int tcp_skb_seglen(const struct sk_buff *skb)
{
	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}

/* Shifting pages past head area doesn't work */
static int skb_can_shift(const struct sk_buff *skb)
{
	return !skb_headlen(skb) && skb_is_nonlinear(skb);
}

/* Try collapsing SACK blocks spanning across multiple skbs to a single
 * skb.
 */
static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
					  struct tcp_sacktag_state *state,
					  u32 start_seq, u32 end_seq,
					  int dup_sack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *prev;
	int mss;
	int pcount = 0;
	int len;
	int in_sack;

	if (!sk_can_gso(sk))
		goto fallback;

	/* Normally R but no L won't result in plain S */
	if (!dup_sack &&
	    (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
		goto fallback;
	if (!skb_can_shift(skb))
		goto fallback;
	/* This frame is about to be dropped (was ACKed). */
	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
		goto fallback;

	/* Can only happen with delayed DSACK + discard craziness */
	if (unlikely(skb == tcp_write_queue_head(sk)))
		goto fallback;
	prev = tcp_write_queue_prev(sk, skb);

	if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
		goto fallback;

	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);

	if (in_sack) {
		len = skb->len;
		pcount = tcp_skb_pcount(skb);
		mss = tcp_skb_seglen(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;
	} else {
		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
			goto noop;
		/* CHECKME: This is non-MSS split case only?, this will
		 * cause skipped skbs due to advancing loop btw, original
		 * has that feature too
		 */
		if (tcp_skb_pcount(skb) <= 1)
			goto noop;

		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
		if (!in_sack) {
			/* TODO: head merge to next could be attempted here
			 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
			 * though it might not be worth the additional hassle
			 *
			 * ...we can probably just fallback to what was done
			 * previously. We could try merging non-SACKed ones
			 * as well but it probably isn't going to pay off
			 * because later SACKs might again split them, and
			 * it would make skb timestamp tracking a considerably
			 * harder problem.
			 */
			goto fallback;
		}

		len = end_seq - TCP_SKB_CB(skb)->seq;
		BUG_ON(len < 0);
		BUG_ON(len > skb->len);

		/* MSS boundaries should be honoured or else pcount will
		 * severely break even though it makes things a bit trickier.
		 * Optimize common case to avoid most of the divides
		 */
		mss = tcp_skb_mss(skb);

		/* TODO: Fix DSACKs to not fragment already SACKed and we can
		 * drop this restriction as unnecessary
		 */
		if (mss != tcp_skb_seglen(prev))
			goto fallback;

		if (len == mss) {
			pcount = 1;
		} else if (len < mss) {
			goto noop;
		} else {
			pcount = len / mss;
			len = pcount * mss;
		}
	}

	/* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
		goto fallback;

	if (!skb_shift(prev, skb, len))
		goto fallback;
	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
		goto out;

	/* Hole filled allows collapsing with the next as well, this is very
	 * useful when hole on every nth skb pattern happens
	 */
	if (prev == tcp_write_queue_tail(sk))
		goto out;
	skb = tcp_write_queue_next(sk, prev);

	if (!skb_can_shift(skb) ||
	    (skb == tcp_send_head(sk)) ||
	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
	    (mss != tcp_skb_seglen(skb)))
		goto out;

	len = skb->len;
	if (skb_shift(prev, skb, len)) {
		pcount += tcp_skb_pcount(skb);
		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
	}

out:
	state->fack_count += pcount;
	return prev;

noop:
	return skb;

fallback:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
	return NULL;
}

static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
					struct tcp_sack_block *next_dup,
					struct tcp_sacktag_state *state,
					u32 start_seq, u32 end_seq,
					int dup_sack_in)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *tmp;

	tcp_for_write_queue_from(skb, sk) {
		int in_sack = 0;
		int dup_sack = dup_sack_in;

		if (skb == tcp_send_head(sk))
			break;

		/* queue is in-order => we can short-circuit the walk early */
		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
			break;

		if ((next_dup != NULL) &&
		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
			in_sack = tcp_match_skb_to_sack(sk, skb,
							next_dup->start_seq,
							next_dup->end_seq);
			if (in_sack > 0)
				dup_sack = 1;
		}

		/* skb reference here is a bit tricky to get right, since
		 * shifting can eat and free both this skb and the next,
		 * so not even _safe variant of the loop is enough.
1671 */ 1672 if (in_sack <= 0) { 1673 tmp = tcp_shift_skb_data(sk, skb, state, 1674 start_seq, end_seq, dup_sack); 1675 if (tmp != NULL) { 1676 if (tmp != skb) { 1677 skb = tmp; 1678 continue; 1679 } 1680 1681 in_sack = 0; 1682 } else { 1683 in_sack = tcp_match_skb_to_sack(sk, skb, 1684 start_seq, 1685 end_seq); 1686 } 1687 } 1688 1689 if (unlikely(in_sack < 0)) 1690 break; 1691 1692 if (in_sack) { 1693 TCP_SKB_CB(skb)->sacked = 1694 tcp_sacktag_one(sk, 1695 state, 1696 TCP_SKB_CB(skb)->sacked, 1697 TCP_SKB_CB(skb)->seq, 1698 TCP_SKB_CB(skb)->end_seq, 1699 dup_sack, 1700 tcp_skb_pcount(skb)); 1701 1702 if (!before(TCP_SKB_CB(skb)->seq, 1703 tcp_highest_sack_seq(tp))) 1704 tcp_advance_highest_sack(sk, skb); 1705 } 1706 1707 state->fack_count += tcp_skb_pcount(skb); 1708 } 1709 return skb; 1710 } 1711 1712 /* Avoid all extra work that is being done by sacktag while walking in 1713 * a normal way 1714 */ 1715 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, 1716 struct tcp_sacktag_state *state, 1717 u32 skip_to_seq) 1718 { 1719 tcp_for_write_queue_from(skb, sk) { 1720 if (skb == tcp_send_head(sk)) 1721 break; 1722 1723 if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) 1724 break; 1725 1726 state->fack_count += tcp_skb_pcount(skb); 1727 } 1728 return skb; 1729 } 1730 1731 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, 1732 struct sock *sk, 1733 struct tcp_sack_block *next_dup, 1734 struct tcp_sacktag_state *state, 1735 u32 skip_to_seq) 1736 { 1737 if (next_dup == NULL) 1738 return skb; 1739 1740 if (before(next_dup->start_seq, skip_to_seq)) { 1741 skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); 1742 skb = tcp_sacktag_walk(skb, sk, NULL, state, 1743 next_dup->start_seq, next_dup->end_seq, 1744 1); 1745 } 1746 1747 return skb; 1748 } 1749 1750 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) 1751 { 1752 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1753 } 1754 1755 static int 1756 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, 1757 u32 prior_snd_una) 1758 { 1759 const struct inet_connection_sock *icsk = inet_csk(sk); 1760 struct tcp_sock *tp = tcp_sk(sk); 1761 const unsigned char *ptr = (skb_transport_header(ack_skb) + 1762 TCP_SKB_CB(ack_skb)->sacked); 1763 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); 1764 struct tcp_sack_block sp[TCP_NUM_SACKS]; 1765 struct tcp_sack_block *cache; 1766 struct tcp_sacktag_state state; 1767 struct sk_buff *skb; 1768 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); 1769 int used_sacks; 1770 int found_dup_sack = 0; 1771 int i, j; 1772 int first_sack_index; 1773 1774 state.flag = 0; 1775 state.reord = tp->packets_out; 1776 1777 if (!tp->sacked_out) { 1778 if (WARN_ON(tp->fackets_out)) 1779 tp->fackets_out = 0; 1780 tcp_highest_sack_reset(sk); 1781 } 1782 1783 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, 1784 num_sacks, prior_snd_una); 1785 if (found_dup_sack) 1786 state.flag |= FLAG_DSACKING_ACK; 1787 1788 /* Eliminate too old ACKs, but take into 1789 * account more or less fresh ones, they can 1790 * contain valid SACK info. 
1791 */ 1792 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) 1793 return 0; 1794 1795 if (!tp->packets_out) 1796 goto out; 1797 1798 used_sacks = 0; 1799 first_sack_index = 0; 1800 for (i = 0; i < num_sacks; i++) { 1801 int dup_sack = !i && found_dup_sack; 1802 1803 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); 1804 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); 1805 1806 if (!tcp_is_sackblock_valid(tp, dup_sack, 1807 sp[used_sacks].start_seq, 1808 sp[used_sacks].end_seq)) { 1809 int mib_idx; 1810 1811 if (dup_sack) { 1812 if (!tp->undo_marker) 1813 mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; 1814 else 1815 mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; 1816 } else { 1817 /* Don't count olds caused by ACK reordering */ 1818 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && 1819 !after(sp[used_sacks].end_seq, tp->snd_una)) 1820 continue; 1821 mib_idx = LINUX_MIB_TCPSACKDISCARD; 1822 } 1823 1824 NET_INC_STATS_BH(sock_net(sk), mib_idx); 1825 if (i == 0) 1826 first_sack_index = -1; 1827 continue; 1828 } 1829 1830 /* Ignore very old stuff early */ 1831 if (!after(sp[used_sacks].end_seq, prior_snd_una)) 1832 continue; 1833 1834 used_sacks++; 1835 } 1836 1837 /* order SACK blocks to allow in order walk of the retrans queue */ 1838 for (i = used_sacks - 1; i > 0; i--) { 1839 for (j = 0; j < i; j++) { 1840 if (after(sp[j].start_seq, sp[j + 1].start_seq)) { 1841 swap(sp[j], sp[j + 1]); 1842 1843 /* Track where the first SACK block goes to */ 1844 if (j == first_sack_index) 1845 first_sack_index = j + 1; 1846 } 1847 } 1848 } 1849 1850 skb = tcp_write_queue_head(sk); 1851 state.fack_count = 0; 1852 i = 0; 1853 1854 if (!tp->sacked_out) { 1855 /* It's already past, so skip checking against it */ 1856 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1857 } else { 1858 cache = tp->recv_sack_cache; 1859 /* Skip empty blocks in at head of the cache */ 1860 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && 1861 !cache->end_seq) 1862 cache++; 1863 } 1864 1865 while (i < used_sacks) { 1866 u32 start_seq = sp[i].start_seq; 1867 u32 end_seq = sp[i].end_seq; 1868 int dup_sack = (found_dup_sack && (i == first_sack_index)); 1869 struct tcp_sack_block *next_dup = NULL; 1870 1871 if (found_dup_sack && ((i + 1) == first_sack_index)) 1872 next_dup = &sp[i + 1]; 1873 1874 /* Skip too early cached blocks */ 1875 while (tcp_sack_cache_ok(tp, cache) && 1876 !before(start_seq, cache->end_seq)) 1877 cache++; 1878 1879 /* Can skip some work by looking recv_sack_cache? */ 1880 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && 1881 after(end_seq, cache->start_seq)) { 1882 1883 /* Head todo? */ 1884 if (before(start_seq, cache->start_seq)) { 1885 skb = tcp_sacktag_skip(skb, sk, &state, 1886 start_seq); 1887 skb = tcp_sacktag_walk(skb, sk, next_dup, 1888 &state, 1889 start_seq, 1890 cache->start_seq, 1891 dup_sack); 1892 } 1893 1894 /* Rest of the block already fully processed? */ 1895 if (!after(end_seq, cache->end_seq)) 1896 goto advance_sp; 1897 1898 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, 1899 &state, 1900 cache->end_seq); 1901 1902 /* ...tail remains todo... */ 1903 if (tcp_highest_sack_seq(tp) == cache->end_seq) { 1904 /* ...but better entrypoint exists! 
*/ 1905 skb = tcp_highest_sack(sk); 1906 if (skb == NULL) 1907 break; 1908 state.fack_count = tp->fackets_out; 1909 cache++; 1910 goto walk; 1911 } 1912 1913 skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq); 1914 /* Check overlap against next cached too (past this one already) */ 1915 cache++; 1916 continue; 1917 } 1918 1919 if (!before(start_seq, tcp_highest_sack_seq(tp))) { 1920 skb = tcp_highest_sack(sk); 1921 if (skb == NULL) 1922 break; 1923 state.fack_count = tp->fackets_out; 1924 } 1925 skb = tcp_sacktag_skip(skb, sk, &state, start_seq); 1926 1927 walk: 1928 skb = tcp_sacktag_walk(skb, sk, next_dup, &state, 1929 start_seq, end_seq, dup_sack); 1930 1931 advance_sp: 1932 /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct 1933 * due to in-order walk 1934 */ 1935 if (after(end_seq, tp->frto_highmark)) 1936 state.flag &= ~FLAG_ONLY_ORIG_SACKED; 1937 1938 i++; 1939 } 1940 1941 /* Clear the head of the cache sack blocks so we can skip it next time */ 1942 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { 1943 tp->recv_sack_cache[i].start_seq = 0; 1944 tp->recv_sack_cache[i].end_seq = 0; 1945 } 1946 for (j = 0; j < used_sacks; j++) 1947 tp->recv_sack_cache[i++] = sp[j]; 1948 1949 tcp_mark_lost_retrans(sk); 1950 1951 tcp_verify_left_out(tp); 1952 1953 if ((state.reord < tp->fackets_out) && 1954 ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) && 1955 (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) 1956 tcp_update_reordering(sk, tp->fackets_out - state.reord, 0); 1957 1958 out: 1959 1960 #if FASTRETRANS_DEBUG > 0 1961 WARN_ON((int)tp->sacked_out < 0); 1962 WARN_ON((int)tp->lost_out < 0); 1963 WARN_ON((int)tp->retrans_out < 0); 1964 WARN_ON((int)tcp_packets_in_flight(tp) < 0); 1965 #endif 1966 return state.flag; 1967 } 1968 1969 /* Limits sacked_out so that sum with lost_out isn't ever larger than 1970 * packets_out. Returns zero if sacked_out adjustement wasn't necessary. 1971 */ 1972 static int tcp_limit_reno_sacked(struct tcp_sock *tp) 1973 { 1974 u32 holes; 1975 1976 holes = max(tp->lost_out, 1U); 1977 holes = min(holes, tp->packets_out); 1978 1979 if ((tp->sacked_out + holes) > tp->packets_out) { 1980 tp->sacked_out = tp->packets_out - holes; 1981 return 1; 1982 } 1983 return 0; 1984 } 1985 1986 /* If we receive more dupacks than we expected counting segments 1987 * in assumption of absent reordering, interpret this as reordering. 1988 * The only another reason could be bug in receiver TCP. 1989 */ 1990 static void tcp_check_reno_reordering(struct sock *sk, const int addend) 1991 { 1992 struct tcp_sock *tp = tcp_sk(sk); 1993 if (tcp_limit_reno_sacked(tp)) 1994 tcp_update_reordering(sk, tp->packets_out + addend, 0); 1995 } 1996 1997 /* Emulate SACKs for SACKless connection: account for a new dupack. */ 1998 1999 static void tcp_add_reno_sack(struct sock *sk) 2000 { 2001 struct tcp_sock *tp = tcp_sk(sk); 2002 tp->sacked_out++; 2003 tcp_check_reno_reordering(sk, 0); 2004 tcp_verify_left_out(tp); 2005 } 2006 2007 /* Account for ACK, ACKing some data in Reno Recovery phase. */ 2008 2009 static void tcp_remove_reno_sacks(struct sock *sk, int acked) 2010 { 2011 struct tcp_sock *tp = tcp_sk(sk); 2012 2013 if (acked > 0) { 2014 /* One ACK acked hole. The rest eat duplicate ACKs. 
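		 *
		 * Worked example (added; hypothetical numbers): if this
		 * cumulative ACK covers acked = 3 segments while
		 * sacked_out = 5 dupacks are being emulated, one segment
		 * accounts for the filled hole and the other two shrink
		 * sacked_out from 5 to 3.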
*/ 2015 if (acked - 1 >= tp->sacked_out) 2016 tp->sacked_out = 0; 2017 else 2018 tp->sacked_out -= acked - 1; 2019 } 2020 tcp_check_reno_reordering(sk, acked); 2021 tcp_verify_left_out(tp); 2022 } 2023 2024 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) 2025 { 2026 tp->sacked_out = 0; 2027 } 2028 2029 static int tcp_is_sackfrto(const struct tcp_sock *tp) 2030 { 2031 return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp); 2032 } 2033 2034 /* F-RTO can only be used if TCP has never retransmitted anything other than 2035 * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) 2036 */ 2037 int tcp_use_frto(struct sock *sk) 2038 { 2039 const struct tcp_sock *tp = tcp_sk(sk); 2040 const struct inet_connection_sock *icsk = inet_csk(sk); 2041 struct sk_buff *skb; 2042 2043 if (!sysctl_tcp_frto) 2044 return 0; 2045 2046 /* MTU probe and F-RTO won't really play nicely along currently */ 2047 if (icsk->icsk_mtup.probe_size) 2048 return 0; 2049 2050 if (tcp_is_sackfrto(tp)) 2051 return 1; 2052 2053 /* Avoid expensive walking of rexmit queue if possible */ 2054 if (tp->retrans_out > 1) 2055 return 0; 2056 2057 skb = tcp_write_queue_head(sk); 2058 if (tcp_skb_is_last(sk, skb)) 2059 return 1; 2060 skb = tcp_write_queue_next(sk, skb); /* Skips head */ 2061 tcp_for_write_queue_from(skb, sk) { 2062 if (skb == tcp_send_head(sk)) 2063 break; 2064 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 2065 return 0; 2066 /* Short-circuit when first non-SACKed skb has been checked */ 2067 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 2068 break; 2069 } 2070 return 1; 2071 } 2072 2073 /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO 2074 * recovery a bit and use heuristics in tcp_process_frto() to detect if 2075 * the RTO was spurious. Only clear SACKED_RETRANS of the head here to 2076 * keep retrans_out counting accurate (with SACK F-RTO, other than head 2077 * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS 2078 * bits are handled if the Loss state is really to be entered (in 2079 * tcp_enter_frto_loss). 2080 * 2081 * Do like tcp_enter_loss() would; when RTO expires the second time it 2082 * does: 2083 * "Reduce ssthresh if it has not yet been made inside this window." 2084 */ 2085 void tcp_enter_frto(struct sock *sk) 2086 { 2087 const struct inet_connection_sock *icsk = inet_csk(sk); 2088 struct tcp_sock *tp = tcp_sk(sk); 2089 struct sk_buff *skb; 2090 2091 if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) || 2092 tp->snd_una == tp->high_seq || 2093 ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) && 2094 !icsk->icsk_retransmits)) { 2095 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2096 /* Our state is too optimistic in ssthresh() call because cwnd 2097 * is not reduced until tcp_enter_frto_loss() when previous F-RTO 2098 * recovery has not yet completed. Pattern would be this: RTO, 2099 * Cumulative ACK, RTO (2xRTO for the same segment does not end 2100 * up here twice). 2101 * RFC4138 should be more specific on what to do, even though 2102 * RTO is quite unlikely to occur after the first Cumulative ACK 2103 * due to back-off and complexity of triggering events ... 2104 */ 2105 if (tp->frto_counter) { 2106 u32 stored_cwnd; 2107 stored_cwnd = tp->snd_cwnd; 2108 tp->snd_cwnd = 2; 2109 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 2110 tp->snd_cwnd = stored_cwnd; 2111 } else { 2112 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 2113 } 2114 /* ... 
in theory, cong.control module could do "any tricks" in 2115 * ssthresh(), which means that ca_state, lost bits and lost_out 2116 * counter would have to be faked before the call occurs. We 2117 * consider that too expensive, unlikely and hacky, so modules 2118 * using these in ssthresh() must deal these incompatibility 2119 * issues if they receives CA_EVENT_FRTO and frto_counter != 0 2120 */ 2121 tcp_ca_event(sk, CA_EVENT_FRTO); 2122 } 2123 2124 tp->undo_marker = tp->snd_una; 2125 tp->undo_retrans = 0; 2126 2127 skb = tcp_write_queue_head(sk); 2128 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 2129 tp->undo_marker = 0; 2130 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2131 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 2132 tp->retrans_out -= tcp_skb_pcount(skb); 2133 } 2134 tcp_verify_left_out(tp); 2135 2136 /* Too bad if TCP was application limited */ 2137 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); 2138 2139 /* Earlier loss recovery underway (see RFC4138; Appendix B). 2140 * The last condition is necessary at least in tp->frto_counter case. 2141 */ 2142 if (tcp_is_sackfrto(tp) && (tp->frto_counter || 2143 ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) && 2144 after(tp->high_seq, tp->snd_una)) { 2145 tp->frto_highmark = tp->high_seq; 2146 } else { 2147 tp->frto_highmark = tp->snd_nxt; 2148 } 2149 tcp_set_ca_state(sk, TCP_CA_Disorder); 2150 tp->high_seq = tp->snd_nxt; 2151 tp->frto_counter = 1; 2152 } 2153 2154 /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO, 2155 * which indicates that we should follow the traditional RTO recovery, 2156 * i.e. mark everything lost and do go-back-N retransmission. 2157 */ 2158 static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) 2159 { 2160 struct tcp_sock *tp = tcp_sk(sk); 2161 struct sk_buff *skb; 2162 2163 tp->lost_out = 0; 2164 tp->retrans_out = 0; 2165 if (tcp_is_reno(tp)) 2166 tcp_reset_reno_sack(tp); 2167 2168 tcp_for_write_queue(skb, sk) { 2169 if (skb == tcp_send_head(sk)) 2170 break; 2171 2172 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 2173 /* 2174 * Count the retransmission made on RTO correctly (only when 2175 * waiting for the first ACK and did not get it)... 2176 */ 2177 if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) { 2178 /* For some reason this R-bit might get cleared? */ 2179 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 2180 tp->retrans_out += tcp_skb_pcount(skb); 2181 /* ...enter this if branch just for the first segment */ 2182 flag |= FLAG_DATA_ACKED; 2183 } else { 2184 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 2185 tp->undo_marker = 0; 2186 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 2187 } 2188 2189 /* Marking forward transmissions that were made after RTO lost 2190 * can cause unnecessary retransmissions in some scenarios, 2191 * SACK blocks will mitigate that in some but not in all cases. 2192 * We used to not mark them but it was causing break-ups with 2193 * receivers that do only in-order receival. 2194 * 2195 * TODO: we could detect presence of such receiver and select 2196 * different behavior per flow. 
2197 */ 2198 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 2199 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 2200 tp->lost_out += tcp_skb_pcount(skb); 2201 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; 2202 } 2203 } 2204 tcp_verify_left_out(tp); 2205 2206 tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments; 2207 tp->snd_cwnd_cnt = 0; 2208 tp->snd_cwnd_stamp = tcp_time_stamp; 2209 tp->frto_counter = 0; 2210 tp->bytes_acked = 0; 2211 2212 tp->reordering = min_t(unsigned int, tp->reordering, 2213 sysctl_tcp_reordering); 2214 tcp_set_ca_state(sk, TCP_CA_Loss); 2215 tp->high_seq = tp->snd_nxt; 2216 TCP_ECN_queue_cwr(tp); 2217 2218 tcp_clear_all_retrans_hints(tp); 2219 } 2220 2221 static void tcp_clear_retrans_partial(struct tcp_sock *tp) 2222 { 2223 tp->retrans_out = 0; 2224 tp->lost_out = 0; 2225 2226 tp->undo_marker = 0; 2227 tp->undo_retrans = 0; 2228 } 2229 2230 void tcp_clear_retrans(struct tcp_sock *tp) 2231 { 2232 tcp_clear_retrans_partial(tp); 2233 2234 tp->fackets_out = 0; 2235 tp->sacked_out = 0; 2236 } 2237 2238 /* Enter Loss state. If "how" is not zero, forget all SACK information 2239 * and reset tags completely, otherwise preserve SACKs. If receiver 2240 * dropped its ofo queue, we will know this due to reneging detection. 2241 */ 2242 void tcp_enter_loss(struct sock *sk, int how) 2243 { 2244 const struct inet_connection_sock *icsk = inet_csk(sk); 2245 struct tcp_sock *tp = tcp_sk(sk); 2246 struct sk_buff *skb; 2247 2248 /* Reduce ssthresh if it has not yet been made inside this window. */ 2249 if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || 2250 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 2251 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2252 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 2253 tcp_ca_event(sk, CA_EVENT_LOSS); 2254 } 2255 tp->snd_cwnd = 1; 2256 tp->snd_cwnd_cnt = 0; 2257 tp->snd_cwnd_stamp = tcp_time_stamp; 2258 2259 tp->bytes_acked = 0; 2260 tcp_clear_retrans_partial(tp); 2261 2262 if (tcp_is_reno(tp)) 2263 tcp_reset_reno_sack(tp); 2264 2265 if (!how) { 2266 /* Push undo marker, if it was plain RTO and nothing 2267 * was retransmitted. */ 2268 tp->undo_marker = tp->snd_una; 2269 } else { 2270 tp->sacked_out = 0; 2271 tp->fackets_out = 0; 2272 } 2273 tcp_clear_all_retrans_hints(tp); 2274 2275 tcp_for_write_queue(skb, sk) { 2276 if (skb == tcp_send_head(sk)) 2277 break; 2278 2279 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 2280 tp->undo_marker = 0; 2281 TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; 2282 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { 2283 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; 2284 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 2285 tp->lost_out += tcp_skb_pcount(skb); 2286 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; 2287 } 2288 } 2289 tcp_verify_left_out(tp); 2290 2291 tp->reordering = min_t(unsigned int, tp->reordering, 2292 sysctl_tcp_reordering); 2293 tcp_set_ca_state(sk, TCP_CA_Loss); 2294 tp->high_seq = tp->snd_nxt; 2295 TCP_ECN_queue_cwr(tp); 2296 /* Abort F-RTO algorithm if one is in progress */ 2297 tp->frto_counter = 0; 2298 } 2299 2300 /* If ACK arrived pointing to a remembered SACK, it means that our 2301 * remembered SACKs do not reflect real state of receiver i.e. 2302 * receiver _host_ is heavily congested (or buggy). 2303 * 2304 * Do processing similar to RTO timeout. 
2305 */ 2306 static int tcp_check_sack_reneging(struct sock *sk, int flag) 2307 { 2308 if (flag & FLAG_SACK_RENEGING) { 2309 struct inet_connection_sock *icsk = inet_csk(sk); 2310 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); 2311 2312 tcp_enter_loss(sk, 1); 2313 icsk->icsk_retransmits++; 2314 tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); 2315 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2316 icsk->icsk_rto, TCP_RTO_MAX); 2317 return 1; 2318 } 2319 return 0; 2320 } 2321 2322 static inline int tcp_fackets_out(const struct tcp_sock *tp) 2323 { 2324 return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; 2325 } 2326 2327 /* Heurestics to calculate number of duplicate ACKs. There's no dupACKs 2328 * counter when SACK is enabled (without SACK, sacked_out is used for 2329 * that purpose). 2330 * 2331 * Instead, with FACK TCP uses fackets_out that includes both SACKed 2332 * segments up to the highest received SACK block so far and holes in 2333 * between them. 2334 * 2335 * With reordering, holes may still be in flight, so RFC3517 recovery 2336 * uses pure sacked_out (total number of SACKed segments) even though 2337 * it violates the RFC that uses duplicate ACKs, often these are equal 2338 * but when e.g. out-of-window ACKs or packet duplication occurs, 2339 * they differ. Since neither occurs due to loss, TCP should really 2340 * ignore them. 2341 */ 2342 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) 2343 { 2344 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; 2345 } 2346 2347 static bool tcp_pause_early_retransmit(struct sock *sk, int flag) 2348 { 2349 struct tcp_sock *tp = tcp_sk(sk); 2350 unsigned long delay; 2351 2352 /* Delay early retransmit and entering fast recovery for 2353 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples 2354 * available, or RTO is scheduled to fire first. 2355 */ 2356 if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt) 2357 return false; 2358 2359 delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2)); 2360 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) 2361 return false; 2362 2363 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX); 2364 tp->early_retrans_delayed = 1; 2365 return true; 2366 } 2367 2368 static inline int tcp_skb_timedout(const struct sock *sk, 2369 const struct sk_buff *skb) 2370 { 2371 return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto; 2372 } 2373 2374 static inline int tcp_head_timedout(const struct sock *sk) 2375 { 2376 const struct tcp_sock *tp = tcp_sk(sk); 2377 2378 return tp->packets_out && 2379 tcp_skb_timedout(sk, tcp_write_queue_head(sk)); 2380 } 2381 2382 /* Linux NewReno/SACK/FACK/ECN state machine. 2383 * -------------------------------------- 2384 * 2385 * "Open" Normal state, no dubious events, fast path. 2386 * "Disorder" In all the respects it is "Open", 2387 * but requires a bit more attention. It is entered when 2388 * we see some SACKs or dupacks. It is split of "Open" 2389 * mainly to move some processing from fast path to slow one. 2390 * "CWR" CWND was reduced due to some Congestion Notification event. 2391 * It can be ECN, ICMP source quench, local device congestion. 2392 * "Recovery" CWND was reduced, we are fast-retransmitting. 2393 * "Loss" CWND was reduced due to RTO timeout or SACK reneging. 
2394 * 2395 * tcp_fastretrans_alert() is entered: 2396 * - each incoming ACK, if state is not "Open" 2397 * - when arrived ACK is unusual, namely: 2398 * * SACK 2399 * * Duplicate ACK. 2400 * * ECN ECE. 2401 * 2402 * Counting packets in flight is pretty simple. 2403 * 2404 * in_flight = packets_out - left_out + retrans_out 2405 * 2406 * packets_out is SND.NXT-SND.UNA counted in packets. 2407 * 2408 * retrans_out is number of retransmitted segments. 2409 * 2410 * left_out is number of segments left network, but not ACKed yet. 2411 * 2412 * left_out = sacked_out + lost_out 2413 * 2414 * sacked_out: Packets, which arrived to receiver out of order 2415 * and hence not ACKed. With SACKs this number is simply 2416 * amount of SACKed data. Even without SACKs 2417 * it is easy to give pretty reliable estimate of this number, 2418 * counting duplicate ACKs. 2419 * 2420 * lost_out: Packets lost by network. TCP has no explicit 2421 * "loss notification" feedback from network (for now). 2422 * It means that this number can be only _guessed_. 2423 * Actually, it is the heuristics to predict lossage that 2424 * distinguishes different algorithms. 2425 * 2426 * F.e. after RTO, when all the queue is considered as lost, 2427 * lost_out = packets_out and in_flight = retrans_out. 2428 * 2429 * Essentially, we have now two algorithms counting 2430 * lost packets. 2431 * 2432 * FACK: It is the simplest heuristics. As soon as we decided 2433 * that something is lost, we decide that _all_ not SACKed 2434 * packets until the most forward SACK are lost. I.e. 2435 * lost_out = fackets_out - sacked_out and left_out = fackets_out. 2436 * It is absolutely correct estimate, if network does not reorder 2437 * packets. And it loses any connection to reality when reordering 2438 * takes place. We use FACK by default until reordering 2439 * is suspected on the path to this destination. 2440 * 2441 * NewReno: when Recovery is entered, we assume that one segment 2442 * is lost (classic Reno). While we are in Recovery and 2443 * a partial ACK arrives, we assume that one more packet 2444 * is lost (NewReno). This heuristics are the same in NewReno 2445 * and SACK. 2446 * 2447 * Imagine, that's all! Forget about all this shamanism about CWND inflation 2448 * deflation etc. CWND is real congestion window, never inflated, changes 2449 * only according to classic VJ rules. 2450 * 2451 * Really tricky (and requiring careful tuning) part of algorithm 2452 * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue(). 2453 * The first determines the moment _when_ we should reduce CWND and, 2454 * hence, slow down forward transmission. In fact, it determines the moment 2455 * when we decide that hole is caused by loss, rather than by a reorder. 2456 * 2457 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill 2458 * holes, caused by lost packets. 2459 * 2460 * And the most logically complicated part of algorithm is undo 2461 * heuristics. We detect false retransmits due to both too early 2462 * fast retransmit (reordering) and underestimated RTO, analyzing 2463 * timestamps and D-SACKs. When we detect that some segments were 2464 * retransmitted by mistake and CWND reduction was wrong, we undo 2465 * window reduction and abort recovery phase. This logic is hidden 2466 * inside several functions named tcp_try_undo_<something>. 2467 */ 2468 2469 /* This function decides, when we should leave Disordered state 2470 * and enter Recovery phase, reducing congestion window. 
2471 * 2472 * Main question: may we further continue forward transmission 2473 * with the same cwnd? 2474 */ 2475 static int tcp_time_to_recover(struct sock *sk, int flag) 2476 { 2477 struct tcp_sock *tp = tcp_sk(sk); 2478 __u32 packets_out; 2479 2480 /* Do not perform any recovery during F-RTO algorithm */ 2481 if (tp->frto_counter) 2482 return 0; 2483 2484 /* Trick#1: The loss is proven. */ 2485 if (tp->lost_out) 2486 return 1; 2487 2488 /* Not-A-Trick#2 : Classic rule... */ 2489 if (tcp_dupack_heuristics(tp) > tp->reordering) 2490 return 1; 2491 2492 /* Trick#3 : when we use RFC2988 timer restart, fast 2493 * retransmit can be triggered by timeout of queue head. 2494 */ 2495 if (tcp_is_fack(tp) && tcp_head_timedout(sk)) 2496 return 1; 2497 2498 /* Trick#4: It is still not OK... But will it be useful to delay 2499 * recovery more? 2500 */ 2501 packets_out = tp->packets_out; 2502 if (packets_out <= tp->reordering && 2503 tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && 2504 !tcp_may_send_now(sk)) { 2505 /* We have nothing to send. This connection is limited 2506 * either by receiver window or by application. 2507 */ 2508 return 1; 2509 } 2510 2511 /* If a thin stream is detected, retransmit after first 2512 * received dupack. Employ only if SACK is supported in order 2513 * to avoid possible corner-case series of spurious retransmissions 2514 * Use only if there are no unsent data. 2515 */ 2516 if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && 2517 tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && 2518 tcp_is_sack(tp) && !tcp_send_head(sk)) 2519 return 1; 2520 2521 /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious 2522 * retransmissions due to small network reorderings, we implement 2523 * Mitigation A.3 in the RFC and delay the retransmission for a short 2524 * interval if appropriate. 2525 */ 2526 if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && 2527 (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) && 2528 !tcp_may_send_now(sk)) 2529 return !tcp_pause_early_retransmit(sk, flag); 2530 2531 return 0; 2532 } 2533 2534 /* New heuristics: it is possible only after we switched to restart timer 2535 * each time when something is ACKed. Hence, we can detect timed out packets 2536 * during fast retransmit without falling to slow start. 2537 * 2538 * Usefulness of this as is very questionable, since we should know which of 2539 * the segments is the next to timeout which is relatively expensive to find 2540 * in general case unless we add some data structure just for that. The 2541 * current approach certainly won't find the right one too often and when it 2542 * finally does find _something_ it usually marks large part of the window 2543 * right away (because a retransmission with a larger timestamp blocks the 2544 * loop from advancing). -ij 2545 */ 2546 static void tcp_timeout_skbs(struct sock *sk) 2547 { 2548 struct tcp_sock *tp = tcp_sk(sk); 2549 struct sk_buff *skb; 2550 2551 if (!tcp_is_fack(tp) || !tcp_head_timedout(sk)) 2552 return; 2553 2554 skb = tp->scoreboard_skb_hint; 2555 if (tp->scoreboard_skb_hint == NULL) 2556 skb = tcp_write_queue_head(sk); 2557 2558 tcp_for_write_queue_from(skb, sk) { 2559 if (skb == tcp_send_head(sk)) 2560 break; 2561 if (!tcp_skb_timedout(sk, skb)) 2562 break; 2563 2564 tcp_skb_mark_lost(tp, skb); 2565 } 2566 2567 tp->scoreboard_skb_hint = skb; 2568 2569 tcp_verify_left_out(tp); 2570 } 2571 2572 /* Detect loss in event "A" above by marking head of queue up as lost. 
 * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
 * are considered lost. For RFC3517 SACK, a segment is considered lost if it
 * has at least tp->reordering SACKed segments above it; "packets" refers to
 * the maximum SACKed segments to pass before reaching this limit.
 */
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int cnt, oldcnt;
	int err;
	unsigned int mss;
	/* Use SACK to deduce losses of new sequences sent during recovery */
	const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;

	WARN_ON(packets > tp->packets_out);
	if (tp->lost_skb_hint) {
		skb = tp->lost_skb_hint;
		cnt = tp->lost_cnt_hint;
		/* Head already handled? */
		if (mark_head && skb != tcp_write_queue_head(sk))
			return;
	} else {
		skb = tcp_write_queue_head(sk);
		cnt = 0;
	}

	tcp_for_write_queue_from(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		/* TODO: do this better */
		/* this is not the most efficient way to do this... */
		tp->lost_skb_hint = skb;
		tp->lost_cnt_hint = cnt;

		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
			break;

		oldcnt = cnt;
		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
			cnt += tcp_skb_pcount(skb);

		if (cnt > packets) {
			if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
			    (oldcnt >= packets))
				break;

			mss = skb_shinfo(skb)->gso_size;
			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
			if (err < 0)
				break;
			cnt = packets;
		}

		tcp_skb_mark_lost(tp, skb);

		if (mark_head)
			break;
	}
	tcp_verify_left_out(tp);
}

/* Account newly detected lost packet(s) */

static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_is_reno(tp)) {
		tcp_mark_head_lost(sk, 1, 1);
	} else if (tcp_is_fack(tp)) {
		int lost = tp->fackets_out - tp->reordering;
		if (lost <= 0)
			lost = 1;
		tcp_mark_head_lost(sk, lost, 0);
	} else {
		int sacked_upto = tp->sacked_out - tp->reordering;
		if (sacked_upto >= 0)
			tcp_mark_head_lost(sk, sacked_upto, 0);
		else if (fast_rexmit)
			tcp_mark_head_lost(sk, 1, 1);
	}

	tcp_timeout_skbs(sk);
}

/* CWND moderation, preventing bursts due to too big ACKs
 * in dubious situations.
 */
static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
{
	tp->snd_cwnd = min(tp->snd_cwnd,
			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

/* The lower bound on the congestion window is the slow start threshold,
 * unless the congestion avoidance choice decides to override it.
 */
static inline u32 tcp_cwnd_min(const struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;

	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
}

/* Decrease cwnd for every second ACK.
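 *
 * (Added note: the snd_cwnd_cnt bookkeeping below removes one segment from
 * cwnd for every second qualifying ACK, i.e. classic rate halving; the
 * decrement is skipped once cwnd has fallen to tcp_cwnd_min(), and cwnd is
 * additionally clamped to packets in flight + 1.)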
*/ 2682 static void tcp_cwnd_down(struct sock *sk, int flag) 2683 { 2684 struct tcp_sock *tp = tcp_sk(sk); 2685 int decr = tp->snd_cwnd_cnt + 1; 2686 2687 if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) || 2688 (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) { 2689 tp->snd_cwnd_cnt = decr & 1; 2690 decr >>= 1; 2691 2692 if (decr && tp->snd_cwnd > tcp_cwnd_min(sk)) 2693 tp->snd_cwnd -= decr; 2694 2695 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); 2696 tp->snd_cwnd_stamp = tcp_time_stamp; 2697 } 2698 } 2699 2700 /* Nothing was retransmitted or returned timestamp is less 2701 * than timestamp of the first retransmission. 2702 */ 2703 static inline int tcp_packet_delayed(const struct tcp_sock *tp) 2704 { 2705 return !tp->retrans_stamp || 2706 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2707 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp)); 2708 } 2709 2710 /* Undo procedures. */ 2711 2712 #if FASTRETRANS_DEBUG > 1 2713 static void DBGUNDO(struct sock *sk, const char *msg) 2714 { 2715 struct tcp_sock *tp = tcp_sk(sk); 2716 struct inet_sock *inet = inet_sk(sk); 2717 2718 if (sk->sk_family == AF_INET) { 2719 printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", 2720 msg, 2721 &inet->inet_daddr, ntohs(inet->inet_dport), 2722 tp->snd_cwnd, tcp_left_out(tp), 2723 tp->snd_ssthresh, tp->prior_ssthresh, 2724 tp->packets_out); 2725 } 2726 #if IS_ENABLED(CONFIG_IPV6) 2727 else if (sk->sk_family == AF_INET6) { 2728 struct ipv6_pinfo *np = inet6_sk(sk); 2729 printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", 2730 msg, 2731 &np->daddr, ntohs(inet->inet_dport), 2732 tp->snd_cwnd, tcp_left_out(tp), 2733 tp->snd_ssthresh, tp->prior_ssthresh, 2734 tp->packets_out); 2735 } 2736 #endif 2737 } 2738 #else 2739 #define DBGUNDO(x...) do { } while (0) 2740 #endif 2741 2742 static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh) 2743 { 2744 struct tcp_sock *tp = tcp_sk(sk); 2745 2746 if (tp->prior_ssthresh) { 2747 const struct inet_connection_sock *icsk = inet_csk(sk); 2748 2749 if (icsk->icsk_ca_ops->undo_cwnd) 2750 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); 2751 else 2752 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); 2753 2754 if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) { 2755 tp->snd_ssthresh = tp->prior_ssthresh; 2756 TCP_ECN_withdraw_cwr(tp); 2757 } 2758 } else { 2759 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); 2760 } 2761 tp->snd_cwnd_stamp = tcp_time_stamp; 2762 } 2763 2764 static inline int tcp_may_undo(const struct tcp_sock *tp) 2765 { 2766 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); 2767 } 2768 2769 /* People celebrate: "We love our President!" */ 2770 static int tcp_try_undo_recovery(struct sock *sk) 2771 { 2772 struct tcp_sock *tp = tcp_sk(sk); 2773 2774 if (tcp_may_undo(tp)) { 2775 int mib_idx; 2776 2777 /* Happy end! We did not retransmit anything 2778 * or our original transmission succeeded. 2779 */ 2780 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); 2781 tcp_undo_cwr(sk, true); 2782 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) 2783 mib_idx = LINUX_MIB_TCPLOSSUNDO; 2784 else 2785 mib_idx = LINUX_MIB_TCPFULLUNDO; 2786 2787 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2788 tp->undo_marker = 0; 2789 } 2790 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { 2791 /* Hold old state until something *above* high_seq 2792 * is ACKed. For Reno it is MUST to prevent false 2793 * fast retransmits (RFC2582). SACK TCP is safe. 
*/ 2794 tcp_moderate_cwnd(tp); 2795 return 1; 2796 } 2797 tcp_set_ca_state(sk, TCP_CA_Open); 2798 return 0; 2799 } 2800 2801 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ 2802 static void tcp_try_undo_dsack(struct sock *sk) 2803 { 2804 struct tcp_sock *tp = tcp_sk(sk); 2805 2806 if (tp->undo_marker && !tp->undo_retrans) { 2807 DBGUNDO(sk, "D-SACK"); 2808 tcp_undo_cwr(sk, true); 2809 tp->undo_marker = 0; 2810 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); 2811 } 2812 } 2813 2814 /* We can clear retrans_stamp when there are no retransmissions in the 2815 * window. It would seem that it is trivially available for us in 2816 * tp->retrans_out, however, that kind of assumptions doesn't consider 2817 * what will happen if errors occur when sending retransmission for the 2818 * second time. ...It could the that such segment has only 2819 * TCPCB_EVER_RETRANS set at the present time. It seems that checking 2820 * the head skb is enough except for some reneging corner cases that 2821 * are not worth the effort. 2822 * 2823 * Main reason for all this complexity is the fact that connection dying 2824 * time now depends on the validity of the retrans_stamp, in particular, 2825 * that successive retransmissions of a segment must not advance 2826 * retrans_stamp under any conditions. 2827 */ 2828 static int tcp_any_retrans_done(const struct sock *sk) 2829 { 2830 const struct tcp_sock *tp = tcp_sk(sk); 2831 struct sk_buff *skb; 2832 2833 if (tp->retrans_out) 2834 return 1; 2835 2836 skb = tcp_write_queue_head(sk); 2837 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) 2838 return 1; 2839 2840 return 0; 2841 } 2842 2843 /* Undo during fast recovery after partial ACK. */ 2844 2845 static int tcp_try_undo_partial(struct sock *sk, int acked) 2846 { 2847 struct tcp_sock *tp = tcp_sk(sk); 2848 /* Partial ACK arrived. Force Hoe's retransmit. */ 2849 int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering); 2850 2851 if (tcp_may_undo(tp)) { 2852 /* Plain luck! Hole if filled with delayed 2853 * packet, rather than with a retransmit. 2854 */ 2855 if (!tcp_any_retrans_done(sk)) 2856 tp->retrans_stamp = 0; 2857 2858 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); 2859 2860 DBGUNDO(sk, "Hoe"); 2861 tcp_undo_cwr(sk, false); 2862 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); 2863 2864 /* So... Do not make Hoe's retransmit yet. 2865 * If the first packet was delayed, the rest 2866 * ones are most probably delayed as well. 2867 */ 2868 failed = 0; 2869 } 2870 return failed; 2871 } 2872 2873 /* Undo during loss recovery after partial ACK. */ 2874 static int tcp_try_undo_loss(struct sock *sk) 2875 { 2876 struct tcp_sock *tp = tcp_sk(sk); 2877 2878 if (tcp_may_undo(tp)) { 2879 struct sk_buff *skb; 2880 tcp_for_write_queue(skb, sk) { 2881 if (skb == tcp_send_head(sk)) 2882 break; 2883 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 2884 } 2885 2886 tcp_clear_all_retrans_hints(tp); 2887 2888 DBGUNDO(sk, "partial loss"); 2889 tp->lost_out = 0; 2890 tcp_undo_cwr(sk, true); 2891 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); 2892 inet_csk(sk)->icsk_retransmits = 0; 2893 tp->undo_marker = 0; 2894 if (tcp_is_sack(tp)) 2895 tcp_set_ca_state(sk, TCP_CA_Open); 2896 return 1; 2897 } 2898 return 0; 2899 } 2900 2901 static inline void tcp_complete_cwr(struct sock *sk) 2902 { 2903 struct tcp_sock *tp = tcp_sk(sk); 2904 2905 /* Do not moderate cwnd if it's already undone in cwr or recovery. 
*/ 2906 if (tp->undo_marker) { 2907 if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) { 2908 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 2909 tp->snd_cwnd_stamp = tcp_time_stamp; 2910 } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) { 2911 /* PRR algorithm. */ 2912 tp->snd_cwnd = tp->snd_ssthresh; 2913 tp->snd_cwnd_stamp = tcp_time_stamp; 2914 } 2915 } 2916 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2917 } 2918 2919 static void tcp_try_keep_open(struct sock *sk) 2920 { 2921 struct tcp_sock *tp = tcp_sk(sk); 2922 int state = TCP_CA_Open; 2923 2924 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) 2925 state = TCP_CA_Disorder; 2926 2927 if (inet_csk(sk)->icsk_ca_state != state) { 2928 tcp_set_ca_state(sk, state); 2929 tp->high_seq = tp->snd_nxt; 2930 } 2931 } 2932 2933 static void tcp_try_to_open(struct sock *sk, int flag) 2934 { 2935 struct tcp_sock *tp = tcp_sk(sk); 2936 2937 tcp_verify_left_out(tp); 2938 2939 if (!tp->frto_counter && !tcp_any_retrans_done(sk)) 2940 tp->retrans_stamp = 0; 2941 2942 if (flag & FLAG_ECE) 2943 tcp_enter_cwr(sk, 1); 2944 2945 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { 2946 tcp_try_keep_open(sk); 2947 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 2948 tcp_moderate_cwnd(tp); 2949 } else { 2950 tcp_cwnd_down(sk, flag); 2951 } 2952 } 2953 2954 static void tcp_mtup_probe_failed(struct sock *sk) 2955 { 2956 struct inet_connection_sock *icsk = inet_csk(sk); 2957 2958 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; 2959 icsk->icsk_mtup.probe_size = 0; 2960 } 2961 2962 static void tcp_mtup_probe_success(struct sock *sk) 2963 { 2964 struct tcp_sock *tp = tcp_sk(sk); 2965 struct inet_connection_sock *icsk = inet_csk(sk); 2966 2967 /* FIXME: breaks with very large cwnd */ 2968 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2969 tp->snd_cwnd = tp->snd_cwnd * 2970 tcp_mss_to_mtu(sk, tp->mss_cache) / 2971 icsk->icsk_mtup.probe_size; 2972 tp->snd_cwnd_cnt = 0; 2973 tp->snd_cwnd_stamp = tcp_time_stamp; 2974 tp->snd_ssthresh = tcp_current_ssthresh(sk); 2975 2976 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; 2977 icsk->icsk_mtup.probe_size = 0; 2978 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 2979 } 2980 2981 /* Do a simple retransmit without using the backoff mechanisms in 2982 * tcp_timer. This is used for path mtu discovery. 2983 * The socket is already locked here. 2984 */ 2985 void tcp_simple_retransmit(struct sock *sk) 2986 { 2987 const struct inet_connection_sock *icsk = inet_csk(sk); 2988 struct tcp_sock *tp = tcp_sk(sk); 2989 struct sk_buff *skb; 2990 unsigned int mss = tcp_current_mss(sk); 2991 u32 prior_lost = tp->lost_out; 2992 2993 tcp_for_write_queue(skb, sk) { 2994 if (skb == tcp_send_head(sk)) 2995 break; 2996 if (tcp_skb_seglen(skb) > mss && 2997 !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 2998 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2999 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 3000 tp->retrans_out -= tcp_skb_pcount(skb); 3001 } 3002 tcp_skb_mark_lost_uncond_verify(tp, skb); 3003 } 3004 } 3005 3006 tcp_clear_retrans_hints_partial(tp); 3007 3008 if (prior_lost == tp->lost_out) 3009 return; 3010 3011 if (tcp_is_reno(tp)) 3012 tcp_limit_reno_sacked(tp); 3013 3014 tcp_verify_left_out(tp); 3015 3016 /* Don't muck with the congestion window here. 3017 * Reason is that we do not increase amount of _data_ 3018 * in network, but units changed and effective 3019 * cwnd/ssthresh really reduced now. 
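	 *
	 * (Illustrative example, hypothetical numbers: if path MTU discovery
	 * drops the MTU from 1500 to 1400, the same snd_cwnd now covers
	 * fewer bytes once the queue is re-segmented, so the window has
	 * effectively shrunk even though snd_cwnd itself is not touched.)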
 */
	if (icsk->icsk_ca_state != TCP_CA_Loss) {
		tp->high_seq = tp->snd_nxt;
		tp->snd_ssthresh = tcp_current_ssthresh(sk);
		tp->prior_ssthresh = 0;
		tp->undo_marker = 0;
		tcp_set_ca_state(sk, TCP_CA_Loss);
	}
	tcp_xmit_retransmit_queue(sk);
}
EXPORT_SYMBOL(tcp_simple_retransmit);

/* This function implements the PRR algorithm, specifically PRR-SSRB
 * (proportional rate reduction with slow start reduction bound) as described
 * in
 * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
 * It computes the number of packets to send (sndcnt) based on packets newly
 * delivered:
 *   1) If the number of packets in flight is larger than ssthresh, PRR
 *      spreads the cwnd reductions across a full RTT.
 *   2) If the number of packets in flight is lower than ssthresh (such as
 *      due to excess losses and/or application stalls), do not perform any
 *      further cwnd reductions, but instead slow start up to ssthresh.
 */
static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
					int fast_rexmit, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int sndcnt = 0;
	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);

	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
			       tp->prior_cwnd - 1;
		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
	} else {
		sndcnt = min_t(int, delta,
			       max_t(int, tp->prr_delivered - tp->prr_out,
				     newly_acked_sacked) + 1);
	}

	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
}

static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mib_idx;

	if (tcp_is_reno(tp))
		mib_idx = LINUX_MIB_TCPRENORECOVERY;
	else
		mib_idx = LINUX_MIB_TCPSACKRECOVERY;

	NET_INC_STATS_BH(sock_net(sk), mib_idx);

	tp->high_seq = tp->snd_nxt;
	tp->prior_ssthresh = 0;
	tp->undo_marker = tp->snd_una;
	tp->undo_retrans = tp->retrans_out;

	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
		if (!ece_ack)
			tp->prior_ssthresh = tcp_current_ssthresh(sk);
		tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
		TCP_ECN_queue_cwr(tp);
	}

	tp->bytes_acked = 0;
	tp->snd_cwnd_cnt = 0;
	tp->prior_cwnd = tp->snd_cwnd;
	tp->prr_delivered = 0;
	tp->prr_out = 0;
	tcp_set_ca_state(sk, TCP_CA_Recovery);
}

/* Process an event which can update packets-in-flight non-trivially.
 * The main goal of this function is to calculate a new estimate for
 * left_out, taking into account both packets sitting in the receiver's
 * buffer and packets lost by the network.
 *
 * Besides that, it performs CWND reduction when packet loss is detected
 * and changes the state of the machine.
 *
 * It does _not_ decide what to send; that is done in
 * tcp_xmit_retransmit_queue().
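 *
 * (Added PRR example, hypothetical numbers: with snd_ssthresh = 5,
 * prior_cwnd = 10 and prr_out = 0, an ACK that brings prr_delivered to 2
 * while packets in flight are still above ssthresh gives
 * tcp_update_cwnd_in_recovery() sndcnt = (5 * 2 + 10 - 1) / 10 - 0 = 1,
 * i.e. roughly one new packet may go out for every two delivered.)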
3106 */ 3107 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, 3108 int newly_acked_sacked, bool is_dupack, 3109 int flag) 3110 { 3111 struct inet_connection_sock *icsk = inet_csk(sk); 3112 struct tcp_sock *tp = tcp_sk(sk); 3113 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 3114 (tcp_fackets_out(tp) > tp->reordering)); 3115 int fast_rexmit = 0; 3116 3117 if (WARN_ON(!tp->packets_out && tp->sacked_out)) 3118 tp->sacked_out = 0; 3119 if (WARN_ON(!tp->sacked_out && tp->fackets_out)) 3120 tp->fackets_out = 0; 3121 3122 /* Now state machine starts. 3123 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ 3124 if (flag & FLAG_ECE) 3125 tp->prior_ssthresh = 0; 3126 3127 /* B. In all the states check for reneging SACKs. */ 3128 if (tcp_check_sack_reneging(sk, flag)) 3129 return; 3130 3131 /* C. Check consistency of the current state. */ 3132 tcp_verify_left_out(tp); 3133 3134 /* D. Check state exit conditions. State can be terminated 3135 * when high_seq is ACKed. */ 3136 if (icsk->icsk_ca_state == TCP_CA_Open) { 3137 WARN_ON(tp->retrans_out != 0); 3138 tp->retrans_stamp = 0; 3139 } else if (!before(tp->snd_una, tp->high_seq)) { 3140 switch (icsk->icsk_ca_state) { 3141 case TCP_CA_Loss: 3142 icsk->icsk_retransmits = 0; 3143 if (tcp_try_undo_recovery(sk)) 3144 return; 3145 break; 3146 3147 case TCP_CA_CWR: 3148 /* CWR is to be held something *above* high_seq 3149 * is ACKed for CWR bit to reach receiver. */ 3150 if (tp->snd_una != tp->high_seq) { 3151 tcp_complete_cwr(sk); 3152 tcp_set_ca_state(sk, TCP_CA_Open); 3153 } 3154 break; 3155 3156 case TCP_CA_Recovery: 3157 if (tcp_is_reno(tp)) 3158 tcp_reset_reno_sack(tp); 3159 if (tcp_try_undo_recovery(sk)) 3160 return; 3161 tcp_complete_cwr(sk); 3162 break; 3163 } 3164 } 3165 3166 /* E. Process state. */ 3167 switch (icsk->icsk_ca_state) { 3168 case TCP_CA_Recovery: 3169 if (!(flag & FLAG_SND_UNA_ADVANCED)) { 3170 if (tcp_is_reno(tp) && is_dupack) 3171 tcp_add_reno_sack(sk); 3172 } else 3173 do_lost = tcp_try_undo_partial(sk, pkts_acked); 3174 break; 3175 case TCP_CA_Loss: 3176 if (flag & FLAG_DATA_ACKED) 3177 icsk->icsk_retransmits = 0; 3178 if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED) 3179 tcp_reset_reno_sack(tp); 3180 if (!tcp_try_undo_loss(sk)) { 3181 tcp_moderate_cwnd(tp); 3182 tcp_xmit_retransmit_queue(sk); 3183 return; 3184 } 3185 if (icsk->icsk_ca_state != TCP_CA_Open) 3186 return; 3187 /* Loss is undone; fall through to processing in Open state. 
*/ 3188 default: 3189 if (tcp_is_reno(tp)) { 3190 if (flag & FLAG_SND_UNA_ADVANCED) 3191 tcp_reset_reno_sack(tp); 3192 if (is_dupack) 3193 tcp_add_reno_sack(sk); 3194 } 3195 3196 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 3197 tcp_try_undo_dsack(sk); 3198 3199 if (!tcp_time_to_recover(sk, flag)) { 3200 tcp_try_to_open(sk, flag); 3201 return; 3202 } 3203 3204 /* MTU probe failure: don't reduce cwnd */ 3205 if (icsk->icsk_ca_state < TCP_CA_CWR && 3206 icsk->icsk_mtup.probe_size && 3207 tp->snd_una == tp->mtu_probe.probe_seq_start) { 3208 tcp_mtup_probe_failed(sk); 3209 /* Restores the reduction we did in tcp_mtup_probe() */ 3210 tp->snd_cwnd++; 3211 tcp_simple_retransmit(sk); 3212 return; 3213 } 3214 3215 /* Otherwise enter Recovery state */ 3216 tcp_enter_recovery(sk, (flag & FLAG_ECE)); 3217 fast_rexmit = 1; 3218 } 3219 3220 if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) 3221 tcp_update_scoreboard(sk, fast_rexmit); 3222 tp->prr_delivered += newly_acked_sacked; 3223 tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag); 3224 tcp_xmit_retransmit_queue(sk); 3225 } 3226 3227 void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt) 3228 { 3229 tcp_rtt_estimator(sk, seq_rtt); 3230 tcp_set_rto(sk); 3231 inet_csk(sk)->icsk_backoff = 0; 3232 } 3233 EXPORT_SYMBOL(tcp_valid_rtt_meas); 3234 3235 /* Read draft-ietf-tcplw-high-performance before mucking 3236 * with this code. (Supersedes RFC1323) 3237 */ 3238 static void tcp_ack_saw_tstamp(struct sock *sk, int flag) 3239 { 3240 /* RTTM Rule: A TSecr value received in a segment is used to 3241 * update the averaged RTT measurement only if the segment 3242 * acknowledges some new data, i.e., only if it advances the 3243 * left edge of the send window. 3244 * 3245 * See draft-ietf-tcplw-high-performance-00, section 3.3. 3246 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru> 3247 * 3248 * Changed: reset backoff as soon as we see the first valid sample. 3249 * If we do not, we get strongly overestimated rto. With timestamps 3250 * samples are accepted even from very old segments: f.e., when rtt=1 3251 * increases to 8, we retransmit 5 times and after 8 seconds delayed 3252 * answer arrives rto becomes 120 seconds! If at least one of segments 3253 * in window is lost... Voila. --ANK (010210) 3254 */ 3255 struct tcp_sock *tp = tcp_sk(sk); 3256 3257 tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr); 3258 } 3259 3260 static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag) 3261 { 3262 /* We don't have a timestamp. Can only use 3263 * packets that are not retransmitted to determine 3264 * rtt estimates. Also, we must not reset the 3265 * backoff for rto until we get a non-retransmitted 3266 * packet. This allows us to deal with a situation 3267 * where the network delay has increased suddenly. 3268 * I.e. Karn's algorithm. (SIGCOMM '87, p5.) 3269 */ 3270 3271 if (flag & FLAG_RETRANS_DATA_ACKED) 3272 return; 3273 3274 tcp_valid_rtt_meas(sk, seq_rtt); 3275 } 3276 3277 static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, 3278 const s32 seq_rtt) 3279 { 3280 const struct tcp_sock *tp = tcp_sk(sk); 3281 /* Note that peer MAY send zero echo. In this case it is ignored. 
(rfc1323) */ 3282 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 3283 tcp_ack_saw_tstamp(sk, flag); 3284 else if (seq_rtt >= 0) 3285 tcp_ack_no_tstamp(sk, seq_rtt, flag); 3286 } 3287 3288 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) 3289 { 3290 const struct inet_connection_sock *icsk = inet_csk(sk); 3291 icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight); 3292 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; 3293 } 3294 3295 /* Restart timer after forward progress on connection. 3296 * RFC2988 recommends to restart timer to now+rto. 3297 */ 3298 void tcp_rearm_rto(struct sock *sk) 3299 { 3300 struct tcp_sock *tp = tcp_sk(sk); 3301 3302 if (!tp->packets_out) { 3303 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 3304 } else { 3305 u32 rto = inet_csk(sk)->icsk_rto; 3306 /* Offset the time elapsed after installing regular RTO */ 3307 if (tp->early_retrans_delayed) { 3308 struct sk_buff *skb = tcp_write_queue_head(sk); 3309 const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; 3310 s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); 3311 /* delta may not be positive if the socket is locked 3312 * when the delayed ER timer fires and is rescheduled. 3313 */ 3314 if (delta > 0) 3315 rto = delta; 3316 } 3317 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, 3318 TCP_RTO_MAX); 3319 } 3320 tp->early_retrans_delayed = 0; 3321 } 3322 3323 /* This function is called when the delayed ER timer fires. TCP enters 3324 * fast recovery and performs fast-retransmit. 3325 */ 3326 void tcp_resume_early_retransmit(struct sock *sk) 3327 { 3328 struct tcp_sock *tp = tcp_sk(sk); 3329 3330 tcp_rearm_rto(sk); 3331 3332 /* Stop if ER is disabled after the delayed ER timer is scheduled */ 3333 if (!tp->do_early_retrans) 3334 return; 3335 3336 tcp_enter_recovery(sk, false); 3337 tcp_update_scoreboard(sk, 1); 3338 tcp_xmit_retransmit_queue(sk); 3339 } 3340 3341 /* If we get here, the whole TSO packet has not been acked. */ 3342 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) 3343 { 3344 struct tcp_sock *tp = tcp_sk(sk); 3345 u32 packets_acked; 3346 3347 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); 3348 3349 packets_acked = tcp_skb_pcount(skb); 3350 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 3351 return 0; 3352 packets_acked -= tcp_skb_pcount(skb); 3353 3354 if (packets_acked) { 3355 BUG_ON(tcp_skb_pcount(skb) == 0); 3356 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); 3357 } 3358 3359 return packets_acked; 3360 } 3361 3362 /* Remove acknowledged frames from the retransmission queue. If our packet 3363 * is before the ack sequence we can discard it as it's confirmed to have 3364 * arrived at the other end. 
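 *
 * (Added note: per Karn's algorithm the loop below takes no RTT sample
 * from an skb carrying TCPCB_RETRANS, since such an ACK is ambiguous;
 * seq_rtt and ca_seq_rtt are reset to -1 in that case.)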
3365 */ 3366 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets, 3367 u32 prior_snd_una) 3368 { 3369 struct tcp_sock *tp = tcp_sk(sk); 3370 const struct inet_connection_sock *icsk = inet_csk(sk); 3371 struct sk_buff *skb; 3372 u32 now = tcp_time_stamp; 3373 int fully_acked = 1; 3374 int flag = 0; 3375 u32 pkts_acked = 0; 3376 u32 reord = tp->packets_out; 3377 u32 prior_sacked = tp->sacked_out; 3378 s32 seq_rtt = -1; 3379 s32 ca_seq_rtt = -1; 3380 ktime_t last_ackt = net_invalid_timestamp(); 3381 3382 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { 3383 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 3384 u32 acked_pcount; 3385 u8 sacked = scb->sacked; 3386 3387 /* Determine how many packets and what bytes were acked, tso and else */ 3388 if (after(scb->end_seq, tp->snd_una)) { 3389 if (tcp_skb_pcount(skb) == 1 || 3390 !after(tp->snd_una, scb->seq)) 3391 break; 3392 3393 acked_pcount = tcp_tso_acked(sk, skb); 3394 if (!acked_pcount) 3395 break; 3396 3397 fully_acked = 0; 3398 } else { 3399 acked_pcount = tcp_skb_pcount(skb); 3400 } 3401 3402 if (sacked & TCPCB_RETRANS) { 3403 if (sacked & TCPCB_SACKED_RETRANS) 3404 tp->retrans_out -= acked_pcount; 3405 flag |= FLAG_RETRANS_DATA_ACKED; 3406 ca_seq_rtt = -1; 3407 seq_rtt = -1; 3408 if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1)) 3409 flag |= FLAG_NONHEAD_RETRANS_ACKED; 3410 } else { 3411 ca_seq_rtt = now - scb->when; 3412 last_ackt = skb->tstamp; 3413 if (seq_rtt < 0) { 3414 seq_rtt = ca_seq_rtt; 3415 } 3416 if (!(sacked & TCPCB_SACKED_ACKED)) 3417 reord = min(pkts_acked, reord); 3418 } 3419 3420 if (sacked & TCPCB_SACKED_ACKED) 3421 tp->sacked_out -= acked_pcount; 3422 if (sacked & TCPCB_LOST) 3423 tp->lost_out -= acked_pcount; 3424 3425 tp->packets_out -= acked_pcount; 3426 pkts_acked += acked_pcount; 3427 3428 /* Initial outgoing SYN's get put onto the write_queue 3429 * just like anything else we transmit. It is not 3430 * true data, and if we misinform our callers that 3431 * this ACK acks real data, we will erroneously exit 3432 * connection startup slow start one packet too 3433 * quickly. This is severely frowned upon behavior. 3434 */ 3435 if (!(scb->tcp_flags & TCPHDR_SYN)) { 3436 flag |= FLAG_DATA_ACKED; 3437 } else { 3438 flag |= FLAG_SYN_ACKED; 3439 tp->retrans_stamp = 0; 3440 } 3441 3442 if (!fully_acked) 3443 break; 3444 3445 tcp_unlink_write_queue(skb, sk); 3446 sk_wmem_free_skb(sk, skb); 3447 tp->scoreboard_skb_hint = NULL; 3448 if (skb == tp->retransmit_skb_hint) 3449 tp->retransmit_skb_hint = NULL; 3450 if (skb == tp->lost_skb_hint) 3451 tp->lost_skb_hint = NULL; 3452 } 3453 3454 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) 3455 tp->snd_up = tp->snd_una; 3456 3457 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 3458 flag |= FLAG_SACK_RENEGING; 3459 3460 if (flag & FLAG_ACKED) { 3461 const struct tcp_congestion_ops *ca_ops 3462 = inet_csk(sk)->icsk_ca_ops; 3463 3464 if (unlikely(icsk->icsk_mtup.probe_size && 3465 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { 3466 tcp_mtup_probe_success(sk); 3467 } 3468 3469 tcp_ack_update_rtt(sk, flag, seq_rtt); 3470 tcp_rearm_rto(sk); 3471 3472 if (tcp_is_reno(tp)) { 3473 tcp_remove_reno_sacks(sk, pkts_acked); 3474 } else { 3475 int delta; 3476 3477 /* Non-retransmitted hole got filled? That's reordering */ 3478 if (reord < prior_fackets) 3479 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 3480 3481 delta = tcp_is_fack(tp) ? 
pkts_acked : 3482 prior_sacked - tp->sacked_out; 3483 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); 3484 } 3485 3486 tp->fackets_out -= min(pkts_acked, tp->fackets_out); 3487 3488 if (ca_ops->pkts_acked) { 3489 s32 rtt_us = -1; 3490 3491 /* Is the ACK triggering packet unambiguous? */ 3492 if (!(flag & FLAG_RETRANS_DATA_ACKED)) { 3493 /* High resolution needed and available? */ 3494 if (ca_ops->flags & TCP_CONG_RTT_STAMP && 3495 !ktime_equal(last_ackt, 3496 net_invalid_timestamp())) 3497 rtt_us = ktime_us_delta(ktime_get_real(), 3498 last_ackt); 3499 else if (ca_seq_rtt >= 0) 3500 rtt_us = jiffies_to_usecs(ca_seq_rtt); 3501 } 3502 3503 ca_ops->pkts_acked(sk, pkts_acked, rtt_us); 3504 } 3505 } 3506 3507 #if FASTRETRANS_DEBUG > 0 3508 WARN_ON((int)tp->sacked_out < 0); 3509 WARN_ON((int)tp->lost_out < 0); 3510 WARN_ON((int)tp->retrans_out < 0); 3511 if (!tp->packets_out && tcp_is_sack(tp)) { 3512 icsk = inet_csk(sk); 3513 if (tp->lost_out) { 3514 printk(KERN_DEBUG "Leak l=%u %d\n", 3515 tp->lost_out, icsk->icsk_ca_state); 3516 tp->lost_out = 0; 3517 } 3518 if (tp->sacked_out) { 3519 printk(KERN_DEBUG "Leak s=%u %d\n", 3520 tp->sacked_out, icsk->icsk_ca_state); 3521 tp->sacked_out = 0; 3522 } 3523 if (tp->retrans_out) { 3524 printk(KERN_DEBUG "Leak r=%u %d\n", 3525 tp->retrans_out, icsk->icsk_ca_state); 3526 tp->retrans_out = 0; 3527 } 3528 } 3529 #endif 3530 return flag; 3531 } 3532 3533 static void tcp_ack_probe(struct sock *sk) 3534 { 3535 const struct tcp_sock *tp = tcp_sk(sk); 3536 struct inet_connection_sock *icsk = inet_csk(sk); 3537 3538 /* Was it a usable window open? */ 3539 3540 if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { 3541 icsk->icsk_backoff = 0; 3542 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); 3543 /* Socket must be waked up by subsequent tcp_data_snd_check(). 3544 * This function is not for random using! 3545 */ 3546 } else { 3547 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 3548 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 3549 TCP_RTO_MAX); 3550 } 3551 } 3552 3553 static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag) 3554 { 3555 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 3556 inet_csk(sk)->icsk_ca_state != TCP_CA_Open; 3557 } 3558 3559 static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag) 3560 { 3561 const struct tcp_sock *tp = tcp_sk(sk); 3562 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 3563 !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR)); 3564 } 3565 3566 /* Check that window update is acceptable. 3567 * The function assumes that snd_una<=ack<=snd_next. 3568 */ 3569 static inline int tcp_may_update_window(const struct tcp_sock *tp, 3570 const u32 ack, const u32 ack_seq, 3571 const u32 nwin) 3572 { 3573 return after(ack, tp->snd_una) || 3574 after(ack_seq, tp->snd_wl1) || 3575 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd); 3576 } 3577 3578 /* Update our send window. 3579 * 3580 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 3581 * and in FreeBSD. NetBSD's one is even worse.) is wrong. 
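 *
 * What is actually used is the classical SND.WL1/SND.WL2 rule coded in
 * tcp_may_update_window() above: take the advertised window only from a
 * segment that acks new data, carries a newer sequence number, or repeats
 * the same sequence number with a larger window.  So, for example, an old
 * reordered pure ACK (ack <= snd_una, ack_seq < snd_wl1) is ignored and
 * cannot shrink snd_wnd behind a newer update.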
3582 */ 3583 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, 3584 u32 ack_seq) 3585 { 3586 struct tcp_sock *tp = tcp_sk(sk); 3587 int flag = 0; 3588 u32 nwin = ntohs(tcp_hdr(skb)->window); 3589 3590 if (likely(!tcp_hdr(skb)->syn)) 3591 nwin <<= tp->rx_opt.snd_wscale; 3592 3593 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { 3594 flag |= FLAG_WIN_UPDATE; 3595 tcp_update_wl(tp, ack_seq); 3596 3597 if (tp->snd_wnd != nwin) { 3598 tp->snd_wnd = nwin; 3599 3600 /* Note, it is the only place, where 3601 * fast path is recovered for sending TCP. 3602 */ 3603 tp->pred_flags = 0; 3604 tcp_fast_path_check(sk); 3605 3606 if (nwin > tp->max_window) { 3607 tp->max_window = nwin; 3608 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); 3609 } 3610 } 3611 } 3612 3613 tp->snd_una = ack; 3614 3615 return flag; 3616 } 3617 3618 /* A very conservative spurious RTO response algorithm: reduce cwnd and 3619 * continue in congestion avoidance. 3620 */ 3621 static void tcp_conservative_spur_to_response(struct tcp_sock *tp) 3622 { 3623 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 3624 tp->snd_cwnd_cnt = 0; 3625 tp->bytes_acked = 0; 3626 TCP_ECN_queue_cwr(tp); 3627 tcp_moderate_cwnd(tp); 3628 } 3629 3630 /* A conservative spurious RTO response algorithm: reduce cwnd using 3631 * rate halving and continue in congestion avoidance. 3632 */ 3633 static void tcp_ratehalving_spur_to_response(struct sock *sk) 3634 { 3635 tcp_enter_cwr(sk, 0); 3636 } 3637 3638 static void tcp_undo_spur_to_response(struct sock *sk, int flag) 3639 { 3640 if (flag & FLAG_ECE) 3641 tcp_ratehalving_spur_to_response(sk); 3642 else 3643 tcp_undo_cwr(sk, true); 3644 } 3645 3646 /* F-RTO spurious RTO detection algorithm (RFC4138) 3647 * 3648 * F-RTO affects during two new ACKs following RTO (well, almost, see inline 3649 * comments). State (ACK number) is kept in frto_counter. When ACK advances 3650 * window (but not to or beyond highest sequence sent before RTO): 3651 * On First ACK, send two new segments out. 3652 * On Second ACK, RTO was likely spurious. Do spurious response (response 3653 * algorithm is not part of the F-RTO detection algorithm 3654 * given in RFC4138 but can be selected separately). 3655 * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss 3656 * and TCP falls back to conventional RTO recovery. F-RTO allows overriding 3657 * of Nagle, this is done using frto_counter states 2 and 3, when a new data 3658 * segment of any size sent during F-RTO, state 2 is upgraded to 3. 3659 * 3660 * Rationale: if the RTO was spurious, new ACKs should arrive from the 3661 * original window even after we transmit two new data segments. 3662 * 3663 * SACK version: 3664 * on first step, wait until first cumulative ACK arrives, then move to 3665 * the second step. In second step, the next ACK decides. 3666 * 3667 * F-RTO is implemented (mainly) in four functions: 3668 * - tcp_use_frto() is used to determine if TCP is can use F-RTO 3669 * - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is 3670 * called when tcp_use_frto() showed green light 3671 * - tcp_process_frto() handles incoming ACKs during F-RTO algorithm 3672 * - tcp_enter_frto_loss() is called if there is not enough evidence 3673 * to prove that the RTO is indeed spurious. 
It transfers the control 3674 * from F-RTO to the conventional RTO recovery 3675 */ 3676 static int tcp_process_frto(struct sock *sk, int flag) 3677 { 3678 struct tcp_sock *tp = tcp_sk(sk); 3679 3680 tcp_verify_left_out(tp); 3681 3682 /* Duplicate the behavior from Loss state (fastretrans_alert) */ 3683 if (flag & FLAG_DATA_ACKED) 3684 inet_csk(sk)->icsk_retransmits = 0; 3685 3686 if ((flag & FLAG_NONHEAD_RETRANS_ACKED) || 3687 ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) 3688 tp->undo_marker = 0; 3689 3690 if (!before(tp->snd_una, tp->frto_highmark)) { 3691 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag); 3692 return 1; 3693 } 3694 3695 if (!tcp_is_sackfrto(tp)) { 3696 /* RFC4138 shortcoming in step 2; should also have case c): 3697 * ACK isn't duplicate nor advances window, e.g., opposite dir 3698 * data, winupdate 3699 */ 3700 if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP)) 3701 return 1; 3702 3703 if (!(flag & FLAG_DATA_ACKED)) { 3704 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3), 3705 flag); 3706 return 1; 3707 } 3708 } else { 3709 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 3710 /* Prevent sending of new data. */ 3711 tp->snd_cwnd = min(tp->snd_cwnd, 3712 tcp_packets_in_flight(tp)); 3713 return 1; 3714 } 3715 3716 if ((tp->frto_counter >= 2) && 3717 (!(flag & FLAG_FORWARD_PROGRESS) || 3718 ((flag & FLAG_DATA_SACKED) && 3719 !(flag & FLAG_ONLY_ORIG_SACKED)))) { 3720 /* RFC4138 shortcoming (see comment above) */ 3721 if (!(flag & FLAG_FORWARD_PROGRESS) && 3722 (flag & FLAG_NOT_DUP)) 3723 return 1; 3724 3725 tcp_enter_frto_loss(sk, 3, flag); 3726 return 1; 3727 } 3728 } 3729 3730 if (tp->frto_counter == 1) { 3731 /* tcp_may_send_now needs to see updated state */ 3732 tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; 3733 tp->frto_counter = 2; 3734 3735 if (!tcp_may_send_now(sk)) 3736 tcp_enter_frto_loss(sk, 2, flag); 3737 3738 return 1; 3739 } else { 3740 switch (sysctl_tcp_frto_response) { 3741 case 2: 3742 tcp_undo_spur_to_response(sk, flag); 3743 break; 3744 case 1: 3745 tcp_conservative_spur_to_response(tp); 3746 break; 3747 default: 3748 tcp_ratehalving_spur_to_response(sk); 3749 break; 3750 } 3751 tp->frto_counter = 0; 3752 tp->undo_marker = 0; 3753 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); 3754 } 3755 return 0; 3756 } 3757 3758 /* This routine deals with incoming acks, but not outgoing ones. */ 3759 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) 3760 { 3761 struct inet_connection_sock *icsk = inet_csk(sk); 3762 struct tcp_sock *tp = tcp_sk(sk); 3763 u32 prior_snd_una = tp->snd_una; 3764 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3765 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3766 bool is_dupack = false; 3767 u32 prior_in_flight; 3768 u32 prior_fackets; 3769 int prior_packets; 3770 int prior_sacked = tp->sacked_out; 3771 int pkts_acked = 0; 3772 int newly_acked_sacked = 0; 3773 int frto_cwnd = 0; 3774 3775 /* If the ack is older than previous acks 3776 * then we can probably ignore it. 3777 */ 3778 if (before(ack, prior_snd_una)) 3779 goto old_ack; 3780 3781 /* If the ack includes data we haven't sent yet, discard 3782 * this segment (RFC793 Section 3.9). 
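 *
 * (Both this check and the one above rely on the wrap-safe helpers: before()
 * and after() compare sequence numbers through a signed 32-bit difference,
 * so e.g. after(0x00000010, 0xfffffff0) is true -- the ACK is treated as
 * 0x20 bytes ahead of snd_nxt, not ~4G behind it.)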
3783 */ 3784 if (after(ack, tp->snd_nxt)) 3785 goto invalid_ack; 3786 3787 if (tp->early_retrans_delayed) 3788 tcp_rearm_rto(sk); 3789 3790 if (after(ack, prior_snd_una)) 3791 flag |= FLAG_SND_UNA_ADVANCED; 3792 3793 if (sysctl_tcp_abc) { 3794 if (icsk->icsk_ca_state < TCP_CA_CWR) 3795 tp->bytes_acked += ack - prior_snd_una; 3796 else if (icsk->icsk_ca_state == TCP_CA_Loss) 3797 /* we assume just one segment left network */ 3798 tp->bytes_acked += min(ack - prior_snd_una, 3799 tp->mss_cache); 3800 } 3801 3802 prior_fackets = tp->fackets_out; 3803 prior_in_flight = tcp_packets_in_flight(tp); 3804 3805 if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 3806 /* Window is constant, pure forward advance. 3807 * No more checks are required. 3808 * Note, we use the fact that SND.UNA>=SND.WL2. 3809 */ 3810 tcp_update_wl(tp, ack_seq); 3811 tp->snd_una = ack; 3812 flag |= FLAG_WIN_UPDATE; 3813 3814 tcp_ca_event(sk, CA_EVENT_FAST_ACK); 3815 3816 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); 3817 } else { 3818 if (ack_seq != TCP_SKB_CB(skb)->end_seq) 3819 flag |= FLAG_DATA; 3820 else 3821 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); 3822 3823 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); 3824 3825 if (TCP_SKB_CB(skb)->sacked) 3826 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3827 3828 if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) 3829 flag |= FLAG_ECE; 3830 3831 tcp_ca_event(sk, CA_EVENT_SLOW_ACK); 3832 } 3833 3834 /* We passed data and got it acked, remove any soft error 3835 * log. Something worked... 3836 */ 3837 sk->sk_err_soft = 0; 3838 icsk->icsk_probes_out = 0; 3839 tp->rcv_tstamp = tcp_time_stamp; 3840 prior_packets = tp->packets_out; 3841 if (!prior_packets) 3842 goto no_queue; 3843 3844 /* See if we can take anything off of the retransmit queue. */ 3845 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); 3846 3847 pkts_acked = prior_packets - tp->packets_out; 3848 newly_acked_sacked = (prior_packets - prior_sacked) - 3849 (tp->packets_out - tp->sacked_out); 3850 3851 if (tp->frto_counter) 3852 frto_cwnd = tcp_process_frto(sk, flag); 3853 /* Guarantee sacktag reordering detection against wrap-arounds */ 3854 if (before(tp->frto_highmark, tp->snd_una)) 3855 tp->frto_highmark = 0; 3856 3857 if (tcp_ack_is_dubious(sk, flag)) { 3858 /* Advance CWND, if state allows this. */ 3859 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && 3860 tcp_may_raise_cwnd(sk, flag)) 3861 tcp_cong_avoid(sk, ack, prior_in_flight); 3862 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3863 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, 3864 is_dupack, flag); 3865 } else { 3866 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) 3867 tcp_cong_avoid(sk, ack, prior_in_flight); 3868 } 3869 3870 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3871 dst_confirm(__sk_dst_get(sk)); 3872 3873 return 1; 3874 3875 no_queue: 3876 /* If data was DSACKed, see if we can undo a cwnd reduction. */ 3877 if (flag & FLAG_DSACKING_ACK) 3878 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, 3879 is_dupack, flag); 3880 /* If this ack opens up a zero window, clear backoff. It was 3881 * being used to time the probes, and is probably far higher than 3882 * it needs to be for normal retransmission. 
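 *
 * tcp_ack_probe(), called right below, either clears the timer and resets
 * icsk_backoff (when the head segment now fits in the window) or re-arms
 * probe0 to min(icsk_rto << icsk_backoff, TCP_RTO_MAX); e.g. with an RTO of
 * 200ms and a backoff of 4 the next zero-window probe fires in 3.2 seconds.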
3883 */ 3884 if (tcp_send_head(sk)) 3885 tcp_ack_probe(sk); 3886 return 1; 3887 3888 invalid_ack: 3889 SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3890 return -1; 3891 3892 old_ack: 3893 /* If data was SACKed, tag it and see if we should send more data. 3894 * If data was DSACKed, see if we can undo a cwnd reduction. 3895 */ 3896 if (TCP_SKB_CB(skb)->sacked) { 3897 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3898 newly_acked_sacked = tp->sacked_out - prior_sacked; 3899 tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, 3900 is_dupack, flag); 3901 } 3902 3903 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3904 return 0; 3905 } 3906 3907 /* Look for tcp options. Normally only called on SYN and SYNACK packets. 3908 * But, this can also be called on packets in the established flow when 3909 * the fast version below fails. 3910 */ 3911 void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, 3912 const u8 **hvpp, int estab) 3913 { 3914 const unsigned char *ptr; 3915 const struct tcphdr *th = tcp_hdr(skb); 3916 int length = (th->doff * 4) - sizeof(struct tcphdr); 3917 3918 ptr = (const unsigned char *)(th + 1); 3919 opt_rx->saw_tstamp = 0; 3920 3921 while (length > 0) { 3922 int opcode = *ptr++; 3923 int opsize; 3924 3925 switch (opcode) { 3926 case TCPOPT_EOL: 3927 return; 3928 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 3929 length--; 3930 continue; 3931 default: 3932 opsize = *ptr++; 3933 if (opsize < 2) /* "silly options" */ 3934 return; 3935 if (opsize > length) 3936 return; /* don't parse partial options */ 3937 switch (opcode) { 3938 case TCPOPT_MSS: 3939 if (opsize == TCPOLEN_MSS && th->syn && !estab) { 3940 u16 in_mss = get_unaligned_be16(ptr); 3941 if (in_mss) { 3942 if (opt_rx->user_mss && 3943 opt_rx->user_mss < in_mss) 3944 in_mss = opt_rx->user_mss; 3945 opt_rx->mss_clamp = in_mss; 3946 } 3947 } 3948 break; 3949 case TCPOPT_WINDOW: 3950 if (opsize == TCPOLEN_WINDOW && th->syn && 3951 !estab && sysctl_tcp_window_scaling) { 3952 __u8 snd_wscale = *(__u8 *)ptr; 3953 opt_rx->wscale_ok = 1; 3954 if (snd_wscale > 14) { 3955 if (net_ratelimit()) 3956 pr_info("%s: Illegal window scaling value %d >14 received\n", 3957 __func__, 3958 snd_wscale); 3959 snd_wscale = 14; 3960 } 3961 opt_rx->snd_wscale = snd_wscale; 3962 } 3963 break; 3964 case TCPOPT_TIMESTAMP: 3965 if ((opsize == TCPOLEN_TIMESTAMP) && 3966 ((estab && opt_rx->tstamp_ok) || 3967 (!estab && sysctl_tcp_timestamps))) { 3968 opt_rx->saw_tstamp = 1; 3969 opt_rx->rcv_tsval = get_unaligned_be32(ptr); 3970 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); 3971 } 3972 break; 3973 case TCPOPT_SACK_PERM: 3974 if (opsize == TCPOLEN_SACK_PERM && th->syn && 3975 !estab && sysctl_tcp_sack) { 3976 opt_rx->sack_ok = TCP_SACK_SEEN; 3977 tcp_sack_reset(opt_rx); 3978 } 3979 break; 3980 3981 case TCPOPT_SACK: 3982 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && 3983 !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && 3984 opt_rx->sack_ok) { 3985 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; 3986 } 3987 break; 3988 #ifdef CONFIG_TCP_MD5SIG 3989 case TCPOPT_MD5SIG: 3990 /* 3991 * The MD5 Hash has already been 3992 * checked (see tcp_v{4,6}_do_rcv()). 3993 */ 3994 break; 3995 #endif 3996 case TCPOPT_COOKIE: 3997 /* This option is variable length. 
3998 */ 3999 switch (opsize) { 4000 case TCPOLEN_COOKIE_BASE: 4001 /* not yet implemented */ 4002 break; 4003 case TCPOLEN_COOKIE_PAIR: 4004 /* not yet implemented */ 4005 break; 4006 case TCPOLEN_COOKIE_MIN+0: 4007 case TCPOLEN_COOKIE_MIN+2: 4008 case TCPOLEN_COOKIE_MIN+4: 4009 case TCPOLEN_COOKIE_MIN+6: 4010 case TCPOLEN_COOKIE_MAX: 4011 /* 16-bit multiple */ 4012 opt_rx->cookie_plus = opsize; 4013 *hvpp = ptr; 4014 break; 4015 default: 4016 /* ignore option */ 4017 break; 4018 } 4019 break; 4020 } 4021 4022 ptr += opsize-2; 4023 length -= opsize; 4024 } 4025 } 4026 } 4027 EXPORT_SYMBOL(tcp_parse_options); 4028 4029 static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) 4030 { 4031 const __be32 *ptr = (const __be32 *)(th + 1); 4032 4033 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 4034 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 4035 tp->rx_opt.saw_tstamp = 1; 4036 ++ptr; 4037 tp->rx_opt.rcv_tsval = ntohl(*ptr); 4038 ++ptr; 4039 tp->rx_opt.rcv_tsecr = ntohl(*ptr); 4040 return 1; 4041 } 4042 return 0; 4043 } 4044 4045 /* Fast parse options. This hopes to only see timestamps. 4046 * If it is wrong it falls back on tcp_parse_options(). 4047 */ 4048 static int tcp_fast_parse_options(const struct sk_buff *skb, 4049 const struct tcphdr *th, 4050 struct tcp_sock *tp, const u8 **hvpp) 4051 { 4052 /* In the spirit of fast parsing, compare doff directly to constant 4053 * values. Because equality is used, short doff can be ignored here. 4054 */ 4055 if (th->doff == (sizeof(*th) / 4)) { 4056 tp->rx_opt.saw_tstamp = 0; 4057 return 0; 4058 } else if (tp->rx_opt.tstamp_ok && 4059 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) { 4060 if (tcp_parse_aligned_timestamp(tp, th)) 4061 return 1; 4062 } 4063 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1); 4064 return 1; 4065 } 4066 4067 #ifdef CONFIG_TCP_MD5SIG 4068 /* 4069 * Parse MD5 Signature option 4070 */ 4071 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) 4072 { 4073 int length = (th->doff << 2) - sizeof(*th); 4074 const u8 *ptr = (const u8 *)(th + 1); 4075 4076 /* If the TCP option is too short, we can short cut */ 4077 if (length < TCPOLEN_MD5SIG) 4078 return NULL; 4079 4080 while (length > 0) { 4081 int opcode = *ptr++; 4082 int opsize; 4083 4084 switch(opcode) { 4085 case TCPOPT_EOL: 4086 return NULL; 4087 case TCPOPT_NOP: 4088 length--; 4089 continue; 4090 default: 4091 opsize = *ptr++; 4092 if (opsize < 2 || opsize > length) 4093 return NULL; 4094 if (opcode == TCPOPT_MD5SIG) 4095 return opsize == TCPOLEN_MD5SIG ? ptr : NULL; 4096 } 4097 ptr += opsize - 2; 4098 length -= opsize; 4099 } 4100 return NULL; 4101 } 4102 EXPORT_SYMBOL(tcp_parse_md5sig_option); 4103 #endif 4104 4105 static inline void tcp_store_ts_recent(struct tcp_sock *tp) 4106 { 4107 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 4108 tp->rx_opt.ts_recent_stamp = get_seconds(); 4109 } 4110 4111 static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) 4112 { 4113 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { 4114 /* PAWS bug workaround wrt. ACK frames, the PAWS discard 4115 * extra check below makes sure this can only happen 4116 * for pure ACK frames. -DaveM 4117 * 4118 * Not only, also it occurs for expired timestamps. 4119 */ 4120 4121 if (tcp_paws_check(&tp->rx_opt, 0)) 4122 tcp_store_ts_recent(tp); 4123 } 4124 } 4125 4126 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM 4127 * 4128 * It is not fatal. 
If this ACK does _not_ change critical state (seqs, window) 4129 * it can pass through stack. So, the following predicate verifies that 4130 * this segment is not used for anything but congestion avoidance or 4131 * fast retransmit. Moreover, we even are able to eliminate most of such 4132 * second order effects, if we apply some small "replay" window (~RTO) 4133 * to timestamp space. 4134 * 4135 * All these measures still do not guarantee that we reject wrapped ACKs 4136 * on networks with high bandwidth, when sequence space is recycled fastly, 4137 * but it guarantees that such events will be very rare and do not affect 4138 * connection seriously. This doesn't look nice, but alas, PAWS is really 4139 * buggy extension. 4140 * 4141 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC 4142 * states that events when retransmit arrives after original data are rare. 4143 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is 4144 * the biggest problem on large power networks even with minor reordering. 4145 * OK, let's give it small replay window. If peer clock is even 1hz, it is safe 4146 * up to bandwidth of 18Gigabit/sec. 8) ] 4147 */ 4148 4149 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 4150 { 4151 const struct tcp_sock *tp = tcp_sk(sk); 4152 const struct tcphdr *th = tcp_hdr(skb); 4153 u32 seq = TCP_SKB_CB(skb)->seq; 4154 u32 ack = TCP_SKB_CB(skb)->ack_seq; 4155 4156 return (/* 1. Pure ACK with correct sequence number. */ 4157 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && 4158 4159 /* 2. ... and duplicate ACK. */ 4160 ack == tp->snd_una && 4161 4162 /* 3. ... and does not update window. */ 4163 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 4164 4165 /* 4. ... and sits in replay window. */ 4166 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); 4167 } 4168 4169 static inline int tcp_paws_discard(const struct sock *sk, 4170 const struct sk_buff *skb) 4171 { 4172 const struct tcp_sock *tp = tcp_sk(sk); 4173 4174 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && 4175 !tcp_disordered_ack(sk, skb); 4176 } 4177 4178 /* Check segment sequence number for validity. 4179 * 4180 * Segment controls are considered valid, if the segment 4181 * fits to the window after truncation to the window. Acceptability 4182 * of data (and SYN, FIN, of course) is checked separately. 4183 * See tcp_data_queue(), for example. 4184 * 4185 * Also, controls (RST is main one) are accepted using RCV.WUP instead 4186 * of RCV.NXT. Peer still did not advance his SND.UNA when we 4187 * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. 4188 * (borrowed from freebsd) 4189 */ 4190 4191 static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) 4192 { 4193 return !before(end_seq, tp->rcv_wup) && 4194 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); 4195 } 4196 4197 /* When we get a reset we do this. */ 4198 static void tcp_reset(struct sock *sk) 4199 { 4200 /* We want the right error as BSD sees it (and indeed as we do). 
*/ 4201 switch (sk->sk_state) { 4202 case TCP_SYN_SENT: 4203 sk->sk_err = ECONNREFUSED; 4204 break; 4205 case TCP_CLOSE_WAIT: 4206 sk->sk_err = EPIPE; 4207 break; 4208 case TCP_CLOSE: 4209 return; 4210 default: 4211 sk->sk_err = ECONNRESET; 4212 } 4213 /* This barrier is coupled with smp_rmb() in tcp_poll() */ 4214 smp_wmb(); 4215 4216 if (!sock_flag(sk, SOCK_DEAD)) 4217 sk->sk_error_report(sk); 4218 4219 tcp_done(sk); 4220 } 4221 4222 /* 4223 * Process the FIN bit. This now behaves as it is supposed to work 4224 * and the FIN takes effect when it is validly part of sequence 4225 * space. Not before when we get holes. 4226 * 4227 * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT 4228 * (and thence onto LAST-ACK and finally, CLOSE, we never enter 4229 * TIME-WAIT) 4230 * 4231 * If we are in FINWAIT-1, a received FIN indicates simultaneous 4232 * close and we go into CLOSING (and later onto TIME-WAIT) 4233 * 4234 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. 4235 */ 4236 static void tcp_fin(struct sock *sk) 4237 { 4238 struct tcp_sock *tp = tcp_sk(sk); 4239 4240 inet_csk_schedule_ack(sk); 4241 4242 sk->sk_shutdown |= RCV_SHUTDOWN; 4243 sock_set_flag(sk, SOCK_DONE); 4244 4245 switch (sk->sk_state) { 4246 case TCP_SYN_RECV: 4247 case TCP_ESTABLISHED: 4248 /* Move to CLOSE_WAIT */ 4249 tcp_set_state(sk, TCP_CLOSE_WAIT); 4250 inet_csk(sk)->icsk_ack.pingpong = 1; 4251 break; 4252 4253 case TCP_CLOSE_WAIT: 4254 case TCP_CLOSING: 4255 /* Received a retransmission of the FIN, do 4256 * nothing. 4257 */ 4258 break; 4259 case TCP_LAST_ACK: 4260 /* RFC793: Remain in the LAST-ACK state. */ 4261 break; 4262 4263 case TCP_FIN_WAIT1: 4264 /* This case occurs when a simultaneous close 4265 * happens, we must ack the received FIN and 4266 * enter the CLOSING state. 4267 */ 4268 tcp_send_ack(sk); 4269 tcp_set_state(sk, TCP_CLOSING); 4270 break; 4271 case TCP_FIN_WAIT2: 4272 /* Received a FIN -- send ACK and enter TIME_WAIT. */ 4273 tcp_send_ack(sk); 4274 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 4275 break; 4276 default: 4277 /* Only TCP_LISTEN and TCP_CLOSE are left, in these 4278 * cases we should never reach this piece of code. 4279 */ 4280 pr_err("%s: Impossible, sk->sk_state=%d\n", 4281 __func__, sk->sk_state); 4282 break; 4283 } 4284 4285 /* It _is_ possible, that we have something out-of-order _after_ FIN. 4286 * Probably, we should reset in this case. For now drop them. 4287 */ 4288 __skb_queue_purge(&tp->out_of_order_queue); 4289 if (tcp_is_sack(tp)) 4290 tcp_sack_reset(&tp->rx_opt); 4291 sk_mem_reclaim(sk); 4292 4293 if (!sock_flag(sk, SOCK_DEAD)) { 4294 sk->sk_state_change(sk); 4295 4296 /* Do not send POLL_HUP for half duplex close. 
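 *
 * POLL_HUP is reserved for the case where both directions are gone: e.g. a
 * peer FIN arriving after we already did shutdown(SHUT_WR) makes sk_shutdown
 * equal SHUTDOWN_MASK, and only then is the async waker told POLL_HUP.  A
 * plain half close only signals POLL_IN, so readers can drain the remaining
 * data and then see the EOF.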
*/ 4297 if (sk->sk_shutdown == SHUTDOWN_MASK || 4298 sk->sk_state == TCP_CLOSE) 4299 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 4300 else 4301 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 4302 } 4303 } 4304 4305 static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, 4306 u32 end_seq) 4307 { 4308 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { 4309 if (before(seq, sp->start_seq)) 4310 sp->start_seq = seq; 4311 if (after(end_seq, sp->end_seq)) 4312 sp->end_seq = end_seq; 4313 return 1; 4314 } 4315 return 0; 4316 } 4317 4318 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 4319 { 4320 struct tcp_sock *tp = tcp_sk(sk); 4321 4322 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 4323 int mib_idx; 4324 4325 if (before(seq, tp->rcv_nxt)) 4326 mib_idx = LINUX_MIB_TCPDSACKOLDSENT; 4327 else 4328 mib_idx = LINUX_MIB_TCPDSACKOFOSENT; 4329 4330 NET_INC_STATS_BH(sock_net(sk), mib_idx); 4331 4332 tp->rx_opt.dsack = 1; 4333 tp->duplicate_sack[0].start_seq = seq; 4334 tp->duplicate_sack[0].end_seq = end_seq; 4335 } 4336 } 4337 4338 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) 4339 { 4340 struct tcp_sock *tp = tcp_sk(sk); 4341 4342 if (!tp->rx_opt.dsack) 4343 tcp_dsack_set(sk, seq, end_seq); 4344 else 4345 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 4346 } 4347 4348 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) 4349 { 4350 struct tcp_sock *tp = tcp_sk(sk); 4351 4352 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 4353 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4354 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4355 tcp_enter_quickack_mode(sk); 4356 4357 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 4358 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4359 4360 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 4361 end_seq = tp->rcv_nxt; 4362 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); 4363 } 4364 } 4365 4366 tcp_send_ack(sk); 4367 } 4368 4369 /* These routines update the SACK block as out-of-order packets arrive or 4370 * in-order packets close up the sequence space. 4371 */ 4372 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) 4373 { 4374 int this_sack; 4375 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4376 struct tcp_sack_block *swalk = sp + 1; 4377 4378 /* See if the recent change to the first SACK eats into 4379 * or hits the sequence space of other SACK blocks, if so coalesce. 4380 */ 4381 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { 4382 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { 4383 int i; 4384 4385 /* Zap SWALK, by moving every further SACK up by one slot. 4386 * Decrease num_sacks. 4387 */ 4388 tp->rx_opt.num_sacks--; 4389 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) 4390 sp[i] = sp[i + 1]; 4391 continue; 4392 } 4393 this_sack++, swalk++; 4394 } 4395 } 4396 4397 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) 4398 { 4399 struct tcp_sock *tp = tcp_sk(sk); 4400 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4401 int cur_sacks = tp->rx_opt.num_sacks; 4402 int this_sack; 4403 4404 if (!cur_sacks) 4405 goto new_sack; 4406 4407 for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { 4408 if (tcp_sack_extend(sp, seq, end_seq)) { 4409 /* Rotate this_sack to the first one. 
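 *
 * I.e. the block we just extended bubbles into selective_acks[0], since
 * RFC 2018 wants the block containing the most recently received data
 * reported first; e.g. with blocks A,B,C on file and new data landing in C,
 * the array becomes C,A,B and tcp_sack_maybe_coalesce() below then merges
 * any blocks that now overlap.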
*/ 4410 for (; this_sack > 0; this_sack--, sp--) 4411 swap(*sp, *(sp - 1)); 4412 if (cur_sacks > 1) 4413 tcp_sack_maybe_coalesce(tp); 4414 return; 4415 } 4416 } 4417 4418 /* Could not find an adjacent existing SACK, build a new one, 4419 * put it at the front, and shift everyone else down. We 4420 * always know there is at least one SACK present already here. 4421 * 4422 * If the sack array is full, forget about the last one. 4423 */ 4424 if (this_sack >= TCP_NUM_SACKS) { 4425 this_sack--; 4426 tp->rx_opt.num_sacks--; 4427 sp--; 4428 } 4429 for (; this_sack > 0; this_sack--, sp--) 4430 *sp = *(sp - 1); 4431 4432 new_sack: 4433 /* Build the new head SACK, and we're done. */ 4434 sp->start_seq = seq; 4435 sp->end_seq = end_seq; 4436 tp->rx_opt.num_sacks++; 4437 } 4438 4439 /* RCV.NXT advances, some SACKs should be eaten. */ 4440 4441 static void tcp_sack_remove(struct tcp_sock *tp) 4442 { 4443 struct tcp_sack_block *sp = &tp->selective_acks[0]; 4444 int num_sacks = tp->rx_opt.num_sacks; 4445 int this_sack; 4446 4447 /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ 4448 if (skb_queue_empty(&tp->out_of_order_queue)) { 4449 tp->rx_opt.num_sacks = 0; 4450 return; 4451 } 4452 4453 for (this_sack = 0; this_sack < num_sacks;) { 4454 /* Check if the start of the sack is covered by RCV.NXT. */ 4455 if (!before(tp->rcv_nxt, sp->start_seq)) { 4456 int i; 4457 4458 /* RCV.NXT must cover all the block! */ 4459 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); 4460 4461 /* Zap this SACK, by moving forward any other SACKS. */ 4462 for (i=this_sack+1; i < num_sacks; i++) 4463 tp->selective_acks[i-1] = tp->selective_acks[i]; 4464 num_sacks--; 4465 continue; 4466 } 4467 this_sack++; 4468 sp++; 4469 } 4470 tp->rx_opt.num_sacks = num_sacks; 4471 } 4472 4473 /* This one checks to see if we can put data from the 4474 * out_of_order queue into the receive_queue. 
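 *
 * A small sketch of the common case: with rcv_nxt at 1000 and the ofo queue
 * holding [1000,2000) and [3000,4000), the first skb is moved to
 * sk_receive_queue and rcv_nxt advances to 2000, while the second stays
 * queued because of the remaining hole.  Skbs found to be entirely below
 * rcv_nxt are dropped here instead, after extending the pending D-SACK via
 * tcp_dsack_extend().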
4475 */ 4476 static void tcp_ofo_queue(struct sock *sk) 4477 { 4478 struct tcp_sock *tp = tcp_sk(sk); 4479 __u32 dsack_high = tp->rcv_nxt; 4480 struct sk_buff *skb; 4481 4482 while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { 4483 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 4484 break; 4485 4486 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { 4487 __u32 dsack = dsack_high; 4488 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) 4489 dsack_high = TCP_SKB_CB(skb)->end_seq; 4490 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); 4491 } 4492 4493 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4494 SOCK_DEBUG(sk, "ofo packet was already received\n"); 4495 __skb_unlink(skb, &tp->out_of_order_queue); 4496 __kfree_skb(skb); 4497 continue; 4498 } 4499 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", 4500 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4501 TCP_SKB_CB(skb)->end_seq); 4502 4503 __skb_unlink(skb, &tp->out_of_order_queue); 4504 __skb_queue_tail(&sk->sk_receive_queue, skb); 4505 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4506 if (tcp_hdr(skb)->fin) 4507 tcp_fin(sk); 4508 } 4509 } 4510 4511 static int tcp_prune_ofo_queue(struct sock *sk); 4512 static int tcp_prune_queue(struct sock *sk); 4513 4514 static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) 4515 { 4516 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 4517 !sk_rmem_schedule(sk, size)) { 4518 4519 if (tcp_prune_queue(sk) < 0) 4520 return -1; 4521 4522 if (!sk_rmem_schedule(sk, size)) { 4523 if (!tcp_prune_ofo_queue(sk)) 4524 return -1; 4525 4526 if (!sk_rmem_schedule(sk, size)) 4527 return -1; 4528 } 4529 } 4530 return 0; 4531 } 4532 4533 /** 4534 * tcp_try_coalesce - try to merge skb to prior one 4535 * @sk: socket 4536 * @to: prior buffer 4537 * @from: buffer to add in queue 4538 * @fragstolen: pointer to boolean 4539 * 4540 * Before queueing skb @from after @to, try to merge them 4541 * to reduce overall memory use and queue lengths, if cost is small. 4542 * Packets in ofo or receive queues can stay a long time. 4543 * Better try to coalesce them right now to avoid future collapses. 
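 *
 * Roughly three strategies are tried, cheapest first: memcpy @from into the
 * tailroom of @to when it fits, steal the head page of @from as an extra
 * page fragment of @to, or move @from's page fragments over wholesale.  In
 * the latter two cases only the truesize delta is charged to the socket.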
4544 * Returns true if caller should free @from instead of queueing it 4545 */ 4546 static bool tcp_try_coalesce(struct sock *sk, 4547 struct sk_buff *to, 4548 struct sk_buff *from, 4549 bool *fragstolen) 4550 { 4551 int i, delta, len = from->len; 4552 4553 *fragstolen = false; 4554 4555 if (tcp_hdr(from)->fin || skb_cloned(to)) 4556 return false; 4557 4558 if (len <= skb_tailroom(to)) { 4559 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); 4560 goto merge; 4561 } 4562 4563 if (skb_has_frag_list(to) || skb_has_frag_list(from)) 4564 return false; 4565 4566 if (skb_headlen(from) != 0) { 4567 struct page *page; 4568 unsigned int offset; 4569 4570 if (skb_shinfo(to)->nr_frags + 4571 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) 4572 return false; 4573 4574 if (skb_head_is_locked(from)) 4575 return false; 4576 4577 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); 4578 4579 page = virt_to_head_page(from->head); 4580 offset = from->data - (unsigned char *)page_address(page); 4581 4582 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags, 4583 page, offset, skb_headlen(from)); 4584 *fragstolen = true; 4585 } else { 4586 if (skb_shinfo(to)->nr_frags + 4587 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS) 4588 return false; 4589 4590 delta = from->truesize - 4591 SKB_TRUESIZE(skb_end_pointer(from) - from->head); 4592 } 4593 4594 WARN_ON_ONCE(delta < len); 4595 4596 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags, 4597 skb_shinfo(from)->frags, 4598 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t)); 4599 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags; 4600 4601 if (!skb_cloned(from)) 4602 skb_shinfo(from)->nr_frags = 0; 4603 4604 /* if the skb is cloned this does nothing since we set nr_frags to 0 */ 4605 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) 4606 skb_frag_ref(from, i); 4607 4608 to->truesize += delta; 4609 atomic_add(delta, &sk->sk_rmem_alloc); 4610 sk_mem_charge(sk, delta); 4611 to->len += len; 4612 to->data_len += len; 4613 4614 merge: 4615 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); 4616 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; 4617 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; 4618 return true; 4619 } 4620 4621 static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) 4622 { 4623 if (head_stolen) 4624 kmem_cache_free(skbuff_head_cache, skb); 4625 else 4626 __kfree_skb(skb); 4627 } 4628 4629 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) 4630 { 4631 struct tcp_sock *tp = tcp_sk(sk); 4632 struct sk_buff *skb1; 4633 u32 seq, end_seq; 4634 4635 TCP_ECN_check_ce(tp, skb); 4636 4637 if (tcp_try_rmem_schedule(sk, skb->truesize)) { 4638 /* TODO: should increment a counter */ 4639 __kfree_skb(skb); 4640 return; 4641 } 4642 4643 /* Disable header prediction. */ 4644 tp->pred_flags = 0; 4645 inet_csk_schedule_ack(sk); 4646 4647 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 4648 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4649 4650 skb1 = skb_peek_tail(&tp->out_of_order_queue); 4651 if (!skb1) { 4652 /* Initial out of order segment, build 1 SACK. 
*/ 4653 if (tcp_is_sack(tp)) { 4654 tp->rx_opt.num_sacks = 1; 4655 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; 4656 tp->selective_acks[0].end_seq = 4657 TCP_SKB_CB(skb)->end_seq; 4658 } 4659 __skb_queue_head(&tp->out_of_order_queue, skb); 4660 goto end; 4661 } 4662 4663 seq = TCP_SKB_CB(skb)->seq; 4664 end_seq = TCP_SKB_CB(skb)->end_seq; 4665 4666 if (seq == TCP_SKB_CB(skb1)->end_seq) { 4667 bool fragstolen; 4668 4669 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { 4670 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4671 } else { 4672 kfree_skb_partial(skb, fragstolen); 4673 skb = NULL; 4674 } 4675 4676 if (!tp->rx_opt.num_sacks || 4677 tp->selective_acks[0].end_seq != seq) 4678 goto add_sack; 4679 4680 /* Common case: data arrive in order after hole. */ 4681 tp->selective_acks[0].end_seq = end_seq; 4682 goto end; 4683 } 4684 4685 /* Find place to insert this segment. */ 4686 while (1) { 4687 if (!after(TCP_SKB_CB(skb1)->seq, seq)) 4688 break; 4689 if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { 4690 skb1 = NULL; 4691 break; 4692 } 4693 skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); 4694 } 4695 4696 /* Do skb overlap to previous one? */ 4697 if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { 4698 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4699 /* All the bits are present. Drop. */ 4700 __kfree_skb(skb); 4701 skb = NULL; 4702 tcp_dsack_set(sk, seq, end_seq); 4703 goto add_sack; 4704 } 4705 if (after(seq, TCP_SKB_CB(skb1)->seq)) { 4706 /* Partial overlap. */ 4707 tcp_dsack_set(sk, seq, 4708 TCP_SKB_CB(skb1)->end_seq); 4709 } else { 4710 if (skb_queue_is_first(&tp->out_of_order_queue, 4711 skb1)) 4712 skb1 = NULL; 4713 else 4714 skb1 = skb_queue_prev( 4715 &tp->out_of_order_queue, 4716 skb1); 4717 } 4718 } 4719 if (!skb1) 4720 __skb_queue_head(&tp->out_of_order_queue, skb); 4721 else 4722 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4723 4724 /* And clean segments covered by new one as whole. */ 4725 while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { 4726 skb1 = skb_queue_next(&tp->out_of_order_queue, skb); 4727 4728 if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) 4729 break; 4730 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4731 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4732 end_seq); 4733 break; 4734 } 4735 __skb_unlink(skb1, &tp->out_of_order_queue); 4736 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4737 TCP_SKB_CB(skb1)->end_seq); 4738 __kfree_skb(skb1); 4739 } 4740 4741 add_sack: 4742 if (tcp_is_sack(tp)) 4743 tcp_sack_new_ofo_skb(sk, seq, end_seq); 4744 end: 4745 if (skb) 4746 skb_set_owner_r(skb, sk); 4747 } 4748 4749 int tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen, 4750 bool *fragstolen) 4751 { 4752 int eaten; 4753 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); 4754 4755 __skb_pull(skb, hdrlen); 4756 eaten = (tail && 4757 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 
1 : 0; 4758 tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4759 if (!eaten) { 4760 __skb_queue_tail(&sk->sk_receive_queue, skb); 4761 skb_set_owner_r(skb, sk); 4762 } 4763 return eaten; 4764 } 4765 4766 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4767 { 4768 const struct tcphdr *th = tcp_hdr(skb); 4769 struct tcp_sock *tp = tcp_sk(sk); 4770 int eaten = -1; 4771 bool fragstolen = false; 4772 4773 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) 4774 goto drop; 4775 4776 skb_dst_drop(skb); 4777 __skb_pull(skb, th->doff * 4); 4778 4779 TCP_ECN_accept_cwr(tp, skb); 4780 4781 tp->rx_opt.dsack = 0; 4782 4783 /* Queue data for delivery to the user. 4784 * Packets in sequence go to the receive queue. 4785 * Out of sequence packets to the out_of_order_queue. 4786 */ 4787 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 4788 if (tcp_receive_window(tp) == 0) 4789 goto out_of_window; 4790 4791 /* Ok. In sequence. In window. */ 4792 if (tp->ucopy.task == current && 4793 tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && 4794 sock_owned_by_user(sk) && !tp->urg_data) { 4795 int chunk = min_t(unsigned int, skb->len, 4796 tp->ucopy.len); 4797 4798 __set_current_state(TASK_RUNNING); 4799 4800 local_bh_enable(); 4801 if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { 4802 tp->ucopy.len -= chunk; 4803 tp->copied_seq += chunk; 4804 eaten = (chunk == skb->len); 4805 tcp_rcv_space_adjust(sk); 4806 } 4807 local_bh_disable(); 4808 } 4809 4810 if (eaten <= 0) { 4811 queue_and_out: 4812 if (eaten < 0 && 4813 tcp_try_rmem_schedule(sk, skb->truesize)) 4814 goto drop; 4815 4816 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); 4817 } 4818 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4819 if (skb->len) 4820 tcp_event_data_recv(sk, skb); 4821 if (th->fin) 4822 tcp_fin(sk); 4823 4824 if (!skb_queue_empty(&tp->out_of_order_queue)) { 4825 tcp_ofo_queue(sk); 4826 4827 /* RFC2581. 4.2. SHOULD send immediate ACK, when 4828 * gap in queue is filled. 4829 */ 4830 if (skb_queue_empty(&tp->out_of_order_queue)) 4831 inet_csk(sk)->icsk_ack.pingpong = 0; 4832 } 4833 4834 if (tp->rx_opt.num_sacks) 4835 tcp_sack_remove(tp); 4836 4837 tcp_fast_path_check(sk); 4838 4839 if (eaten > 0) 4840 kfree_skb_partial(skb, fragstolen); 4841 else if (!sock_flag(sk, SOCK_DEAD)) 4842 sk->sk_data_ready(sk, 0); 4843 return; 4844 } 4845 4846 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4847 /* A retransmit, 2nd most common case. Force an immediate ack. */ 4848 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4849 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4850 4851 out_of_window: 4852 tcp_enter_quickack_mode(sk); 4853 inet_csk_schedule_ack(sk); 4854 drop: 4855 __kfree_skb(skb); 4856 return; 4857 } 4858 4859 /* Out of window. F.e. zero window probe. */ 4860 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 4861 goto out_of_window; 4862 4863 tcp_enter_quickack_mode(sk); 4864 4865 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4866 /* Partial packet, seq < rcv_next < end_seq */ 4867 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", 4868 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4869 TCP_SKB_CB(skb)->end_seq); 4870 4871 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 4872 4873 /* If window is closed, drop tail of packet. But after 4874 * remembering D-SACK for its head made in previous line. 
4875 */ 4876 if (!tcp_receive_window(tp)) 4877 goto out_of_window; 4878 goto queue_and_out; 4879 } 4880 4881 tcp_data_queue_ofo(sk, skb); 4882 } 4883 4884 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, 4885 struct sk_buff_head *list) 4886 { 4887 struct sk_buff *next = NULL; 4888 4889 if (!skb_queue_is_last(list, skb)) 4890 next = skb_queue_next(list, skb); 4891 4892 __skb_unlink(skb, list); 4893 __kfree_skb(skb); 4894 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); 4895 4896 return next; 4897 } 4898 4899 /* Collapse contiguous sequence of skbs head..tail with 4900 * sequence numbers start..end. 4901 * 4902 * If tail is NULL, this means until the end of the list. 4903 * 4904 * Segments with FIN/SYN are not collapsed (only because this 4905 * simplifies code) 4906 */ 4907 static void 4908 tcp_collapse(struct sock *sk, struct sk_buff_head *list, 4909 struct sk_buff *head, struct sk_buff *tail, 4910 u32 start, u32 end) 4911 { 4912 struct sk_buff *skb, *n; 4913 bool end_of_skbs; 4914 4915 /* First, check that queue is collapsible and find 4916 * the point where collapsing can be useful. */ 4917 skb = head; 4918 restart: 4919 end_of_skbs = true; 4920 skb_queue_walk_from_safe(list, skb, n) { 4921 if (skb == tail) 4922 break; 4923 /* No new bits? It is possible on ofo queue. */ 4924 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4925 skb = tcp_collapse_one(sk, skb, list); 4926 if (!skb) 4927 break; 4928 goto restart; 4929 } 4930 4931 /* The first skb to collapse is: 4932 * - not SYN/FIN and 4933 * - bloated or contains data before "start" or 4934 * overlaps to the next one. 4935 */ 4936 if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && 4937 (tcp_win_from_space(skb->truesize) > skb->len || 4938 before(TCP_SKB_CB(skb)->seq, start))) { 4939 end_of_skbs = false; 4940 break; 4941 } 4942 4943 if (!skb_queue_is_last(list, skb)) { 4944 struct sk_buff *next = skb_queue_next(list, skb); 4945 if (next != tail && 4946 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { 4947 end_of_skbs = false; 4948 break; 4949 } 4950 } 4951 4952 /* Decided to skip this, advance start seq. */ 4953 start = TCP_SKB_CB(skb)->end_seq; 4954 } 4955 if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) 4956 return; 4957 4958 while (before(start, end)) { 4959 struct sk_buff *nskb; 4960 unsigned int header = skb_headroom(skb); 4961 int copy = SKB_MAX_ORDER(header, 0); 4962 4963 /* Too big header? This can happen with IPv6. */ 4964 if (copy < 0) 4965 return; 4966 if (end - start < copy) 4967 copy = end - start; 4968 nskb = alloc_skb(copy + header, GFP_ATOMIC); 4969 if (!nskb) 4970 return; 4971 4972 skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); 4973 skb_set_network_header(nskb, (skb_network_header(skb) - 4974 skb->head)); 4975 skb_set_transport_header(nskb, (skb_transport_header(skb) - 4976 skb->head)); 4977 skb_reserve(nskb, header); 4978 memcpy(nskb->head, skb->head, header); 4979 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 4980 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 4981 __skb_queue_before(list, skb, nskb); 4982 skb_set_owner_r(nskb, sk); 4983 4984 /* Copy data, releasing collapsed skbs. 
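 *
 * Each pass of the loop below fills nskb (at most one order-0 page minus the
 * copied headers) from as many small skbs as needed; an original skb is
 * unlinked and freed by tcp_collapse_one() once start has passed its
 * end_seq.  The net effect is replacing many bloated skbs by a few tightly
 * packed ones.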
*/ 4985 while (copy > 0) { 4986 int offset = start - TCP_SKB_CB(skb)->seq; 4987 int size = TCP_SKB_CB(skb)->end_seq - start; 4988 4989 BUG_ON(offset < 0); 4990 if (size > 0) { 4991 size = min(copy, size); 4992 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) 4993 BUG(); 4994 TCP_SKB_CB(nskb)->end_seq += size; 4995 copy -= size; 4996 start += size; 4997 } 4998 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4999 skb = tcp_collapse_one(sk, skb, list); 5000 if (!skb || 5001 skb == tail || 5002 tcp_hdr(skb)->syn || 5003 tcp_hdr(skb)->fin) 5004 return; 5005 } 5006 } 5007 } 5008 } 5009 5010 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs 5011 * and tcp_collapse() them until all the queue is collapsed. 5012 */ 5013 static void tcp_collapse_ofo_queue(struct sock *sk) 5014 { 5015 struct tcp_sock *tp = tcp_sk(sk); 5016 struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); 5017 struct sk_buff *head; 5018 u32 start, end; 5019 5020 if (skb == NULL) 5021 return; 5022 5023 start = TCP_SKB_CB(skb)->seq; 5024 end = TCP_SKB_CB(skb)->end_seq; 5025 head = skb; 5026 5027 for (;;) { 5028 struct sk_buff *next = NULL; 5029 5030 if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) 5031 next = skb_queue_next(&tp->out_of_order_queue, skb); 5032 skb = next; 5033 5034 /* Segment is terminated when we see gap or when 5035 * we are at the end of all the queue. */ 5036 if (!skb || 5037 after(TCP_SKB_CB(skb)->seq, end) || 5038 before(TCP_SKB_CB(skb)->end_seq, start)) { 5039 tcp_collapse(sk, &tp->out_of_order_queue, 5040 head, skb, start, end); 5041 head = skb; 5042 if (!skb) 5043 break; 5044 /* Start new segment */ 5045 start = TCP_SKB_CB(skb)->seq; 5046 end = TCP_SKB_CB(skb)->end_seq; 5047 } else { 5048 if (before(TCP_SKB_CB(skb)->seq, start)) 5049 start = TCP_SKB_CB(skb)->seq; 5050 if (after(TCP_SKB_CB(skb)->end_seq, end)) 5051 end = TCP_SKB_CB(skb)->end_seq; 5052 } 5053 } 5054 } 5055 5056 /* 5057 * Purge the out-of-order queue. 5058 * Return true if queue was pruned. 5059 */ 5060 static int tcp_prune_ofo_queue(struct sock *sk) 5061 { 5062 struct tcp_sock *tp = tcp_sk(sk); 5063 int res = 0; 5064 5065 if (!skb_queue_empty(&tp->out_of_order_queue)) { 5066 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); 5067 __skb_queue_purge(&tp->out_of_order_queue); 5068 5069 /* Reset SACK state. A conforming SACK implementation will 5070 * do the same at a timeout based retransmit. When a connection 5071 * is in a sad state like this, we care only about integrity 5072 * of the connection not performance. 5073 */ 5074 if (tp->rx_opt.sack_ok) 5075 tcp_sack_reset(&tp->rx_opt); 5076 sk_mem_reclaim(sk); 5077 res = 1; 5078 } 5079 return res; 5080 } 5081 5082 /* Reduce allocated memory if we can, trying to get 5083 * the socket within its memory limits again. 5084 * 5085 * Return less than zero if we should start dropping frames 5086 * until the socket owning process reads some of the data 5087 * to stabilize the situation. 
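 *
 * The steps below escalate: first clamp the window (or the receive ssthresh
 * under memory pressure), then collapse the ofo and receive queues in place,
 * then purge the ofo queue completely, and only if the socket is still over
 * sk_rcvbuf give up -- kill the fast path (pred_flags = 0) and return -1 so
 * the caller drops incoming data.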
5088 */ 5089 static int tcp_prune_queue(struct sock *sk) 5090 { 5091 struct tcp_sock *tp = tcp_sk(sk); 5092 5093 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); 5094 5095 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); 5096 5097 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 5098 tcp_clamp_window(sk); 5099 else if (sk_under_memory_pressure(sk)) 5100 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 5101 5102 tcp_collapse_ofo_queue(sk); 5103 if (!skb_queue_empty(&sk->sk_receive_queue)) 5104 tcp_collapse(sk, &sk->sk_receive_queue, 5105 skb_peek(&sk->sk_receive_queue), 5106 NULL, 5107 tp->copied_seq, tp->rcv_nxt); 5108 sk_mem_reclaim(sk); 5109 5110 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 5111 return 0; 5112 5113 /* Collapsing did not help, destructive actions follow. 5114 * This must not ever occur. */ 5115 5116 tcp_prune_ofo_queue(sk); 5117 5118 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 5119 return 0; 5120 5121 /* If we are really being abused, tell the caller to silently 5122 * drop receive data on the floor. It will get retransmitted 5123 * and hopefully then we'll have sufficient space. 5124 */ 5125 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); 5126 5127 /* Massive buffer overcommit. */ 5128 tp->pred_flags = 0; 5129 return -1; 5130 } 5131 5132 /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. 5133 * As additional protections, we do not touch cwnd in retransmission phases, 5134 * and if application hit its sndbuf limit recently. 5135 */ 5136 void tcp_cwnd_application_limited(struct sock *sk) 5137 { 5138 struct tcp_sock *tp = tcp_sk(sk); 5139 5140 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && 5141 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 5142 /* Limited by application or receiver window. */ 5143 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); 5144 u32 win_used = max(tp->snd_cwnd_used, init_win); 5145 if (win_used < tp->snd_cwnd) { 5146 tp->snd_ssthresh = tcp_current_ssthresh(sk); 5147 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 5148 } 5149 tp->snd_cwnd_used = 0; 5150 } 5151 tp->snd_cwnd_stamp = tcp_time_stamp; 5152 } 5153 5154 static int tcp_should_expand_sndbuf(const struct sock *sk) 5155 { 5156 const struct tcp_sock *tp = tcp_sk(sk); 5157 5158 /* If the user specified a specific send buffer setting, do 5159 * not modify it. 5160 */ 5161 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) 5162 return 0; 5163 5164 /* If we are under global TCP memory pressure, do not expand. */ 5165 if (sk_under_memory_pressure(sk)) 5166 return 0; 5167 5168 /* If we are under soft global TCP memory pressure, do not expand. */ 5169 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0)) 5170 return 0; 5171 5172 /* If we filled the congestion window, do not expand. */ 5173 if (tp->packets_out >= tp->snd_cwnd) 5174 return 0; 5175 5176 return 1; 5177 } 5178 5179 /* When incoming ACK allowed to free some skb from write_queue, 5180 * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket 5181 * on the exit from tcp input handler. 5182 * 5183 * PROBLEM: sndbuf expansion does not work well with largesend. 
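 *
 * The sizing below asks for roughly 2 * max(snd_cwnd, reordering + 1)
 * buffers of truesize SKB_TRUESIZE(MSS + MAX_TCP_HEADER) each, i.e. with a
 * cwnd of 10 enough room for about 20 full-sized skbs in flight, still
 * capped by sysctl_tcp_wmem[2]; sk_sndbuf is only ever grown here, never
 * shrunk.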
5184 */ 5185 static void tcp_new_space(struct sock *sk) 5186 { 5187 struct tcp_sock *tp = tcp_sk(sk); 5188 5189 if (tcp_should_expand_sndbuf(sk)) { 5190 int sndmem = SKB_TRUESIZE(max_t(u32, 5191 tp->rx_opt.mss_clamp, 5192 tp->mss_cache) + 5193 MAX_TCP_HEADER); 5194 int demanded = max_t(unsigned int, tp->snd_cwnd, 5195 tp->reordering + 1); 5196 sndmem *= 2 * demanded; 5197 if (sndmem > sk->sk_sndbuf) 5198 sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); 5199 tp->snd_cwnd_stamp = tcp_time_stamp; 5200 } 5201 5202 sk->sk_write_space(sk); 5203 } 5204 5205 static void tcp_check_space(struct sock *sk) 5206 { 5207 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { 5208 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); 5209 if (sk->sk_socket && 5210 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) 5211 tcp_new_space(sk); 5212 } 5213 } 5214 5215 static inline void tcp_data_snd_check(struct sock *sk) 5216 { 5217 tcp_push_pending_frames(sk); 5218 tcp_check_space(sk); 5219 } 5220 5221 /* 5222 * Check if sending an ack is needed. 5223 */ 5224 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) 5225 { 5226 struct tcp_sock *tp = tcp_sk(sk); 5227 5228 /* More than one full frame received... */ 5229 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && 5230 /* ... and right edge of window advances far enough. 5231 * (tcp_recvmsg() will send ACK otherwise). Or... 5232 */ 5233 __tcp_select_window(sk) >= tp->rcv_wnd) || 5234 /* We ACK each frame or... */ 5235 tcp_in_quickack_mode(sk) || 5236 /* We have out of order data. */ 5237 (ofo_possible && skb_peek(&tp->out_of_order_queue))) { 5238 /* Then ack it now */ 5239 tcp_send_ack(sk); 5240 } else { 5241 /* Else, send delayed ack. */ 5242 tcp_send_delayed_ack(sk); 5243 } 5244 } 5245 5246 static inline void tcp_ack_snd_check(struct sock *sk) 5247 { 5248 if (!inet_csk_ack_scheduled(sk)) { 5249 /* We sent a data segment already. */ 5250 return; 5251 } 5252 __tcp_ack_snd_check(sk, 1); 5253 } 5254 5255 /* 5256 * This routine is only called when we have urgent data 5257 * signaled. Its the 'slow' part of tcp_urg. It could be 5258 * moved inline now as tcp_urg is only called from one 5259 * place. We handle URGent data wrong. We have to - as 5260 * BSD still doesn't use the correction from RFC961. 5261 * For 1003.1g we should support a new option TCP_STDURG to permit 5262 * either form (or just set the sysctl tcp_stdurg). 5263 */ 5264 5265 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) 5266 { 5267 struct tcp_sock *tp = tcp_sk(sk); 5268 u32 ptr = ntohs(th->urg_ptr); 5269 5270 if (ptr && !sysctl_tcp_stdurg) 5271 ptr--; 5272 ptr += ntohl(th->seq); 5273 5274 /* Ignore urgent data that we've already seen and read. */ 5275 if (after(tp->copied_seq, ptr)) 5276 return; 5277 5278 /* Do not replay urg ptr. 5279 * 5280 * NOTE: interesting situation not covered by specs. 5281 * Misbehaving sender may send urg ptr, pointing to segment, 5282 * which we already have in ofo queue. We are not able to fetch 5283 * such data and will stay in TCP_URG_NOTYET until will be eaten 5284 * by recvmsg(). Seems, we are not obliged to handle such wicked 5285 * situations. But it is worth to think about possibility of some 5286 * DoSes using some hypothetical application level deadlock. 5287 */ 5288 if (before(ptr, tp->rcv_nxt)) 5289 return; 5290 5291 /* Do we already have a newer (or duplicate) urgent pointer? */ 5292 if (tp->urg_data && !after(ptr, tp->urg_seq)) 5293 return; 5294 5295 /* Tell the world about our new urgent pointer. 
*/ 5296 sk_send_sigurg(sk); 5297 5298 /* We may be adding urgent data when the last byte read was 5299 * urgent. To do this requires some care. We cannot just ignore 5300 * tp->copied_seq since we would read the last urgent byte again 5301 * as data, nor can we alter copied_seq until this data arrives 5302 * or we break the semantics of SIOCATMARK (and thus sockatmark()) 5303 * 5304 * NOTE. Double Dutch. Rendering to plain English: author of comment 5305 * above did something sort of send("A", MSG_OOB); send("B", MSG_OOB); 5306 * and expect that both A and B disappear from stream. This is _wrong_. 5307 * Though this happens in BSD with high probability, this is occasional. 5308 * Any application relying on this is buggy. Note also, that fix "works" 5309 * only in this artificial test. Insert some normal data between A and B and we will 5310 * decline of BSD again. Verdict: it is better to remove to trap 5311 * buggy users. 5312 */ 5313 if (tp->urg_seq == tp->copied_seq && tp->urg_data && 5314 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { 5315 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 5316 tp->copied_seq++; 5317 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 5318 __skb_unlink(skb, &sk->sk_receive_queue); 5319 __kfree_skb(skb); 5320 } 5321 } 5322 5323 tp->urg_data = TCP_URG_NOTYET; 5324 tp->urg_seq = ptr; 5325 5326 /* Disable header prediction. */ 5327 tp->pred_flags = 0; 5328 } 5329 5330 /* This is the 'fast' part of urgent handling. */ 5331 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) 5332 { 5333 struct tcp_sock *tp = tcp_sk(sk); 5334 5335 /* Check if we get a new urgent pointer - normally not. */ 5336 if (th->urg) 5337 tcp_check_urg(sk, th); 5338 5339 /* Do we wait for any urgent data? - normally not... */ 5340 if (tp->urg_data == TCP_URG_NOTYET) { 5341 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - 5342 th->syn; 5343 5344 /* Is the urgent pointer pointing into this packet? 
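 *
 * ptr above is an offset from skb->data, which at this point still includes
 * the TCP header; e.g. with urg_seq 1005, a segment starting at seq 1000 and
 * a 20 byte header (no options), ptr is 25, i.e. the 6th payload byte.  The
 * th->syn correction accounts for the sequence number consumed by a SYN.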

/* This is the 'fast' part of urgent handling. */
static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Check if we get a new urgent pointer - normally not. */
	if (th->urg)
		tcp_check_urg(sk, th);

	/* Do we wait for any urgent data? - normally not... */
	if (tp->urg_data == TCP_URG_NOTYET) {
		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
			  th->syn;

		/* Is the urgent pointer pointing into this packet? */
		if (ptr < skb->len) {
			u8 tmp;
			if (skb_copy_bits(skb, ptr, &tmp, 1))
				BUG();
			tp->urg_data = TCP_URG_VALID | tmp;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_data_ready(sk, 0);
		}
	}
}

static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int chunk = skb->len - hlen;
	int err;

	local_bh_enable();
	if (skb_csum_unnecessary(skb))
		err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
	else
		err = skb_copy_and_csum_datagram_iovec(skb, hlen,
						       tp->ucopy.iov);

	if (!err) {
		tp->ucopy.len -= chunk;
		tp->copied_seq += chunk;
		tcp_rcv_space_adjust(sk);
	}

	local_bh_disable();
	return err;
}

static __sum16 __tcp_checksum_complete_user(struct sock *sk,
					    struct sk_buff *skb)
{
	__sum16 result;

	if (sock_owned_by_user(sk)) {
		local_bh_enable();
		result = __tcp_checksum_complete(skb);
		local_bh_disable();
	} else {
		result = __tcp_checksum_complete(skb);
	}
	return result;
}

static inline int tcp_checksum_complete_user(struct sock *sk,
					     struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
	       __tcp_checksum_complete_user(sk, skb);
}

#ifdef CONFIG_NET_DMA
static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
				  int hlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int chunk = skb->len - hlen;
	int dma_cookie;
	int copied_early = 0;

	if (tp->ucopy.wakeup)
		return 0;

	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
		tp->ucopy.dma_chan = net_dma_find_channel();

	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {

		dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
							 skb, hlen,
							 tp->ucopy.iov, chunk,
							 tp->ucopy.pinned_list);

		if (dma_cookie < 0)
			goto out;

		tp->ucopy.dma_cookie = dma_cookie;
		copied_early = 1;

		tp->ucopy.len -= chunk;
		tp->copied_seq += chunk;
		tcp_rcv_space_adjust(sk);

		if ((tp->ucopy.len == 0) ||
		    (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
			tp->ucopy.wakeup = 1;
			sk->sk_data_ready(sk, 0);
		}
	} else if (chunk > 0) {
		tp->ucopy.wakeup = 1;
		sk->sk_data_ready(sk, 0);
	}
out:
	return copied_early;
}
#endif /* CONFIG_NET_DMA */
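
/* A note on "Step 1" below: tcp_sequence() is roughly the RFC 793
 * (page 69) acceptability test, i.e. at least part of the segment must
 * fall inside the currently offered receive window. An unacceptable
 * segment is dropped and, unless it carries RST, answered with a
 * duplicate ACK so the peer can resynchronize.
 */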

/* Does PAWS and seqno based validation of an incoming segment, flags will
 * play significant role here.
 */
static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
				 const struct tcphdr *th, int syn_inerr)
{
	const u8 *hash_location;
	struct tcp_sock *tp = tcp_sk(sk);

	/* RFC1323: H1. Apply PAWS check first. */
	if (tcp_fast_parse_options(skb, th, tp, &hash_location) &&
	    tp->rx_opt.saw_tstamp &&
	    tcp_paws_discard(sk, skb)) {
		if (!th->rst) {
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
			tcp_send_dupack(sk, skb);
			goto discard;
		}
		/* Reset is accepted even if it did not pass PAWS. */
	}

	/* Step 1: check sequence number */
	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
		/* RFC793, page 37: "In all states except SYN-SENT, all reset
		 * (RST) segments are validated by checking their SEQ-fields."
		 * And page 69: "If an incoming segment is not acceptable,
		 * an acknowledgment should be sent in reply (unless the RST
		 * bit is set, if so drop the segment and return)".
		 */
		if (!th->rst)
			tcp_send_dupack(sk, skb);
		goto discard;
	}

	/* Step 2: check RST bit */
	if (th->rst) {
		tcp_reset(sk);
		goto discard;
	}

	/* ts_recent update must be made after we are sure that the packet
	 * is in window.
	 */
	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);

	/* step 3: check security and precedence [ignored] */

	/* step 4: Check for a SYN in window. */
	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
		if (syn_inerr)
			TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
		tcp_reset(sk);
		return -1;
	}

	return 1;

discard:
	__kfree_skb(skb);
	return 0;
}

/*
 *	TCP receive function for the ESTABLISHED state.
 *
 *	It is split into a fast path and a slow path. The fast path is
 *	disabled when:
 *	- A zero window was announced from us - zero window probing
 *	  is only handled properly in the slow path.
 *	- Out of order segments arrived.
 *	- Urgent data is expected.
 *	- There is no buffer space left.
 *	- Unexpected TCP flags/window values/header lengths are received
 *	  (detected by checking the TCP header against pred_flags).
 *	- Data is sent in both directions. The fast path only supports pure
 *	  senders or pure receivers (this means either the sequence number
 *	  or the ack value must stay constant).
 *	- Unexpected TCP option.
 *
 *	When these conditions are not satisfied it drops into a standard
 *	receive procedure patterned after RFC793 to handle all cases.
 *	The first three cases are guaranteed by proper pred_flags setting,
 *	the rest is checked inline. Fast processing is turned on in
 *	tcp_data_queue when everything is OK.
 */
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			const struct tcphdr *th, unsigned int len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int res;

	/*
	 *	Header prediction.
	 *	The code loosely follows the one in the famous
	 *	"30 instruction TCP receive" Van Jacobson mail.
	 *
	 *	Van's trick is to deposit buffers into socket queue
	 *	on a device interrupt, to call tcp_recv function
	 *	on the receive process context and checksum and copy
	 *	the buffer to user space. smart...
	 *
	 *	Our current scheme is not silly either but we take the
	 *	extra cost of the net_bh soft interrupt processing...
	 *	We do checksum and copy also but from device to kernel.
	 */

	tp->rx_opt.saw_tstamp = 0;

	/*	pred_flags is 0xS?10 << 16 + snd_wnd
	 *	if header_prediction is to be made
	 *	'S' will always be tp->tcp_header_len >> 2
	 *	'?' will be 0 for the fast path, otherwise pred_flags is 0 to
	 *	turn it off (when there are holes in the receive
	 *	space for instance)
	 *	PSH flag is ignored.
	 */
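
	/* Concrete example (illustrative values): with timestamps in use
	 * the header is 20 + 12 = 32 bytes, so th->doff is 8 and a
	 * fast-path candidate carries 0x8010 in the top half of the flag
	 * word (doff 8, reserved bits clear, only ACK set; PSH is masked
	 * out via TCP_HP_BITS) and the expected snd_wnd in the bottom
	 * half. Any other flag, a different header length or a changed
	 * window makes the comparison below fail and we take the slow
	 * path.
	 */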
5574 */ 5575 5576 /* Check timestamp */ 5577 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 5578 /* No? Slow path! */ 5579 if (!tcp_parse_aligned_timestamp(tp, th)) 5580 goto slow_path; 5581 5582 /* If PAWS failed, check it more carefully in slow path */ 5583 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 5584 goto slow_path; 5585 5586 /* DO NOT update ts_recent here, if checksum fails 5587 * and timestamp was corrupted part, it will result 5588 * in a hung connection since we will drop all 5589 * future packets due to the PAWS test. 5590 */ 5591 } 5592 5593 if (len <= tcp_header_len) { 5594 /* Bulk data transfer: sender */ 5595 if (len == tcp_header_len) { 5596 /* Predicted packet is in window by definition. 5597 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5598 * Hence, check seq<=rcv_wup reduces to: 5599 */ 5600 if (tcp_header_len == 5601 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 5602 tp->rcv_nxt == tp->rcv_wup) 5603 tcp_store_ts_recent(tp); 5604 5605 /* We know that such packets are checksummed 5606 * on entry. 5607 */ 5608 tcp_ack(sk, skb, 0); 5609 __kfree_skb(skb); 5610 tcp_data_snd_check(sk); 5611 return 0; 5612 } else { /* Header too small */ 5613 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 5614 goto discard; 5615 } 5616 } else { 5617 int eaten = 0; 5618 int copied_early = 0; 5619 bool fragstolen = false; 5620 5621 if (tp->copied_seq == tp->rcv_nxt && 5622 len - tcp_header_len <= tp->ucopy.len) { 5623 #ifdef CONFIG_NET_DMA 5624 if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { 5625 copied_early = 1; 5626 eaten = 1; 5627 } 5628 #endif 5629 if (tp->ucopy.task == current && 5630 sock_owned_by_user(sk) && !copied_early) { 5631 __set_current_state(TASK_RUNNING); 5632 5633 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) 5634 eaten = 1; 5635 } 5636 if (eaten) { 5637 /* Predicted packet is in window by definition. 5638 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5639 * Hence, check seq<=rcv_wup reduces to: 5640 */ 5641 if (tcp_header_len == 5642 (sizeof(struct tcphdr) + 5643 TCPOLEN_TSTAMP_ALIGNED) && 5644 tp->rcv_nxt == tp->rcv_wup) 5645 tcp_store_ts_recent(tp); 5646 5647 tcp_rcv_rtt_measure_ts(sk, skb); 5648 5649 __skb_pull(skb, tcp_header_len); 5650 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 5651 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); 5652 } 5653 if (copied_early) 5654 tcp_cleanup_rbuf(sk, skb->len); 5655 } 5656 if (!eaten) { 5657 if (tcp_checksum_complete_user(sk, skb)) 5658 goto csum_error; 5659 5660 /* Predicted packet is in window by definition. 5661 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 5662 * Hence, check seq<=rcv_wup reduces to: 5663 */ 5664 if (tcp_header_len == 5665 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 5666 tp->rcv_nxt == tp->rcv_wup) 5667 tcp_store_ts_recent(tp); 5668 5669 tcp_rcv_rtt_measure_ts(sk, skb); 5670 5671 if ((int)skb->truesize > sk->sk_forward_alloc) 5672 goto step5; 5673 5674 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); 5675 5676 /* Bulk data transfer: receiver */ 5677 eaten = tcp_queue_rcv(sk, skb, tcp_header_len, 5678 &fragstolen); 5679 } 5680 5681 tcp_event_data_recv(sk, skb); 5682 5683 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { 5684 /* Well, only one small jumplet in fast path... 
			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
				/* Well, only one small jumplet in fast path... */
				tcp_ack(sk, skb, FLAG_DATA);
				tcp_data_snd_check(sk);
				if (!inet_csk_ack_scheduled(sk))
					goto no_ack;
			}

			if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
				__tcp_ack_snd_check(sk, 0);
no_ack:
#ifdef CONFIG_NET_DMA
			if (copied_early)
				__skb_queue_tail(&sk->sk_async_wait_queue, skb);
			else
#endif
			if (eaten)
				kfree_skb_partial(skb, fragstolen);
			else
				sk->sk_data_ready(sk, 0);
			return 0;
		}
	}

slow_path:
	if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
		goto csum_error;

	/*
	 *	Standard slow path.
	 */

	res = tcp_validate_incoming(sk, skb, th, 1);
	if (res <= 0)
		return -res;

step5:
	if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
		goto discard;

	tcp_rcv_rtt_measure_ts(sk, skb);

	/* Process urgent data. */
	tcp_urg(sk, skb, th);

	/* step 7: process the segment text */
	tcp_data_queue(sk, skb);

	tcp_data_snd_check(sk);
	tcp_ack_snd_check(sk);
	return 0;

csum_error:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);

discard:
	__kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(tcp_rcv_established);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_set_state(sk, TCP_ESTABLISHED);

	if (skb != NULL)
		security_inet_conn_established(sk, skb);

	/* Make sure socket is routed, for correct metrics. */
	icsk->icsk_af_ops->rebuild_header(sk);

	tcp_init_metrics(sk);

	tcp_init_congestion_control(sk);

	/* Prevent spurious tcp_cwnd_restart() on first data
	 * packet.
	 */
	tp->lsndtime = tcp_time_stamp;

	tcp_init_buffer_space(sk);

	if (sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));

	if (!tp->rx_opt.snd_wscale)
		__tcp_fast_path_on(tp, tp->snd_wnd);
	else
		tp->pred_flags = 0;

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);
		sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
	}
}

static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
					 const struct tcphdr *th, unsigned int len)
{
	const u8 *hash_location;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_cookie_values *cvp = tp->cookie_values;
	int saved_clamp = tp->rx_opt.mss_clamp;

	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);

	if (th->ack) {
		/* rfc793:
		 * "If the state is SYN-SENT then
		 *    first check the ACK bit
		 *      If the ACK bit is set
		 *	  If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
		 *        a reset (unless the RST bit is set, if so drop
		 *        the segment and return)"
		 *
		 *  We do not send data with SYN, so that RFC-correct
		 *  test reduces to:
		 */
		if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
			goto reset_and_undo;

		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
			     tcp_time_stamp)) {
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
			goto reset_and_undo;
		}

		/* Now ACK is acceptable.
		 *
		 * "If the RST bit is set
		 *    If the ACK was acceptable then signal the user "error:
		 *    connection reset", drop the segment, enter CLOSED state,
		 *    delete TCB, and return."
		 */

		if (th->rst) {
			tcp_reset(sk);
			goto discard;
		}

		/* rfc793:
		 *   "fifth, if neither of the SYN or RST bits is set then
		 *    drop the segment and return."
		 *
		 *    See note below!
		 *                                        --ANK(990513)
		 */
		if (!th->syn)
			goto discard_and_undo;

		/* rfc793:
		 *   "If the SYN bit is on ...
		 *    are acceptable then ...
		 *    (our SYN has been ACKed), change the connection
		 *    state to ESTABLISHED..."
		 */

		TCP_ECN_rcv_synack(tp, th);

		tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
		tcp_ack(sk, skb, FLAG_SLOWPATH);

		/* Ok.. it's good. Set up sequence numbers and
		 * move to established.
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;

		/* RFC1323: The window in SYN & SYN/ACK segments is
		 * never scaled.
		 */
		tp->snd_wnd = ntohs(th->window);
		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);

		if (!tp->rx_opt.wscale_ok) {
			tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
			tp->window_clamp = min(tp->window_clamp, 65535U);
		}

		if (tp->rx_opt.saw_tstamp) {
			tp->rx_opt.tstamp_ok = 1;
			tp->tcp_header_len =
				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
			tcp_store_ts_recent(tp);
		} else {
			tp->tcp_header_len = sizeof(struct tcphdr);
		}

		if (tcp_is_sack(tp) && sysctl_tcp_fack)
			tcp_enable_fack(tp);

		tcp_mtup_init(sk);
		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		tcp_initialize_rcv_mss(sk);

		/* Remember, tcp_poll() does not lock socket!
		 * Change state from SYN-SENT only after copied_seq
		 * is initialized.
		 */
		tp->copied_seq = tp->rcv_nxt;

		if (cvp != NULL &&
		    cvp->cookie_pair_size > 0 &&
		    tp->rx_opt.cookie_plus > 0) {
			int cookie_size = tp->rx_opt.cookie_plus
					- TCPOLEN_COOKIE_BASE;
			int cookie_pair_size = cookie_size
					     + cvp->cookie_desired;

			/* A cookie extension option was sent and returned.
			 * Note that each incoming SYNACK replaces the
			 * Responder cookie. The initial exchange is most
			 * fragile, as protection against spoofing relies
			 * entirely upon the sequence and timestamp (above).
			 * This replacement strategy allows the correct pair to
			 * pass through, while any others will be filtered via
			 * Responder verification later.
			 */
			if (sizeof(cvp->cookie_pair) >= cookie_pair_size) {
				memcpy(&cvp->cookie_pair[cvp->cookie_desired],
				       hash_location, cookie_size);
				cvp->cookie_pair_size = cookie_pair_size;
			}
		}

		smp_mb();

		tcp_finish_connect(sk, skb);

		if (sk->sk_write_pending ||
		    icsk->icsk_accept_queue.rskq_defer_accept ||
		    icsk->icsk_ack.pingpong) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			inet_csk_schedule_ack(sk);
			icsk->icsk_ack.lrcvtime = tcp_time_stamp;
			tcp_enter_quickack_mode(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX, TCP_RTO_MAX);

discard:
			__kfree_skb(skb);
			return 0;
		} else {
			tcp_send_ack(sk);
		}
		return -1;
	}

	/* No ACK in the segment */

	if (th->rst) {
		/* rfc793:
		 * "If the RST bit is set
		 *
		 *      Otherwise (no ACK) drop the segment and return."
		 */

		goto discard_and_undo;
	}

	/* PAWS check. */
	if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
	    tcp_paws_reject(&tp->rx_opt, 0))
		goto discard_and_undo;

	if (th->syn) {
		/* We see a SYN without an ACK. It is an attempt at a
		 * simultaneous connect with crossed SYNs.
		 * In particular, it can be a connect to self.
		 */
		tcp_set_state(sk, TCP_SYN_RECV);

		if (tp->rx_opt.saw_tstamp) {
			tp->rx_opt.tstamp_ok = 1;
			tcp_store_ts_recent(tp);
			tp->tcp_header_len =
				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			tp->tcp_header_len = sizeof(struct tcphdr);
		}

		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;

		/* RFC1323: The window in SYN & SYN/ACK segments is
		 * never scaled.
		 */
		tp->snd_wnd    = ntohs(th->window);
		tp->snd_wl1    = TCP_SKB_CB(skb)->seq;
		tp->max_window = tp->snd_wnd;

		TCP_ECN_rcv_syn(tp, th);

		tcp_mtup_init(sk);
		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
		tcp_initialize_rcv_mss(sk);

		tcp_send_synack(sk);
#if 0
		/* Note, we could accept data and URG from this segment.
		 * There are no obstacles to doing so.
		 *
		 * However, if we sometimes ignore data in ACKless segments,
		 * we have no reason to accept it at other times.
		 * Also, the code doing this in step6 of tcp_rcv_state_process
		 * seems not to be flawless. So, discard the packet for sanity.
		 * Uncomment this return to process the data.
		 */
		return -1;
#else
		goto discard;
#endif
	}
	/* "fifth, if neither of the SYN or RST bits is set then
	 * drop the segment and return."
	 */

discard_and_undo:
	tcp_clear_options(&tp->rx_opt);
	tp->rx_opt.mss_clamp = saved_clamp;
	goto discard;

reset_and_undo:
	tcp_clear_options(&tp->rx_opt);
	tp->rx_opt.mss_clamp = saved_clamp;
	return 1;
}
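
/* Return convention of the SYN-SENT handler above, as consumed by
 * tcp_rcv_state_process() below: a positive value asks the caller to treat
 * the segment as fatal (typically ending in a reset), 0 means the segment
 * was consumed and freed here, and -1 means the connection is established
 * but the caller still runs the remaining RFC 793 steps (URG processing)
 * and frees the skb itself.
 */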

/*
 *	This function implements the receiving procedure of RFC 793 for
 *	all states except ESTABLISHED and TIME_WAIT.
 *	It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
 *	address independent.
 */

int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			  const struct tcphdr *th, unsigned int len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int queued = 0;
	int res;

	tp->rx_opt.saw_tstamp = 0;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		goto discard;

	case TCP_LISTEN:
		if (th->ack)
			return 1;

		if (th->rst)
			goto discard;

		if (th->syn) {
			if (th->fin)
				goto discard;
			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
				return 1;

			/* Now we have several options: In theory there is
			 * nothing else in the frame. KA9Q has an option to
			 * send data with the syn, BSD accepts data with the
			 * syn up to the [to be] advertised window and
			 * Solaris 2.1 gives you a protocol error. For now
			 * we just ignore it, that fits the spec precisely
			 * and avoids incompatibilities. It would be nice in
			 * future to drop through and process the data.
			 *
			 * Now that TTCP is starting to be used we ought to
			 * queue this data.
			 * But, this leaves one open to an easy denial of
			 * service attack, and SYN cookies can't defend
			 * against this problem. So, we drop the data
			 * in the interest of security over speed unless
			 * it's still in use.
			 */
			kfree_skb(skb);
			return 0;
		}
		goto discard;

	case TCP_SYN_SENT:
		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
		if (queued >= 0)
			return queued;

		/* Do step6 onward by hand. */
		tcp_urg(sk, skb, th);
		__kfree_skb(skb);
		tcp_data_snd_check(sk);
		return 0;
	}

	res = tcp_validate_incoming(sk, skb, th, 0);
	if (res <= 0)
		return -res;

	/* step 5: check the ACK field */
	if (th->ack) {
		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;

		switch (sk->sk_state) {
		case TCP_SYN_RECV:
			if (acceptable) {
				tp->copied_seq = tp->rcv_nxt;
				smp_mb();
				tcp_set_state(sk, TCP_ESTABLISHED);
				sk->sk_state_change(sk);

				/* Note that this wakeup is only for the
				 * marginal crossed-SYN case. Passively open
				 * sockets are not woken up, because
				 * sk->sk_sleep == NULL and
				 * sk->sk_socket == NULL.
				 */
				if (sk->sk_socket)
					sk_wake_async(sk,
						      SOCK_WAKE_IO, POLL_OUT);

				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
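				/* Unlike the SYN/ACK handled in the SYN-SENT
				 * path, this ACK arrives after the handshake
				 * SYNs, so its window field is scaled. As an
				 * illustrative example, a raw window of 512
				 * with a negotiated snd_wscale of 7 gives an
				 * effective send window of 512 << 7 = 65536
				 * bytes.
				 */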
				tp->snd_wnd = ntohs(th->window) <<
					      tp->rx_opt.snd_wscale;
				tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);

				if (tp->rx_opt.tstamp_ok)
					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;

				/* Make sure socket is routed, for
				 * correct metrics.
				 */
				icsk->icsk_af_ops->rebuild_header(sk);

				tcp_init_metrics(sk);

				tcp_init_congestion_control(sk);

				/* Prevent spurious tcp_cwnd_restart() on
				 * first data packet.
				 */
				tp->lsndtime = tcp_time_stamp;

				tcp_mtup_init(sk);
				tcp_initialize_rcv_mss(sk);
				tcp_init_buffer_space(sk);
				tcp_fast_path_on(tp);
			} else {
				return 1;
			}
			break;

		case TCP_FIN_WAIT1:
			if (tp->snd_una == tp->write_seq) {
				tcp_set_state(sk, TCP_FIN_WAIT2);
				sk->sk_shutdown |= SEND_SHUTDOWN;
				dst_confirm(__sk_dst_get(sk));

				if (!sock_flag(sk, SOCK_DEAD))
					/* Wake up lingering close() */
					sk->sk_state_change(sk);
				else {
					int tmo;

					if (tp->linger2 < 0 ||
					    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
					     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
						tcp_done(sk);
						NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
						return 1;
					}

					tmo = tcp_fin_time(sk);
					if (tmo > TCP_TIMEWAIT_LEN) {
						inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
					} else if (th->fin || sock_owned_by_user(sk)) {
						/* Bad case. We could lose such a FIN otherwise.
						 * It is not a big problem, but it looks confusing
						 * and is not such a rare event. We can still lose
						 * it now, if it spins in bh_lock_sock(), but that
						 * is a really marginal case.
						 */
						inet_csk_reset_keepalive_timer(sk, tmo);
					} else {
						tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
						goto discard;
					}
				}
			}
			break;

		case TCP_CLOSING:
			if (tp->snd_una == tp->write_seq) {
				tcp_time_wait(sk, TCP_TIME_WAIT, 0);
				goto discard;
			}
			break;

		case TCP_LAST_ACK:
			if (tp->snd_una == tp->write_seq) {
				tcp_update_metrics(sk);
				tcp_done(sk);
				goto discard;
			}
			break;
		}
	} else
		goto discard;

	/* step 6: check the URG bit */
	tcp_urg(sk, skb, th);

	/* step 7: process the segment text */
	switch (sk->sk_state) {
	case TCP_CLOSE_WAIT:
	case TCP_CLOSING:
	case TCP_LAST_ACK:
		if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
			break;
	case TCP_FIN_WAIT1:
	case TCP_FIN_WAIT2:
		/* RFC 793 says to queue data in these states,
		 * RFC 1122 says we MUST send a reset.
		 * BSD 4.4 also does reset.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
				tcp_reset(sk);
				return 1;
			}
		}
		/* Fall through */
	case TCP_ESTABLISHED:
		tcp_data_queue(sk, skb);
		queued = 1;
		break;
	}

	/* tcp_data could move socket to TIME-WAIT */
	if (sk->sk_state != TCP_CLOSE) {
		tcp_data_snd_check(sk);
		tcp_ack_snd_check(sk);
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return 0;
}
EXPORT_SYMBOL(tcp_rcv_state_process);