/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *		Pedro Roque	:	Fast Retransmit/Recovery.
 *					Two receive queues.
 *					Retransmit queue handled by TCP.
 *					Better retransmit timer handling.
 *					New congestion avoidance.
 *					Header prediction.
 *					Variable renaming.
 *
 *		Eric		:	Fast Retransmit.
 *		Randy Scott	:	MSS option defines.
 *		Eric Schenk	:	Fixes to slow start algorithm.
 *		Eric Schenk	:	Yet another double ACK bug.
 *		Eric Schenk	:	Delayed ACK bug fixes.
 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
 *		David S. Miller	:	Don't allow zero congestion window.
 *		Eric Schenk	:	Fix retransmitter so that it sends
 *					next packet on ack of previous packet.
 *		Andi Kleen	:	Moved open_request checking here
 *					and process RSTs for open_requests.
 *		Andi Kleen	:	Better prune_queue, and other fixes.
 *		Andrey Savochkin:	Fix RTT measurements in the presence of
 *					timestamps.
 *		Andrey Savochkin:	Check sequence numbers correctly when
 *					removing SACKs due to in sequence incoming
 *					data segments.
 *		Andi Kleen:		Make sure we never ack data there is not
 *					enough room for. Also make this condition
 *					a fatal error if it might still happen.
 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
 *					connections with MSS<min(MTU,ann. MSS)
 *					work without delayed acks.
 *		Andi Kleen:		Process packets with PSH set in the
 *					fast path.
 *		J Hadi Salim:		ECN support
 *		Andrei Gurtov,
 *		Pasi Sarolahti,
 *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
 *					engine. Lots of bugs are found.
 *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <net/netdma.h>

int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
int sysctl_tcp_ecn __read_mostly;
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 2;

int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_frto_response __read_mostly;
int sysctl_tcp_nometrics_save __read_mostly;

int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;
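
/* For orientation: the FLAG_* values below are OR'ed into a single "flag"
 * word as an incoming ACK is processed in this file; the composite masks
 * further down are plain bitwise combinations of them, e.g.
 * FLAG_ACKED = FLAG_DATA_ACKED | FLAG_SYN_ACKED = 0x14 and
 * FLAG_NOT_DUP = FLAG_DATA | FLAG_WIN_UPDATE | FLAG_ACKED = 0x17.
 */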

#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
#define FLAG_ECE		0x40 /* ECE in this ACK				*/
#define FLAG_DATA_LOST		0x80 /* SACK detected data lossage.		*/
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */
#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
#define FLAG_NONHEAD_RETRANS_ACKED	0x1000 /* Non-head rexmitted data was ACKed */
#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */

#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
#define FLAG_ANY_PROGRESS	(FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)

#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))

/* Adapt the MSS value used to make delayed ack decisions to the
 * real world.
 */
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const unsigned int lss = icsk->icsk_ack.last_seg_size;
	unsigned int len;

	icsk->icsk_ack.last_seg_size = 0;

	/* skb->len may jitter because of SACKs, even if peer
	 * sends good full-sized frames.
	 */
	len = skb_shinfo(skb)->gso_size ? : skb->len;
	if (len >= icsk->icsk_ack.rcv_mss) {
		icsk->icsk_ack.rcv_mss = len;
	} else {
		/* Otherwise, we make a more careful check, taking into
		 * account that SACK blocks are variable in size.
		 *
		 * "len" is the invariant segment length, including the
		 * TCP header.
		 */
		len += skb->data - skb_transport_header(skb);
		if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
		    /* If PSH is not set, the packet should be
		     * full sized, provided the peer TCP is not badly broken.
		     * This observation (if it is correct 8)) allows us
		     * to handle super-low mtu links fairly.
		     */
		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
			/* Subtract also the invariant (if the peer is RFC
			 * compliant) TCP header plus the fixed timestamp
			 * option length.
			 * The resulting "len" is the MSS, free of SACK jitter.
			 */
			len -= tcp_sk(sk)->tcp_header_len;
			icsk->icsk_ack.last_seg_size = len;
			if (len == lss) {
				icsk->icsk_ack.rcv_mss = len;
				return;
			}
		}
		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
	}
}
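
/* Worked example for tcp_measure_rcv_mss() above (the numbers are
 * illustrative, not from the source): assume timestamps are in use, so
 * tp->tcp_header_len is 32 (20 byte base header + 12 bytes of timestamp
 * option).  A peer whose real MSS is 1000 keeps sending full 1000 byte
 * segments while rcv_mss is still stuck at a larger value: skb->len (1000)
 * is below rcv_mss, so the careful branch runs, len becomes 1000 + 32 =
 * 1032, the size/PSH checks pass, and 1032 - tcp_header_len = 1000 is
 * remembered in last_seg_size; once two consecutive segments agree,
 * rcv_mss is lowered to 1000.
 */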

static void tcp_incr_quickack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > icsk->icsk_ack.quick)
		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

void tcp_enter_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	tcp_incr_quickack(sk);
	icsk->icsk_ack.pingpong = 0;
	icsk->icsk_ack.ato = TCP_ATO_MIN;
}

/* Send ACKs quickly, if the "quick" count is not exhausted
 * and the session is not interactive.
 */

static inline int tcp_in_quickack_mode(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}

static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
	if (tp->ecn_flags & TCP_ECN_OK)
		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}

static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (tcp_hdr(skb)->cwr)
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
{
	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
{
	if (tp->ecn_flags & TCP_ECN_OK) {
		if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
		/* Funny extension: if ECT is not set on a segment,
		 * it is surely a retransmit. It is not in the ECN RFC,
		 * but Linux follows this rule. */
		else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
			tcp_enter_quickack_mode((struct sock *)tp);
	}
}

static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
{
	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
		tp->ecn_flags &= ~TCP_ECN_OK;
}

static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
{
	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
		return 1;
	return 0;
}

/* Buffer size and advertised window tuning.
 *
 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 */

static void tcp_fixup_sndbuf(struct sock *sk)
{
	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
		     sizeof(struct sk_buff);

	if (sk->sk_sndbuf < 3 * sndmem)
		sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
}

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 *
 * All of tcp_full_space() is split into two parts: the "network" buffer,
 * allocated forward and advertised in the receiver window (tp->rcv_wnd),
 * and the "application buffer", required to isolate scheduling/application
 * latencies from the network.
 * window_clamp is the maximal advertised window.
It can be less than 266 * tcp_full_space(), in this case tcp_full_space() - window_clamp 267 * is reserved for "application" buffer. The less window_clamp is 268 * the smoother our behaviour from viewpoint of network, but the lower 269 * throughput and the higher sensitivity of the connection to losses. 8) 270 * 271 * rcv_ssthresh is more strict window_clamp used at "slow start" 272 * phase to predict further behaviour of this connection. 273 * It is used for two goals: 274 * - to enforce header prediction at sender, even when application 275 * requires some significant "application buffer". It is check #1. 276 * - to prevent pruning of receive queue because of misprediction 277 * of receiver window. Check #2. 278 * 279 * The scheme does not work when sender sends good segments opening 280 * window and then starts to feed us spaghetti. But it should work 281 * in common situations. Otherwise, we have to rely on queue collapsing. 282 */ 283 284 /* Slow part of check#2. */ 285 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) 286 { 287 struct tcp_sock *tp = tcp_sk(sk); 288 /* Optimize this! */ 289 int truesize = tcp_win_from_space(skb->truesize) >> 1; 290 int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1; 291 292 while (tp->rcv_ssthresh <= window) { 293 if (truesize <= skb->len) 294 return 2 * inet_csk(sk)->icsk_ack.rcv_mss; 295 296 truesize >>= 1; 297 window >>= 1; 298 } 299 return 0; 300 } 301 302 static void tcp_grow_window(struct sock *sk, struct sk_buff *skb) 303 { 304 struct tcp_sock *tp = tcp_sk(sk); 305 306 /* Check #1 */ 307 if (tp->rcv_ssthresh < tp->window_clamp && 308 (int)tp->rcv_ssthresh < tcp_space(sk) && 309 !tcp_memory_pressure) { 310 int incr; 311 312 /* Check #2. Increase window, if skb with such overhead 313 * will fit to rcvbuf in future. 314 */ 315 if (tcp_win_from_space(skb->truesize) <= skb->len) 316 incr = 2 * tp->advmss; 317 else 318 incr = __tcp_grow_window(sk, skb); 319 320 if (incr) { 321 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, 322 tp->window_clamp); 323 inet_csk(sk)->icsk_ack.quick |= 1; 324 } 325 } 326 } 327 328 /* 3. Tuning rcvbuf, when connection enters established state. */ 329 330 static void tcp_fixup_rcvbuf(struct sock *sk) 331 { 332 struct tcp_sock *tp = tcp_sk(sk); 333 int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff); 334 335 /* Try to select rcvbuf so that 4 mss-sized segments 336 * will fit to window and corresponding skbs will fit to our rcvbuf. 337 * (was 3; 4 is minimum to allow fast retransmit to work.) 338 */ 339 while (tcp_win_from_space(rcvmem) < tp->advmss) 340 rcvmem += 128; 341 if (sk->sk_rcvbuf < 4 * rcvmem) 342 sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]); 343 } 344 345 /* 4. Try to fixup all. It is made immediately after connection enters 346 * established state. 347 */ 348 static void tcp_init_buffer_space(struct sock *sk) 349 { 350 struct tcp_sock *tp = tcp_sk(sk); 351 int maxwin; 352 353 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) 354 tcp_fixup_rcvbuf(sk); 355 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) 356 tcp_fixup_sndbuf(sk); 357 358 tp->rcvq_space.space = tp->rcv_wnd; 359 360 maxwin = tcp_full_space(sk); 361 362 if (tp->window_clamp >= maxwin) { 363 tp->window_clamp = maxwin; 364 365 if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss) 366 tp->window_clamp = max(maxwin - 367 (maxwin >> sysctl_tcp_app_win), 368 4 * tp->advmss); 369 } 370 371 /* Force reservation of one segment. 
*/ 372 if (sysctl_tcp_app_win && 373 tp->window_clamp > 2 * tp->advmss && 374 tp->window_clamp + tp->advmss > maxwin) 375 tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); 376 377 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); 378 tp->snd_cwnd_stamp = tcp_time_stamp; 379 } 380 381 /* 5. Recalculate window clamp after socket hit its memory bounds. */ 382 static void tcp_clamp_window(struct sock *sk) 383 { 384 struct tcp_sock *tp = tcp_sk(sk); 385 struct inet_connection_sock *icsk = inet_csk(sk); 386 387 icsk->icsk_ack.quick = 0; 388 389 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && 390 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && 391 !tcp_memory_pressure && 392 atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { 393 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), 394 sysctl_tcp_rmem[2]); 395 } 396 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) 397 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); 398 } 399 400 /* Initialize RCV_MSS value. 401 * RCV_MSS is an our guess about MSS used by the peer. 402 * We haven't any direct information about the MSS. 403 * It's better to underestimate the RCV_MSS rather than overestimate. 404 * Overestimations make us ACKing less frequently than needed. 405 * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss(). 406 */ 407 void tcp_initialize_rcv_mss(struct sock *sk) 408 { 409 struct tcp_sock *tp = tcp_sk(sk); 410 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); 411 412 hint = min(hint, tp->rcv_wnd / 2); 413 hint = min(hint, TCP_MIN_RCVMSS); 414 hint = max(hint, TCP_MIN_MSS); 415 416 inet_csk(sk)->icsk_ack.rcv_mss = hint; 417 } 418 419 /* Receiver "autotuning" code. 420 * 421 * The algorithm for RTT estimation w/o timestamps is based on 422 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL. 423 * <http://www.lanl.gov/radiant/website/pubs/drs/lacsi2001.ps> 424 * 425 * More detail on this code can be found at 426 * <http://www.psc.edu/~jheffner/senior_thesis.ps>, 427 * though this reference is out of date. A new paper 428 * is pending. 429 */ 430 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) 431 { 432 u32 new_sample = tp->rcv_rtt_est.rtt; 433 long m = sample; 434 435 if (m == 0) 436 m = 1; 437 438 if (new_sample != 0) { 439 /* If we sample in larger samples in the non-timestamp 440 * case, we could grossly overestimate the RTT especially 441 * with chatty applications or bulk transfer apps which 442 * are stalled on filesystem I/O. 443 * 444 * Also, since we are only going for a minimum in the 445 * non-timestamp case, we do not smooth things out 446 * else with timestamps disabled convergence takes too 447 * long. 448 */ 449 if (!win_dep) { 450 m -= (new_sample >> 3); 451 new_sample += m; 452 } else if (m < new_sample) 453 new_sample = m << 3; 454 } else { 455 /* No previous measure. 
*/ 456 new_sample = m << 3; 457 } 458 459 if (tp->rcv_rtt_est.rtt != new_sample) 460 tp->rcv_rtt_est.rtt = new_sample; 461 } 462 463 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) 464 { 465 if (tp->rcv_rtt_est.time == 0) 466 goto new_measure; 467 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) 468 return; 469 tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1); 470 471 new_measure: 472 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; 473 tp->rcv_rtt_est.time = tcp_time_stamp; 474 } 475 476 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, 477 const struct sk_buff *skb) 478 { 479 struct tcp_sock *tp = tcp_sk(sk); 480 if (tp->rx_opt.rcv_tsecr && 481 (TCP_SKB_CB(skb)->end_seq - 482 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) 483 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); 484 } 485 486 /* 487 * This function should be called every time data is copied to user space. 488 * It calculates the appropriate TCP receive buffer space. 489 */ 490 void tcp_rcv_space_adjust(struct sock *sk) 491 { 492 struct tcp_sock *tp = tcp_sk(sk); 493 int time; 494 int space; 495 496 if (tp->rcvq_space.time == 0) 497 goto new_measure; 498 499 time = tcp_time_stamp - tp->rcvq_space.time; 500 if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) 501 return; 502 503 space = 2 * (tp->copied_seq - tp->rcvq_space.seq); 504 505 space = max(tp->rcvq_space.space, space); 506 507 if (tp->rcvq_space.space != space) { 508 int rcvmem; 509 510 tp->rcvq_space.space = space; 511 512 if (sysctl_tcp_moderate_rcvbuf && 513 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { 514 int new_clamp = space; 515 516 /* Receive space grows, normalize in order to 517 * take into account packet headers and sk_buff 518 * structure overhead. 519 */ 520 space /= tp->advmss; 521 if (!space) 522 space = 1; 523 rcvmem = (tp->advmss + MAX_TCP_HEADER + 524 16 + sizeof(struct sk_buff)); 525 while (tcp_win_from_space(rcvmem) < tp->advmss) 526 rcvmem += 128; 527 space *= rcvmem; 528 space = min(space, sysctl_tcp_rmem[2]); 529 if (space > sk->sk_rcvbuf) { 530 sk->sk_rcvbuf = space; 531 532 /* Make the window clamp follow along. */ 533 tp->window_clamp = new_clamp; 534 } 535 } 536 } 537 538 new_measure: 539 tp->rcvq_space.seq = tp->copied_seq; 540 tp->rcvq_space.time = tcp_time_stamp; 541 } 542 543 /* There is something which you must keep in mind when you analyze the 544 * behavior of the tp->ato delayed ack timeout interval. When a 545 * connection starts up, we want to ack as quickly as possible. The 546 * problem is that "good" TCP's do slow start at the beginning of data 547 * transmission. The means that until we send the first few ACK's the 548 * sender will sit on his end and only queue most of his data, because 549 * he can only send snd_cwnd unacked packets at any given time. For 550 * each ACK we send, he increments snd_cwnd and transmits more of his 551 * queue. -DaveM 552 */ 553 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) 554 { 555 struct tcp_sock *tp = tcp_sk(sk); 556 struct inet_connection_sock *icsk = inet_csk(sk); 557 u32 now; 558 559 inet_csk_schedule_ack(sk); 560 561 tcp_measure_rcv_mss(sk, skb); 562 563 tcp_rcv_rtt_measure(tp); 564 565 now = tcp_time_stamp; 566 567 if (!icsk->icsk_ack.ato) { 568 /* The _first_ data packet received, initialize 569 * delayed ACK engine. 
		 */
		tcp_incr_quickack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
	} else {
		int m = now - icsk->icsk_ack.lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* The fastest case is the first. */
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < icsk->icsk_ack.ato) {
			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
			if (icsk->icsk_ack.ato > icsk->icsk_rto)
				icsk->icsk_ack.ato = icsk->icsk_rto;
		} else if (m > icsk->icsk_rto) {
			/* Too long a gap. Apparently the sender failed to
			 * restart the window, so we send ACKs quickly.
			 */
			tcp_incr_quickack(sk);
			sk_mem_reclaim(sk);
		}
	}
	icsk->icsk_ack.lrcvtime = now;

	TCP_ECN_check_ce(tp, skb);

	if (skb->len >= 128)
		tcp_grow_window(sk, skb);
}

static u32 tcp_rto_min(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

/* Called to compute a smoothed rtt estimate. The data fed to this
 * routine either comes from timestamps, or from segments that were
 * known _not_ to have been retransmitted [see Karn/Partridge
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 * piece by Van Jacobson.
 * NOTE: the next three routines used to be one big routine.
 * To save cycles in the RFC 1323 implementation it was better to break
 * it up into three procedures. -- erics
 */
static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
	struct tcp_sock *tp = tcp_sk(sk);
	long m = mrtt; /* RTT */

	/* The following amusing code comes from Jacobson's
	 * article in SIGCOMM '88. Note that rtt and mdev
	 * are scaled versions of rtt and mean deviation.
	 * This is designed to be as fast as possible.
	 * m stands for "measurement".
	 *
	 * In a 1990 paper the rto value is changed to:
	 * RTO = rtt + 4 * mdev
	 *
	 * Funny. This algorithm seems to be very broken.
	 * These formulae increase RTO when it should be decreased, increase
	 * it too slowly when it should be increased quickly, decrease it
	 * too quickly, etc. I guess in BSD RTO takes ONE value, so it
	 * absolutely does not matter how to _calculate_ it. Seems it was
	 * a trap that VJ failed to avoid. 8)
	 */
	if (m == 0)
		m = 1;
	if (tp->srtt != 0) {
		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
		if (m < 0) {
			m = -m;		/* m is now abs(error) */
			m -= (tp->mdev >> 2);   /* similar update on mdev */
			/* This is similar to one of Eifel findings.
			 * Eifel blocks mdev updates when rtt decreases.
			 * This solution is a bit different: we use finer gain
			 * for mdev in this case (alpha*beta).
			 * Like Eifel it also prevents growth of rto,
			 * but also it limits too fast rto decreases,
			 * happening in pure Eifel.
			 */
			if (m > 0)
				m >>= 3;
		} else {
			m -= (tp->mdev >> 2);   /* similar update on mdev */
		}
		tp->mdev += m;		/* mdev = 3/4 mdev + 1/4 new */
		if (tp->mdev > tp->mdev_max) {
			tp->mdev_max = tp->mdev;
			if (tp->mdev_max > tp->rttvar)
				tp->rttvar = tp->mdev_max;
		}
		if (after(tp->snd_una, tp->rtt_seq)) {
			if (tp->mdev_max < tp->rttvar)
				tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
			tp->rtt_seq = tp->snd_nxt;
			tp->mdev_max = tcp_rto_min(sk);
		}
	} else {
		/* no previous measure. */
		tp->srtt = m << 3;	/* take the measured time to be rtt */
		tp->mdev = m << 1;	/* make sure rto = 3*rtt */
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
		tp->rtt_seq = tp->snd_nxt;
	}
}
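
/* Putting the fixed-point bookkeeping above into plain form (a reading of
 * the code, not an authoritative spec): tp->srtt holds 8*SRTT, while
 * tp->mdev, tp->mdev_max and tp->rttvar hold four times their nominal
 * values, so the updates above amount to
 *
 *	SRTT <- 7/8 * SRTT + 1/8 * m
 *	mdev <- 3/4 * mdev + 1/4 * |m - SRTT|
 *
 * and tcp_set_rto() below reduces to the classic RTO = SRTT + 4*RTTVAR,
 * since (tp->srtt >> 3) recovers SRTT while tp->rttvar already carries
 * the factor of four.
 */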

/* Calculate rto without backoff. This is the second half of Van Jacobson's
 * routine referred to above.
 */
static inline void tcp_set_rto(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	/* Old crap is replaced with new one. 8)
	 *
	 * More seriously:
	 * 1. If rtt variance happened to be less than 50msec, it is a
	 *    hallucination. It cannot be less due to the utterly erratic
	 *    ACK generation made at least by solaris and freebsd.
	 *    "Erratic ACKs" have _nothing_ to do with delayed acks, because
	 *    at cwnd>2 the true delack timeout is invisible. Actually,
	 *    Linux-2.4 also generates erratic ACKs in some circumstances.
	 */
	inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;

	/* 2. Fixups made earlier cannot be right.
	 *    If we do not estimate RTO correctly without them,
	 *    all the algo is pure shit and should be replaced
	 *    with a correct one. That is exactly what we pretend to do.
	 */
}

/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
 * guarantees that rto is higher.
 */
static inline void tcp_bound_rto(struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

/* Save metrics learned by this TCP session.
 * This function is called only when TCP finishes successfully,
 * i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (sysctl_tcp_nometrics_save)
		return;

	dst_confirm(dst);

	if (dst && (dst->flags & DST_HOST)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		int m;
		unsigned long rtt;

		if (icsk->icsk_backoff || !tp->srtt) {
			/* This session failed to estimate rtt. Why?
			 * Probably, no packets returned in time.
			 * Reset our results.
			 */
			if (!(dst_metric_locked(dst, RTAX_RTT)))
				dst->metrics[RTAX_RTT - 1] = 0;
			return;
		}

		rtt = dst_metric_rtt(dst, RTAX_RTT);
		m = rtt - tp->srtt;

		/* If the newly calculated rtt is larger than the stored one,
		 * store the new one. Otherwise, use EWMA. Remember,
		 * rtt overestimation is always better than underestimation.
		 */
		if (!(dst_metric_locked(dst, RTAX_RTT))) {
			if (m <= 0)
				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
			else
				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
		}

		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
			unsigned long var;
			if (m < 0)
				m = -m;

			/* Scale deviation to rttvar fixed point */
			m >>= 1;
			if (m < tp->mdev)
				m = tp->mdev;

			var = dst_metric_rtt(dst, RTAX_RTTVAR);
			if (m >= var)
				var = m;
			else
				var -= (var - m) >> 2;

			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
		}

		if (tp->snd_ssthresh >= 0xFFFF) {
			/* Slow start still did not finish. */
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
			if (!dst_metric_locked(dst, RTAX_CWND) &&
			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
			   icsk->icsk_ca_state == TCP_CA_Open) {
			/* Cong. avoidance phase, cwnd is reliable. */
			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
				dst->metrics[RTAX_SSTHRESH-1] =
					max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1;
		} else {
			/* Else slow start did not finish, cwnd is non-sense,
			 * ssthresh may also be invalid.
			 */
			if (!dst_metric_locked(dst, RTAX_CWND))
				dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1;
			if (dst_metric(dst, RTAX_SSTHRESH) &&
			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
		}

		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
			    tp->reordering != sysctl_tcp_reordering)
				dst->metrics[RTAX_REORDERING-1] = tp->reordering;
		}
	}
}

/* Numbers are taken from RFC3390.
 *
 * John Heffner states:
 *
 *	The RFC specifies a window of no more than 4380 bytes
 *	unless 2*MSS > 4380.  Reading the pseudocode in the RFC
 *	is a bit misleading because they use a clamp at 4380 bytes
 *	rather than use a multiplier in the relevant range.
 */
__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
{
	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);

	if (!cwnd) {
		if (tp->mss_cache > 1460)
			cwnd = 2;
		else
			cwnd = (tp->mss_cache > 1095) ? 3 : 4;
	}
	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}
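
/* Worked numbers for tcp_init_cwnd() above (illustrative): a 1460 byte MSS
 * yields an initial window of 3 segments (4380 bytes, exactly the RFC3390
 * clamp), a 536 byte MSS yields 4 segments (2144 bytes), and any MSS above
 * 1460 bytes falls back to 2 segments.
 */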

/* Set slow start threshold and cwnd not falling to slow start */
void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tp->prior_ssthresh = 0;
	tp->bytes_acked = 0;
	if (icsk->icsk_ca_state < TCP_CA_CWR) {
		tp->undo_marker = 0;
		if (set_ssthresh)
			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
		tp->snd_cwnd = min(tp->snd_cwnd,
				   tcp_packets_in_flight(tp) + 1U);
		tp->snd_cwnd_cnt = 0;
		tp->high_seq = tp->snd_nxt;
		tp->snd_cwnd_stamp = tcp_time_stamp;
		TCP_ECN_queue_cwr(tp);

		tcp_set_ca_state(sk, TCP_CA_CWR);
	}
}

/*
 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 * disables it when reordering is detected
 */
static void tcp_disable_fack(struct tcp_sock *tp)
{
	/* RFC3517 uses different metric in lost marker => reset on change */
	if (tcp_is_fack(tp))
		tp->lost_skb_hint = NULL;
	tp->rx_opt.sack_ok &= ~2;
}

/* Take note that the peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= 4;
}

/* Initialize metrics on socket. */

static void tcp_init_metrics(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst == NULL)
		goto reset;

	dst_confirm(dst);

	if (dst_metric_locked(dst, RTAX_CWND))
		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
	if (dst_metric(dst, RTAX_SSTHRESH)) {
		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	}
	if (dst_metric(dst, RTAX_REORDERING) &&
	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
		tcp_disable_fack(tp);
		tp->reordering = dst_metric(dst, RTAX_REORDERING);
	}

	if (dst_metric(dst, RTAX_RTT) == 0)
		goto reset;

	if (!tp->srtt && dst_metric_rtt(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
		goto reset;

	/* Initial rtt is determined from SYN,SYN-ACK.
	 * The segment is small and rtt may appear much
	 * less than the real one. Use per-dst memory
	 * to make it more realistic.
	 *
	 * A bit of theory. RTT is the time passed after a "normal" sized
	 * packet is sent until it is ACKed. In normal circumstances sending
	 * small packets forces the peer to delay ACKs and the calculation
	 * is correct too.
	 * The algorithm is adaptive and, provided we follow specs, it
	 * NEVER underestimates RTT. BUT! If the peer tries clever tricks,
	 * sort of "quick acks" for long enough to decrease RTT to a low
	 * value, and then abruptly stops doing so and starts to delay
	 * ACKs, wait for trouble.
	 */
	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
		tp->rtt_seq = tp->snd_nxt;
	}
	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
	}
	tcp_set_rto(sk);
	tcp_bound_rto(sk);
	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
		goto reset;
	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	return;

reset:
	/* Play conservative. If timestamps are not
	 * supported, TCP will fail to recalculate the correct
	 * rtt, if the initial rto is too small. FORGET ALL AND RESET!
	 */
	if (!tp->rx_opt.saw_tstamp && tp->srtt) {
		tp->srtt = 0;
		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
	}
}

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	if (metric > tp->reordering) {
		int mib_idx;

		tp->reordering = min(TCP_MAX_REORDERING, metric);

		/* This exciting event is worth remembering. 8) */
		if (ts)
			mib_idx = LINUX_MIB_TCPTSREORDER;
		else if (tcp_is_reno(tp))
			mib_idx = LINUX_MIB_TCPRENOREORDER;
		else if (tcp_is_fack(tp))
			mib_idx = LINUX_MIB_TCPFACKREORDER;
		else
			mib_idx = LINUX_MIB_TCPSACKREORDER;

		NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
		       tp->reordering,
		       tp->fackets_out,
		       tp->sacked_out,
		       tp->undo_marker ? tp->undo_retrans : 0);
#endif
		tcp_disable_fack(tp);
	}
}
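
/* One piece of bookkeeping worth keeping in mind for the state table below
 * (the helper itself lives in net/tcp.h): the "InFlight" column matches
 *
 *	tcp_packets_in_flight() = packets_out - (sacked_out + lost_out)
 *				  + retrans_out
 *
 * so a plain S or L tag takes a packet out of flight, while an R tag adds
 * the retransmission back in.
 */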

/* This procedure tags the retransmission queue when SACKs arrive.
 *
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
 * Packets in the queue with these bits set are counted in the variables
 * sacked_out, retrans_out and lost_out, correspondingly.
 *
 * Valid combinations are:
 * Tag	InFlight	Description
 * 0	1		- orig segment is in flight.
 * S	0		- nothing flies, orig reached receiver.
 * L	0		- nothing flies, orig lost by net.
 * R	2		- both orig and retransmit are in flight.
 * L|R	1		- orig is lost, retransmit is in flight.
 * S|R	1		- orig reached receiver, retrans is still in flight.
 * (L|S|R is logically valid, it could occur when L|R is sacked,
 *  but it is equivalent to plain S and the code short-circuits it to S.
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
 *
 * These 6 states form a finite state machine, controlled by the following events:
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
 * 3. Loss detection event of one of three flavors:
 *	A. Scoreboard estimator decided the packet is lost.
 *	   A'. Reno "three dupacks" marks head of queue lost.
 *	   A''. Its FACK modification, head until snd.fack is lost.
 *	B. SACK arrives sacking data transmitted after a never retransmitted
 *	   hole was sent out.
 *	C. SACK arrives sacking SND.NXT at the moment, when the
 *	   segment was retransmitted.
 * 4. D-SACK added a new rule: D-SACK changes any tag to S.
 *
 * It is pleasant to note that the state diagram turns out to be commutative,
 * so that we are allowed not to be bothered by the order of our actions,
 * when multiple events arrive simultaneously. (see the function below).
 *
 * Reordering detection.
 * --------------------
 * The reordering metric is the maximal distance a packet can be displaced
 * in the packet stream. With SACKs we can estimate it:
 *
 * 1. SACK fills an old hole and the corresponding segment was not
 *    ever retransmitted -> reordering. Alas, we cannot use it
 *    when the segment was retransmitted.
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
 *    for a retransmitted and already SACKed segment -> reordering..
 * Both of these heuristics are not used in Loss state, when we cannot
 * account for retransmits accurately.
 *
 * SACK block validation.
 * ----------------------
 *
 * SACK block range validation checks that the received SACK block fits to
 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 * Note that SND.UNA is not included in the range even though it is valid,
 * because it means that the receiver is rather inconsistent with itself,
 * reporting SACK reneging when it should advance SND.UNA. Such a SACK
 * block is, however, perfectly valid in light of RFC2018, which explicitly
 * states that "SACK block MUST reflect the newest segment. Even if the
 * newest segment is going to be discarded ...", not that it looks very
 * clever in case of the head skb. Due to potential receiver driven
 * attacks, we choose to avoid immediate execution of a walk in the write
 * queue due to reneging and defer the head skb's loss recovery to the
 * standard loss recovery procedure that will eventually trigger (nothing
 * forbids us doing this).
 *
 * This also implements blockage of start_seq wrap-around. The problem lies
 * in the fact that though start_seq (s) is before end_seq (i.e., not
 * reversed), there's no guarantee that it will be before snd_nxt (n). The
 * problem happens when start_seq resides between end_seq wrap (e_w) and
 * snd_nxt wrap (s_w):
 *
 *         <- outs wnd ->                          <- wrapzone ->
 *         u     e      n                         u_w   e_w  s n_w
 *         |     |      |                          |     |   |  |
 * |<------------+------+----- TCP seqno space --------------+---------->|
 * ...-- <2^31 ->|                                           |<--------...
 * ...---- >2^31 ------>|                                    |<--------...
 *
 * Current code wouldn't be vulnerable but it's better still to discard such
 * crazy SACK blocks. Doing this check for start_seq alone closes the
 * somewhat similar case (end_seq after snd_nxt wrap), as the earlier
 * reversed check in the snd_nxt wrap -> snd_una region will then become
 * "well defined", i.e., equal to the ideal case (infinite seqno space
 * without wrap caused issues).
 *
 * With D-SACK the lower bound is extended to cover sequence space below
 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 * again, a D-SACK block must not go across snd_una (for the same reason as
 * for the normal SACK blocks, explained above). But there all simplicity
 * ends, TCP might receive valid D-SACKs below that. As long as they reside
 * fully below undo_marker they do not affect behavior in any way and can
 * therefore be safely ignored. In rare cases (which are more or less
 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
 * fragmentation and packet reordering past skb's retransmission. To consider
 * them correctly, the acceptable range must be extended even more though
 * the exact amount is rather hard to quantify. However, tp->max_window can
 * be used as an exaggerated estimate.
 */
static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
				  u32 start_seq, u32 end_seq)
{
	/* Too far in future, or reversed (interpretation is ambiguous) */
	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
		return 0;

	/* Nasty start_seq wrap-around check (see comments above) */
	if (!before(start_seq, tp->snd_nxt))
		return 0;

	/* In outstanding window? ...This is valid exit for D-SACKs too.
1090 * start_seq == snd_una is non-sensical (see comments above) 1091 */ 1092 if (after(start_seq, tp->snd_una)) 1093 return 1; 1094 1095 if (!is_dsack || !tp->undo_marker) 1096 return 0; 1097 1098 /* ...Then it's D-SACK, and must reside below snd_una completely */ 1099 if (!after(end_seq, tp->snd_una)) 1100 return 0; 1101 1102 if (!before(start_seq, tp->undo_marker)) 1103 return 1; 1104 1105 /* Too old */ 1106 if (!after(end_seq, tp->undo_marker)) 1107 return 0; 1108 1109 /* Undo_marker boundary crossing (overestimates a lot). Known already: 1110 * start_seq < undo_marker and end_seq >= undo_marker. 1111 */ 1112 return !before(start_seq, end_seq - tp->max_window); 1113 } 1114 1115 /* Check for lost retransmit. This superb idea is borrowed from "ratehalving". 1116 * Event "C". Later note: FACK people cheated me again 8), we have to account 1117 * for reordering! Ugly, but should help. 1118 * 1119 * Search retransmitted skbs from write_queue that were sent when snd_nxt was 1120 * less than what is now known to be received by the other end (derived from 1121 * highest SACK block). Also calculate the lowest snd_nxt among the remaining 1122 * retransmitted skbs to avoid some costly processing per ACKs. 1123 */ 1124 static void tcp_mark_lost_retrans(struct sock *sk) 1125 { 1126 const struct inet_connection_sock *icsk = inet_csk(sk); 1127 struct tcp_sock *tp = tcp_sk(sk); 1128 struct sk_buff *skb; 1129 int cnt = 0; 1130 u32 new_low_seq = tp->snd_nxt; 1131 u32 received_upto = tcp_highest_sack_seq(tp); 1132 1133 if (!tcp_is_fack(tp) || !tp->retrans_out || 1134 !after(received_upto, tp->lost_retrans_low) || 1135 icsk->icsk_ca_state != TCP_CA_Recovery) 1136 return; 1137 1138 tcp_for_write_queue(skb, sk) { 1139 u32 ack_seq = TCP_SKB_CB(skb)->ack_seq; 1140 1141 if (skb == tcp_send_head(sk)) 1142 break; 1143 if (cnt == tp->retrans_out) 1144 break; 1145 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1146 continue; 1147 1148 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) 1149 continue; 1150 1151 if (after(received_upto, ack_seq) && 1152 (tcp_is_fack(tp) || 1153 !before(received_upto, 1154 ack_seq + tp->reordering * tp->mss_cache))) { 1155 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1156 tp->retrans_out -= tcp_skb_pcount(skb); 1157 1158 /* clear lost hint */ 1159 tp->retransmit_skb_hint = NULL; 1160 1161 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { 1162 tp->lost_out += tcp_skb_pcount(skb); 1163 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1164 } 1165 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); 1166 } else { 1167 if (before(ack_seq, new_low_seq)) 1168 new_low_seq = ack_seq; 1169 cnt += tcp_skb_pcount(skb); 1170 } 1171 } 1172 1173 if (tp->retrans_out) 1174 tp->lost_retrans_low = new_low_seq; 1175 } 1176 1177 static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb, 1178 struct tcp_sack_block_wire *sp, int num_sacks, 1179 u32 prior_snd_una) 1180 { 1181 struct tcp_sock *tp = tcp_sk(sk); 1182 u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); 1183 u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); 1184 int dup_sack = 0; 1185 1186 if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { 1187 dup_sack = 1; 1188 tcp_dsack_seen(tp); 1189 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV); 1190 } else if (num_sacks > 1) { 1191 u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); 1192 u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); 1193 1194 if (!after(end_seq_0, end_seq_1) && 1195 !before(start_seq_0, start_seq_1)) { 1196 dup_sack = 1; 
1197 tcp_dsack_seen(tp); 1198 NET_INC_STATS_BH(sock_net(sk), 1199 LINUX_MIB_TCPDSACKOFORECV); 1200 } 1201 } 1202 1203 /* D-SACK for already forgotten data... Do dumb counting. */ 1204 if (dup_sack && 1205 !after(end_seq_0, prior_snd_una) && 1206 after(end_seq_0, tp->undo_marker)) 1207 tp->undo_retrans--; 1208 1209 return dup_sack; 1210 } 1211 1212 /* Check if skb is fully within the SACK block. In presence of GSO skbs, 1213 * the incoming SACK may not exactly match but we can find smaller MSS 1214 * aligned portion of it that matches. Therefore we might need to fragment 1215 * which may fail and creates some hassle (caller must handle error case 1216 * returns). 1217 */ 1218 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, 1219 u32 start_seq, u32 end_seq) 1220 { 1221 int in_sack, err; 1222 unsigned int pkt_len; 1223 1224 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && 1225 !before(end_seq, TCP_SKB_CB(skb)->end_seq); 1226 1227 if (tcp_skb_pcount(skb) > 1 && !in_sack && 1228 after(TCP_SKB_CB(skb)->end_seq, start_seq)) { 1229 1230 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); 1231 1232 if (!in_sack) 1233 pkt_len = start_seq - TCP_SKB_CB(skb)->seq; 1234 else 1235 pkt_len = end_seq - TCP_SKB_CB(skb)->seq; 1236 err = tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->gso_size); 1237 if (err < 0) 1238 return err; 1239 } 1240 1241 return in_sack; 1242 } 1243 1244 static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, 1245 int *reord, int dup_sack, int fack_count) 1246 { 1247 struct tcp_sock *tp = tcp_sk(sk); 1248 u8 sacked = TCP_SKB_CB(skb)->sacked; 1249 int flag = 0; 1250 1251 /* Account D-SACK for retransmitted packet. */ 1252 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1253 if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) 1254 tp->undo_retrans--; 1255 if (sacked & TCPCB_SACKED_ACKED) 1256 *reord = min(fack_count, *reord); 1257 } 1258 1259 /* Nothing to do; acked frame is about to be dropped (was ACKed). */ 1260 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1261 return flag; 1262 1263 if (!(sacked & TCPCB_SACKED_ACKED)) { 1264 if (sacked & TCPCB_SACKED_RETRANS) { 1265 /* If the segment is not tagged as lost, 1266 * we do not clear RETRANS, believing 1267 * that retransmission is still in flight. 1268 */ 1269 if (sacked & TCPCB_LOST) { 1270 TCP_SKB_CB(skb)->sacked &= 1271 ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); 1272 tp->lost_out -= tcp_skb_pcount(skb); 1273 tp->retrans_out -= tcp_skb_pcount(skb); 1274 1275 /* clear lost hint */ 1276 tp->retransmit_skb_hint = NULL; 1277 } 1278 } else { 1279 if (!(sacked & TCPCB_RETRANS)) { 1280 /* New sack for not retransmitted frame, 1281 * which was in hole. It is reordering. 1282 */ 1283 if (before(TCP_SKB_CB(skb)->seq, 1284 tcp_highest_sack_seq(tp))) 1285 *reord = min(fack_count, *reord); 1286 1287 /* SACK enhanced F-RTO (RFC4138; Appendix B) */ 1288 if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) 1289 flag |= FLAG_ONLY_ORIG_SACKED; 1290 } 1291 1292 if (sacked & TCPCB_LOST) { 1293 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 1294 tp->lost_out -= tcp_skb_pcount(skb); 1295 1296 /* clear lost hint */ 1297 tp->retransmit_skb_hint = NULL; 1298 } 1299 } 1300 1301 TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED; 1302 flag |= FLAG_DATA_SACKED; 1303 tp->sacked_out += tcp_skb_pcount(skb); 1304 1305 fack_count += tcp_skb_pcount(skb); 1306 1307 /* Lost marker hint past SACKed? 
Tweak RFC3517 cnt */ 1308 if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && 1309 before(TCP_SKB_CB(skb)->seq, 1310 TCP_SKB_CB(tp->lost_skb_hint)->seq)) 1311 tp->lost_cnt_hint += tcp_skb_pcount(skb); 1312 1313 if (fack_count > tp->fackets_out) 1314 tp->fackets_out = fack_count; 1315 1316 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 1317 tcp_advance_highest_sack(sk, skb); 1318 } 1319 1320 /* D-SACK. We can detect redundant retransmission in S|R and plain R 1321 * frames and clear it. undo_retrans is decreased above, L|R frames 1322 * are accounted above as well. 1323 */ 1324 if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) { 1325 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1326 tp->retrans_out -= tcp_skb_pcount(skb); 1327 tp->retransmit_skb_hint = NULL; 1328 } 1329 1330 return flag; 1331 } 1332 1333 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, 1334 struct tcp_sack_block *next_dup, 1335 u32 start_seq, u32 end_seq, 1336 int dup_sack_in, int *fack_count, 1337 int *reord, int *flag) 1338 { 1339 tcp_for_write_queue_from(skb, sk) { 1340 int in_sack = 0; 1341 int dup_sack = dup_sack_in; 1342 1343 if (skb == tcp_send_head(sk)) 1344 break; 1345 1346 /* queue is in-order => we can short-circuit the walk early */ 1347 if (!before(TCP_SKB_CB(skb)->seq, end_seq)) 1348 break; 1349 1350 if ((next_dup != NULL) && 1351 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { 1352 in_sack = tcp_match_skb_to_sack(sk, skb, 1353 next_dup->start_seq, 1354 next_dup->end_seq); 1355 if (in_sack > 0) 1356 dup_sack = 1; 1357 } 1358 1359 if (in_sack <= 0) 1360 in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, 1361 end_seq); 1362 if (unlikely(in_sack < 0)) 1363 break; 1364 1365 if (in_sack) 1366 *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack, 1367 *fack_count); 1368 1369 *fack_count += tcp_skb_pcount(skb); 1370 } 1371 return skb; 1372 } 1373 1374 /* Avoid all extra work that is being done by sacktag while walking in 1375 * a normal way 1376 */ 1377 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, 1378 u32 skip_to_seq, int *fack_count) 1379 { 1380 tcp_for_write_queue_from(skb, sk) { 1381 if (skb == tcp_send_head(sk)) 1382 break; 1383 1384 if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) 1385 break; 1386 1387 *fack_count += tcp_skb_pcount(skb); 1388 } 1389 return skb; 1390 } 1391 1392 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, 1393 struct sock *sk, 1394 struct tcp_sack_block *next_dup, 1395 u32 skip_to_seq, 1396 int *fack_count, int *reord, 1397 int *flag) 1398 { 1399 if (next_dup == NULL) 1400 return skb; 1401 1402 if (before(next_dup->start_seq, skip_to_seq)) { 1403 skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count); 1404 skb = tcp_sacktag_walk(skb, sk, NULL, 1405 next_dup->start_seq, next_dup->end_seq, 1406 1, fack_count, reord, flag); 1407 } 1408 1409 return skb; 1410 } 1411 1412 static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache) 1413 { 1414 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1415 } 1416 1417 static int 1418 tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, 1419 u32 prior_snd_una) 1420 { 1421 const struct inet_connection_sock *icsk = inet_csk(sk); 1422 struct tcp_sock *tp = tcp_sk(sk); 1423 unsigned char *ptr = (skb_transport_header(ack_skb) + 1424 TCP_SKB_CB(ack_skb)->sacked); 1425 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); 1426 struct tcp_sack_block 
sp[TCP_NUM_SACKS]; 1427 struct tcp_sack_block *cache; 1428 struct sk_buff *skb; 1429 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); 1430 int used_sacks; 1431 int reord = tp->packets_out; 1432 int flag = 0; 1433 int found_dup_sack = 0; 1434 int fack_count; 1435 int i, j; 1436 int first_sack_index; 1437 1438 if (!tp->sacked_out) { 1439 if (WARN_ON(tp->fackets_out)) 1440 tp->fackets_out = 0; 1441 tcp_highest_sack_reset(sk); 1442 } 1443 1444 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, 1445 num_sacks, prior_snd_una); 1446 if (found_dup_sack) 1447 flag |= FLAG_DSACKING_ACK; 1448 1449 /* Eliminate too old ACKs, but take into 1450 * account more or less fresh ones, they can 1451 * contain valid SACK info. 1452 */ 1453 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) 1454 return 0; 1455 1456 if (!tp->packets_out) 1457 goto out; 1458 1459 used_sacks = 0; 1460 first_sack_index = 0; 1461 for (i = 0; i < num_sacks; i++) { 1462 int dup_sack = !i && found_dup_sack; 1463 1464 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); 1465 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); 1466 1467 if (!tcp_is_sackblock_valid(tp, dup_sack, 1468 sp[used_sacks].start_seq, 1469 sp[used_sacks].end_seq)) { 1470 int mib_idx; 1471 1472 if (dup_sack) { 1473 if (!tp->undo_marker) 1474 mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; 1475 else 1476 mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; 1477 } else { 1478 /* Don't count olds caused by ACK reordering */ 1479 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && 1480 !after(sp[used_sacks].end_seq, tp->snd_una)) 1481 continue; 1482 mib_idx = LINUX_MIB_TCPSACKDISCARD; 1483 } 1484 1485 NET_INC_STATS_BH(sock_net(sk), mib_idx); 1486 if (i == 0) 1487 first_sack_index = -1; 1488 continue; 1489 } 1490 1491 /* Ignore very old stuff early */ 1492 if (!after(sp[used_sacks].end_seq, prior_snd_una)) 1493 continue; 1494 1495 used_sacks++; 1496 } 1497 1498 /* order SACK blocks to allow in order walk of the retrans queue */ 1499 for (i = used_sacks - 1; i > 0; i--) { 1500 for (j = 0; j < i; j++) { 1501 if (after(sp[j].start_seq, sp[j + 1].start_seq)) { 1502 struct tcp_sack_block tmp; 1503 1504 tmp = sp[j]; 1505 sp[j] = sp[j + 1]; 1506 sp[j + 1] = tmp; 1507 1508 /* Track where the first SACK block goes to */ 1509 if (j == first_sack_index) 1510 first_sack_index = j + 1; 1511 } 1512 } 1513 } 1514 1515 skb = tcp_write_queue_head(sk); 1516 fack_count = 0; 1517 i = 0; 1518 1519 if (!tp->sacked_out) { 1520 /* It's already past, so skip checking against it */ 1521 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1522 } else { 1523 cache = tp->recv_sack_cache; 1524 /* Skip empty blocks in at head of the cache */ 1525 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && 1526 !cache->end_seq) 1527 cache++; 1528 } 1529 1530 while (i < used_sacks) { 1531 u32 start_seq = sp[i].start_seq; 1532 u32 end_seq = sp[i].end_seq; 1533 int dup_sack = (found_dup_sack && (i == first_sack_index)); 1534 struct tcp_sack_block *next_dup = NULL; 1535 1536 if (found_dup_sack && ((i + 1) == first_sack_index)) 1537 next_dup = &sp[i + 1]; 1538 1539 /* Event "B" in the comment above. */ 1540 if (after(end_seq, tp->high_seq)) 1541 flag |= FLAG_DATA_LOST; 1542 1543 /* Skip too early cached blocks */ 1544 while (tcp_sack_cache_ok(tp, cache) && 1545 !before(start_seq, cache->end_seq)) 1546 cache++; 1547 1548 /* Can skip some work by looking recv_sack_cache? 
*/ 1549 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && 1550 after(end_seq, cache->start_seq)) { 1551 1552 /* Head todo? */ 1553 if (before(start_seq, cache->start_seq)) { 1554 skb = tcp_sacktag_skip(skb, sk, start_seq, 1555 &fack_count); 1556 skb = tcp_sacktag_walk(skb, sk, next_dup, 1557 start_seq, 1558 cache->start_seq, 1559 dup_sack, &fack_count, 1560 &reord, &flag); 1561 } 1562 1563 /* Rest of the block already fully processed? */ 1564 if (!after(end_seq, cache->end_seq)) 1565 goto advance_sp; 1566 1567 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, 1568 cache->end_seq, 1569 &fack_count, &reord, 1570 &flag); 1571 1572 /* ...tail remains todo... */ 1573 if (tcp_highest_sack_seq(tp) == cache->end_seq) { 1574 /* ...but better entrypoint exists! */ 1575 skb = tcp_highest_sack(sk); 1576 if (skb == NULL) 1577 break; 1578 fack_count = tp->fackets_out; 1579 cache++; 1580 goto walk; 1581 } 1582 1583 skb = tcp_sacktag_skip(skb, sk, cache->end_seq, 1584 &fack_count); 1585 /* Check overlap against next cached too (past this one already) */ 1586 cache++; 1587 continue; 1588 } 1589 1590 if (!before(start_seq, tcp_highest_sack_seq(tp))) { 1591 skb = tcp_highest_sack(sk); 1592 if (skb == NULL) 1593 break; 1594 fack_count = tp->fackets_out; 1595 } 1596 skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count); 1597 1598 walk: 1599 skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq, 1600 dup_sack, &fack_count, &reord, &flag); 1601 1602 advance_sp: 1603 /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct 1604 * due to in-order walk 1605 */ 1606 if (after(end_seq, tp->frto_highmark)) 1607 flag &= ~FLAG_ONLY_ORIG_SACKED; 1608 1609 i++; 1610 } 1611 1612 /* Clear the head of the cache sack blocks so we can skip it next time */ 1613 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { 1614 tp->recv_sack_cache[i].start_seq = 0; 1615 tp->recv_sack_cache[i].end_seq = 0; 1616 } 1617 for (j = 0; j < used_sacks; j++) 1618 tp->recv_sack_cache[i++] = sp[j]; 1619 1620 tcp_mark_lost_retrans(sk); 1621 1622 tcp_verify_left_out(tp); 1623 1624 if ((reord < tp->fackets_out) && 1625 ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) && 1626 (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark))) 1627 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 1628 1629 out: 1630 1631 #if FASTRETRANS_DEBUG > 0 1632 WARN_ON((int)tp->sacked_out < 0); 1633 WARN_ON((int)tp->lost_out < 0); 1634 WARN_ON((int)tp->retrans_out < 0); 1635 WARN_ON((int)tcp_packets_in_flight(tp) < 0); 1636 #endif 1637 return flag; 1638 } 1639 1640 /* Limits sacked_out so that sum with lost_out isn't ever larger than 1641 * packets_out. Returns zero if sacked_out adjustement wasn't necessary. 1642 */ 1643 int tcp_limit_reno_sacked(struct tcp_sock *tp) 1644 { 1645 u32 holes; 1646 1647 holes = max(tp->lost_out, 1U); 1648 holes = min(holes, tp->packets_out); 1649 1650 if ((tp->sacked_out + holes) > tp->packets_out) { 1651 tp->sacked_out = tp->packets_out - holes; 1652 return 1; 1653 } 1654 return 0; 1655 } 1656 1657 /* If we receive more dupacks than we expected counting segments 1658 * in assumption of absent reordering, interpret this as reordering. 1659 * The only another reason could be bug in receiver TCP. 
1660 */ 1661 static void tcp_check_reno_reordering(struct sock *sk, const int addend) 1662 { 1663 struct tcp_sock *tp = tcp_sk(sk); 1664 if (tcp_limit_reno_sacked(tp)) 1665 tcp_update_reordering(sk, tp->packets_out + addend, 0); 1666 } 1667 1668 /* Emulate SACKs for SACKless connection: account for a new dupack. */ 1669 1670 static void tcp_add_reno_sack(struct sock *sk) 1671 { 1672 struct tcp_sock *tp = tcp_sk(sk); 1673 tp->sacked_out++; 1674 tcp_check_reno_reordering(sk, 0); 1675 tcp_verify_left_out(tp); 1676 } 1677 1678 /* Account for ACK, ACKing some data in Reno Recovery phase. */ 1679 1680 static void tcp_remove_reno_sacks(struct sock *sk, int acked) 1681 { 1682 struct tcp_sock *tp = tcp_sk(sk); 1683 1684 if (acked > 0) { 1685 /* One ACK acked hole. The rest eat duplicate ACKs. */ 1686 if (acked - 1 >= tp->sacked_out) 1687 tp->sacked_out = 0; 1688 else 1689 tp->sacked_out -= acked - 1; 1690 } 1691 tcp_check_reno_reordering(sk, acked); 1692 tcp_verify_left_out(tp); 1693 } 1694 1695 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) 1696 { 1697 tp->sacked_out = 0; 1698 } 1699 1700 static int tcp_is_sackfrto(const struct tcp_sock *tp) 1701 { 1702 return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp); 1703 } 1704 1705 /* F-RTO can only be used if TCP has never retransmitted anything other than 1706 * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here) 1707 */ 1708 int tcp_use_frto(struct sock *sk) 1709 { 1710 const struct tcp_sock *tp = tcp_sk(sk); 1711 const struct inet_connection_sock *icsk = inet_csk(sk); 1712 struct sk_buff *skb; 1713 1714 if (!sysctl_tcp_frto) 1715 return 0; 1716 1717 /* MTU probe and F-RTO won't really play nicely along currently */ 1718 if (icsk->icsk_mtup.probe_size) 1719 return 0; 1720 1721 if (tcp_is_sackfrto(tp)) 1722 return 1; 1723 1724 /* Avoid expensive walking of rexmit queue if possible */ 1725 if (tp->retrans_out > 1) 1726 return 0; 1727 1728 skb = tcp_write_queue_head(sk); 1729 skb = tcp_write_queue_next(sk, skb); /* Skips head */ 1730 tcp_for_write_queue_from(skb, sk) { 1731 if (skb == tcp_send_head(sk)) 1732 break; 1733 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1734 return 0; 1735 /* Short-circuit when first non-SACKed skb has been checked */ 1736 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 1737 break; 1738 } 1739 return 1; 1740 } 1741 1742 /* RTO occurred, but do not yet enter Loss state. Instead, defer RTO 1743 * recovery a bit and use heuristics in tcp_process_frto() to detect if 1744 * the RTO was spurious. Only clear SACKED_RETRANS of the head here to 1745 * keep retrans_out counting accurate (with SACK F-RTO, other than head 1746 * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS 1747 * bits are handled if the Loss state is really to be entered (in 1748 * tcp_enter_frto_loss). 1749 * 1750 * Do like tcp_enter_loss() would; when RTO expires the second time it 1751 * does: 1752 * "Reduce ssthresh if it has not yet been made inside this window." 
1753 */ 1754 void tcp_enter_frto(struct sock *sk) 1755 { 1756 const struct inet_connection_sock *icsk = inet_csk(sk); 1757 struct tcp_sock *tp = tcp_sk(sk); 1758 struct sk_buff *skb; 1759 1760 if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) || 1761 tp->snd_una == tp->high_seq || 1762 ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) && 1763 !icsk->icsk_retransmits)) { 1764 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1765 /* Our state is too optimistic in ssthresh() call because cwnd 1766 * is not reduced until tcp_enter_frto_loss() when previous F-RTO 1767 * recovery has not yet completed. Pattern would be this: RTO, 1768 * Cumulative ACK, RTO (2xRTO for the same segment does not end 1769 * up here twice). 1770 * RFC4138 should be more specific on what to do, even though 1771 * RTO is quite unlikely to occur after the first Cumulative ACK 1772 * due to back-off and complexity of triggering events ... 1773 */ 1774 if (tp->frto_counter) { 1775 u32 stored_cwnd; 1776 stored_cwnd = tp->snd_cwnd; 1777 tp->snd_cwnd = 2; 1778 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1779 tp->snd_cwnd = stored_cwnd; 1780 } else { 1781 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1782 } 1783 /* ... in theory, cong.control module could do "any tricks" in 1784 * ssthresh(), which means that ca_state, lost bits and lost_out 1785 * counter would have to be faked before the call occurs. We 1786 * consider that too expensive, unlikely and hacky, so modules 1787 * using these in ssthresh() must deal these incompatibility 1788 * issues if they receives CA_EVENT_FRTO and frto_counter != 0 1789 */ 1790 tcp_ca_event(sk, CA_EVENT_FRTO); 1791 } 1792 1793 tp->undo_marker = tp->snd_una; 1794 tp->undo_retrans = 0; 1795 1796 skb = tcp_write_queue_head(sk); 1797 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1798 tp->undo_marker = 0; 1799 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 1800 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1801 tp->retrans_out -= tcp_skb_pcount(skb); 1802 } 1803 tcp_verify_left_out(tp); 1804 1805 /* Too bad if TCP was application limited */ 1806 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); 1807 1808 /* Earlier loss recovery underway (see RFC4138; Appendix B). 1809 * The last condition is necessary at least in tp->frto_counter case. 1810 */ 1811 if (tcp_is_sackfrto(tp) && (tp->frto_counter || 1812 ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) && 1813 after(tp->high_seq, tp->snd_una)) { 1814 tp->frto_highmark = tp->high_seq; 1815 } else { 1816 tp->frto_highmark = tp->snd_nxt; 1817 } 1818 tcp_set_ca_state(sk, TCP_CA_Disorder); 1819 tp->high_seq = tp->snd_nxt; 1820 tp->frto_counter = 1; 1821 } 1822 1823 /* Enter Loss state after F-RTO was applied. Dupack arrived after RTO, 1824 * which indicates that we should follow the traditional RTO recovery, 1825 * i.e. mark everything lost and do go-back-N retransmission. 1826 */ 1827 static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag) 1828 { 1829 struct tcp_sock *tp = tcp_sk(sk); 1830 struct sk_buff *skb; 1831 1832 tp->lost_out = 0; 1833 tp->retrans_out = 0; 1834 if (tcp_is_reno(tp)) 1835 tcp_reset_reno_sack(tp); 1836 1837 tcp_for_write_queue(skb, sk) { 1838 if (skb == tcp_send_head(sk)) 1839 break; 1840 1841 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 1842 /* 1843 * Count the retransmission made on RTO correctly (only when 1844 * waiting for the first ACK and did not get it)... 
1845 */ 1846 if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) { 1847 /* For some reason this R-bit might get cleared? */ 1848 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 1849 tp->retrans_out += tcp_skb_pcount(skb); 1850 /* ...enter this if branch just for the first segment */ 1851 flag |= FLAG_DATA_ACKED; 1852 } else { 1853 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1854 tp->undo_marker = 0; 1855 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1856 } 1857 1858 /* Marking forward transmissions that were made after RTO lost 1859 * can cause unnecessary retransmissions in some scenarios, 1860 * SACK blocks will mitigate that in some but not in all cases. 1861 * We used to not mark them but it was causing break-ups with 1862 * receivers that do only in-order receival. 1863 * 1864 * TODO: we could detect presence of such receiver and select 1865 * different behavior per flow. 1866 */ 1867 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 1868 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1869 tp->lost_out += tcp_skb_pcount(skb); 1870 } 1871 } 1872 tcp_verify_left_out(tp); 1873 1874 tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments; 1875 tp->snd_cwnd_cnt = 0; 1876 tp->snd_cwnd_stamp = tcp_time_stamp; 1877 tp->frto_counter = 0; 1878 tp->bytes_acked = 0; 1879 1880 tp->reordering = min_t(unsigned int, tp->reordering, 1881 sysctl_tcp_reordering); 1882 tcp_set_ca_state(sk, TCP_CA_Loss); 1883 tp->high_seq = tp->snd_nxt; 1884 TCP_ECN_queue_cwr(tp); 1885 1886 tcp_clear_retrans_hints_partial(tp); 1887 } 1888 1889 static void tcp_clear_retrans_partial(struct tcp_sock *tp) 1890 { 1891 tp->retrans_out = 0; 1892 tp->lost_out = 0; 1893 1894 tp->undo_marker = 0; 1895 tp->undo_retrans = 0; 1896 } 1897 1898 void tcp_clear_retrans(struct tcp_sock *tp) 1899 { 1900 tcp_clear_retrans_partial(tp); 1901 1902 tp->fackets_out = 0; 1903 tp->sacked_out = 0; 1904 } 1905 1906 /* Enter Loss state. If "how" is not zero, forget all SACK information 1907 * and reset tags completely, otherwise preserve SACKs. If receiver 1908 * dropped its ofo queue, we will know this due to reneging detection. 1909 */ 1910 void tcp_enter_loss(struct sock *sk, int how) 1911 { 1912 const struct inet_connection_sock *icsk = inet_csk(sk); 1913 struct tcp_sock *tp = tcp_sk(sk); 1914 struct sk_buff *skb; 1915 1916 /* Reduce ssthresh if it has not yet been made inside this window. */ 1917 if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || 1918 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1919 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1920 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1921 tcp_ca_event(sk, CA_EVENT_LOSS); 1922 } 1923 tp->snd_cwnd = 1; 1924 tp->snd_cwnd_cnt = 0; 1925 tp->snd_cwnd_stamp = tcp_time_stamp; 1926 1927 tp->bytes_acked = 0; 1928 tcp_clear_retrans_partial(tp); 1929 1930 if (tcp_is_reno(tp)) 1931 tcp_reset_reno_sack(tp); 1932 1933 if (!how) { 1934 /* Push undo marker, if it was plain RTO and nothing 1935 * was retransmitted. 
*/ 1936 tp->undo_marker = tp->snd_una; 1937 tcp_clear_retrans_hints_partial(tp); 1938 } else { 1939 tp->sacked_out = 0; 1940 tp->fackets_out = 0; 1941 tcp_clear_all_retrans_hints(tp); 1942 } 1943 1944 tcp_for_write_queue(skb, sk) { 1945 if (skb == tcp_send_head(sk)) 1946 break; 1947 1948 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) 1949 tp->undo_marker = 0; 1950 TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; 1951 if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { 1952 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; 1953 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1954 tp->lost_out += tcp_skb_pcount(skb); 1955 } 1956 } 1957 tcp_verify_left_out(tp); 1958 1959 tp->reordering = min_t(unsigned int, tp->reordering, 1960 sysctl_tcp_reordering); 1961 tcp_set_ca_state(sk, TCP_CA_Loss); 1962 tp->high_seq = tp->snd_nxt; 1963 TCP_ECN_queue_cwr(tp); 1964 /* Abort F-RTO algorithm if one is in progress */ 1965 tp->frto_counter = 0; 1966 } 1967 1968 /* If ACK arrived pointing to a remembered SACK, it means that our 1969 * remembered SACKs do not reflect real state of receiver i.e. 1970 * receiver _host_ is heavily congested (or buggy). 1971 * 1972 * Do processing similar to RTO timeout. 1973 */ 1974 static int tcp_check_sack_reneging(struct sock *sk, int flag) 1975 { 1976 if (flag & FLAG_SACK_RENEGING) { 1977 struct inet_connection_sock *icsk = inet_csk(sk); 1978 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); 1979 1980 tcp_enter_loss(sk, 1); 1981 icsk->icsk_retransmits++; 1982 tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); 1983 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 1984 icsk->icsk_rto, TCP_RTO_MAX); 1985 return 1; 1986 } 1987 return 0; 1988 } 1989 1990 static inline int tcp_fackets_out(struct tcp_sock *tp) 1991 { 1992 return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; 1993 } 1994 1995 /* Heurestics to calculate number of duplicate ACKs. There's no dupACKs 1996 * counter when SACK is enabled (without SACK, sacked_out is used for 1997 * that purpose). 1998 * 1999 * Instead, with FACK TCP uses fackets_out that includes both SACKed 2000 * segments up to the highest received SACK block so far and holes in 2001 * between them. 2002 * 2003 * With reordering, holes may still be in flight, so RFC3517 recovery 2004 * uses pure sacked_out (total number of SACKed segments) even though 2005 * it violates the RFC that uses duplicate ACKs, often these are equal 2006 * but when e.g. out-of-window ACKs or packet duplication occurs, 2007 * they differ. Since neither occurs due to loss, TCP should really 2008 * ignore them. 2009 */ 2010 static inline int tcp_dupack_heurestics(struct tcp_sock *tp) 2011 { 2012 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; 2013 } 2014 2015 static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) 2016 { 2017 return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto); 2018 } 2019 2020 static inline int tcp_head_timedout(struct sock *sk) 2021 { 2022 struct tcp_sock *tp = tcp_sk(sk); 2023 2024 return tp->packets_out && 2025 tcp_skb_timedout(sk, tcp_write_queue_head(sk)); 2026 } 2027 2028 /* Linux NewReno/SACK/FACK/ECN state machine. 2029 * -------------------------------------- 2030 * 2031 * "Open" Normal state, no dubious events, fast path. 2032 * "Disorder" In all the respects it is "Open", 2033 * but requires a bit more attention. It is entered when 2034 * we see some SACKs or dupacks. It is split of "Open" 2035 * mainly to move some processing from fast path to slow one. 
2036 * "CWR" CWND was reduced due to some Congestion Notification event.
2037 * It can be ECN, ICMP source quench, or local device congestion.
2038 * "Recovery" CWND was reduced, we are fast-retransmitting.
2039 * "Loss" CWND was reduced due to RTO timeout or SACK reneging.
2040 *
2041 * tcp_fastretrans_alert() is entered:
2042 * - on each incoming ACK, if state is not "Open"
2043 * - when the arriving ACK is unusual, namely:
2044 * * SACK
2045 * * Duplicate ACK.
2046 * * ECN ECE.
2047 *
2048 * Counting packets in flight is pretty simple.
2049 *
2050 * in_flight = packets_out - left_out + retrans_out
2051 *
2052 * packets_out is SND.NXT-SND.UNA counted in packets.
2053 *
2054 * retrans_out is the number of retransmitted segments.
2055 *
2056 * left_out is the number of segments that left the network but were not ACKed yet.
2057 *
2058 * left_out = sacked_out + lost_out
2059 *
2060 * sacked_out: Packets which arrived at the receiver out of order
2061 * and hence were not cumulatively ACKed. With SACKs this number is simply
2062 * the amount of SACKed data. Even without SACKs
2063 * it is easy to give a pretty reliable estimate of this number
2064 * by counting duplicate ACKs.
2065 *
2066 * lost_out: Packets lost by the network. TCP has no explicit
2067 * "loss notification" feedback from the network (for now).
2068 * It means that this number can only be _guessed_.
2069 * Actually, it is the heuristic used to predict loss that
2070 * distinguishes the different algorithms.
2071 *
2072 * F.e. after an RTO, when the whole queue is considered lost,
2073 * lost_out = packets_out and in_flight = retrans_out.
2074 *
2075 * Essentially, we now have two algorithms for counting
2076 * lost packets.
2077 *
2078 * FACK: It is the simplest heuristic. As soon as we decide
2079 * that something is lost, we decide that _all_ not-yet-SACKed
2080 * packets up to the most forward SACK are lost. I.e.
2081 * lost_out = fackets_out - sacked_out and left_out = fackets_out.
2082 * It is an absolutely correct estimate, if the network does not reorder
2083 * packets. And it loses any connection to reality when reordering
2084 * takes place. We use FACK by default until reordering
2085 * is suspected on the path to this destination.
2086 *
2087 * NewReno: when Recovery is entered, we assume that one segment
2088 * is lost (classic Reno). While we are in Recovery and
2089 * a partial ACK arrives, we assume that one more packet
2090 * is lost (NewReno). These heuristics are the same in NewReno
2091 * and SACK.
2092 *
2093 * Imagine, that's all! Forget about all this shamanism about CWND inflation and
2094 * deflation etc. CWND is the real congestion window, never inflated; it changes
2095 * only according to classic VJ rules.
2096 *
2097 * The really tricky (and carefully tuned) part of the algorithm
2098 * is hidden in the functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
2099 * The first determines the moment _when_ we should reduce CWND and,
2100 * hence, slow down forward transmission. In fact, it determines the moment
2101 * when we decide that a hole is caused by loss rather than by reordering.
2102 *
2103 * tcp_xmit_retransmit_queue() decides _what_ we should retransmit to fill
2104 * the holes caused by lost packets.
2105 *
2106 * And the most logically complicated part of the algorithm is the undo
2107 * heuristics. We detect false retransmits due to both too-early
2108 * fast retransmit (reordering) and an underestimated RTO, analyzing
2109 * timestamps and D-SACKs.
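 *
 * (To make the bookkeeping above concrete, with purely hypothetical
 * numbers: packets_out = 10, sacked_out = 3, lost_out = 2, retrans_out = 1
 * gives left_out = 3 + 2 = 5 and in_flight = 10 - 5 + 1 = 6. Under FACK,
 * if the most forward SACK covers the 8th outstanding segment then
 * fackets_out = 8, and deciding "something is lost" marks the
 * 8 - 3 = 5 not-yet-SACKed segments below it as lost.)
 *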
When we detect that some segments were 2110 * retransmitted by mistake and CWND reduction was wrong, we undo 2111 * window reduction and abort recovery phase. This logic is hidden 2112 * inside several functions named tcp_try_undo_<something>. 2113 */ 2114 2115 /* This function decides, when we should leave Disordered state 2116 * and enter Recovery phase, reducing congestion window. 2117 * 2118 * Main question: may we further continue forward transmission 2119 * with the same cwnd? 2120 */ 2121 static int tcp_time_to_recover(struct sock *sk) 2122 { 2123 struct tcp_sock *tp = tcp_sk(sk); 2124 __u32 packets_out; 2125 2126 /* Do not perform any recovery during F-RTO algorithm */ 2127 if (tp->frto_counter) 2128 return 0; 2129 2130 /* Trick#1: The loss is proven. */ 2131 if (tp->lost_out) 2132 return 1; 2133 2134 /* Not-A-Trick#2 : Classic rule... */ 2135 if (tcp_dupack_heurestics(tp) > tp->reordering) 2136 return 1; 2137 2138 /* Trick#3 : when we use RFC2988 timer restart, fast 2139 * retransmit can be triggered by timeout of queue head. 2140 */ 2141 if (tcp_is_fack(tp) && tcp_head_timedout(sk)) 2142 return 1; 2143 2144 /* Trick#4: It is still not OK... But will it be useful to delay 2145 * recovery more? 2146 */ 2147 packets_out = tp->packets_out; 2148 if (packets_out <= tp->reordering && 2149 tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) && 2150 !tcp_may_send_now(sk)) { 2151 /* We have nothing to send. This connection is limited 2152 * either by receiver window or by application. 2153 */ 2154 return 1; 2155 } 2156 2157 return 0; 2158 } 2159 2160 /* RFC: This is from the original, I doubt that this is necessary at all: 2161 * clear xmit_retrans hint if seq of this skb is beyond hint. How could we 2162 * retransmitted past LOST markings in the first place? I'm not fully sure 2163 * about undo and end of connection cases, which can cause R without L? 2164 */ 2165 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) 2166 { 2167 if ((tp->retransmit_skb_hint != NULL) && 2168 before(TCP_SKB_CB(skb)->seq, 2169 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) 2170 tp->retransmit_skb_hint = NULL; 2171 } 2172 2173 /* Mark head of queue up as lost. With RFC3517 SACK, the packets is 2174 * is against sacked "cnt", otherwise it's against facked "cnt" 2175 */ 2176 static void tcp_mark_head_lost(struct sock *sk, int packets) 2177 { 2178 struct tcp_sock *tp = tcp_sk(sk); 2179 struct sk_buff *skb; 2180 int cnt, oldcnt; 2181 int err; 2182 unsigned int mss; 2183 2184 WARN_ON(packets > tp->packets_out); 2185 if (tp->lost_skb_hint) { 2186 skb = tp->lost_skb_hint; 2187 cnt = tp->lost_cnt_hint; 2188 } else { 2189 skb = tcp_write_queue_head(sk); 2190 cnt = 0; 2191 } 2192 2193 tcp_for_write_queue_from(skb, sk) { 2194 if (skb == tcp_send_head(sk)) 2195 break; 2196 /* TODO: do this better */ 2197 /* this is not the most efficient way to do this... 
*/ 2198 tp->lost_skb_hint = skb; 2199 tp->lost_cnt_hint = cnt; 2200 2201 if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq)) 2202 break; 2203 2204 oldcnt = cnt; 2205 if (tcp_is_fack(tp) || tcp_is_reno(tp) || 2206 (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 2207 cnt += tcp_skb_pcount(skb); 2208 2209 if (cnt > packets) { 2210 if (tcp_is_sack(tp) || (oldcnt >= packets)) 2211 break; 2212 2213 mss = skb_shinfo(skb)->gso_size; 2214 err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss); 2215 if (err < 0) 2216 break; 2217 cnt = packets; 2218 } 2219 2220 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { 2221 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 2222 tp->lost_out += tcp_skb_pcount(skb); 2223 tcp_verify_retransmit_hint(tp, skb); 2224 } 2225 } 2226 tcp_verify_left_out(tp); 2227 } 2228 2229 /* Account newly detected lost packet(s) */ 2230 2231 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) 2232 { 2233 struct tcp_sock *tp = tcp_sk(sk); 2234 2235 if (tcp_is_reno(tp)) { 2236 tcp_mark_head_lost(sk, 1); 2237 } else if (tcp_is_fack(tp)) { 2238 int lost = tp->fackets_out - tp->reordering; 2239 if (lost <= 0) 2240 lost = 1; 2241 tcp_mark_head_lost(sk, lost); 2242 } else { 2243 int sacked_upto = tp->sacked_out - tp->reordering; 2244 if (sacked_upto < fast_rexmit) 2245 sacked_upto = fast_rexmit; 2246 tcp_mark_head_lost(sk, sacked_upto); 2247 } 2248 2249 /* New heuristics: it is possible only after we switched 2250 * to restart timer each time when something is ACKed. 2251 * Hence, we can detect timed out packets during fast 2252 * retransmit without falling to slow start. 2253 */ 2254 if (tcp_is_fack(tp) && tcp_head_timedout(sk)) { 2255 struct sk_buff *skb; 2256 2257 skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint 2258 : tcp_write_queue_head(sk); 2259 2260 tcp_for_write_queue_from(skb, sk) { 2261 if (skb == tcp_send_head(sk)) 2262 break; 2263 if (!tcp_skb_timedout(sk, skb)) 2264 break; 2265 2266 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { 2267 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 2268 tp->lost_out += tcp_skb_pcount(skb); 2269 tcp_verify_retransmit_hint(tp, skb); 2270 } 2271 } 2272 2273 tp->scoreboard_skb_hint = skb; 2274 2275 tcp_verify_left_out(tp); 2276 } 2277 } 2278 2279 /* CWND moderation, preventing bursts due to too big ACKs 2280 * in dubious situations. 2281 */ 2282 static inline void tcp_moderate_cwnd(struct tcp_sock *tp) 2283 { 2284 tp->snd_cwnd = min(tp->snd_cwnd, 2285 tcp_packets_in_flight(tp) + tcp_max_burst(tp)); 2286 tp->snd_cwnd_stamp = tcp_time_stamp; 2287 } 2288 2289 /* Lower bound on congestion window is slow start threshold 2290 * unless congestion avoidance choice decides to overide it. 2291 */ 2292 static inline u32 tcp_cwnd_min(const struct sock *sk) 2293 { 2294 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 2295 2296 return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh; 2297 } 2298 2299 /* Decrease cwnd each second ack. 
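 * Illustration (hypothetical numbers): snd_cwnd_cnt alternates between 0
 * and 1, so only every second qualifying ACK decrements snd_cwnd by one,
 * never below tcp_cwnd_min() and never above packets in flight + 1;
 * starting from snd_cwnd = 10 with ssthresh = 5, roughly ten such ACKs
 * bring the window down to 5 - the classic rate-halving descent.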
*/ 2300 static void tcp_cwnd_down(struct sock *sk, int flag) 2301 { 2302 struct tcp_sock *tp = tcp_sk(sk); 2303 int decr = tp->snd_cwnd_cnt + 1; 2304 2305 if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) || 2306 (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) { 2307 tp->snd_cwnd_cnt = decr & 1; 2308 decr >>= 1; 2309 2310 if (decr && tp->snd_cwnd > tcp_cwnd_min(sk)) 2311 tp->snd_cwnd -= decr; 2312 2313 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1); 2314 tp->snd_cwnd_stamp = tcp_time_stamp; 2315 } 2316 } 2317 2318 /* Nothing was retransmitted or returned timestamp is less 2319 * than timestamp of the first retransmission. 2320 */ 2321 static inline int tcp_packet_delayed(struct tcp_sock *tp) 2322 { 2323 return !tp->retrans_stamp || 2324 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2325 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp)); 2326 } 2327 2328 /* Undo procedures. */ 2329 2330 #if FASTRETRANS_DEBUG > 1 2331 static void DBGUNDO(struct sock *sk, const char *msg) 2332 { 2333 struct tcp_sock *tp = tcp_sk(sk); 2334 struct inet_sock *inet = inet_sk(sk); 2335 2336 if (sk->sk_family == AF_INET) { 2337 printk(KERN_DEBUG "Undo %s " NIPQUAD_FMT "/%u c%u l%u ss%u/%u p%u\n", 2338 msg, 2339 NIPQUAD(inet->daddr), ntohs(inet->dport), 2340 tp->snd_cwnd, tcp_left_out(tp), 2341 tp->snd_ssthresh, tp->prior_ssthresh, 2342 tp->packets_out); 2343 } 2344 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 2345 else if (sk->sk_family == AF_INET6) { 2346 struct ipv6_pinfo *np = inet6_sk(sk); 2347 printk(KERN_DEBUG "Undo %s " NIP6_FMT "/%u c%u l%u ss%u/%u p%u\n", 2348 msg, 2349 NIP6(np->daddr), ntohs(inet->dport), 2350 tp->snd_cwnd, tcp_left_out(tp), 2351 tp->snd_ssthresh, tp->prior_ssthresh, 2352 tp->packets_out); 2353 } 2354 #endif 2355 } 2356 #else 2357 #define DBGUNDO(x...) do { } while (0) 2358 #endif 2359 2360 static void tcp_undo_cwr(struct sock *sk, const int undo) 2361 { 2362 struct tcp_sock *tp = tcp_sk(sk); 2363 2364 if (tp->prior_ssthresh) { 2365 const struct inet_connection_sock *icsk = inet_csk(sk); 2366 2367 if (icsk->icsk_ca_ops->undo_cwnd) 2368 tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); 2369 else 2370 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); 2371 2372 if (undo && tp->prior_ssthresh > tp->snd_ssthresh) { 2373 tp->snd_ssthresh = tp->prior_ssthresh; 2374 TCP_ECN_withdraw_cwr(tp); 2375 } 2376 } else { 2377 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); 2378 } 2379 tcp_moderate_cwnd(tp); 2380 tp->snd_cwnd_stamp = tcp_time_stamp; 2381 2382 /* There is something screwy going on with the retrans hints after 2383 an undo */ 2384 tcp_clear_all_retrans_hints(tp); 2385 } 2386 2387 static inline int tcp_may_undo(struct tcp_sock *tp) 2388 { 2389 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); 2390 } 2391 2392 /* People celebrate: "We love our President!" */ 2393 static int tcp_try_undo_recovery(struct sock *sk) 2394 { 2395 struct tcp_sock *tp = tcp_sk(sk); 2396 2397 if (tcp_may_undo(tp)) { 2398 int mib_idx; 2399 2400 /* Happy end! We did not retransmit anything 2401 * or our original transmission succeeded. 2402 */ 2403 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? 
"loss" : "retrans"); 2404 tcp_undo_cwr(sk, 1); 2405 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) 2406 mib_idx = LINUX_MIB_TCPLOSSUNDO; 2407 else 2408 mib_idx = LINUX_MIB_TCPFULLUNDO; 2409 2410 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2411 tp->undo_marker = 0; 2412 } 2413 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { 2414 /* Hold old state until something *above* high_seq 2415 * is ACKed. For Reno it is MUST to prevent false 2416 * fast retransmits (RFC2582). SACK TCP is safe. */ 2417 tcp_moderate_cwnd(tp); 2418 return 1; 2419 } 2420 tcp_set_ca_state(sk, TCP_CA_Open); 2421 return 0; 2422 } 2423 2424 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ 2425 static void tcp_try_undo_dsack(struct sock *sk) 2426 { 2427 struct tcp_sock *tp = tcp_sk(sk); 2428 2429 if (tp->undo_marker && !tp->undo_retrans) { 2430 DBGUNDO(sk, "D-SACK"); 2431 tcp_undo_cwr(sk, 1); 2432 tp->undo_marker = 0; 2433 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); 2434 } 2435 } 2436 2437 /* Undo during fast recovery after partial ACK. */ 2438 2439 static int tcp_try_undo_partial(struct sock *sk, int acked) 2440 { 2441 struct tcp_sock *tp = tcp_sk(sk); 2442 /* Partial ACK arrived. Force Hoe's retransmit. */ 2443 int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering); 2444 2445 if (tcp_may_undo(tp)) { 2446 /* Plain luck! Hole if filled with delayed 2447 * packet, rather than with a retransmit. 2448 */ 2449 if (tp->retrans_out == 0) 2450 tp->retrans_stamp = 0; 2451 2452 tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); 2453 2454 DBGUNDO(sk, "Hoe"); 2455 tcp_undo_cwr(sk, 0); 2456 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); 2457 2458 /* So... Do not make Hoe's retransmit yet. 2459 * If the first packet was delayed, the rest 2460 * ones are most probably delayed as well. 2461 */ 2462 failed = 0; 2463 } 2464 return failed; 2465 } 2466 2467 /* Undo during loss recovery after partial ACK. 
*/ 2468 static int tcp_try_undo_loss(struct sock *sk) 2469 { 2470 struct tcp_sock *tp = tcp_sk(sk); 2471 2472 if (tcp_may_undo(tp)) { 2473 struct sk_buff *skb; 2474 tcp_for_write_queue(skb, sk) { 2475 if (skb == tcp_send_head(sk)) 2476 break; 2477 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 2478 } 2479 2480 tcp_clear_all_retrans_hints(tp); 2481 2482 DBGUNDO(sk, "partial loss"); 2483 tp->lost_out = 0; 2484 tcp_undo_cwr(sk, 1); 2485 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); 2486 inet_csk(sk)->icsk_retransmits = 0; 2487 tp->undo_marker = 0; 2488 if (tcp_is_sack(tp)) 2489 tcp_set_ca_state(sk, TCP_CA_Open); 2490 return 1; 2491 } 2492 return 0; 2493 } 2494 2495 static inline void tcp_complete_cwr(struct sock *sk) 2496 { 2497 struct tcp_sock *tp = tcp_sk(sk); 2498 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 2499 tp->snd_cwnd_stamp = tcp_time_stamp; 2500 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2501 } 2502 2503 static void tcp_try_keep_open(struct sock *sk) 2504 { 2505 struct tcp_sock *tp = tcp_sk(sk); 2506 int state = TCP_CA_Open; 2507 2508 if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker) 2509 state = TCP_CA_Disorder; 2510 2511 if (inet_csk(sk)->icsk_ca_state != state) { 2512 tcp_set_ca_state(sk, state); 2513 tp->high_seq = tp->snd_nxt; 2514 } 2515 } 2516 2517 static void tcp_try_to_open(struct sock *sk, int flag) 2518 { 2519 struct tcp_sock *tp = tcp_sk(sk); 2520 2521 tcp_verify_left_out(tp); 2522 2523 if (!tp->frto_counter && tp->retrans_out == 0) 2524 tp->retrans_stamp = 0; 2525 2526 if (flag & FLAG_ECE) 2527 tcp_enter_cwr(sk, 1); 2528 2529 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { 2530 tcp_try_keep_open(sk); 2531 tcp_moderate_cwnd(tp); 2532 } else { 2533 tcp_cwnd_down(sk, flag); 2534 } 2535 } 2536 2537 static void tcp_mtup_probe_failed(struct sock *sk) 2538 { 2539 struct inet_connection_sock *icsk = inet_csk(sk); 2540 2541 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; 2542 icsk->icsk_mtup.probe_size = 0; 2543 } 2544 2545 static void tcp_mtup_probe_success(struct sock *sk, struct sk_buff *skb) 2546 { 2547 struct tcp_sock *tp = tcp_sk(sk); 2548 struct inet_connection_sock *icsk = inet_csk(sk); 2549 2550 /* FIXME: breaks with very large cwnd */ 2551 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2552 tp->snd_cwnd = tp->snd_cwnd * 2553 tcp_mss_to_mtu(sk, tp->mss_cache) / 2554 icsk->icsk_mtup.probe_size; 2555 tp->snd_cwnd_cnt = 0; 2556 tp->snd_cwnd_stamp = tcp_time_stamp; 2557 tp->rcv_ssthresh = tcp_current_ssthresh(sk); 2558 2559 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; 2560 icsk->icsk_mtup.probe_size = 0; 2561 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 2562 } 2563 2564 /* Process an event, which can update packets-in-flight not trivially. 2565 * Main goal of this function is to calculate new estimate for left_out, 2566 * taking into account both packets sitting in receiver's buffer and 2567 * packets lost by network. 2568 * 2569 * Besides that it does CWND reduction, when packet loss is detected 2570 * and changes state of machine. 2571 * 2572 * It does _not_ decide what to send, it is made in function 2573 * tcp_xmit_retransmit_queue(). 
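 *
 * A typical (hypothetical) SACK flow through here: while in Open, SACK
 * blocks accumulate until tcp_fackets_out() exceeds tp->reordering, so
 * tcp_time_to_recover() returns true; we then record high_seq = snd_nxt
 * and undo_marker = snd_una, lower ssthresh via the congestion ops, enter
 * TCP_CA_Recovery with fast_rexmit = 1, mark the head lost in
 * tcp_update_scoreboard() and let tcp_xmit_retransmit_queue() resend it.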
2574 */ 2575 static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag) 2576 { 2577 struct inet_connection_sock *icsk = inet_csk(sk); 2578 struct tcp_sock *tp = tcp_sk(sk); 2579 int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 2580 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 2581 (tcp_fackets_out(tp) > tp->reordering)); 2582 int fast_rexmit = 0, mib_idx; 2583 2584 if (WARN_ON(!tp->packets_out && tp->sacked_out)) 2585 tp->sacked_out = 0; 2586 if (WARN_ON(!tp->sacked_out && tp->fackets_out)) 2587 tp->fackets_out = 0; 2588 2589 /* Now state machine starts. 2590 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ 2591 if (flag & FLAG_ECE) 2592 tp->prior_ssthresh = 0; 2593 2594 /* B. In all the states check for reneging SACKs. */ 2595 if (tcp_check_sack_reneging(sk, flag)) 2596 return; 2597 2598 /* C. Process data loss notification, provided it is valid. */ 2599 if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) && 2600 before(tp->snd_una, tp->high_seq) && 2601 icsk->icsk_ca_state != TCP_CA_Open && 2602 tp->fackets_out > tp->reordering) { 2603 tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering); 2604 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS); 2605 } 2606 2607 /* D. Check consistency of the current state. */ 2608 tcp_verify_left_out(tp); 2609 2610 /* E. Check state exit conditions. State can be terminated 2611 * when high_seq is ACKed. */ 2612 if (icsk->icsk_ca_state == TCP_CA_Open) { 2613 WARN_ON(tp->retrans_out != 0); 2614 tp->retrans_stamp = 0; 2615 } else if (!before(tp->snd_una, tp->high_seq)) { 2616 switch (icsk->icsk_ca_state) { 2617 case TCP_CA_Loss: 2618 icsk->icsk_retransmits = 0; 2619 if (tcp_try_undo_recovery(sk)) 2620 return; 2621 break; 2622 2623 case TCP_CA_CWR: 2624 /* CWR is to be held something *above* high_seq 2625 * is ACKed for CWR bit to reach receiver. */ 2626 if (tp->snd_una != tp->high_seq) { 2627 tcp_complete_cwr(sk); 2628 tcp_set_ca_state(sk, TCP_CA_Open); 2629 } 2630 break; 2631 2632 case TCP_CA_Disorder: 2633 tcp_try_undo_dsack(sk); 2634 if (!tp->undo_marker || 2635 /* For SACK case do not Open to allow to undo 2636 * catching for all duplicate ACKs. */ 2637 tcp_is_reno(tp) || tp->snd_una != tp->high_seq) { 2638 tp->undo_marker = 0; 2639 tcp_set_ca_state(sk, TCP_CA_Open); 2640 } 2641 break; 2642 2643 case TCP_CA_Recovery: 2644 if (tcp_is_reno(tp)) 2645 tcp_reset_reno_sack(tp); 2646 if (tcp_try_undo_recovery(sk)) 2647 return; 2648 tcp_complete_cwr(sk); 2649 break; 2650 } 2651 } 2652 2653 /* F. Process state. */ 2654 switch (icsk->icsk_ca_state) { 2655 case TCP_CA_Recovery: 2656 if (!(flag & FLAG_SND_UNA_ADVANCED)) { 2657 if (tcp_is_reno(tp) && is_dupack) 2658 tcp_add_reno_sack(sk); 2659 } else 2660 do_lost = tcp_try_undo_partial(sk, pkts_acked); 2661 break; 2662 case TCP_CA_Loss: 2663 if (flag & FLAG_DATA_ACKED) 2664 icsk->icsk_retransmits = 0; 2665 if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED) 2666 tcp_reset_reno_sack(tp); 2667 if (!tcp_try_undo_loss(sk)) { 2668 tcp_moderate_cwnd(tp); 2669 tcp_xmit_retransmit_queue(sk); 2670 return; 2671 } 2672 if (icsk->icsk_ca_state != TCP_CA_Open) 2673 return; 2674 /* Loss is undone; fall through to processing in Open state. 
*/ 2675 default: 2676 if (tcp_is_reno(tp)) { 2677 if (flag & FLAG_SND_UNA_ADVANCED) 2678 tcp_reset_reno_sack(tp); 2679 if (is_dupack) 2680 tcp_add_reno_sack(sk); 2681 } 2682 2683 if (icsk->icsk_ca_state == TCP_CA_Disorder) 2684 tcp_try_undo_dsack(sk); 2685 2686 if (!tcp_time_to_recover(sk)) { 2687 tcp_try_to_open(sk, flag); 2688 return; 2689 } 2690 2691 /* MTU probe failure: don't reduce cwnd */ 2692 if (icsk->icsk_ca_state < TCP_CA_CWR && 2693 icsk->icsk_mtup.probe_size && 2694 tp->snd_una == tp->mtu_probe.probe_seq_start) { 2695 tcp_mtup_probe_failed(sk); 2696 /* Restores the reduction we did in tcp_mtup_probe() */ 2697 tp->snd_cwnd++; 2698 tcp_simple_retransmit(sk); 2699 return; 2700 } 2701 2702 /* Otherwise enter Recovery state */ 2703 2704 if (tcp_is_reno(tp)) 2705 mib_idx = LINUX_MIB_TCPRENORECOVERY; 2706 else 2707 mib_idx = LINUX_MIB_TCPSACKRECOVERY; 2708 2709 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2710 2711 tp->high_seq = tp->snd_nxt; 2712 tp->prior_ssthresh = 0; 2713 tp->undo_marker = tp->snd_una; 2714 tp->undo_retrans = tp->retrans_out; 2715 2716 if (icsk->icsk_ca_state < TCP_CA_CWR) { 2717 if (!(flag & FLAG_ECE)) 2718 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2719 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 2720 TCP_ECN_queue_cwr(tp); 2721 } 2722 2723 tp->bytes_acked = 0; 2724 tp->snd_cwnd_cnt = 0; 2725 tcp_set_ca_state(sk, TCP_CA_Recovery); 2726 fast_rexmit = 1; 2727 } 2728 2729 if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk))) 2730 tcp_update_scoreboard(sk, fast_rexmit); 2731 tcp_cwnd_down(sk, flag); 2732 tcp_xmit_retransmit_queue(sk); 2733 } 2734 2735 /* Read draft-ietf-tcplw-high-performance before mucking 2736 * with this code. (Supersedes RFC1323) 2737 */ 2738 static void tcp_ack_saw_tstamp(struct sock *sk, int flag) 2739 { 2740 /* RTTM Rule: A TSecr value received in a segment is used to 2741 * update the averaged RTT measurement only if the segment 2742 * acknowledges some new data, i.e., only if it advances the 2743 * left edge of the send window. 2744 * 2745 * See draft-ietf-tcplw-high-performance-00, section 3.3. 2746 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru> 2747 * 2748 * Changed: reset backoff as soon as we see the first valid sample. 2749 * If we do not, we get strongly overestimated rto. With timestamps 2750 * samples are accepted even from very old segments: f.e., when rtt=1 2751 * increases to 8, we retransmit 5 times and after 8 seconds delayed 2752 * answer arrives rto becomes 120 seconds! If at least one of segments 2753 * in window is lost... Voila. --ANK (010210) 2754 */ 2755 struct tcp_sock *tp = tcp_sk(sk); 2756 const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; 2757 tcp_rtt_estimator(sk, seq_rtt); 2758 tcp_set_rto(sk); 2759 inet_csk(sk)->icsk_backoff = 0; 2760 tcp_bound_rto(sk); 2761 } 2762 2763 static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag) 2764 { 2765 /* We don't have a timestamp. Can only use 2766 * packets that are not retransmitted to determine 2767 * rtt estimates. Also, we must not reset the 2768 * backoff for rto until we get a non-retransmitted 2769 * packet. This allows us to deal with a situation 2770 * where the network delay has increased suddenly. 2771 * I.e. Karn's algorithm. (SIGCOMM '87, p5.) 
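 *
 * Concretely (hypothetical timings): a segment sent at t=0 with RTO 3s is
 * retransmitted at t=3s and an ACK arrives at t=3.5s. We cannot tell
 * whether it acknowledges the original (RTT 3.5s) or the retransmission
 * (RTT 0.5s), so the sample is ambiguous and FLAG_RETRANS_DATA_ACKED
 * below makes us skip it entirely.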
2772 */ 2773 2774 if (flag & FLAG_RETRANS_DATA_ACKED) 2775 return; 2776 2777 tcp_rtt_estimator(sk, seq_rtt); 2778 tcp_set_rto(sk); 2779 inet_csk(sk)->icsk_backoff = 0; 2780 tcp_bound_rto(sk); 2781 } 2782 2783 static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, 2784 const s32 seq_rtt) 2785 { 2786 const struct tcp_sock *tp = tcp_sk(sk); 2787 /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ 2788 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 2789 tcp_ack_saw_tstamp(sk, flag); 2790 else if (seq_rtt >= 0) 2791 tcp_ack_no_tstamp(sk, seq_rtt, flag); 2792 } 2793 2794 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) 2795 { 2796 const struct inet_connection_sock *icsk = inet_csk(sk); 2797 icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight); 2798 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; 2799 } 2800 2801 /* Restart timer after forward progress on connection. 2802 * RFC2988 recommends to restart timer to now+rto. 2803 */ 2804 static void tcp_rearm_rto(struct sock *sk) 2805 { 2806 struct tcp_sock *tp = tcp_sk(sk); 2807 2808 if (!tp->packets_out) { 2809 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 2810 } else { 2811 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2812 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 2813 } 2814 } 2815 2816 /* If we get here, the whole TSO packet has not been acked. */ 2817 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) 2818 { 2819 struct tcp_sock *tp = tcp_sk(sk); 2820 u32 packets_acked; 2821 2822 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); 2823 2824 packets_acked = tcp_skb_pcount(skb); 2825 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 2826 return 0; 2827 packets_acked -= tcp_skb_pcount(skb); 2828 2829 if (packets_acked) { 2830 BUG_ON(tcp_skb_pcount(skb) == 0); 2831 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); 2832 } 2833 2834 return packets_acked; 2835 } 2836 2837 /* Remove acknowledged frames from the retransmission queue. If our packet 2838 * is before the ack sequence we can discard it as it's confirmed to have 2839 * arrived at the other end. 
2840 */ 2841 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets) 2842 { 2843 struct tcp_sock *tp = tcp_sk(sk); 2844 const struct inet_connection_sock *icsk = inet_csk(sk); 2845 struct sk_buff *skb; 2846 u32 now = tcp_time_stamp; 2847 int fully_acked = 1; 2848 int flag = 0; 2849 u32 pkts_acked = 0; 2850 u32 reord = tp->packets_out; 2851 s32 seq_rtt = -1; 2852 s32 ca_seq_rtt = -1; 2853 ktime_t last_ackt = net_invalid_timestamp(); 2854 2855 while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) { 2856 struct tcp_skb_cb *scb = TCP_SKB_CB(skb); 2857 u32 end_seq; 2858 u32 acked_pcount; 2859 u8 sacked = scb->sacked; 2860 2861 /* Determine how many packets and what bytes were acked, tso and else */ 2862 if (after(scb->end_seq, tp->snd_una)) { 2863 if (tcp_skb_pcount(skb) == 1 || 2864 !after(tp->snd_una, scb->seq)) 2865 break; 2866 2867 acked_pcount = tcp_tso_acked(sk, skb); 2868 if (!acked_pcount) 2869 break; 2870 2871 fully_acked = 0; 2872 end_seq = tp->snd_una; 2873 } else { 2874 acked_pcount = tcp_skb_pcount(skb); 2875 end_seq = scb->end_seq; 2876 } 2877 2878 /* MTU probing checks */ 2879 if (fully_acked && icsk->icsk_mtup.probe_size && 2880 !after(tp->mtu_probe.probe_seq_end, scb->end_seq)) { 2881 tcp_mtup_probe_success(sk, skb); 2882 } 2883 2884 if (sacked & TCPCB_RETRANS) { 2885 if (sacked & TCPCB_SACKED_RETRANS) 2886 tp->retrans_out -= acked_pcount; 2887 flag |= FLAG_RETRANS_DATA_ACKED; 2888 ca_seq_rtt = -1; 2889 seq_rtt = -1; 2890 if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1)) 2891 flag |= FLAG_NONHEAD_RETRANS_ACKED; 2892 } else { 2893 ca_seq_rtt = now - scb->when; 2894 last_ackt = skb->tstamp; 2895 if (seq_rtt < 0) { 2896 seq_rtt = ca_seq_rtt; 2897 } 2898 if (!(sacked & TCPCB_SACKED_ACKED)) 2899 reord = min(pkts_acked, reord); 2900 } 2901 2902 if (sacked & TCPCB_SACKED_ACKED) 2903 tp->sacked_out -= acked_pcount; 2904 if (sacked & TCPCB_LOST) 2905 tp->lost_out -= acked_pcount; 2906 2907 if (unlikely(tp->urg_mode && !before(end_seq, tp->snd_up))) 2908 tp->urg_mode = 0; 2909 2910 tp->packets_out -= acked_pcount; 2911 pkts_acked += acked_pcount; 2912 2913 /* Initial outgoing SYN's get put onto the write_queue 2914 * just like anything else we transmit. It is not 2915 * true data, and if we misinform our callers that 2916 * this ACK acks real data, we will erroneously exit 2917 * connection startup slow start one packet too 2918 * quickly. This is severely frowned upon behavior. 2919 */ 2920 if (!(scb->flags & TCPCB_FLAG_SYN)) { 2921 flag |= FLAG_DATA_ACKED; 2922 } else { 2923 flag |= FLAG_SYN_ACKED; 2924 tp->retrans_stamp = 0; 2925 } 2926 2927 if (!fully_acked) 2928 break; 2929 2930 tcp_unlink_write_queue(skb, sk); 2931 sk_wmem_free_skb(sk, skb); 2932 tcp_clear_all_retrans_hints(tp); 2933 } 2934 2935 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 2936 flag |= FLAG_SACK_RENEGING; 2937 2938 if (flag & FLAG_ACKED) { 2939 const struct tcp_congestion_ops *ca_ops 2940 = inet_csk(sk)->icsk_ca_ops; 2941 2942 tcp_ack_update_rtt(sk, flag, seq_rtt); 2943 tcp_rearm_rto(sk); 2944 2945 if (tcp_is_reno(tp)) { 2946 tcp_remove_reno_sacks(sk, pkts_acked); 2947 } else { 2948 /* Non-retransmitted hole got filled? That's reordering */ 2949 if (reord < prior_fackets) 2950 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 2951 } 2952 2953 tp->fackets_out -= min(pkts_acked, tp->fackets_out); 2954 2955 if (ca_ops->pkts_acked) { 2956 s32 rtt_us = -1; 2957 2958 /* Is the ACK triggering packet unambiguous? 
*/ 2959 if (!(flag & FLAG_RETRANS_DATA_ACKED)) { 2960 /* High resolution needed and available? */ 2961 if (ca_ops->flags & TCP_CONG_RTT_STAMP && 2962 !ktime_equal(last_ackt, 2963 net_invalid_timestamp())) 2964 rtt_us = ktime_us_delta(ktime_get_real(), 2965 last_ackt); 2966 else if (ca_seq_rtt > 0) 2967 rtt_us = jiffies_to_usecs(ca_seq_rtt); 2968 } 2969 2970 ca_ops->pkts_acked(sk, pkts_acked, rtt_us); 2971 } 2972 } 2973 2974 #if FASTRETRANS_DEBUG > 0 2975 WARN_ON((int)tp->sacked_out < 0); 2976 WARN_ON((int)tp->lost_out < 0); 2977 WARN_ON((int)tp->retrans_out < 0); 2978 if (!tp->packets_out && tcp_is_sack(tp)) { 2979 icsk = inet_csk(sk); 2980 if (tp->lost_out) { 2981 printk(KERN_DEBUG "Leak l=%u %d\n", 2982 tp->lost_out, icsk->icsk_ca_state); 2983 tp->lost_out = 0; 2984 } 2985 if (tp->sacked_out) { 2986 printk(KERN_DEBUG "Leak s=%u %d\n", 2987 tp->sacked_out, icsk->icsk_ca_state); 2988 tp->sacked_out = 0; 2989 } 2990 if (tp->retrans_out) { 2991 printk(KERN_DEBUG "Leak r=%u %d\n", 2992 tp->retrans_out, icsk->icsk_ca_state); 2993 tp->retrans_out = 0; 2994 } 2995 } 2996 #endif 2997 return flag; 2998 } 2999 3000 static void tcp_ack_probe(struct sock *sk) 3001 { 3002 const struct tcp_sock *tp = tcp_sk(sk); 3003 struct inet_connection_sock *icsk = inet_csk(sk); 3004 3005 /* Was it a usable window open? */ 3006 3007 if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) { 3008 icsk->icsk_backoff = 0; 3009 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); 3010 /* Socket must be waked up by subsequent tcp_data_snd_check(). 3011 * This function is not for random using! 3012 */ 3013 } else { 3014 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 3015 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 3016 TCP_RTO_MAX); 3017 } 3018 } 3019 3020 static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag) 3021 { 3022 return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || 3023 inet_csk(sk)->icsk_ca_state != TCP_CA_Open); 3024 } 3025 3026 static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag) 3027 { 3028 const struct tcp_sock *tp = tcp_sk(sk); 3029 return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && 3030 !((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR)); 3031 } 3032 3033 /* Check that window update is acceptable. 3034 * The function assumes that snd_una<=ack<=snd_next. 3035 */ 3036 static inline int tcp_may_update_window(const struct tcp_sock *tp, 3037 const u32 ack, const u32 ack_seq, 3038 const u32 nwin) 3039 { 3040 return (after(ack, tp->snd_una) || 3041 after(ack_seq, tp->snd_wl1) || 3042 (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd)); 3043 } 3044 3045 /* Update our send window. 3046 * 3047 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 3048 * and in FreeBSD. NetBSD's one is even worse.) is wrong. 3049 */ 3050 static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack, 3051 u32 ack_seq) 3052 { 3053 struct tcp_sock *tp = tcp_sk(sk); 3054 int flag = 0; 3055 u32 nwin = ntohs(tcp_hdr(skb)->window); 3056 3057 if (likely(!tcp_hdr(skb)->syn)) 3058 nwin <<= tp->rx_opt.snd_wscale; 3059 3060 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { 3061 flag |= FLAG_WIN_UPDATE; 3062 tcp_update_wl(tp, ack, ack_seq); 3063 3064 if (tp->snd_wnd != nwin) { 3065 tp->snd_wnd = nwin; 3066 3067 /* Note, it is the only place, where 3068 * fast path is recovered for sending TCP. 
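 * Clearing pred_flags below disables header prediction until
 * tcp_fast_path_check() is satisfied that it may be switched back on, at
 * which point the prediction flags are rebuilt, including the freshly
 * advertised window.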
3069 */ 3070 tp->pred_flags = 0; 3071 tcp_fast_path_check(sk); 3072 3073 if (nwin > tp->max_window) { 3074 tp->max_window = nwin; 3075 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie); 3076 } 3077 } 3078 } 3079 3080 tp->snd_una = ack; 3081 3082 return flag; 3083 } 3084 3085 /* A very conservative spurious RTO response algorithm: reduce cwnd and 3086 * continue in congestion avoidance. 3087 */ 3088 static void tcp_conservative_spur_to_response(struct tcp_sock *tp) 3089 { 3090 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 3091 tp->snd_cwnd_cnt = 0; 3092 tp->bytes_acked = 0; 3093 TCP_ECN_queue_cwr(tp); 3094 tcp_moderate_cwnd(tp); 3095 } 3096 3097 /* A conservative spurious RTO response algorithm: reduce cwnd using 3098 * rate halving and continue in congestion avoidance. 3099 */ 3100 static void tcp_ratehalving_spur_to_response(struct sock *sk) 3101 { 3102 tcp_enter_cwr(sk, 0); 3103 } 3104 3105 static void tcp_undo_spur_to_response(struct sock *sk, int flag) 3106 { 3107 if (flag & FLAG_ECE) 3108 tcp_ratehalving_spur_to_response(sk); 3109 else 3110 tcp_undo_cwr(sk, 1); 3111 } 3112 3113 /* F-RTO spurious RTO detection algorithm (RFC4138) 3114 * 3115 * F-RTO affects during two new ACKs following RTO (well, almost, see inline 3116 * comments). State (ACK number) is kept in frto_counter. When ACK advances 3117 * window (but not to or beyond highest sequence sent before RTO): 3118 * On First ACK, send two new segments out. 3119 * On Second ACK, RTO was likely spurious. Do spurious response (response 3120 * algorithm is not part of the F-RTO detection algorithm 3121 * given in RFC4138 but can be selected separately). 3122 * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss 3123 * and TCP falls back to conventional RTO recovery. F-RTO allows overriding 3124 * of Nagle, this is done using frto_counter states 2 and 3, when a new data 3125 * segment of any size sent during F-RTO, state 2 is upgraded to 3. 3126 * 3127 * Rationale: if the RTO was spurious, new ACKs should arrive from the 3128 * original window even after we transmit two new data segments. 3129 * 3130 * SACK version: 3131 * on first step, wait until first cumulative ACK arrives, then move to 3132 * the second step. In second step, the next ACK decides. 3133 * 3134 * F-RTO is implemented (mainly) in four functions: 3135 * - tcp_use_frto() is used to determine if TCP is can use F-RTO 3136 * - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is 3137 * called when tcp_use_frto() showed green light 3138 * - tcp_process_frto() handles incoming ACKs during F-RTO algorithm 3139 * - tcp_enter_frto_loss() is called if there is not enough evidence 3140 * to prove that the RTO is indeed spurious. It transfers the control 3141 * from F-RTO to the conventional RTO recovery 3142 */ 3143 static int tcp_process_frto(struct sock *sk, int flag) 3144 { 3145 struct tcp_sock *tp = tcp_sk(sk); 3146 3147 tcp_verify_left_out(tp); 3148 3149 /* Duplicate the behavior from Loss state (fastretrans_alert) */ 3150 if (flag & FLAG_DATA_ACKED) 3151 inet_csk(sk)->icsk_retransmits = 0; 3152 3153 if ((flag & FLAG_NONHEAD_RETRANS_ACKED) || 3154 ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED))) 3155 tp->undo_marker = 0; 3156 3157 if (!before(tp->snd_una, tp->frto_highmark)) { 3158 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 
2 : 3), flag); 3159 return 1; 3160 } 3161 3162 if (!tcp_is_sackfrto(tp)) { 3163 /* RFC4138 shortcoming in step 2; should also have case c): 3164 * ACK isn't duplicate nor advances window, e.g., opposite dir 3165 * data, winupdate 3166 */ 3167 if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP)) 3168 return 1; 3169 3170 if (!(flag & FLAG_DATA_ACKED)) { 3171 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3), 3172 flag); 3173 return 1; 3174 } 3175 } else { 3176 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) { 3177 /* Prevent sending of new data. */ 3178 tp->snd_cwnd = min(tp->snd_cwnd, 3179 tcp_packets_in_flight(tp)); 3180 return 1; 3181 } 3182 3183 if ((tp->frto_counter >= 2) && 3184 (!(flag & FLAG_FORWARD_PROGRESS) || 3185 ((flag & FLAG_DATA_SACKED) && 3186 !(flag & FLAG_ONLY_ORIG_SACKED)))) { 3187 /* RFC4138 shortcoming (see comment above) */ 3188 if (!(flag & FLAG_FORWARD_PROGRESS) && 3189 (flag & FLAG_NOT_DUP)) 3190 return 1; 3191 3192 tcp_enter_frto_loss(sk, 3, flag); 3193 return 1; 3194 } 3195 } 3196 3197 if (tp->frto_counter == 1) { 3198 /* tcp_may_send_now needs to see updated state */ 3199 tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; 3200 tp->frto_counter = 2; 3201 3202 if (!tcp_may_send_now(sk)) 3203 tcp_enter_frto_loss(sk, 2, flag); 3204 3205 return 1; 3206 } else { 3207 switch (sysctl_tcp_frto_response) { 3208 case 2: 3209 tcp_undo_spur_to_response(sk, flag); 3210 break; 3211 case 1: 3212 tcp_conservative_spur_to_response(tp); 3213 break; 3214 default: 3215 tcp_ratehalving_spur_to_response(sk); 3216 break; 3217 } 3218 tp->frto_counter = 0; 3219 tp->undo_marker = 0; 3220 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); 3221 } 3222 return 0; 3223 } 3224 3225 /* This routine deals with incoming acks, but not outgoing ones. */ 3226 static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) 3227 { 3228 struct inet_connection_sock *icsk = inet_csk(sk); 3229 struct tcp_sock *tp = tcp_sk(sk); 3230 u32 prior_snd_una = tp->snd_una; 3231 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3232 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3233 u32 prior_in_flight; 3234 u32 prior_fackets; 3235 int prior_packets; 3236 int frto_cwnd = 0; 3237 3238 /* If the ack is newer than sent or older than previous acks 3239 * then we can probably ignore it. 3240 */ 3241 if (after(ack, tp->snd_nxt)) 3242 goto uninteresting_ack; 3243 3244 if (before(ack, prior_snd_una)) 3245 goto old_ack; 3246 3247 if (after(ack, prior_snd_una)) 3248 flag |= FLAG_SND_UNA_ADVANCED; 3249 3250 if (sysctl_tcp_abc) { 3251 if (icsk->icsk_ca_state < TCP_CA_CWR) 3252 tp->bytes_acked += ack - prior_snd_una; 3253 else if (icsk->icsk_ca_state == TCP_CA_Loss) 3254 /* we assume just one segment left network */ 3255 tp->bytes_acked += min(ack - prior_snd_una, 3256 tp->mss_cache); 3257 } 3258 3259 prior_fackets = tp->fackets_out; 3260 prior_in_flight = tcp_packets_in_flight(tp); 3261 3262 if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { 3263 /* Window is constant, pure forward advance. 3264 * No more checks are required. 3265 * Note, we use the fact that SND.UNA>=SND.WL2. 
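 * For illustration (hypothetical numbers): a header-predicted ACK that
 * moves snd_una from 1000 to 2000 while the advertised window is
 * unchanged takes this branch; we only slide snd_wl1/snd_una forward,
 * count it as a window update and signal CA_EVENT_FAST_ACK, with none of
 * the slow-path checks below.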
3266 */ 3267 tcp_update_wl(tp, ack, ack_seq); 3268 tp->snd_una = ack; 3269 flag |= FLAG_WIN_UPDATE; 3270 3271 tcp_ca_event(sk, CA_EVENT_FAST_ACK); 3272 3273 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS); 3274 } else { 3275 if (ack_seq != TCP_SKB_CB(skb)->end_seq) 3276 flag |= FLAG_DATA; 3277 else 3278 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS); 3279 3280 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); 3281 3282 if (TCP_SKB_CB(skb)->sacked) 3283 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3284 3285 if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) 3286 flag |= FLAG_ECE; 3287 3288 tcp_ca_event(sk, CA_EVENT_SLOW_ACK); 3289 } 3290 3291 /* We passed data and got it acked, remove any soft error 3292 * log. Something worked... 3293 */ 3294 sk->sk_err_soft = 0; 3295 icsk->icsk_probes_out = 0; 3296 tp->rcv_tstamp = tcp_time_stamp; 3297 prior_packets = tp->packets_out; 3298 if (!prior_packets) 3299 goto no_queue; 3300 3301 /* See if we can take anything off of the retransmit queue. */ 3302 flag |= tcp_clean_rtx_queue(sk, prior_fackets); 3303 3304 if (tp->frto_counter) 3305 frto_cwnd = tcp_process_frto(sk, flag); 3306 /* Guarantee sacktag reordering detection against wrap-arounds */ 3307 if (before(tp->frto_highmark, tp->snd_una)) 3308 tp->frto_highmark = 0; 3309 3310 if (tcp_ack_is_dubious(sk, flag)) { 3311 /* Advance CWND, if state allows this. */ 3312 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && 3313 tcp_may_raise_cwnd(sk, flag)) 3314 tcp_cong_avoid(sk, ack, prior_in_flight); 3315 tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, 3316 flag); 3317 } else { 3318 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) 3319 tcp_cong_avoid(sk, ack, prior_in_flight); 3320 } 3321 3322 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3323 dst_confirm(sk->sk_dst_cache); 3324 3325 return 1; 3326 3327 no_queue: 3328 /* If this ack opens up a zero window, clear backoff. It was 3329 * being used to time the probes, and is probably far higher than 3330 * it needs to be for normal retransmission. 3331 */ 3332 if (tcp_send_head(sk)) 3333 tcp_ack_probe(sk); 3334 return 1; 3335 3336 old_ack: 3337 if (TCP_SKB_CB(skb)->sacked) { 3338 tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3339 if (icsk->icsk_ca_state == TCP_CA_Open) 3340 tcp_try_keep_open(sk); 3341 } 3342 3343 uninteresting_ack: 3344 SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3345 return 0; 3346 } 3347 3348 /* Look for tcp options. Normally only called on SYN and SYNACK packets. 3349 * But, this can also be called on packets in the established flow when 3350 * the fast version below fails. 
3351 */ 3352 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, 3353 int estab) 3354 { 3355 unsigned char *ptr; 3356 struct tcphdr *th = tcp_hdr(skb); 3357 int length = (th->doff * 4) - sizeof(struct tcphdr); 3358 3359 ptr = (unsigned char *)(th + 1); 3360 opt_rx->saw_tstamp = 0; 3361 3362 while (length > 0) { 3363 int opcode = *ptr++; 3364 int opsize; 3365 3366 switch (opcode) { 3367 case TCPOPT_EOL: 3368 return; 3369 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */ 3370 length--; 3371 continue; 3372 default: 3373 opsize = *ptr++; 3374 if (opsize < 2) /* "silly options" */ 3375 return; 3376 if (opsize > length) 3377 return; /* don't parse partial options */ 3378 switch (opcode) { 3379 case TCPOPT_MSS: 3380 if (opsize == TCPOLEN_MSS && th->syn && !estab) { 3381 u16 in_mss = get_unaligned_be16(ptr); 3382 if (in_mss) { 3383 if (opt_rx->user_mss && 3384 opt_rx->user_mss < in_mss) 3385 in_mss = opt_rx->user_mss; 3386 opt_rx->mss_clamp = in_mss; 3387 } 3388 } 3389 break; 3390 case TCPOPT_WINDOW: 3391 if (opsize == TCPOLEN_WINDOW && th->syn && 3392 !estab && sysctl_tcp_window_scaling) { 3393 __u8 snd_wscale = *(__u8 *)ptr; 3394 opt_rx->wscale_ok = 1; 3395 if (snd_wscale > 14) { 3396 if (net_ratelimit()) 3397 printk(KERN_INFO "tcp_parse_options: Illegal window " 3398 "scaling value %d >14 received.\n", 3399 snd_wscale); 3400 snd_wscale = 14; 3401 } 3402 opt_rx->snd_wscale = snd_wscale; 3403 } 3404 break; 3405 case TCPOPT_TIMESTAMP: 3406 if ((opsize == TCPOLEN_TIMESTAMP) && 3407 ((estab && opt_rx->tstamp_ok) || 3408 (!estab && sysctl_tcp_timestamps))) { 3409 opt_rx->saw_tstamp = 1; 3410 opt_rx->rcv_tsval = get_unaligned_be32(ptr); 3411 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4); 3412 } 3413 break; 3414 case TCPOPT_SACK_PERM: 3415 if (opsize == TCPOLEN_SACK_PERM && th->syn && 3416 !estab && sysctl_tcp_sack) { 3417 opt_rx->sack_ok = 1; 3418 tcp_sack_reset(opt_rx); 3419 } 3420 break; 3421 3422 case TCPOPT_SACK: 3423 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) && 3424 !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) && 3425 opt_rx->sack_ok) { 3426 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th; 3427 } 3428 break; 3429 #ifdef CONFIG_TCP_MD5SIG 3430 case TCPOPT_MD5SIG: 3431 /* 3432 * The MD5 Hash has already been 3433 * checked (see tcp_v{4,6}_do_rcv()). 3434 */ 3435 break; 3436 #endif 3437 } 3438 3439 ptr += opsize-2; 3440 length -= opsize; 3441 } 3442 } 3443 } 3444 3445 /* Fast parse options. This hopes to only see timestamps. 3446 * If it is wrong it falls back on tcp_parse_options(). 
3447 */ 3448 static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, 3449 struct tcp_sock *tp) 3450 { 3451 if (th->doff == sizeof(struct tcphdr) >> 2) { 3452 tp->rx_opt.saw_tstamp = 0; 3453 return 0; 3454 } else if (tp->rx_opt.tstamp_ok && 3455 th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { 3456 __be32 *ptr = (__be32 *)(th + 1); 3457 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 3458 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { 3459 tp->rx_opt.saw_tstamp = 1; 3460 ++ptr; 3461 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3462 ++ptr; 3463 tp->rx_opt.rcv_tsecr = ntohl(*ptr); 3464 return 1; 3465 } 3466 } 3467 tcp_parse_options(skb, &tp->rx_opt, 1); 3468 return 1; 3469 } 3470 3471 #ifdef CONFIG_TCP_MD5SIG 3472 /* 3473 * Parse MD5 Signature option 3474 */ 3475 u8 *tcp_parse_md5sig_option(struct tcphdr *th) 3476 { 3477 int length = (th->doff << 2) - sizeof (*th); 3478 u8 *ptr = (u8*)(th + 1); 3479 3480 /* If the TCP option is too short, we can short cut */ 3481 if (length < TCPOLEN_MD5SIG) 3482 return NULL; 3483 3484 while (length > 0) { 3485 int opcode = *ptr++; 3486 int opsize; 3487 3488 switch(opcode) { 3489 case TCPOPT_EOL: 3490 return NULL; 3491 case TCPOPT_NOP: 3492 length--; 3493 continue; 3494 default: 3495 opsize = *ptr++; 3496 if (opsize < 2 || opsize > length) 3497 return NULL; 3498 if (opcode == TCPOPT_MD5SIG) 3499 return ptr; 3500 } 3501 ptr += opsize - 2; 3502 length -= opsize; 3503 } 3504 return NULL; 3505 } 3506 #endif 3507 3508 static inline void tcp_store_ts_recent(struct tcp_sock *tp) 3509 { 3510 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; 3511 tp->rx_opt.ts_recent_stamp = get_seconds(); 3512 } 3513 3514 static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) 3515 { 3516 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { 3517 /* PAWS bug workaround wrt. ACK frames, the PAWS discard 3518 * extra check below makes sure this can only happen 3519 * for pure ACK frames. -DaveM 3520 * 3521 * Not only, also it occurs for expired timestamps. 3522 */ 3523 3524 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 || 3525 get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS) 3526 tcp_store_ts_recent(tp); 3527 } 3528 } 3529 3530 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM 3531 * 3532 * It is not fatal. If this ACK does _not_ change critical state (seqs, window) 3533 * it can pass through stack. So, the following predicate verifies that 3534 * this segment is not used for anything but congestion avoidance or 3535 * fast retransmit. Moreover, we even are able to eliminate most of such 3536 * second order effects, if we apply some small "replay" window (~RTO) 3537 * to timestamp space. 3538 * 3539 * All these measures still do not guarantee that we reject wrapped ACKs 3540 * on networks with high bandwidth, when sequence space is recycled fastly, 3541 * but it guarantees that such events will be very rare and do not affect 3542 * connection seriously. This doesn't look nice, but alas, PAWS is really 3543 * buggy extension. 3544 * 3545 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC 3546 * states that events when retransmit arrives after original data are rare. 3547 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is 3548 * the biggest problem on large power networks even with minor reordering. 3549 * OK, let's give it small replay window. If peer clock is even 1hz, it is safe 3550 * up to bandwidth of 18Gigabit/sec. 
8) ] 3551 */ 3552 3553 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 3554 { 3555 struct tcp_sock *tp = tcp_sk(sk); 3556 struct tcphdr *th = tcp_hdr(skb); 3557 u32 seq = TCP_SKB_CB(skb)->seq; 3558 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3559 3560 return (/* 1. Pure ACK with correct sequence number. */ 3561 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && 3562 3563 /* 2. ... and duplicate ACK. */ 3564 ack == tp->snd_una && 3565 3566 /* 3. ... and does not update window. */ 3567 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 3568 3569 /* 4. ... and sits in replay window. */ 3570 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); 3571 } 3572 3573 static inline int tcp_paws_discard(const struct sock *sk, 3574 const struct sk_buff *skb) 3575 { 3576 const struct tcp_sock *tp = tcp_sk(sk); 3577 return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW && 3578 get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && 3579 !tcp_disordered_ack(sk, skb)); 3580 } 3581 3582 /* Check segment sequence number for validity. 3583 * 3584 * Segment controls are considered valid, if the segment 3585 * fits to the window after truncation to the window. Acceptability 3586 * of data (and SYN, FIN, of course) is checked separately. 3587 * See tcp_data_queue(), for example. 3588 * 3589 * Also, controls (RST is main one) are accepted using RCV.WUP instead 3590 * of RCV.NXT. Peer still did not advance his SND.UNA when we 3591 * delayed ACK, so that hisSND.UNA<=ourRCV.WUP. 3592 * (borrowed from freebsd) 3593 */ 3594 3595 static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq) 3596 { 3597 return !before(end_seq, tp->rcv_wup) && 3598 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); 3599 } 3600 3601 /* When we get a reset we do this. */ 3602 static void tcp_reset(struct sock *sk) 3603 { 3604 /* We want the right error as BSD sees it (and indeed as we do). */ 3605 switch (sk->sk_state) { 3606 case TCP_SYN_SENT: 3607 sk->sk_err = ECONNREFUSED; 3608 break; 3609 case TCP_CLOSE_WAIT: 3610 sk->sk_err = EPIPE; 3611 break; 3612 case TCP_CLOSE: 3613 return; 3614 default: 3615 sk->sk_err = ECONNRESET; 3616 } 3617 3618 if (!sock_flag(sk, SOCK_DEAD)) 3619 sk->sk_error_report(sk); 3620 3621 tcp_done(sk); 3622 } 3623 3624 /* 3625 * Process the FIN bit. This now behaves as it is supposed to work 3626 * and the FIN takes effect when it is validly part of sequence 3627 * space. Not before when we get holes. 3628 * 3629 * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT 3630 * (and thence onto LAST-ACK and finally, CLOSE, we never enter 3631 * TIME-WAIT) 3632 * 3633 * If we are in FINWAIT-1, a received FIN indicates simultaneous 3634 * close and we go into CLOSING (and later onto TIME-WAIT) 3635 * 3636 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT. 3637 */ 3638 static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) 3639 { 3640 struct tcp_sock *tp = tcp_sk(sk); 3641 3642 inet_csk_schedule_ack(sk); 3643 3644 sk->sk_shutdown |= RCV_SHUTDOWN; 3645 sock_set_flag(sk, SOCK_DONE); 3646 3647 switch (sk->sk_state) { 3648 case TCP_SYN_RECV: 3649 case TCP_ESTABLISHED: 3650 /* Move to CLOSE_WAIT */ 3651 tcp_set_state(sk, TCP_CLOSE_WAIT); 3652 inet_csk(sk)->icsk_ack.pingpong = 1; 3653 break; 3654 3655 case TCP_CLOSE_WAIT: 3656 case TCP_CLOSING: 3657 /* Received a retransmission of the FIN, do 3658 * nothing. 
3659 */ 3660 break; 3661 case TCP_LAST_ACK: 3662 /* RFC793: Remain in the LAST-ACK state. */ 3663 break; 3664 3665 case TCP_FIN_WAIT1: 3666 /* This case occurs when a simultaneous close 3667 * happens, we must ack the received FIN and 3668 * enter the CLOSING state. 3669 */ 3670 tcp_send_ack(sk); 3671 tcp_set_state(sk, TCP_CLOSING); 3672 break; 3673 case TCP_FIN_WAIT2: 3674 /* Received a FIN -- send ACK and enter TIME_WAIT. */ 3675 tcp_send_ack(sk); 3676 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 3677 break; 3678 default: 3679 /* Only TCP_LISTEN and TCP_CLOSE are left, in these 3680 * cases we should never reach this piece of code. 3681 */ 3682 printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n", 3683 __func__, sk->sk_state); 3684 break; 3685 } 3686 3687 /* It _is_ possible, that we have something out-of-order _after_ FIN. 3688 * Probably, we should reset in this case. For now drop them. 3689 */ 3690 __skb_queue_purge(&tp->out_of_order_queue); 3691 if (tcp_is_sack(tp)) 3692 tcp_sack_reset(&tp->rx_opt); 3693 sk_mem_reclaim(sk); 3694 3695 if (!sock_flag(sk, SOCK_DEAD)) { 3696 sk->sk_state_change(sk); 3697 3698 /* Do not send POLL_HUP for half duplex close. */ 3699 if (sk->sk_shutdown == SHUTDOWN_MASK || 3700 sk->sk_state == TCP_CLOSE) 3701 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); 3702 else 3703 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 3704 } 3705 } 3706 3707 static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, 3708 u32 end_seq) 3709 { 3710 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) { 3711 if (before(seq, sp->start_seq)) 3712 sp->start_seq = seq; 3713 if (after(end_seq, sp->end_seq)) 3714 sp->end_seq = end_seq; 3715 return 1; 3716 } 3717 return 0; 3718 } 3719 3720 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq) 3721 { 3722 struct tcp_sock *tp = tcp_sk(sk); 3723 3724 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 3725 int mib_idx; 3726 3727 if (before(seq, tp->rcv_nxt)) 3728 mib_idx = LINUX_MIB_TCPDSACKOLDSENT; 3729 else 3730 mib_idx = LINUX_MIB_TCPDSACKOFOSENT; 3731 3732 NET_INC_STATS_BH(sock_net(sk), mib_idx); 3733 3734 tp->rx_opt.dsack = 1; 3735 tp->duplicate_sack[0].start_seq = seq; 3736 tp->duplicate_sack[0].end_seq = end_seq; 3737 tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 1; 3738 } 3739 } 3740 3741 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq) 3742 { 3743 struct tcp_sock *tp = tcp_sk(sk); 3744 3745 if (!tp->rx_opt.dsack) 3746 tcp_dsack_set(sk, seq, end_seq); 3747 else 3748 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 3749 } 3750 3751 static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) 3752 { 3753 struct tcp_sock *tp = tcp_sk(sk); 3754 3755 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 3756 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 3757 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 3758 tcp_enter_quickack_mode(sk); 3759 3760 if (tcp_is_sack(tp) && sysctl_tcp_dsack) { 3761 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 3762 3763 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) 3764 end_seq = tp->rcv_nxt; 3765 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq); 3766 } 3767 } 3768 3769 tcp_send_ack(sk); 3770 } 3771 3772 /* These routines update the SACK block as out-of-order packets arrive or 3773 * in-order packets close up the sequence space. 
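 *
 * A small worked example (illustrative sequence numbers): suppose the
 * receiver already advertises the SACK blocks
 *
 *	[4000,5000) and [6000,7000)
 *
 * and the segment [5000,6000) now arrives out of order. The first block is
 * extended to [4000,6000); tcp_sack_maybe_coalesce() then notices that it
 * touches [6000,7000) and merges the two into a single [4000,7000) block,
 * decrementing num_sacks.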
3774 */ 3775 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) 3776 { 3777 int this_sack; 3778 struct tcp_sack_block *sp = &tp->selective_acks[0]; 3779 struct tcp_sack_block *swalk = sp + 1; 3780 3781 /* See if the recent change to the first SACK eats into 3782 * or hits the sequence space of other SACK blocks, if so coalesce. 3783 */ 3784 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { 3785 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) { 3786 int i; 3787 3788 /* Zap SWALK, by moving every further SACK up by one slot. 3789 * Decrease num_sacks. 3790 */ 3791 tp->rx_opt.num_sacks--; 3792 tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 3793 tp->rx_opt.dsack; 3794 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) 3795 sp[i] = sp[i + 1]; 3796 continue; 3797 } 3798 this_sack++, swalk++; 3799 } 3800 } 3801 3802 static inline void tcp_sack_swap(struct tcp_sack_block *sack1, 3803 struct tcp_sack_block *sack2) 3804 { 3805 __u32 tmp; 3806 3807 tmp = sack1->start_seq; 3808 sack1->start_seq = sack2->start_seq; 3809 sack2->start_seq = tmp; 3810 3811 tmp = sack1->end_seq; 3812 sack1->end_seq = sack2->end_seq; 3813 sack2->end_seq = tmp; 3814 } 3815 3816 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) 3817 { 3818 struct tcp_sock *tp = tcp_sk(sk); 3819 struct tcp_sack_block *sp = &tp->selective_acks[0]; 3820 int cur_sacks = tp->rx_opt.num_sacks; 3821 int this_sack; 3822 3823 if (!cur_sacks) 3824 goto new_sack; 3825 3826 for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) { 3827 if (tcp_sack_extend(sp, seq, end_seq)) { 3828 /* Rotate this_sack to the first one. */ 3829 for (; this_sack > 0; this_sack--, sp--) 3830 tcp_sack_swap(sp, sp - 1); 3831 if (cur_sacks > 1) 3832 tcp_sack_maybe_coalesce(tp); 3833 return; 3834 } 3835 } 3836 3837 /* Could not find an adjacent existing SACK, build a new one, 3838 * put it at the front, and shift everyone else down. We 3839 * always know there is at least one SACK present already here. 3840 * 3841 * If the sack array is full, forget about the last one. 3842 */ 3843 if (this_sack >= TCP_NUM_SACKS) { 3844 this_sack--; 3845 tp->rx_opt.num_sacks--; 3846 sp--; 3847 } 3848 for (; this_sack > 0; this_sack--, sp--) 3849 *sp = *(sp - 1); 3850 3851 new_sack: 3852 /* Build the new head SACK, and we're done. */ 3853 sp->start_seq = seq; 3854 sp->end_seq = end_seq; 3855 tp->rx_opt.num_sacks++; 3856 tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; 3857 } 3858 3859 /* RCV.NXT advances, some SACKs should be eaten. */ 3860 3861 static void tcp_sack_remove(struct tcp_sock *tp) 3862 { 3863 struct tcp_sack_block *sp = &tp->selective_acks[0]; 3864 int num_sacks = tp->rx_opt.num_sacks; 3865 int this_sack; 3866 3867 /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ 3868 if (skb_queue_empty(&tp->out_of_order_queue)) { 3869 tp->rx_opt.num_sacks = 0; 3870 tp->rx_opt.eff_sacks = tp->rx_opt.dsack; 3871 return; 3872 } 3873 3874 for (this_sack = 0; this_sack < num_sacks;) { 3875 /* Check if the start of the sack is covered by RCV.NXT. */ 3876 if (!before(tp->rcv_nxt, sp->start_seq)) { 3877 int i; 3878 3879 /* RCV.NXT must cover all the block! */ 3880 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); 3881 3882 /* Zap this SACK, by moving forward any other SACKS. 
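 * For example (illustrative numbers): with selective_acks[] holding
 * [1000,1500) and [2000,2500), in-order data filling the gap advances
 * RCV.NXT to 1500; the first block is then fully covered, so [2000,2500)
 * is copied down into slot 0 and num_sacks drops from 2 to 1.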
*/ 3883 for (i=this_sack+1; i < num_sacks; i++) 3884 tp->selective_acks[i-1] = tp->selective_acks[i]; 3885 num_sacks--; 3886 continue; 3887 } 3888 this_sack++; 3889 sp++; 3890 } 3891 if (num_sacks != tp->rx_opt.num_sacks) { 3892 tp->rx_opt.num_sacks = num_sacks; 3893 tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks + 3894 tp->rx_opt.dsack; 3895 } 3896 } 3897 3898 /* This one checks to see if we can put data from the 3899 * out_of_order queue into the receive_queue. 3900 */ 3901 static void tcp_ofo_queue(struct sock *sk) 3902 { 3903 struct tcp_sock *tp = tcp_sk(sk); 3904 __u32 dsack_high = tp->rcv_nxt; 3905 struct sk_buff *skb; 3906 3907 while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) { 3908 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 3909 break; 3910 3911 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) { 3912 __u32 dsack = dsack_high; 3913 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high)) 3914 dsack_high = TCP_SKB_CB(skb)->end_seq; 3915 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack); 3916 } 3917 3918 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 3919 SOCK_DEBUG(sk, "ofo packet was already received \n"); 3920 __skb_unlink(skb, &tp->out_of_order_queue); 3921 __kfree_skb(skb); 3922 continue; 3923 } 3924 SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n", 3925 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 3926 TCP_SKB_CB(skb)->end_seq); 3927 3928 __skb_unlink(skb, &tp->out_of_order_queue); 3929 __skb_queue_tail(&sk->sk_receive_queue, skb); 3930 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 3931 if (tcp_hdr(skb)->fin) 3932 tcp_fin(skb, sk, tcp_hdr(skb)); 3933 } 3934 } 3935 3936 static int tcp_prune_ofo_queue(struct sock *sk); 3937 static int tcp_prune_queue(struct sock *sk); 3938 3939 static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) 3940 { 3941 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || 3942 !sk_rmem_schedule(sk, size)) { 3943 3944 if (tcp_prune_queue(sk) < 0) 3945 return -1; 3946 3947 if (!sk_rmem_schedule(sk, size)) { 3948 if (!tcp_prune_ofo_queue(sk)) 3949 return -1; 3950 3951 if (!sk_rmem_schedule(sk, size)) 3952 return -1; 3953 } 3954 } 3955 return 0; 3956 } 3957 3958 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 3959 { 3960 struct tcphdr *th = tcp_hdr(skb); 3961 struct tcp_sock *tp = tcp_sk(sk); 3962 int eaten = -1; 3963 3964 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) 3965 goto drop; 3966 3967 __skb_pull(skb, th->doff * 4); 3968 3969 TCP_ECN_accept_cwr(tp, skb); 3970 3971 if (tp->rx_opt.dsack) { 3972 tp->rx_opt.dsack = 0; 3973 tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks; 3974 } 3975 3976 /* Queue data for delivery to the user. 3977 * Packets in sequence go to the receive queue. 3978 * Out of sequence packets to the out_of_order_queue. 3979 */ 3980 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 3981 if (tcp_receive_window(tp) == 0) 3982 goto out_of_window; 3983 3984 /* Ok. In sequence. In window. 
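 * Concretely (illustrative numbers): with rcv_nxt == 10000 and a 32 KB
 * receive window, a segment carrying [10000,11448) lands here: it starts
 * exactly at rcv_nxt and fits in the window, so it is either copied
 * straight into the waiting process's iovec (the ucopy path just below)
 * or queued on sk_receive_queue, and rcv_nxt advances to 11448.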
*/ 3985 if (tp->ucopy.task == current && 3986 tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && 3987 sock_owned_by_user(sk) && !tp->urg_data) { 3988 int chunk = min_t(unsigned int, skb->len, 3989 tp->ucopy.len); 3990 3991 __set_current_state(TASK_RUNNING); 3992 3993 local_bh_enable(); 3994 if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) { 3995 tp->ucopy.len -= chunk; 3996 tp->copied_seq += chunk; 3997 eaten = (chunk == skb->len && !th->fin); 3998 tcp_rcv_space_adjust(sk); 3999 } 4000 local_bh_disable(); 4001 } 4002 4003 if (eaten <= 0) { 4004 queue_and_out: 4005 if (eaten < 0 && 4006 tcp_try_rmem_schedule(sk, skb->truesize)) 4007 goto drop; 4008 4009 skb_set_owner_r(skb, sk); 4010 __skb_queue_tail(&sk->sk_receive_queue, skb); 4011 } 4012 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4013 if (skb->len) 4014 tcp_event_data_recv(sk, skb); 4015 if (th->fin) 4016 tcp_fin(skb, sk, th); 4017 4018 if (!skb_queue_empty(&tp->out_of_order_queue)) { 4019 tcp_ofo_queue(sk); 4020 4021 /* RFC2581. 4.2. SHOULD send immediate ACK, when 4022 * gap in queue is filled. 4023 */ 4024 if (skb_queue_empty(&tp->out_of_order_queue)) 4025 inet_csk(sk)->icsk_ack.pingpong = 0; 4026 } 4027 4028 if (tp->rx_opt.num_sacks) 4029 tcp_sack_remove(tp); 4030 4031 tcp_fast_path_check(sk); 4032 4033 if (eaten > 0) 4034 __kfree_skb(skb); 4035 else if (!sock_flag(sk, SOCK_DEAD)) 4036 sk->sk_data_ready(sk, 0); 4037 return; 4038 } 4039 4040 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4041 /* A retransmit, 2nd most common case. Force an immediate ack. */ 4042 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); 4043 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4044 4045 out_of_window: 4046 tcp_enter_quickack_mode(sk); 4047 inet_csk_schedule_ack(sk); 4048 drop: 4049 __kfree_skb(skb); 4050 return; 4051 } 4052 4053 /* Out of window. F.e. zero window probe. */ 4054 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 4055 goto out_of_window; 4056 4057 tcp_enter_quickack_mode(sk); 4058 4059 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4060 /* Partial packet, seq < rcv_next < end_seq */ 4061 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", 4062 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, 4063 TCP_SKB_CB(skb)->end_seq); 4064 4065 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); 4066 4067 /* If window is closed, drop tail of packet. But after 4068 * remembering D-SACK for its head made in previous line. 4069 */ 4070 if (!tcp_receive_window(tp)) 4071 goto out_of_window; 4072 goto queue_and_out; 4073 } 4074 4075 TCP_ECN_check_ce(tp, skb); 4076 4077 if (tcp_try_rmem_schedule(sk, skb->truesize)) 4078 goto drop; 4079 4080 /* Disable header prediction. */ 4081 tp->pred_flags = 0; 4082 inet_csk_schedule_ack(sk); 4083 4084 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 4085 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 4086 4087 skb_set_owner_r(skb, sk); 4088 4089 if (!skb_peek(&tp->out_of_order_queue)) { 4090 /* Initial out of order segment, build 1 SACK. 
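 * For instance (illustrative numbers): if rcv_nxt is 5000 and the segment
 * [6000,7000) arrives, there is a 1000-byte hole; the skb is parked on the
 * previously empty out_of_order_queue and, with SACK enabled, the single
 * block {start_seq = 6000, end_seq = 7000} is advertised so the sender can
 * see exactly which range made it through.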
*/ 4091 if (tcp_is_sack(tp)) { 4092 tp->rx_opt.num_sacks = 1; 4093 tp->rx_opt.dsack = 0; 4094 tp->rx_opt.eff_sacks = 1; 4095 tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; 4096 tp->selective_acks[0].end_seq = 4097 TCP_SKB_CB(skb)->end_seq; 4098 } 4099 __skb_queue_head(&tp->out_of_order_queue, skb); 4100 } else { 4101 struct sk_buff *skb1 = tp->out_of_order_queue.prev; 4102 u32 seq = TCP_SKB_CB(skb)->seq; 4103 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 4104 4105 if (seq == TCP_SKB_CB(skb1)->end_seq) { 4106 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4107 4108 if (!tp->rx_opt.num_sacks || 4109 tp->selective_acks[0].end_seq != seq) 4110 goto add_sack; 4111 4112 /* Common case: data arrive in order after hole. */ 4113 tp->selective_acks[0].end_seq = end_seq; 4114 return; 4115 } 4116 4117 /* Find place to insert this segment. */ 4118 do { 4119 if (!after(TCP_SKB_CB(skb1)->seq, seq)) 4120 break; 4121 } while ((skb1 = skb1->prev) != 4122 (struct sk_buff *)&tp->out_of_order_queue); 4123 4124 /* Do skb overlap to previous one? */ 4125 if (skb1 != (struct sk_buff *)&tp->out_of_order_queue && 4126 before(seq, TCP_SKB_CB(skb1)->end_seq)) { 4127 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4128 /* All the bits are present. Drop. */ 4129 __kfree_skb(skb); 4130 tcp_dsack_set(sk, seq, end_seq); 4131 goto add_sack; 4132 } 4133 if (after(seq, TCP_SKB_CB(skb1)->seq)) { 4134 /* Partial overlap. */ 4135 tcp_dsack_set(sk, seq, 4136 TCP_SKB_CB(skb1)->end_seq); 4137 } else { 4138 skb1 = skb1->prev; 4139 } 4140 } 4141 __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue); 4142 4143 /* And clean segments covered by new one as whole. */ 4144 while ((skb1 = skb->next) != 4145 (struct sk_buff *)&tp->out_of_order_queue && 4146 after(end_seq, TCP_SKB_CB(skb1)->seq)) { 4147 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { 4148 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4149 end_seq); 4150 break; 4151 } 4152 __skb_unlink(skb1, &tp->out_of_order_queue); 4153 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, 4154 TCP_SKB_CB(skb1)->end_seq); 4155 __kfree_skb(skb1); 4156 } 4157 4158 add_sack: 4159 if (tcp_is_sack(tp)) 4160 tcp_sack_new_ofo_skb(sk, seq, end_seq); 4161 } 4162 } 4163 4164 /* Collapse contiguous sequence of skbs head..tail with 4165 * sequence numbers start..end. 4166 * Segments with FIN/SYN are not collapsed (only because this 4167 * simplifies code) 4168 */ 4169 static void 4170 tcp_collapse(struct sock *sk, struct sk_buff_head *list, 4171 struct sk_buff *head, struct sk_buff *tail, 4172 u32 start, u32 end) 4173 { 4174 struct sk_buff *skb; 4175 4176 /* First, check that queue is collapsible and find 4177 * the point where collapsing can be useful. */ 4178 for (skb = head; skb != tail;) { 4179 /* No new bits? It is possible on ofo queue. */ 4180 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4181 struct sk_buff *next = skb->next; 4182 __skb_unlink(skb, list); 4183 __kfree_skb(skb); 4184 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); 4185 skb = next; 4186 continue; 4187 } 4188 4189 /* The first skb to collapse is: 4190 * - not SYN/FIN and 4191 * - bloated or contains data before "start" or 4192 * overlaps to the next one. 4193 */ 4194 if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && 4195 (tcp_win_from_space(skb->truesize) > skb->len || 4196 before(TCP_SKB_CB(skb)->seq, start) || 4197 (skb->next != tail && 4198 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq))) 4199 break; 4200 4201 /* Decided to skip this, advance start seq. 
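 * Roughly speaking (made-up sizes): an skb with a truesize of 2 KB but only
 * 100 bytes of payload is "bloated" in the sense of the test above, since
 * the window space it consumes far exceeds the data it carries, so it
 * becomes the first collapse candidate; a tightly packed skb with no data
 * before "start" and no overlap with its neighbour is simply skipped like
 * this, with "start" advanced past it.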
*/ 4202 start = TCP_SKB_CB(skb)->end_seq; 4203 skb = skb->next; 4204 } 4205 if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) 4206 return; 4207 4208 while (before(start, end)) { 4209 struct sk_buff *nskb; 4210 unsigned int header = skb_headroom(skb); 4211 int copy = SKB_MAX_ORDER(header, 0); 4212 4213 /* Too big header? This can happen with IPv6. */ 4214 if (copy < 0) 4215 return; 4216 if (end - start < copy) 4217 copy = end - start; 4218 nskb = alloc_skb(copy + header, GFP_ATOMIC); 4219 if (!nskb) 4220 return; 4221 4222 skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); 4223 skb_set_network_header(nskb, (skb_network_header(skb) - 4224 skb->head)); 4225 skb_set_transport_header(nskb, (skb_transport_header(skb) - 4226 skb->head)); 4227 skb_reserve(nskb, header); 4228 memcpy(nskb->head, skb->head, header); 4229 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 4230 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 4231 __skb_insert(nskb, skb->prev, skb, list); 4232 skb_set_owner_r(nskb, sk); 4233 4234 /* Copy data, releasing collapsed skbs. */ 4235 while (copy > 0) { 4236 int offset = start - TCP_SKB_CB(skb)->seq; 4237 int size = TCP_SKB_CB(skb)->end_seq - start; 4238 4239 BUG_ON(offset < 0); 4240 if (size > 0) { 4241 size = min(copy, size); 4242 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size)) 4243 BUG(); 4244 TCP_SKB_CB(nskb)->end_seq += size; 4245 copy -= size; 4246 start += size; 4247 } 4248 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4249 struct sk_buff *next = skb->next; 4250 __skb_unlink(skb, list); 4251 __kfree_skb(skb); 4252 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED); 4253 skb = next; 4254 if (skb == tail || 4255 tcp_hdr(skb)->syn || 4256 tcp_hdr(skb)->fin) 4257 return; 4258 } 4259 } 4260 } 4261 } 4262 4263 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs 4264 * and tcp_collapse() them until all the queue is collapsed. 4265 */ 4266 static void tcp_collapse_ofo_queue(struct sock *sk) 4267 { 4268 struct tcp_sock *tp = tcp_sk(sk); 4269 struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); 4270 struct sk_buff *head; 4271 u32 start, end; 4272 4273 if (skb == NULL) 4274 return; 4275 4276 start = TCP_SKB_CB(skb)->seq; 4277 end = TCP_SKB_CB(skb)->end_seq; 4278 head = skb; 4279 4280 for (;;) { 4281 skb = skb->next; 4282 4283 /* Segment is terminated when we see gap or when 4284 * we are at the end of all the queue. */ 4285 if (skb == (struct sk_buff *)&tp->out_of_order_queue || 4286 after(TCP_SKB_CB(skb)->seq, end) || 4287 before(TCP_SKB_CB(skb)->end_seq, start)) { 4288 tcp_collapse(sk, &tp->out_of_order_queue, 4289 head, skb, start, end); 4290 head = skb; 4291 if (skb == (struct sk_buff *)&tp->out_of_order_queue) 4292 break; 4293 /* Start new segment */ 4294 start = TCP_SKB_CB(skb)->seq; 4295 end = TCP_SKB_CB(skb)->end_seq; 4296 } else { 4297 if (before(TCP_SKB_CB(skb)->seq, start)) 4298 start = TCP_SKB_CB(skb)->seq; 4299 if (after(TCP_SKB_CB(skb)->end_seq, end)) 4300 end = TCP_SKB_CB(skb)->end_seq; 4301 } 4302 } 4303 } 4304 4305 /* 4306 * Purge the out-of-order queue. 4307 * Return true if queue was pruned. 4308 */ 4309 static int tcp_prune_ofo_queue(struct sock *sk) 4310 { 4311 struct tcp_sock *tp = tcp_sk(sk); 4312 int res = 0; 4313 4314 if (!skb_queue_empty(&tp->out_of_order_queue)) { 4315 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED); 4316 __skb_queue_purge(&tp->out_of_order_queue); 4317 4318 /* Reset SACK state. A conforming SACK implementation will 4319 * do the same at a timeout based retransmit. 
When a connection 4320 * is in a sad state like this, we care only about integrity 4321 * of the connection not performance. 4322 */ 4323 if (tp->rx_opt.sack_ok) 4324 tcp_sack_reset(&tp->rx_opt); 4325 sk_mem_reclaim(sk); 4326 res = 1; 4327 } 4328 return res; 4329 } 4330 4331 /* Reduce allocated memory if we can, trying to get 4332 * the socket within its memory limits again. 4333 * 4334 * Return less than zero if we should start dropping frames 4335 * until the socket owning process reads some of the data 4336 * to stabilize the situation. 4337 */ 4338 static int tcp_prune_queue(struct sock *sk) 4339 { 4340 struct tcp_sock *tp = tcp_sk(sk); 4341 4342 SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); 4343 4344 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED); 4345 4346 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 4347 tcp_clamp_window(sk); 4348 else if (tcp_memory_pressure) 4349 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); 4350 4351 tcp_collapse_ofo_queue(sk); 4352 tcp_collapse(sk, &sk->sk_receive_queue, 4353 sk->sk_receive_queue.next, 4354 (struct sk_buff *)&sk->sk_receive_queue, 4355 tp->copied_seq, tp->rcv_nxt); 4356 sk_mem_reclaim(sk); 4357 4358 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4359 return 0; 4360 4361 /* Collapsing did not help, destructive actions follow. 4362 * This must not ever occur. */ 4363 4364 tcp_prune_ofo_queue(sk); 4365 4366 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) 4367 return 0; 4368 4369 /* If we are really being abused, tell the caller to silently 4370 * drop receive data on the floor. It will get retransmitted 4371 * and hopefully then we'll have sufficient space. 4372 */ 4373 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED); 4374 4375 /* Massive buffer overcommit. */ 4376 tp->pred_flags = 0; 4377 return -1; 4378 } 4379 4380 /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. 4381 * As additional protections, we do not touch cwnd in retransmission phases, 4382 * and if application hit its sndbuf limit recently. 4383 */ 4384 void tcp_cwnd_application_limited(struct sock *sk) 4385 { 4386 struct tcp_sock *tp = tcp_sk(sk); 4387 4388 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && 4389 sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { 4390 /* Limited by application or receiver window. */ 4391 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); 4392 u32 win_used = max(tp->snd_cwnd_used, init_win); 4393 if (win_used < tp->snd_cwnd) { 4394 tp->snd_ssthresh = tcp_current_ssthresh(sk); 4395 tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 4396 } 4397 tp->snd_cwnd_used = 0; 4398 } 4399 tp->snd_cwnd_stamp = tcp_time_stamp; 4400 } 4401 4402 static int tcp_should_expand_sndbuf(struct sock *sk) 4403 { 4404 struct tcp_sock *tp = tcp_sk(sk); 4405 4406 /* If the user specified a specific send buffer setting, do 4407 * not modify it. 4408 */ 4409 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) 4410 return 0; 4411 4412 /* If we are under global TCP memory pressure, do not expand. */ 4413 if (tcp_memory_pressure) 4414 return 0; 4415 4416 /* If we are under soft global TCP memory pressure, do not expand. */ 4417 if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) 4418 return 0; 4419 4420 /* If we filled the congestion window, do not expand. 
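 * (E.g. with snd_cwnd at 10 segments and 10 packets already in flight the
 * limiting factor is the congestion window, not the send buffer, so growing
 * sk_sndbuf would only buffer more unsendable data.)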
*/ 4421 if (tp->packets_out >= tp->snd_cwnd) 4422 return 0; 4423 4424 return 1; 4425 } 4426 4427 /* When incoming ACK allowed to free some skb from write_queue, 4428 * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket 4429 * on the exit from tcp input handler. 4430 * 4431 * PROBLEM: sndbuf expansion does not work well with largesend. 4432 */ 4433 static void tcp_new_space(struct sock *sk) 4434 { 4435 struct tcp_sock *tp = tcp_sk(sk); 4436 4437 if (tcp_should_expand_sndbuf(sk)) { 4438 int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + 4439 MAX_TCP_HEADER + 16 + sizeof(struct sk_buff), 4440 demanded = max_t(unsigned int, tp->snd_cwnd, 4441 tp->reordering + 1); 4442 sndmem *= 2 * demanded; 4443 if (sndmem > sk->sk_sndbuf) 4444 sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); 4445 tp->snd_cwnd_stamp = tcp_time_stamp; 4446 } 4447 4448 sk->sk_write_space(sk); 4449 } 4450 4451 static void tcp_check_space(struct sock *sk) 4452 { 4453 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) { 4454 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); 4455 if (sk->sk_socket && 4456 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) 4457 tcp_new_space(sk); 4458 } 4459 } 4460 4461 static inline void tcp_data_snd_check(struct sock *sk) 4462 { 4463 tcp_push_pending_frames(sk); 4464 tcp_check_space(sk); 4465 } 4466 4467 /* 4468 * Check if sending an ack is needed. 4469 */ 4470 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) 4471 { 4472 struct tcp_sock *tp = tcp_sk(sk); 4473 4474 /* More than one full frame received... */ 4475 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss 4476 /* ... and right edge of window advances far enough. 4477 * (tcp_recvmsg() will send ACK otherwise). Or... 4478 */ 4479 && __tcp_select_window(sk) >= tp->rcv_wnd) || 4480 /* We ACK each frame or... */ 4481 tcp_in_quickack_mode(sk) || 4482 /* We have out of order data. */ 4483 (ofo_possible && skb_peek(&tp->out_of_order_queue))) { 4484 /* Then ack it now */ 4485 tcp_send_ack(sk); 4486 } else { 4487 /* Else, send delayed ack. */ 4488 tcp_send_delayed_ack(sk); 4489 } 4490 } 4491 4492 static inline void tcp_ack_snd_check(struct sock *sk) 4493 { 4494 if (!inet_csk_ack_scheduled(sk)) { 4495 /* We sent a data segment already. */ 4496 return; 4497 } 4498 __tcp_ack_snd_check(sk, 1); 4499 } 4500 4501 /* 4502 * This routine is only called when we have urgent data 4503 * signaled. Its the 'slow' part of tcp_urg. It could be 4504 * moved inline now as tcp_urg is only called from one 4505 * place. We handle URGent data wrong. We have to - as 4506 * BSD still doesn't use the correction from RFC961. 4507 * For 1003.1g we should support a new option TCP_STDURG to permit 4508 * either form (or just set the sysctl tcp_stdurg). 4509 */ 4510 4511 static void tcp_check_urg(struct sock *sk, struct tcphdr *th) 4512 { 4513 struct tcp_sock *tp = tcp_sk(sk); 4514 u32 ptr = ntohs(th->urg_ptr); 4515 4516 if (ptr && !sysctl_tcp_stdurg) 4517 ptr--; 4518 ptr += ntohl(th->seq); 4519 4520 /* Ignore urgent data that we've already seen and read. */ 4521 if (after(tp->copied_seq, ptr)) 4522 return; 4523 4524 /* Do not replay urg ptr. 4525 * 4526 * NOTE: interesting situation not covered by specs. 4527 * Misbehaving sender may send urg ptr, pointing to segment, 4528 * which we already have in ofo queue. We are not able to fetch 4529 * such data and will stay in TCP_URG_NOTYET until will be eaten 4530 * by recvmsg(). Seems, we are not obliged to handle such wicked 4531 * situations. 
But it is worth thinking about the possibility of 4532 * DoS attacks via some hypothetical application-level deadlock. 4533 */ 4534 if (before(ptr, tp->rcv_nxt)) 4535 return; 4536 4537 /* Do we already have a newer (or duplicate) urgent pointer? */ 4538 if (tp->urg_data && !after(ptr, tp->urg_seq)) 4539 return; 4540 4541 /* Tell the world about our new urgent pointer. */ 4542 sk_send_sigurg(sk); 4543 4544 /* We may be adding urgent data when the last byte read was 4545 * urgent. To do this requires some care. We cannot just ignore 4546 * tp->copied_seq since we would read the last urgent byte again 4547 * as data, nor can we alter copied_seq until this data arrives 4548 * or we break the semantics of SIOCATMARK (and thus sockatmark()). 4549 * 4550 * NOTE. Double Dutch. Rendering to plain English: the author of the comment 4551 * above did something like send("A", MSG_OOB); send("B", MSG_OOB); 4552 * and expected both A and B to disappear from the stream. This is _wrong_. 4553 * Though this happens in BSD with high probability, it is only by chance. 4554 * Any application relying on it is buggy. Note also that the fix "works" 4555 * only in this artificial test. Insert some normal data between A and B and we will 4556 * diverge from BSD again. Verdict: it is better to remove the workaround and 4557 * trap buggy users. 4558 */ 4559 if (tp->urg_seq == tp->copied_seq && tp->urg_data && 4560 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { 4561 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 4562 tp->copied_seq++; 4563 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { 4564 __skb_unlink(skb, &sk->sk_receive_queue); 4565 __kfree_skb(skb); 4566 } 4567 } 4568 4569 tp->urg_data = TCP_URG_NOTYET; 4570 tp->urg_seq = ptr; 4571 4572 /* Disable header prediction. */ 4573 tp->pred_flags = 0; 4574 } 4575 4576 /* This is the 'fast' part of urgent handling. */ 4577 static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th) 4578 { 4579 struct tcp_sock *tp = tcp_sk(sk); 4580 4581 /* Check if we get a new urgent pointer - normally not. */ 4582 if (th->urg) 4583 tcp_check_urg(sk, th); 4584 4585 /* Do we wait for any urgent data? - normally not... */ 4586 if (tp->urg_data == TCP_URG_NOTYET) { 4587 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - 4588 th->syn; 4589 4590 /* Is the urgent pointer pointing into this packet?
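 * Worked example (illustrative numbers): urg_seq == 5010, the segment's
 * seq is 5000 and doff is 5 (a bare 20-byte header, no SYN), so
 * ptr = 5010 - 5000 + 20 = 30: the urgent byte would sit 30 bytes into the
 * skb, i.e. 10 bytes into the payload, and skb_copy_bits() below can fetch
 * it once the packet actually covers that offset.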
*/ 4591 if (ptr < skb->len) { 4592 u8 tmp; 4593 if (skb_copy_bits(skb, ptr, &tmp, 1)) 4594 BUG(); 4595 tp->urg_data = TCP_URG_VALID | tmp; 4596 if (!sock_flag(sk, SOCK_DEAD)) 4597 sk->sk_data_ready(sk, 0); 4598 } 4599 } 4600 } 4601 4602 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) 4603 { 4604 struct tcp_sock *tp = tcp_sk(sk); 4605 int chunk = skb->len - hlen; 4606 int err; 4607 4608 local_bh_enable(); 4609 if (skb_csum_unnecessary(skb)) 4610 err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk); 4611 else 4612 err = skb_copy_and_csum_datagram_iovec(skb, hlen, 4613 tp->ucopy.iov); 4614 4615 if (!err) { 4616 tp->ucopy.len -= chunk; 4617 tp->copied_seq += chunk; 4618 tcp_rcv_space_adjust(sk); 4619 } 4620 4621 local_bh_disable(); 4622 return err; 4623 } 4624 4625 static __sum16 __tcp_checksum_complete_user(struct sock *sk, 4626 struct sk_buff *skb) 4627 { 4628 __sum16 result; 4629 4630 if (sock_owned_by_user(sk)) { 4631 local_bh_enable(); 4632 result = __tcp_checksum_complete(skb); 4633 local_bh_disable(); 4634 } else { 4635 result = __tcp_checksum_complete(skb); 4636 } 4637 return result; 4638 } 4639 4640 static inline int tcp_checksum_complete_user(struct sock *sk, 4641 struct sk_buff *skb) 4642 { 4643 return !skb_csum_unnecessary(skb) && 4644 __tcp_checksum_complete_user(sk, skb); 4645 } 4646 4647 #ifdef CONFIG_NET_DMA 4648 static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, 4649 int hlen) 4650 { 4651 struct tcp_sock *tp = tcp_sk(sk); 4652 int chunk = skb->len - hlen; 4653 int dma_cookie; 4654 int copied_early = 0; 4655 4656 if (tp->ucopy.wakeup) 4657 return 0; 4658 4659 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 4660 tp->ucopy.dma_chan = get_softnet_dma(); 4661 4662 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { 4663 4664 dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan, 4665 skb, hlen, 4666 tp->ucopy.iov, chunk, 4667 tp->ucopy.pinned_list); 4668 4669 if (dma_cookie < 0) 4670 goto out; 4671 4672 tp->ucopy.dma_cookie = dma_cookie; 4673 copied_early = 1; 4674 4675 tp->ucopy.len -= chunk; 4676 tp->copied_seq += chunk; 4677 tcp_rcv_space_adjust(sk); 4678 4679 if ((tp->ucopy.len == 0) || 4680 (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) || 4681 (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) { 4682 tp->ucopy.wakeup = 1; 4683 sk->sk_data_ready(sk, 0); 4684 } 4685 } else if (chunk > 0) { 4686 tp->ucopy.wakeup = 1; 4687 sk->sk_data_ready(sk, 0); 4688 } 4689 out: 4690 return copied_early; 4691 } 4692 #endif /* CONFIG_NET_DMA */ 4693 4694 /* 4695 * TCP receive function for the ESTABLISHED state. 4696 * 4697 * It is split into a fast path and a slow path. The fast path is 4698 * disabled when: 4699 * - A zero window was announced from us - zero window probing 4700 * is only handled properly in the slow path. 4701 * - Out of order segments arrived. 4702 * - Urgent data is expected. 4703 * - There is no buffer space left 4704 * - Unexpected TCP flags/window values/header lengths are received 4705 * (detected by checking the TCP header against pred_flags) 4706 * - Data is sent in both directions. Fast path only supports pure senders 4707 * or pure receivers (this means either the sequence number or the ack 4708 * value must stay constant) 4709 * - Unexpected TCP option. 4710 * 4711 * When these conditions are not satisfied it drops into a standard 4712 * receive procedure patterned after RFC793 to handle all cases. 
4713 * The first three cases are guaranteed by proper pred_flags setting, 4714 * the rest is checked inline. Fast processing is turned on in 4715 * tcp_data_queue when everything is OK. 4716 */ 4717 int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 4718 struct tcphdr *th, unsigned len) 4719 { 4720 struct tcp_sock *tp = tcp_sk(sk); 4721 4722 /* 4723 * Header prediction. 4724 * The code loosely follows the one in the famous 4725 * "30 instruction TCP receive" Van Jacobson mail. 4726 * 4727 * Van's trick is to deposit buffers into socket queue 4728 * on a device interrupt, to call tcp_recv function 4729 * on the receive process context and checksum and copy 4730 * the buffer to user space. smart... 4731 * 4732 * Our current scheme is not silly either but we take the 4733 * extra cost of the net_bh soft interrupt processing... 4734 * We do checksum and copy also but from device to kernel. 4735 */ 4736 4737 tp->rx_opt.saw_tstamp = 0; 4738 4739 /* pred_flags is 0xS?10 << 16 + snd_wnd 4740 * if header_prediction is to be made 4741 * 'S' will always be tp->tcp_header_len >> 2 4742 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to 4743 * turn it off (when there are holes in the receive 4744 * space for instance) 4745 * PSH flag is ignored. 4746 */ 4747 4748 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && 4749 TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { 4750 int tcp_header_len = tp->tcp_header_len; 4751 4752 /* Timestamp header prediction: tcp_header_len 4753 * is automatically equal to th->doff*4 due to pred_flags 4754 * match. 4755 */ 4756 4757 /* Check timestamp */ 4758 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 4759 __be32 *ptr = (__be32 *)(th + 1); 4760 4761 /* No? Slow path! */ 4762 if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 4763 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) 4764 goto slow_path; 4765 4766 tp->rx_opt.saw_tstamp = 1; 4767 ++ptr; 4768 tp->rx_opt.rcv_tsval = ntohl(*ptr); 4769 ++ptr; 4770 tp->rx_opt.rcv_tsecr = ntohl(*ptr); 4771 4772 /* If PAWS failed, check it more carefully in slow path */ 4773 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 4774 goto slow_path; 4775 4776 /* DO NOT update ts_recent here, if checksum fails 4777 * and timestamp was corrupted part, it will result 4778 * in a hung connection since we will drop all 4779 * future packets due to the PAWS test. 4780 */ 4781 } 4782 4783 if (len <= tcp_header_len) { 4784 /* Bulk data transfer: sender */ 4785 if (len == tcp_header_len) { 4786 /* Predicted packet is in window by definition. 4787 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 4788 * Hence, check seq<=rcv_wup reduces to: 4789 */ 4790 if (tcp_header_len == 4791 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 4792 tp->rcv_nxt == tp->rcv_wup) 4793 tcp_store_ts_recent(tp); 4794 4795 /* We know that such packets are checksummed 4796 * on entry. 
4797 */ 4798 tcp_ack(sk, skb, 0); 4799 __kfree_skb(skb); 4800 tcp_data_snd_check(sk); 4801 return 0; 4802 } else { /* Header too small */ 4803 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 4804 goto discard; 4805 } 4806 } else { 4807 int eaten = 0; 4808 int copied_early = 0; 4809 4810 if (tp->copied_seq == tp->rcv_nxt && 4811 len - tcp_header_len <= tp->ucopy.len) { 4812 #ifdef CONFIG_NET_DMA 4813 if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) { 4814 copied_early = 1; 4815 eaten = 1; 4816 } 4817 #endif 4818 if (tp->ucopy.task == current && 4819 sock_owned_by_user(sk) && !copied_early) { 4820 __set_current_state(TASK_RUNNING); 4821 4822 if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) 4823 eaten = 1; 4824 } 4825 if (eaten) { 4826 /* Predicted packet is in window by definition. 4827 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 4828 * Hence, check seq<=rcv_wup reduces to: 4829 */ 4830 if (tcp_header_len == 4831 (sizeof(struct tcphdr) + 4832 TCPOLEN_TSTAMP_ALIGNED) && 4833 tp->rcv_nxt == tp->rcv_wup) 4834 tcp_store_ts_recent(tp); 4835 4836 tcp_rcv_rtt_measure_ts(sk, skb); 4837 4838 __skb_pull(skb, tcp_header_len); 4839 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4840 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER); 4841 } 4842 if (copied_early) 4843 tcp_cleanup_rbuf(sk, skb->len); 4844 } 4845 if (!eaten) { 4846 if (tcp_checksum_complete_user(sk, skb)) 4847 goto csum_error; 4848 4849 /* Predicted packet is in window by definition. 4850 * seq == rcv_nxt and rcv_wup <= rcv_nxt. 4851 * Hence, check seq<=rcv_wup reduces to: 4852 */ 4853 if (tcp_header_len == 4854 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && 4855 tp->rcv_nxt == tp->rcv_wup) 4856 tcp_store_ts_recent(tp); 4857 4858 tcp_rcv_rtt_measure_ts(sk, skb); 4859 4860 if ((int)skb->truesize > sk->sk_forward_alloc) 4861 goto step5; 4862 4863 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); 4864 4865 /* Bulk data transfer: receiver */ 4866 __skb_pull(skb, tcp_header_len); 4867 __skb_queue_tail(&sk->sk_receive_queue, skb); 4868 skb_set_owner_r(skb, sk); 4869 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4870 } 4871 4872 tcp_event_data_recv(sk, skb); 4873 4874 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { 4875 /* Well, only one small jumplet in fast path... */ 4876 tcp_ack(sk, skb, FLAG_DATA); 4877 tcp_data_snd_check(sk); 4878 if (!inet_csk_ack_scheduled(sk)) 4879 goto no_ack; 4880 } 4881 4882 __tcp_ack_snd_check(sk, 0); 4883 no_ack: 4884 #ifdef CONFIG_NET_DMA 4885 if (copied_early) 4886 __skb_queue_tail(&sk->sk_async_wait_queue, skb); 4887 else 4888 #endif 4889 if (eaten) 4890 __kfree_skb(skb); 4891 else 4892 sk->sk_data_ready(sk, 0); 4893 return 0; 4894 } 4895 } 4896 4897 slow_path: 4898 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) 4899 goto csum_error; 4900 4901 /* 4902 * RFC1323: H1. Apply PAWS check first. 4903 */ 4904 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 4905 tcp_paws_discard(sk, skb)) { 4906 if (!th->rst) { 4907 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); 4908 tcp_send_dupack(sk, skb); 4909 goto discard; 4910 } 4911 /* Resets are accepted even if PAWS failed. 4912 4913 ts_recent update must be made after we are sure 4914 that the packet is in window. 4915 */ 4916 } 4917 4918 /* 4919 * Standard slow path. 4920 */ 4921 4922 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 4923 /* RFC793, page 37: "In all states except SYN-SENT, all reset 4924 * (RST) segments are validated by checking their SEQ-fields." 
4925 * And page 69: "If an incoming segment is not acceptable, 4926 * an acknowledgment should be sent in reply (unless the RST bit 4927 * is set, if so drop the segment and return)". 4928 */ 4929 if (!th->rst) 4930 tcp_send_dupack(sk, skb); 4931 goto discard; 4932 } 4933 4934 if (th->rst) { 4935 tcp_reset(sk); 4936 goto discard; 4937 } 4938 4939 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 4940 4941 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 4942 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 4943 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); 4944 tcp_reset(sk); 4945 return 1; 4946 } 4947 4948 step5: 4949 if (th->ack) 4950 tcp_ack(sk, skb, FLAG_SLOWPATH); 4951 4952 tcp_rcv_rtt_measure_ts(sk, skb); 4953 4954 /* Process urgent data. */ 4955 tcp_urg(sk, skb, th); 4956 4957 /* step 7: process the segment text */ 4958 tcp_data_queue(sk, skb); 4959 4960 tcp_data_snd_check(sk); 4961 tcp_ack_snd_check(sk); 4962 return 0; 4963 4964 csum_error: 4965 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 4966 4967 discard: 4968 __kfree_skb(skb); 4969 return 0; 4970 } 4971 4972 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 4973 struct tcphdr *th, unsigned len) 4974 { 4975 struct tcp_sock *tp = tcp_sk(sk); 4976 struct inet_connection_sock *icsk = inet_csk(sk); 4977 int saved_clamp = tp->rx_opt.mss_clamp; 4978 4979 tcp_parse_options(skb, &tp->rx_opt, 0); 4980 4981 if (th->ack) { 4982 /* rfc793: 4983 * "If the state is SYN-SENT then 4984 * first check the ACK bit 4985 * If the ACK bit is set 4986 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send 4987 * a reset (unless the RST bit is set, if so drop 4988 * the segment and return)" 4989 * 4990 * We do not send data with SYN, so that RFC-correct 4991 * test reduces to: 4992 */ 4993 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt) 4994 goto reset_and_undo; 4995 4996 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 4997 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, 4998 tcp_time_stamp)) { 4999 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED); 5000 goto reset_and_undo; 5001 } 5002 5003 /* Now ACK is acceptable. 5004 * 5005 * "If the RST bit is set 5006 * If the ACK was acceptable then signal the user "error: 5007 * connection reset", drop the segment, enter CLOSED state, 5008 * delete TCB, and return." 5009 */ 5010 5011 if (th->rst) { 5012 tcp_reset(sk); 5013 goto discard; 5014 } 5015 5016 /* rfc793: 5017 * "fifth, if neither of the SYN or RST bits is set then 5018 * drop the segment and return." 5019 * 5020 * See note below! 5021 * --ANK(990513) 5022 */ 5023 if (!th->syn) 5024 goto discard_and_undo; 5025 5026 /* rfc793: 5027 * "If the SYN bit is on ... 5028 * are acceptable then ... 5029 * (our SYN has been ACKed), change the connection 5030 * state to ESTABLISHED..." 5031 */ 5032 5033 TCP_ECN_rcv_synack(tp, th); 5034 5035 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 5036 tcp_ack(sk, skb, FLAG_SLOWPATH); 5037 5038 /* Ok.. it's good. Set up sequence numbers and 5039 * move to established. 5040 */ 5041 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 5042 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 5043 5044 /* RFC1323: The window in SYN & SYN/ACK segments is 5045 * never scaled. 
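 *
 * Example (illustrative numbers): if this SYN-ACK advertises a raw window
 * of 5840 and a window-scale option of 7, snd_wnd is taken as 5840 here,
 * unshifted; only segments after the handshake have their 16-bit window
 * field shifted left by snd_wscale (5840 << 7 = 747520 bytes).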
5046 */ 5047 tp->snd_wnd = ntohs(th->window); 5048 tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq); 5049 5050 if (!tp->rx_opt.wscale_ok) { 5051 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; 5052 tp->window_clamp = min(tp->window_clamp, 65535U); 5053 } 5054 5055 if (tp->rx_opt.saw_tstamp) { 5056 tp->rx_opt.tstamp_ok = 1; 5057 tp->tcp_header_len = 5058 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 5059 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 5060 tcp_store_ts_recent(tp); 5061 } else { 5062 tp->tcp_header_len = sizeof(struct tcphdr); 5063 } 5064 5065 if (tcp_is_sack(tp) && sysctl_tcp_fack) 5066 tcp_enable_fack(tp); 5067 5068 tcp_mtup_init(sk); 5069 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 5070 tcp_initialize_rcv_mss(sk); 5071 5072 /* Remember, tcp_poll() does not lock socket! 5073 * Change state from SYN-SENT only after copied_seq 5074 * is initialized. */ 5075 tp->copied_seq = tp->rcv_nxt; 5076 smp_mb(); 5077 tcp_set_state(sk, TCP_ESTABLISHED); 5078 5079 security_inet_conn_established(sk, skb); 5080 5081 /* Make sure socket is routed, for correct metrics. */ 5082 icsk->icsk_af_ops->rebuild_header(sk); 5083 5084 tcp_init_metrics(sk); 5085 5086 tcp_init_congestion_control(sk); 5087 5088 /* Prevent spurious tcp_cwnd_restart() on first data 5089 * packet. 5090 */ 5091 tp->lsndtime = tcp_time_stamp; 5092 5093 tcp_init_buffer_space(sk); 5094 5095 if (sock_flag(sk, SOCK_KEEPOPEN)) 5096 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); 5097 5098 if (!tp->rx_opt.snd_wscale) 5099 __tcp_fast_path_on(tp, tp->snd_wnd); 5100 else 5101 tp->pred_flags = 0; 5102 5103 if (!sock_flag(sk, SOCK_DEAD)) { 5104 sk->sk_state_change(sk); 5105 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); 5106 } 5107 5108 if (sk->sk_write_pending || 5109 icsk->icsk_accept_queue.rskq_defer_accept || 5110 icsk->icsk_ack.pingpong) { 5111 /* Save one ACK. Data will be ready after 5112 * several ticks, if write_pending is set. 5113 * 5114 * It may be deleted, but with this feature tcpdumps 5115 * look so _wonderfully_ clever, that I was not able 5116 * to stand against the temptation 8) --ANK 5117 */ 5118 inet_csk_schedule_ack(sk); 5119 icsk->icsk_ack.lrcvtime = tcp_time_stamp; 5120 icsk->icsk_ack.ato = TCP_ATO_MIN; 5121 tcp_incr_quickack(sk); 5122 tcp_enter_quickack_mode(sk); 5123 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5124 TCP_DELACK_MAX, TCP_RTO_MAX); 5125 5126 discard: 5127 __kfree_skb(skb); 5128 return 0; 5129 } else { 5130 tcp_send_ack(sk); 5131 } 5132 return -1; 5133 } 5134 5135 /* No ACK in the segment */ 5136 5137 if (th->rst) { 5138 /* rfc793: 5139 * "If the RST bit is set 5140 * 5141 * Otherwise (no ACK) drop the segment and return." 5142 */ 5143 5144 goto discard_and_undo; 5145 } 5146 5147 /* PAWS check. */ 5148 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && 5149 tcp_paws_check(&tp->rx_opt, 0)) 5150 goto discard_and_undo; 5151 5152 if (th->syn) { 5153 /* We see SYN without ACK. It is attempt of 5154 * simultaneous connect with crossed SYNs. 5155 * Particularly, it can be connect to self. 5156 */ 5157 tcp_set_state(sk, TCP_SYN_RECV); 5158 5159 if (tp->rx_opt.saw_tstamp) { 5160 tp->rx_opt.tstamp_ok = 1; 5161 tcp_store_ts_recent(tp); 5162 tp->tcp_header_len = 5163 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; 5164 } else { 5165 tp->tcp_header_len = sizeof(struct tcphdr); 5166 } 5167 5168 tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; 5169 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; 5170 5171 /* RFC1323: The window in SYN & SYN/ACK segments is 5172 * never scaled. 
5173 */ 5174 tp->snd_wnd = ntohs(th->window); 5175 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 5176 tp->max_window = tp->snd_wnd; 5177 5178 TCP_ECN_rcv_syn(tp, th); 5179 5180 tcp_mtup_init(sk); 5181 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 5182 tcp_initialize_rcv_mss(sk); 5183 5184 tcp_send_synack(sk); 5185 #if 0 5186 /* Note, we could accept data and URG from this segment. 5187 * There are no obstacles to make this. 5188 * 5189 * However, if we ignore data in ACKless segments sometimes, 5190 * we have no reasons to accept it sometimes. 5191 * Also, seems the code doing it in step6 of tcp_rcv_state_process 5192 * is not flawless. So, discard packet for sanity. 5193 * Uncomment this return to process the data. 5194 */ 5195 return -1; 5196 #else 5197 goto discard; 5198 #endif 5199 } 5200 /* "fifth, if neither of the SYN or RST bits is set then 5201 * drop the segment and return." 5202 */ 5203 5204 discard_and_undo: 5205 tcp_clear_options(&tp->rx_opt); 5206 tp->rx_opt.mss_clamp = saved_clamp; 5207 goto discard; 5208 5209 reset_and_undo: 5210 tcp_clear_options(&tp->rx_opt); 5211 tp->rx_opt.mss_clamp = saved_clamp; 5212 return 1; 5213 } 5214 5215 /* 5216 * This function implements the receiving procedure of RFC 793 for 5217 * all states except ESTABLISHED and TIME_WAIT. 5218 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be 5219 * address independent. 5220 */ 5221 5222 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 5223 struct tcphdr *th, unsigned len) 5224 { 5225 struct tcp_sock *tp = tcp_sk(sk); 5226 struct inet_connection_sock *icsk = inet_csk(sk); 5227 int queued = 0; 5228 5229 tp->rx_opt.saw_tstamp = 0; 5230 5231 switch (sk->sk_state) { 5232 case TCP_CLOSE: 5233 goto discard; 5234 5235 case TCP_LISTEN: 5236 if (th->ack) 5237 return 1; 5238 5239 if (th->rst) 5240 goto discard; 5241 5242 if (th->syn) { 5243 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0) 5244 return 1; 5245 5246 /* Now we have several options: In theory there is 5247 * nothing else in the frame. KA9Q has an option to 5248 * send data with the syn, BSD accepts data with the 5249 * syn up to the [to be] advertised window and 5250 * Solaris 2.1 gives you a protocol error. For now 5251 * we just ignore it, that fits the spec precisely 5252 * and avoids incompatibilities. It would be nice in 5253 * future to drop through and process the data. 5254 * 5255 * Now that TTCP is starting to be used we ought to 5256 * queue this data. 5257 * But, this leaves one open to an easy denial of 5258 * service attack, and SYN cookies can't defend 5259 * against this problem. So, we drop the data 5260 * in the interest of security over speed unless 5261 * it's still in use. 5262 */ 5263 kfree_skb(skb); 5264 return 0; 5265 } 5266 goto discard; 5267 5268 case TCP_SYN_SENT: 5269 queued = tcp_rcv_synsent_state_process(sk, skb, th, len); 5270 if (queued >= 0) 5271 return queued; 5272 5273 /* Do step6 onward by hand. */ 5274 tcp_urg(sk, skb, th); 5275 __kfree_skb(skb); 5276 tcp_data_snd_check(sk); 5277 return 0; 5278 } 5279 5280 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 5281 tcp_paws_discard(sk, skb)) { 5282 if (!th->rst) { 5283 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); 5284 tcp_send_dupack(sk, skb); 5285 goto discard; 5286 } 5287 /* Reset is accepted even if it did not pass PAWS. 
*/ 5288 } 5289 5290 /* step 1: check sequence number */ 5291 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 5292 if (!th->rst) 5293 tcp_send_dupack(sk, skb); 5294 goto discard; 5295 } 5296 5297 /* step 2: check RST bit */ 5298 if (th->rst) { 5299 tcp_reset(sk); 5300 goto discard; 5301 } 5302 5303 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); 5304 5305 /* step 3: check security and precedence [ignored] */ 5306 5307 /* step 4: 5308 * 5309 * Check for a SYN in window. 5310 */ 5311 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 5312 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN); 5313 tcp_reset(sk); 5314 return 1; 5315 } 5316 5317 /* step 5: check the ACK field */ 5318 if (th->ack) { 5319 int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH); 5320 5321 switch (sk->sk_state) { 5322 case TCP_SYN_RECV: 5323 if (acceptable) { 5324 tp->copied_seq = tp->rcv_nxt; 5325 smp_mb(); 5326 tcp_set_state(sk, TCP_ESTABLISHED); 5327 sk->sk_state_change(sk); 5328 5329 /* Note, that this wakeup is only for marginal 5330 * crossed SYN case. Passively open sockets 5331 * are not waked up, because sk->sk_sleep == 5332 * NULL and sk->sk_socket == NULL. 5333 */ 5334 if (sk->sk_socket) 5335 sk_wake_async(sk, 5336 SOCK_WAKE_IO, POLL_OUT); 5337 5338 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; 5339 tp->snd_wnd = ntohs(th->window) << 5340 tp->rx_opt.snd_wscale; 5341 tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, 5342 TCP_SKB_CB(skb)->seq); 5343 5344 /* tcp_ack considers this ACK as duplicate 5345 * and does not calculate rtt. 5346 * Fix it at least with timestamps. 5347 */ 5348 if (tp->rx_opt.saw_tstamp && 5349 tp->rx_opt.rcv_tsecr && !tp->srtt) 5350 tcp_ack_saw_tstamp(sk, 0); 5351 5352 if (tp->rx_opt.tstamp_ok) 5353 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; 5354 5355 /* Make sure socket is routed, for 5356 * correct metrics. 5357 */ 5358 icsk->icsk_af_ops->rebuild_header(sk); 5359 5360 tcp_init_metrics(sk); 5361 5362 tcp_init_congestion_control(sk); 5363 5364 /* Prevent spurious tcp_cwnd_restart() on 5365 * first data packet. 5366 */ 5367 tp->lsndtime = tcp_time_stamp; 5368 5369 tcp_mtup_init(sk); 5370 tcp_initialize_rcv_mss(sk); 5371 tcp_init_buffer_space(sk); 5372 tcp_fast_path_on(tp); 5373 } else { 5374 return 1; 5375 } 5376 break; 5377 5378 case TCP_FIN_WAIT1: 5379 if (tp->snd_una == tp->write_seq) { 5380 tcp_set_state(sk, TCP_FIN_WAIT2); 5381 sk->sk_shutdown |= SEND_SHUTDOWN; 5382 dst_confirm(sk->sk_dst_cache); 5383 5384 if (!sock_flag(sk, SOCK_DEAD)) 5385 /* Wake up lingering close() */ 5386 sk->sk_state_change(sk); 5387 else { 5388 int tmo; 5389 5390 if (tp->linger2 < 0 || 5391 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 5392 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) { 5393 tcp_done(sk); 5394 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 5395 return 1; 5396 } 5397 5398 tmo = tcp_fin_time(sk); 5399 if (tmo > TCP_TIMEWAIT_LEN) { 5400 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); 5401 } else if (th->fin || sock_owned_by_user(sk)) { 5402 /* Bad case. We could lose such FIN otherwise. 5403 * It is not a big problem, but it looks confusing 5404 * and not so rare event. We still can lose it now, 5405 * if it spins in bh_lock_sock(), but it is really 5406 * marginal case. 
5407 */ 5408 inet_csk_reset_keepalive_timer(sk, tmo); 5409 } else { 5410 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 5411 goto discard; 5412 } 5413 } 5414 } 5415 break; 5416 5417 case TCP_CLOSING: 5418 if (tp->snd_una == tp->write_seq) { 5419 tcp_time_wait(sk, TCP_TIME_WAIT, 0); 5420 goto discard; 5421 } 5422 break; 5423 5424 case TCP_LAST_ACK: 5425 if (tp->snd_una == tp->write_seq) { 5426 tcp_update_metrics(sk); 5427 tcp_done(sk); 5428 goto discard; 5429 } 5430 break; 5431 } 5432 } else 5433 goto discard; 5434 5435 /* step 6: check the URG bit */ 5436 tcp_urg(sk, skb, th); 5437 5438 /* step 7: process the segment text */ 5439 switch (sk->sk_state) { 5440 case TCP_CLOSE_WAIT: 5441 case TCP_CLOSING: 5442 case TCP_LAST_ACK: 5443 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) 5444 break; 5445 case TCP_FIN_WAIT1: 5446 case TCP_FIN_WAIT2: 5447 /* RFC 793 says to queue data in these states, 5448 * RFC 1122 says we MUST send a reset. 5449 * BSD 4.4 also does reset. 5450 */ 5451 if (sk->sk_shutdown & RCV_SHUTDOWN) { 5452 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 5453 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { 5454 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 5455 tcp_reset(sk); 5456 return 1; 5457 } 5458 } 5459 /* Fall through */ 5460 case TCP_ESTABLISHED: 5461 tcp_data_queue(sk, skb); 5462 queued = 1; 5463 break; 5464 } 5465 5466 /* tcp_data could move socket to TIME-WAIT */ 5467 if (sk->sk_state != TCP_CLOSE) { 5468 tcp_data_snd_check(sk); 5469 tcp_ack_snd_check(sk); 5470 } 5471 5472 if (!queued) { 5473 discard: 5474 __kfree_skb(skb); 5475 } 5476 return 0; 5477 } 5478 5479 EXPORT_SYMBOL(sysctl_tcp_ecn); 5480 EXPORT_SYMBOL(sysctl_tcp_reordering); 5481 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); 5482 EXPORT_SYMBOL(tcp_parse_options); 5483 #ifdef CONFIG_TCP_MD5SIG 5484 EXPORT_SYMBOL(tcp_parse_md5sig_option); 5485 #endif 5486 EXPORT_SYMBOL(tcp_rcv_established); 5487 EXPORT_SYMBOL(tcp_rcv_state_process); 5488 EXPORT_SYMBOL(tcp_initialize_rcv_mss); 5489
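/* Much of the sequence-number handling above relies on wrap-safe comparisons
 * in the style of before()/after().  The fragment below is a stand-alone,
 * user-space sketch of that idiom, kept under #if 0 so it is never built as
 * part of this file; seq_before() is a local illustrative name, not a kernel
 * API.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "s1 is older than s2" test: the subtraction is done modulo 2^32
 * and the result is interpreted as signed, so it stays correct across the
 * 4 GB sequence-space wrap as long as the two values are within 2^31 bytes
 * of each other.
 */
static int seq_before(uint32_t s1, uint32_t s2)
{
	return (int32_t)(s1 - s2) < 0;
}

int main(void)
{
	/* 0xfffffff0 is "just before" the wrap, 0x00000010 is "just after". */
	printf("%d\n", seq_before(0xfffffff0u, 0x00000010u));	/* prints 1 */
	printf("%d\n", seq_before(0x00000010u, 0xfffffff0u));	/* prints 0 */
	return 0;
}
#endif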