/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume. Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = 512;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);


/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	/* Don't override Nagle indefinitely with F-RTO */
	if (tp->frto_counter == 2)
		tp->frto_counter = 3;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}
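
/* Worked example for the function above (illustrative numbers, not from
 * the original source): if SND.UNA = 1000 and the peer shrank the window
 * so that tcp_wnd_end() = 1400 while snd_nxt has already reached 1600,
 * the sequence returned is 1400: snd_nxt itself would fall outside the
 * now-smaller window.
 */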

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not do 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}
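
/* Worked example (illustrative numbers): suppose snd_cwnd = 40,
 * restart_cwnd works out to 10, and the connection was idle for just
 * over 3*RTO. The loop above halves cwnd once per elapsed RTO but never
 * below restart_cwnd: 40 -> 20 -> 10, and the connection restarts from
 * a cwnd of 10.
 */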

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If this is a reply within ato after the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on the max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set the initial window to a value enough for senders following
	 * RFC2414. Senders not following this RFC will be satisfied with 2.
	 */
	if (mss > (1 << *rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460 * 3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		/* when initializing use the value from init_rcv_wnd
		 * rather than the default from above
		 */
		if (init_rcv_wnd &&
		    (*rcv_wnd > init_rcv_wnd * mss))
			*rcv_wnd = init_rcv_wnd * mss;
		else if (*rcv_wnd > init_cwnd * mss)
			*rcv_wnd = init_cwnd * mss;
	}

	/* Set the clamp no higher than the max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
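
/* Worked example for the wscale loop above (illustrative numbers): with
 * tcp_rmem[2] = 4 MB, the loop searches for the smallest shift that
 * brings the space under 64K. 4 MB >> 6 = 65536 is still too big, so
 * rcv_wscale ends up as 7 (4 MB >> 7 = 32768).
 */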

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied. The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time. --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}
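
/* Note on the ALIGN() above: when the freshly computed window would
 * shrink the offer, the current window is rounded up to the next
 * multiple of 1 << rcv_wscale, so that after the scaling shift the
 * value put on the wire never advertises less than what was already
 * offered.
 */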

/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline int tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_COOKIE_EXTENSION	(1 << 4)

struct tcp_out_options {
	u8 options;		/* bit field of OPTION_* */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	u16 mss;		/* 0 to disable */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	__u8 *hash_location;	/* temporary pointer, overloaded */
};

/* The sysctl int routines are generic, so check consistency here. */
static u8 tcp_cookie_size_check(u8 desired)
{
	if (desired > 0) {
		/* previously specified */
		return desired;
	}
	if (sysctl_tcp_cookie_size <= 0) {
		/* no default specified */
		return 0;
	}
	if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) {
		/* value too small, specify minimum */
		return TCP_COOKIE_MIN;
	}
	if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) {
		/* value too large, specify maximum */
		return TCP_COOKIE_MAX;
	}
	if (0x1 & sysctl_tcp_cookie_size) {
		/* 8-bit multiple, illegal, fix it */
		return (u8)(sysctl_tcp_cookie_size + 0x1);
	}
	return (u8)sysctl_tcp_cookie_size;
}
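
/* Example: sysctl_tcp_cookie_size = 17 (odd) is rounded up to 18 by the
 * last test above, keeping the cookie a 16-bit multiple as the option
 * layout requires.
 */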
497 */ 498 if (0x2 & cookie_size) { 499 __u8 *p = (__u8 *)ptr; 500 501 /* 16-bit multiple */ 502 *p++ = TCPOPT_COOKIE; 503 *p++ = TCPOLEN_COOKIE_BASE + cookie_size; 504 *p++ = *cookie_copy++; 505 *p++ = *cookie_copy++; 506 ptr++; 507 cookie_size -= 2; 508 } else { 509 /* 32-bit multiple */ 510 *ptr++ = htonl(((TCPOPT_NOP << 24) | 511 (TCPOPT_NOP << 16) | 512 (TCPOPT_COOKIE << 8) | 513 TCPOLEN_COOKIE_BASE) + 514 cookie_size); 515 } 516 517 if (cookie_size > 0) { 518 memcpy(ptr, cookie_copy, cookie_size); 519 ptr += (cookie_size / 4); 520 } 521 } 522 523 if (unlikely(OPTION_SACK_ADVERTISE & options)) { 524 *ptr++ = htonl((TCPOPT_NOP << 24) | 525 (TCPOPT_NOP << 16) | 526 (TCPOPT_SACK_PERM << 8) | 527 TCPOLEN_SACK_PERM); 528 } 529 530 if (unlikely(OPTION_WSCALE & options)) { 531 *ptr++ = htonl((TCPOPT_NOP << 24) | 532 (TCPOPT_WINDOW << 16) | 533 (TCPOLEN_WINDOW << 8) | 534 opts->ws); 535 } 536 537 if (unlikely(opts->num_sack_blocks)) { 538 struct tcp_sack_block *sp = tp->rx_opt.dsack ? 539 tp->duplicate_sack : tp->selective_acks; 540 int this_sack; 541 542 *ptr++ = htonl((TCPOPT_NOP << 24) | 543 (TCPOPT_NOP << 16) | 544 (TCPOPT_SACK << 8) | 545 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * 546 TCPOLEN_SACK_PERBLOCK))); 547 548 for (this_sack = 0; this_sack < opts->num_sack_blocks; 549 ++this_sack) { 550 *ptr++ = htonl(sp[this_sack].start_seq); 551 *ptr++ = htonl(sp[this_sack].end_seq); 552 } 553 554 tp->rx_opt.dsack = 0; 555 } 556 } 557 558 /* Compute TCP options for SYN packets. This is not the final 559 * network wire format yet. 560 */ 561 static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, 562 struct tcp_out_options *opts, 563 struct tcp_md5sig_key **md5) { 564 struct tcp_sock *tp = tcp_sk(sk); 565 struct tcp_cookie_values *cvp = tp->cookie_values; 566 unsigned remaining = MAX_TCP_OPTION_SPACE; 567 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? 568 tcp_cookie_size_check(cvp->cookie_desired) : 569 0; 570 571 #ifdef CONFIG_TCP_MD5SIG 572 *md5 = tp->af_specific->md5_lookup(sk, sk); 573 if (*md5) { 574 opts->options |= OPTION_MD5; 575 remaining -= TCPOLEN_MD5SIG_ALIGNED; 576 } 577 #else 578 *md5 = NULL; 579 #endif 580 581 /* We always get an MSS option. The option bytes which will be seen in 582 * normal data packets should timestamps be used, must be in the MSS 583 * advertised. But we subtract them from tp->mss_cache so that 584 * calculations in tcp_sendmsg are simpler etc. So account for this 585 * fact here if necessary. If we don't do this correctly, as a 586 * receiver we won't recognize data packets as being full sized when we 587 * should, and thus we won't abide by the delayed ACK rules correctly. 588 * SACKs don't matter, we never delay an ACK when we have any of those 589 * going out. */ 590 opts->mss = tcp_advertise_mss(sk); 591 remaining -= TCPOLEN_MSS_ALIGNED; 592 593 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { 594 opts->options |= OPTION_TS; 595 opts->tsval = TCP_SKB_CB(skb)->when; 596 opts->tsecr = tp->rx_opt.ts_recent; 597 remaining -= TCPOLEN_TSTAMP_ALIGNED; 598 } 599 if (likely(sysctl_tcp_window_scaling)) { 600 opts->ws = tp->rx_opt.rcv_wscale; 601 opts->options |= OPTION_WSCALE; 602 remaining -= TCPOLEN_WSCALE_ALIGNED; 603 } 604 if (likely(sysctl_tcp_sack)) { 605 opts->options |= OPTION_SACK_ADVERTISE; 606 if (unlikely(!(OPTION_TS & opts->options))) 607 remaining -= TCPOLEN_SACKPERM_ALIGNED; 608 } 609 610 /* Note that timestamps are required by the specification. 

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_cookie_values *cvp = tp->cookie_values;
	unsigned remaining = MAX_TCP_OPTION_SPACE;
	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
			 tcp_cookie_size_check(cvp->cookie_desired) :
			 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option. The option bytes which will be seen in
	 * normal data packets (should timestamps be used) must be included in
	 * the MSS advertised. But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc. So account for this
	 * fact here if necessary. If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	/* Note that timestamps are required by the specification.
	 *
	 * Odd numbers of bytes are prohibited by the specification, ensuring
	 * that the cookie is 16-bit aligned, and the resulting cookie pair is
	 * 32-bit aligned.
	 */
	if (*md5 == NULL &&
	    (OPTION_TS & opts->options) &&
	    cookie_size > 0) {
		int need = TCPOLEN_COOKIE_BASE + cookie_size;

		if (0x2 & need) {
			/* 32-bit multiple */
			need += 2; /* NOPs */

			if (need > remaining) {
				/* try shrinking cookie to fit */
				cookie_size -= 2;
				need -= 4;
			}
		}
		while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
			cookie_size -= 4;
			need -= 4;
		}
		if (TCP_COOKIE_MIN <= cookie_size) {
			opts->options |= OPTION_COOKIE_EXTENSION;
			opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
			opts->hash_size = cookie_size;

			/* Remember for future incarnations. */
			cvp->cookie_desired = cookie_size;

			if (cvp->cookie_desired != cvp->cookie_pair_size) {
				/* Currently use random bytes as a nonce,
				 * assuming these are completely unpredictable
				 * by hostile users of the same system.
				 */
				get_random_bytes(&cvp->cookie_pair[0],
						 cookie_size);
				cvp->cookie_pair_size = cookie_size;
			}

			remaining -= need;
		}
	}
	return MAX_TCP_OPTION_SPACE - remaining;
}
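
/* Space accounting example for a typical SYN built above: 40 bytes of
 * option space minus MSS(4), timestamps(12) and window scale(4) leaves
 * 20 bytes. SACK_PERM rides in the timestamp word, so up to 20 bytes
 * remain for a cookie.
 */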

/* Set up TCP options for SYN-ACKs. */
static unsigned tcp_synack_options(struct sock *sk,
				   struct request_sock *req,
				   unsigned mss, struct sk_buff *skb,
				   struct tcp_out_options *opts,
				   struct tcp_md5sig_key **md5,
				   struct tcp_extend_values *xvp)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned remaining = MAX_TCP_OPTION_SPACE;
	u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
			 xvp->cookie_plus :
			 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	/* Similar rationale to tcp_syn_options() applies here, too.
	 * If the <SYN> options fit, the same options should fit now!
	 */
	if (*md5 == NULL &&
	    ireq->tstamp_ok &&
	    cookie_plus > TCPOLEN_COOKIE_BASE) {
		int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */

		if (0x2 & need) {
			/* 32-bit multiple */
			need += 2; /* NOPs */
		}
		if (need <= remaining) {
			opts->options |= OPTION_COOKIE_EXTENSION;
			opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
			remaining -= need;
		} else {
			/* There's no error return, so flag it. */
			xvp->cookie_out_never = 1; /* true */
			opts->hash_size = 0;
		}
	}
	return MAX_TCP_OPTION_SPACE - remaining;
}
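
/* The MD5 trade-off above in numbers: MD5(20, aligned) + MSS(4) +
 * timestamps(12) + window scale(4) already fills all 40 option bytes,
 * which is why tstamp_ok is cleared when SACK was also requested.
 */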

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
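
/* Example: with timestamps in use, remaining = 40 - 12 = 28 bytes, so
 * at most (28 - 4) / 8 = 3 SACK blocks fit in a single ACK.
 */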

/* This routine actually transmits TCP packets queued by
 * tcp_do_sendmsg(). This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless. It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->flags);

	if (unlikely(tcb->flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	if (likely((tcb->flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	err = icsk->icsk_af_ops->queue_xmit(skb);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}
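
/* The raw 16-bit store above packs the header length and the flag bits
 * into the word at byte offset 12: e.g. a 32-byte header gives
 * doff = 32 >> 2 = 8, placed in the top four bits ahead of the flags.
 */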

/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk) ||
	    skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}

/* When a modification to fackets_out becomes necessary, we need to check
 * whether skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix the counters.
 */
static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}

/* Function to create two new TCP segments. Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list. This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u8 flags;

	BUG_ON(len > skb->len);

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the 'when' field of
	 * skbs which it has never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			   tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}
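
/* Pcount example for the accounting above (illustrative numbers): a
 * 4380-byte skb at mss 1460 carries pcount 3; splitting it at len 1460
 * leaves pcounts 1 + 2, so diff = 3 - 1 - 2 = 0 and nothing needs
 * adjusting. Splits off MSS boundaries can make diff non-zero due to
 * the round-up in tcp_set_skb_tso_segs().
 */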

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb_reset_tail_pointer(skb);
	skb->data_len -= len;
	skb->len = skb->data_len;
}

/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
	if (unlikely(len < skb_headlen(skb)))
		__skb_pull(skb, len);
	else
		__pskb_trim_head(skb, len - skb_headlen(skb));

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize	   -= len;
	sk->sk_wmem_queued -= len;
	sk_mem_uncharge(sk, len);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));

	return 0;
}

/* Calculate MSS. Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	 * It is MMS_S - sizeof(tcphdr) of rfc1122.
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}
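
/* Worked example: for IPv4 with pmtu 1500 and no extension headers,
 * mss_now = 1500 - 20 - 20 = 1460; with timestamps enabled
 * (tcp_header_len = 20 + 12) the final value drops to 1448.
 */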

/* Inverse of the above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}

/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}
EXPORT_SYMBOL(tcp_mtup_init);

/* This function synchronizes snd mss to the current pmtu/exthdr set.
 *
 * tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does
 * NOT account for TCP options, but includes only the bare TCP header.
 *
 * tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
 * It is the minimum of user_mss and the mss received with SYN.
 * It also does not include TCP options.
 *
 * inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
 *
 * tp->mss_cache is the current effective sending mss, including
 * all tcp options except for SACKs. It is evaluated,
 * taking into account current pmtu, but never exceeds
 * tp->rx_opt.mss_clamp.
 *
 * NOTE1. rfc1122 clearly states that advertised MSS
 * DOES NOT include either tcp or ip options.
 *
 * NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
 * are READ ONLY outside this function. --ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);
	mss_now = tcp_bound_to_half_wnd(tp, mss_now);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}
EXPORT_SYMBOL(tcp_sync_mss);

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
unsigned int tcp_current_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	unsigned header_len;
	struct tcp_out_options opts;
	struct tcp_md5sig_key *md5;

	mss_now = tp->mss_cache;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
		     sizeof(struct tcphdr);
	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
	 * some common options. If this is an odd packet (because we have SACK
	 * blocks etc) then our calculated header_len will be different, and
	 * we have to adjust mss_now correspondingly.
	 */
	if (header_len != tp->tcp_header_len) {
		int delta = (int) header_len - tp->tcp_header_len;
		mss_now -= delta;
	}

	return mss_now;
}

/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

/* Returns the portion of skb which can be sent right away without
 * introducing MSS oddities to segment boundaries. In rare cases where
 * mss_now != mss_cache, we will request the caller to create a small skb
 * per input skb which could be mostly avoided here (if desired).
 *
 * We explicitly want to create a request for splitting the write queue tail
 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
 * thus all the complexity (cwnd_len is always an MSS multiple which we
 * return whenever allowed by the other factors). Basically we need the
 * modulo only when the receiver window alone is the limiting factor or
 * when we would be allowed to send the split-due-to-Nagle skb fully.
 */
static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
					unsigned int mss_now, unsigned int cwnd)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 needed, window, cwnd_len;

	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	cwnd_len = mss_now * cwnd;

	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
		return cwnd_len;

	needed = min(skb->len, window);

	if (cwnd_len <= needed)
		return cwnd_len;

	return needed - needed % mss_now;
}
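
/* Worked example (illustrative numbers): if the receiver window allows
 * 5000 more bytes, mss_now is 1460, cwnd is not the limit and the skb
 * is larger than the window, the split point is
 * needed - needed % mss = 5000 - 620 = 4380, keeping segment boundaries
 * on MSS multiples.
 */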

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules? If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
					 struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN. */
	if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* Initialize TSO state of a skb.
 * This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
			     unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

/* Minshall's variant of the Nagle send check. */
static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
	       !after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0 if the packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle & TCP_NAGLE_CORK) ||
		 (!nonagle && tp->packets_out && tcp_minshall_check(tp))));
}

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* The Nagle rule does not apply to frames which sit in the middle of
	 * the write_queue (they have no chance to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN).
	 * Nagle can be ignored during F-RTO too (see RFC4138).
	 */
	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}
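
/* Minshall in practice: snd_sml records the end of the last sub-mss
 * segment sent. While that byte is still unacknowledged (after snd_una
 * but not after snd_nxt), tcp_nagle_check() above holds back further
 * small segments unless nonagle overrides it.
 */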

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
				   unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tcp_wnd_end(tp));
}

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now. If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

/* Test if sending is allowed right now. */
int tcp_may_send_now(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk),
			     (tcp_skb_is_last(sk, skb) ?
			      tp->nonagle : TCP_NAGLE_PUSH)));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list. It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation. In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
			unsigned int mss_now, gfp_t gfp)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u8 flags;

	/* All of a TSO frame must be composed of paged data. */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_skb(sk, 0, gfp);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do. View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;

	if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
		goto send_now;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		goto send_now;

	/* Defer for less than two clock ticks. */
	if (tp->tso_deferred &&
	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
		goto send_now;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));

	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

	/* From in_flight test above, we know that cwnd > in_flight. */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	/* If a full-sized TSO skb can be sent, do it. */
	if (limit >= sk->sk_gso_max_size)
		goto send_now;

	/* Middle in queue won't get any more data, full sendable already? */
	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
		goto send_now;

	if (sysctl_tcp_tso_win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= sysctl_tcp_tso_win_divisor;
		if (limit >= chunk)
			goto send_now;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK. Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_burst(tp) * tp->mss_cache)
			goto send_now;
	}

	/* Ok, it looks like it is advisable to defer. */
	tp->tso_deferred = 1 | (jiffies << 1);

	return 1;

send_now:
	tp->tso_deferred = 0;
	return 0;
}
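
/* tso_deferred packing note: the timestamp is stored as 1 | (jiffies << 1)
 * so the field is never zero even when jiffies is; the shifts in the
 * age test above strip that marker bit again before comparing ticks.
 */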

/* Create a new MTU probe if we are ready.
 * MTU probing regularly attempts to increase the path MTU by
 * deliberately sending larger packets. This discovers routing
 * changes resulting in larger path MTUs.
 *
 * Returns 0 if we should wait to probe (no cwnd available),
 *         1 if a probe was sent,
 *         -1 otherwise
 */
static int tcp_mtu_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb, *nskb, *next;
	int len;
	int probe_size;
	int size_needed;
	int copy;
	int mss_now;

	/* Not currently probing/verifying,
	 * not in recovery,
	 * have enough cwnd, and
	 * not SACKing (the variable headers throw things off)
	 */
	if (!icsk->icsk_mtup.enabled ||
	    icsk->icsk_mtup.probe_size ||
	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
	    tp->snd_cwnd < 11 ||
	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
		return -1;

	/* Very simple search strategy: just double the MSS. */
	mss_now = tcp_current_mss(sk);
	probe_size = 2 * tp->mss_cache;
	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
		/* TODO: set timer for probe_converge_event */
		return -1;
	}

	/* Have enough data in the send queue to probe? */
	if (tp->write_seq - tp->snd_nxt < size_needed)
		return -1;

	if (tp->snd_wnd < size_needed)
		return -1;
	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
		return 0;

	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
		if (!tcp_packets_in_flight(tp))
			return -1;
		else
			return 0;
	}

	/* We're allowed to probe. Build it now. */
	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
		return -1;
	sk->sk_wmem_queued += nskb->truesize;
	sk_mem_charge(sk, nskb->truesize);

	skb = tcp_send_head(sk);

	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
	TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
	TCP_SKB_CB(nskb)->sacked = 0;
	nskb->csum = 0;
	nskb->ip_summed = skb->ip_summed;

	tcp_insert_write_queue_before(nskb, skb, sk);

	len = 0;
	tcp_for_write_queue_from_safe(skb, next, sk) {
		copy = min_t(int, skb->len, probe_size - len);
		if (nskb->ip_summed)
			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
		else
			nskb->csum = skb_copy_and_csum_bits(skb, 0,
							    skb_put(nskb, copy),
							    copy, nskb->csum);

		if (skb->len <= copy) {
			/* We've eaten all the data from this skb.
			 * Throw it away.
			 */
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
			tcp_unlink_write_queue(skb, sk);
			sk_wmem_free_skb(sk, skb);
		} else {
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
						   ~(TCPHDR_FIN|TCPHDR_PSH);
			if (!skb_shinfo(skb)->nr_frags) {
				skb_pull(skb, copy);
				if (skb->ip_summed != CHECKSUM_PARTIAL)
					skb->csum = csum_partial(skb->data,
								 skb->len, 0);
			} else {
				__pskb_trim_head(skb, copy);
				tcp_set_skb_tso_segs(sk, skb, mss_now);
			}
			TCP_SKB_CB(skb)->seq += copy;
		}

		len += copy;

		if (len >= probe_size)
			break;
	}
	tcp_init_tso_segs(sk, nskb, nskb->len);

	/* We're ready to send. If this fails, the probe will
	 * be resegmented into mss-sized pieces by tcp_write_xmit().
	 */
	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
		/* Decrement cwnd here because we are sending
		 * effectively two packets.
		 */
		tp->snd_cwnd--;
		tcp_event_new_data_sent(sk, nskb);

		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;

		return 1;
	}

	return -1;
}
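
/* Probe sizing example (illustrative numbers): with mss_cache 1460 and
 * reordering 3, probe_size = 2920 and size_needed = 2920 + 4 * 1460 =
 * 8760 bytes must be queued and inside the window before a probe is
 * attempted.
 */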

/* This routine writes packets to the network. It advances the
 * send_head. This happens as incoming acks open up the remote
 * window for us.
 *
 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
 * account rare use of URG, this is not a big flaw.
 *
 * Returns 1 if no segments are in flight and we have queued segments, but
 * cannot send anything now because of SWS or another problem.
 */
static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			  int push_one, gfp_t gfp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;
	int result;

	sent_pkts = 0;

	if (!push_one) {
		/* Do MTU probing. */
		result = tcp_mtu_probe(sk);
		if (!result) {
			return 0;
		} else if (result > 0) {
			sent_pkts = 1;
		}
	}

	while ((skb = tcp_send_head(sk))) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota)
			break;

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (!push_one && tcp_tso_should_defer(sk, skb))
				break;
		}

		limit = mss_now;
		if (tso_segs > 1 && !tcp_urg_mode(tp))
			limit = tcp_mss_split_point(sk, skb, mss_now,
						    cwnd_quota);

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
			break;

		/* Advance the send_head. This one is sent out.
		 * This call will increment packets_out.
		 */
		tcp_event_new_data_sent(sk, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts++;

		if (push_one)
			break;
	}

	if (likely(sent_pkts)) {
		tcp_cwnd_validate(sk);
		return 0;
	}
	return !tp->packets_out && tcp_send_head(sk);
}
/* This function returns the amount that we can raise the
 * usable window based on the following constraints
 *
 * 1. The window can never be shrunk once it is offered (RFC 793)
 * 2. We limit memory per socket
 *
 * RFC 1122:
 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
 *  RCV.NXT + RCV.WND fixed until:
 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
 *
 * i.e. don't raise the right edge of the window until you can raise
 * it at least MSS bytes.
 *
 * Unfortunately, the recommended algorithm breaks header prediction,
 * since header prediction assumes th->window stays fixed.
 *
 * Strictly speaking, keeping th->window fixed violates the receiver
 * side SWS prevention criteria.  The problem is that under this rule
 * a stream of single byte packets will cause the right side of the
 * window to always advance by a single byte.
 *
 * Of course, if the sender implements sender side SWS prevention
 * then this will not be a problem.
 *
 * BSD seems to make the following compromise:
 *
 *      If the free space is less than the 1/4 of the maximum
 *      space available and the free space is less than 1/2 mss,
 *      then set the window to 0.
 *      [ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
 *      Otherwise, just prevent the window from shrinking
 *      and from being larger than the largest representable value.
 *
 * This prevents incremental opening of the window in the regime
 * where TCP is limited by the speed of the reader side taking
 * data out of the TCP receive queue.  It does nothing about
 * those cases where the window is constrained on the sender side
 * because the pipeline is full.
 *
 * BSD also seems to "accidentally" limit itself to windows that are a
 * multiple of MSS, at least until the free space gets quite small.
 * This would appear to be a side effect of the mbuf implementation.
 * Combining these two algorithms results in the observed behavior
 * of having a fixed window size at almost all times.
 *
 * Below we obtain similar behavior by forcing the offered window to
 * a multiple of the mss when it is feasible to do so.
 *
 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
 * Regular options like TIMESTAMP are taken into account.
 */
u32 __tcp_select_window(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        /* MSS for the peer's data.  Previous versions used mss_clamp
         * here.  I don't know if the value based on our guesses
         * of peer's MSS is better for the performance.  It's more correct
         * but may be worse for the performance because of rcv_mss
         * fluctuations.  --SAW  1998/11/1
         */
        int mss = icsk->icsk_ack.rcv_mss;
        int free_space = tcp_space(sk);
        int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
        int window;

        if (mss > full_space)
                mss = full_space;

        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;

                if (tcp_memory_pressure)
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh,
                                               4U * tp->advmss);

                if (free_space < mss)
                        return 0;
        }

        if (free_space > tp->rcv_ssthresh)
                free_space = tp->rcv_ssthresh;

        /* Don't do rounding if we are using window scaling, since the
         * scaled window will not line up with the MSS boundary anyway.
         */
        window = tp->rcv_wnd;
        if (tp->rx_opt.rcv_wscale) {
                window = free_space;

                /* Advertise enough space so that it won't get scaled away.
                 * Important case: prevent zero window announcement if
                 * 1<<rcv_wscale > mss.
                 */
                if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
                        window = (((window >> tp->rx_opt.rcv_wscale) + 1)
                                  << tp->rx_opt.rcv_wscale);
        } else {
                /* Get the largest window that is a nice multiple of mss.
                 * Window clamp already applied above.
                 * If our current window offering is within 1 mss of the
                 * free space we just keep it.  This prevents the divide
                 * and multiply from happening most of the time.
                 * We also don't do any window rounding when the free space
                 * is too small.
                 */
                if (window <= free_space - mss || window > free_space)
                        window = (free_space / mss) * mss;
                else if (mss == full_space &&
                         free_space > window + (full_space >> 1))
                        window = free_space;
        }

        return window;
}
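/* Illustrative sketch, not part of the original file: the two rounding
 * rules __tcp_select_window() uses above.  Without window scaling the
 * offer is trimmed down to a multiple of mss; with scaling it is
 * rounded *up* to the next value representable after the shift, so a
 * small but nonzero window is never advertised as zero.
 */
static unsigned int round_offered_window(unsigned int free_space,
                                         unsigned int mss,
                                         unsigned int wscale)
{
        if (!wscale)
                return (free_space / mss) * mss;        /* trim to mss multiple */

        if (((free_space >> wscale) << wscale) != free_space)
                return ((free_space >> wscale) + 1) << wscale;  /* round up */
        return free_space;
}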
1937 */ 1938 if (window <= free_space - mss || window > free_space) 1939 window = (free_space / mss) * mss; 1940 else if (mss == full_space && 1941 free_space > window + (full_space >> 1)) 1942 window = free_space; 1943 } 1944 1945 return window; 1946 } 1947 1948 /* Collapses two adjacent SKB's during retransmission. */ 1949 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 1950 { 1951 struct tcp_sock *tp = tcp_sk(sk); 1952 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 1953 int skb_size, next_skb_size; 1954 1955 skb_size = skb->len; 1956 next_skb_size = next_skb->len; 1957 1958 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 1959 1960 tcp_highest_sack_combine(sk, next_skb, skb); 1961 1962 tcp_unlink_write_queue(next_skb, sk); 1963 1964 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), 1965 next_skb_size); 1966 1967 if (next_skb->ip_summed == CHECKSUM_PARTIAL) 1968 skb->ip_summed = CHECKSUM_PARTIAL; 1969 1970 if (skb->ip_summed != CHECKSUM_PARTIAL) 1971 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 1972 1973 /* Update sequence range on original skb. */ 1974 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 1975 1976 /* Merge over control information. This moves PSH/FIN etc. over */ 1977 TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags; 1978 1979 /* All done, get rid of second SKB and account for it so 1980 * packet counting does not break. 1981 */ 1982 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; 1983 1984 /* changed transmit queue under us so clear hints */ 1985 tcp_clear_retrans_hints_partial(tp); 1986 if (next_skb == tp->retransmit_skb_hint) 1987 tp->retransmit_skb_hint = skb; 1988 1989 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 1990 1991 sk_wmem_free_skb(sk, next_skb); 1992 } 1993 1994 /* Check if coalescing SKBs is legal. */ 1995 static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb) 1996 { 1997 if (tcp_skb_pcount(skb) > 1) 1998 return 0; 1999 /* TODO: SACK collapsing could be used to remove this condition */ 2000 if (skb_shinfo(skb)->nr_frags != 0) 2001 return 0; 2002 if (skb_cloned(skb)) 2003 return 0; 2004 if (skb == tcp_send_head(sk)) 2005 return 0; 2006 /* Some heurestics for collapsing over SACK'd could be invented */ 2007 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 2008 return 0; 2009 2010 return 1; 2011 } 2012 2013 /* Collapse packets in the retransmit queue to make to create 2014 * less packets on the wire. This is only done on retransmission. 2015 */ 2016 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, 2017 int space) 2018 { 2019 struct tcp_sock *tp = tcp_sk(sk); 2020 struct sk_buff *skb = to, *tmp; 2021 int first = 1; 2022 2023 if (!sysctl_tcp_retrans_collapse) 2024 return; 2025 if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN) 2026 return; 2027 2028 tcp_for_write_queue_from_safe(skb, tmp, sk) { 2029 if (!tcp_can_collapse(sk, skb)) 2030 break; 2031 2032 space -= skb->len; 2033 2034 if (first) { 2035 first = 0; 2036 continue; 2037 } 2038 2039 if (space < 0) 2040 break; 2041 /* Punt if not enough space exists in the first SKB for 2042 * the data in the second 2043 */ 2044 if (skb->len > skb_tailroom(to)) 2045 break; 2046 2047 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 2048 break; 2049 2050 tcp_collapse_retrans(sk, to); 2051 } 2052 } 2053 2054 /* This retransmits one SKB. Policy decisions and retransmit queue 2055 * state updates are done by the caller. 
/* This retransmits one SKB.  Policy decisions and retransmit queue
 * state updates are done by the caller.  Returns non-zero if an
 * error occurred which prevented the send.
 */
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        unsigned int cur_mss;
        int err;

        /* Inconclusive MTU probe */
        if (icsk->icsk_mtup.probe_size) {
                icsk->icsk_mtup.probe_size = 0;
        }

        /* Do not send more than we queued.  1/4 is reserved for possible
         * copying overhead: fragmentation, tunneling, mangling etc.
         */
        if (atomic_read(&sk->sk_wmem_alloc) >
            min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
                return -EAGAIN;

        if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
                if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
                        BUG();
                if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
                        return -ENOMEM;
        }

        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
                return -EHOSTUNREACH; /* Routing failure or similar. */

        cur_mss = tcp_current_mss(sk);

        /* If receiver has shrunk his window, and skb is out of
         * new window, do not retransmit it.  The exception is the
         * case, when window is shrunk to zero.  In this case
         * our retransmit serves as a zero window probe.
         */
        if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
            TCP_SKB_CB(skb)->seq != tp->snd_una)
                return -EAGAIN;

        if (skb->len > cur_mss) {
                if (tcp_fragment(sk, skb, cur_mss, cur_mss))
                        return -ENOMEM; /* We'll try again later. */
        } else {
                int oldpcount = tcp_skb_pcount(skb);

                if (unlikely(oldpcount > 1)) {
                        tcp_init_tso_segs(sk, skb, cur_mss);
                        tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
                }
        }

        tcp_retrans_try_collapse(sk, skb, cur_mss);

        /* Some Solaris stacks overoptimize and ignore the FIN on a
         * retransmit when old data is attached.  So strip it off
         * since it is cheap to do so and saves bytes on the network.
         */
        if (skb->len > 0 &&
            (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
            tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
                if (!pskb_trim(skb, 0)) {
                        /* Reuse, even though it does some unnecessary work */
                        tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
                                             TCP_SKB_CB(skb)->flags);
                        skb->ip_summed = CHECKSUM_NONE;
                }
        }

        /* Make a copy, if the first transmission SKB clone we made
         * is still in somebody's hands, else make a clone.
         */
        TCP_SKB_CB(skb)->when = tcp_time_stamp;

        err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);

        if (err == 0) {
                /* Update global TCP statistics. */
                TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);

                tp->total_retrans++;

#if FASTRETRANS_DEBUG > 0
                if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                        if (net_ratelimit())
                                printk(KERN_DEBUG "retrans_out leaked.\n");
                }
#endif
                if (!tp->retrans_out)
                        tp->lost_retrans_low = tp->snd_nxt;
                TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
                tp->retrans_out += tcp_skb_pcount(skb);

                /* Save stamp of the first retransmit. */
                if (!tp->retrans_stamp)
                        tp->retrans_stamp = TCP_SKB_CB(skb)->when;

                tp->undo_retrans++;

                /* snd_nxt is stored to detect loss of retransmitted segment,
                 * see tcp_input.c tcp_sacktag_write_queue().
                 */
                TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
        }
        return err;
}
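/* Illustrative sketch, not part of the original file: the memory gate
 * at the top of tcp_retransmit_skb().  A quarter of the queued byte
 * count is reserved for copying overhead, so the retransmit is refused
 * once allocated write memory exceeds the smaller of that padded
 * figure and the send buffer limit.
 */
static int retransmit_mem_ok(unsigned int wmem_alloc,
                             unsigned int wmem_queued,
                             unsigned int sndbuf)
{
        unsigned int limit = wmem_queued + (wmem_queued >> 2);

        if (limit > sndbuf)
                limit = sndbuf;
        return wmem_alloc <= limit;
}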
2159 */ 2160 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 2161 } 2162 return err; 2163 } 2164 2165 /* Check if we forward retransmits are possible in the current 2166 * window/congestion state. 2167 */ 2168 static int tcp_can_forward_retransmit(struct sock *sk) 2169 { 2170 const struct inet_connection_sock *icsk = inet_csk(sk); 2171 struct tcp_sock *tp = tcp_sk(sk); 2172 2173 /* Forward retransmissions are possible only during Recovery. */ 2174 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2175 return 0; 2176 2177 /* No forward retransmissions in Reno are possible. */ 2178 if (tcp_is_reno(tp)) 2179 return 0; 2180 2181 /* Yeah, we have to make difficult choice between forward transmission 2182 * and retransmission... Both ways have their merits... 2183 * 2184 * For now we do not retransmit anything, while we have some new 2185 * segments to send. In the other cases, follow rule 3 for 2186 * NextSeg() specified in RFC3517. 2187 */ 2188 2189 if (tcp_may_send_now(sk)) 2190 return 0; 2191 2192 return 1; 2193 } 2194 2195 /* This gets called after a retransmit timeout, and the initially 2196 * retransmitted data is acknowledged. It tries to continue 2197 * resending the rest of the retransmit queue, until either 2198 * we've sent it all or the congestion window limit is reached. 2199 * If doing SACK, the first ACK which comes back for a timeout 2200 * based retransmit packet might feed us FACK information again. 2201 * If so, we use it to avoid unnecessarily retransmissions. 2202 */ 2203 void tcp_xmit_retransmit_queue(struct sock *sk) 2204 { 2205 const struct inet_connection_sock *icsk = inet_csk(sk); 2206 struct tcp_sock *tp = tcp_sk(sk); 2207 struct sk_buff *skb; 2208 struct sk_buff *hole = NULL; 2209 u32 last_lost; 2210 int mib_idx; 2211 int fwd_rexmitting = 0; 2212 2213 if (!tp->packets_out) 2214 return; 2215 2216 if (!tp->lost_out) 2217 tp->retransmit_high = tp->snd_una; 2218 2219 if (tp->retransmit_skb_hint) { 2220 skb = tp->retransmit_skb_hint; 2221 last_lost = TCP_SKB_CB(skb)->end_seq; 2222 if (after(last_lost, tp->retransmit_high)) 2223 last_lost = tp->retransmit_high; 2224 } else { 2225 skb = tcp_write_queue_head(sk); 2226 last_lost = tp->snd_una; 2227 } 2228 2229 tcp_for_write_queue_from(skb, sk) { 2230 __u8 sacked = TCP_SKB_CB(skb)->sacked; 2231 2232 if (skb == tcp_send_head(sk)) 2233 break; 2234 /* we could do better than to assign each time */ 2235 if (hole == NULL) 2236 tp->retransmit_skb_hint = skb; 2237 2238 /* Assume this retransmit will generate 2239 * only one packet for congestion window 2240 * calculation purposes. This works because 2241 * tcp_retransmit_skb() will chop up the 2242 * packet to be MSS sized and all the 2243 * packet counting works out. 
2244 */ 2245 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2246 return; 2247 2248 if (fwd_rexmitting) { 2249 begin_fwd: 2250 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2251 break; 2252 mib_idx = LINUX_MIB_TCPFORWARDRETRANS; 2253 2254 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { 2255 tp->retransmit_high = last_lost; 2256 if (!tcp_can_forward_retransmit(sk)) 2257 break; 2258 /* Backtrack if necessary to non-L'ed skb */ 2259 if (hole != NULL) { 2260 skb = hole; 2261 hole = NULL; 2262 } 2263 fwd_rexmitting = 1; 2264 goto begin_fwd; 2265 2266 } else if (!(sacked & TCPCB_LOST)) { 2267 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 2268 hole = skb; 2269 continue; 2270 2271 } else { 2272 last_lost = TCP_SKB_CB(skb)->end_seq; 2273 if (icsk->icsk_ca_state != TCP_CA_Loss) 2274 mib_idx = LINUX_MIB_TCPFASTRETRANS; 2275 else 2276 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 2277 } 2278 2279 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 2280 continue; 2281 2282 if (tcp_retransmit_skb(sk, skb)) 2283 return; 2284 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2285 2286 if (skb == tcp_write_queue_head(sk)) 2287 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2288 inet_csk(sk)->icsk_rto, 2289 TCP_RTO_MAX); 2290 } 2291 } 2292 2293 /* Send a fin. The caller locks the socket for us. This cannot be 2294 * allowed to fail queueing a FIN frame under any circumstances. 2295 */ 2296 void tcp_send_fin(struct sock *sk) 2297 { 2298 struct tcp_sock *tp = tcp_sk(sk); 2299 struct sk_buff *skb = tcp_write_queue_tail(sk); 2300 int mss_now; 2301 2302 /* Optimization, tack on the FIN if we have a queue of 2303 * unsent frames. But be careful about outgoing SACKS 2304 * and IP options. 2305 */ 2306 mss_now = tcp_current_mss(sk); 2307 2308 if (tcp_send_head(sk) != NULL) { 2309 TCP_SKB_CB(skb)->flags |= TCPHDR_FIN; 2310 TCP_SKB_CB(skb)->end_seq++; 2311 tp->write_seq++; 2312 } else { 2313 /* Socket is locked, keep trying until memory is available. */ 2314 for (;;) { 2315 skb = alloc_skb_fclone(MAX_TCP_HEADER, 2316 sk->sk_allocation); 2317 if (skb) 2318 break; 2319 yield(); 2320 } 2321 2322 /* Reserve space for headers and prepare control bits. */ 2323 skb_reserve(skb, MAX_TCP_HEADER); 2324 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2325 tcp_init_nondata_skb(skb, tp->write_seq, 2326 TCPHDR_ACK | TCPHDR_FIN); 2327 tcp_queue_skb(sk, skb); 2328 } 2329 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 2330 } 2331 2332 /* We get here when a process closes a file descriptor (either due to 2333 * an explicit close() or as a byproduct of exit()'ing) and there 2334 * was unread data in the receive queue. This behavior is recommended 2335 * by RFC 2525, section 2.17. -DaveM 2336 */ 2337 void tcp_send_active_reset(struct sock *sk, gfp_t priority) 2338 { 2339 struct sk_buff *skb; 2340 2341 /* NOTE: No TCP options attached and we never retransmit this. */ 2342 skb = alloc_skb(MAX_TCP_HEADER, priority); 2343 if (!skb) { 2344 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2345 return; 2346 } 2347 2348 /* Reserve space for headers and prepare control bits. */ 2349 skb_reserve(skb, MAX_TCP_HEADER); 2350 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2351 TCPHDR_ACK | TCPHDR_RST); 2352 /* Send it off. 
/* We get here when a process closes a file descriptor (either due to
 * an explicit close() or as a byproduct of exit()'ing) and there
 * was unread data in the receive queue.  This behavior is recommended
 * by RFC 2525, section 2.17.  -DaveM
 */
void tcp_send_active_reset(struct sock *sk, gfp_t priority)
{
        struct sk_buff *skb;

        /* NOTE: No TCP options attached and we never retransmit this. */
        skb = alloc_skb(MAX_TCP_HEADER, priority);
        if (!skb) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
                return;
        }

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, MAX_TCP_HEADER);
        tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
                             TCPHDR_ACK | TCPHDR_RST);
        /* Send it off. */
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        if (tcp_transmit_skb(sk, skb, 0, priority))
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);

        TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
}

/* Send a crossed SYN-ACK during socket establishment.
 * WARNING: This routine must only be called when we have already sent
 * a SYN packet that crossed the incoming SYN that caused this routine
 * to get called.  If this assumption fails then the initial rcv_wnd
 * and rcv_wscale values will not be correct.
 */
int tcp_send_synack(struct sock *sk)
{
        struct sk_buff *skb;

        skb = tcp_write_queue_head(sk);
        if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
                printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
                return -EFAULT;
        }
        if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
                if (skb_cloned(skb)) {
                        struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
                        if (nskb == NULL)
                                return -ENOMEM;
                        tcp_unlink_write_queue(skb, sk);
                        skb_header_release(nskb);
                        __tcp_add_write_queue_head(sk, nskb);
                        sk_wmem_free_skb(sk, skb);
                        sk->sk_wmem_queued += nskb->truesize;
                        sk_mem_charge(sk, nskb->truesize);
                        skb = nskb;
                }

                TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
                TCP_ECN_send_synack(tcp_sk(sk), skb);
        }
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
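/* Illustrative sketch, not part of the original file: for the
 * crossed-SYN case, tcp_send_synack() upgrades the queued SYN in place
 * by OR-ing in the ACK control bit.  The flag values below match the
 * on-the-wire TCPHDR_SYN/TCPHDR_ACK bits but are redefined locally for
 * the sketch.
 */
#define SKETCH_HDR_SYN  0x02
#define SKETCH_HDR_ACK  0x10

static unsigned char sketch_upgrade_to_synack(unsigned char flags)
{
        return flags | SKETCH_HDR_ACK;  /* SYN stays set, ACK is added */
}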
/* Prepare a SYN-ACK. */
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
                                struct request_values *rvp)
{
        struct tcp_out_options opts;
        struct tcp_extend_values *xvp = tcp_xv(rvp);
        struct inet_request_sock *ireq = inet_rsk(req);
        struct tcp_sock *tp = tcp_sk(sk);
        const struct tcp_cookie_values *cvp = tp->cookie_values;
        struct tcphdr *th;
        struct sk_buff *skb;
        struct tcp_md5sig_key *md5;
        int tcp_header_size;
        int mss;
        int s_data_desired = 0;

        if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
                s_data_desired = cvp->s_data_desired;
        skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_TCP_HEADER);

        skb_dst_set(skb, dst_clone(dst));

        mss = dst_metric(dst, RTAX_ADVMSS);
        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
                mss = tp->rx_opt.user_mss;

        if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
                __u8 rcv_wscale;
                /* Set this up on the first call only */
                req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
                /* tcp_full_space because it is guaranteed to be the first packet */
                tcp_select_initial_window(tcp_full_space(sk),
                        mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                        &req->rcv_wnd,
                        &req->window_clamp,
                        ireq->wscale_ok,
                        &rcv_wscale,
                        dst_metric(dst, RTAX_INITRWND));
                ireq->rcv_wscale = rcv_wscale;
        }

        memset(&opts, 0, sizeof(opts));
#ifdef CONFIG_SYN_COOKIES
        if (unlikely(req->cookie_ts))
                TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
        else
#endif
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        tcp_header_size = tcp_synack_options(sk, req, mss,
                                             skb, &opts, &md5, xvp)
                        + sizeof(*th);

        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);

        th = tcp_hdr(skb);
        memset(th, 0, sizeof(struct tcphdr));
        th->syn = 1;
        th->ack = 1;
        TCP_ECN_make_synack(req, th);
        th->source = ireq->loc_port;
        th->dest = ireq->rmt_port;
        /* Setting of flags is superfluous here for callers (and ECE is
         * not even correctly set)
         */
        tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
                             TCPHDR_SYN | TCPHDR_ACK);

        if (OPTION_COOKIE_EXTENSION & opts.options) {
                if (s_data_desired) {
                        u8 *buf = skb_put(skb, s_data_desired);

                        /* copy data directly from the listening socket. */
                        memcpy(buf, cvp->s_data_payload, s_data_desired);
                        TCP_SKB_CB(skb)->end_seq += s_data_desired;
                }

                if (opts.hash_size > 0) {
                        __u32 workspace[SHA_WORKSPACE_WORDS];
                        u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
                        u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];

                        /* Secret recipe depends on the Timestamp, (future)
                         * Sequence and Acknowledgment Numbers, Initiator
                         * Cookie, and others handled by IP variant caller.
                         */
                        *tail-- ^= opts.tsval;
                        *tail-- ^= tcp_rsk(req)->rcv_isn + 1;
                        *tail-- ^= TCP_SKB_CB(skb)->seq + 1;

                        /* recommended */
                        *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
                        *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */

                        sha_transform((__u32 *)&xvp->cookie_bakery[0],
                                      (char *)mess,
                                      &workspace[0]);
                        opts.hash_location =
                                (__u8 *)&xvp->cookie_bakery[0];
                }
        }

        th->seq = htonl(TCP_SKB_CB(skb)->seq);
        th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);

        /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
        th->window = htons(min(req->rcv_wnd, 65535U));
        tcp_options_write((__be32 *)(th + 1), tp, &opts);
        th->doff = (tcp_header_size >> 2);
        TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));

#ifdef CONFIG_TCP_MD5SIG
        /* Okay, we have all we need - do the md5 hash if needed */
        if (md5) {
                tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
                                                         md5, NULL, req, skb);
        }
#endif

        return skb;
}
EXPORT_SYMBOL(tcp_make_synack);
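/* Illustrative sketch, not part of the original file: per RFC 1323 the
 * window field of a SYN or SYN/ACK is never scaled, so whatever window
 * was selected is clamped to the largest raw 16-bit value before being
 * written into the header, as tcp_make_synack() does above.
 */
static unsigned short sketch_syn_window_field(unsigned int rcv_wnd)
{
        return rcv_wnd > 65535 ? 65535 : (unsigned short)rcv_wnd;
}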
/* Do all connect socket setups that can be done AF independent. */
static void tcp_connect_init(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __u8 rcv_wscale;

        /* We'll fix this up when we get a response from the other end.
         * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
         */
        tp->tcp_header_len = sizeof(struct tcphdr) +
                (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);

#ifdef CONFIG_TCP_MD5SIG
        if (tp->af_specific->md5_lookup(sk, sk) != NULL)
                tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

        /* If the user gave us TCP_MAXSEG, record it to clamp */
        if (tp->rx_opt.user_mss)
                tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
        tp->max_window = 0;
        tcp_mtup_init(sk);
        tcp_sync_mss(sk, dst_mtu(dst));

        if (!tp->window_clamp)
                tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
        tp->advmss = dst_metric(dst, RTAX_ADVMSS);
        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
                tp->advmss = tp->rx_opt.user_mss;

        tcp_initialize_rcv_mss(sk);

        tcp_select_initial_window(tcp_full_space(sk),
                tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
                &tp->rcv_wnd,
                &tp->window_clamp,
                sysctl_tcp_window_scaling,
                &rcv_wscale,
                dst_metric(dst, RTAX_INITRWND));

        tp->rx_opt.rcv_wscale = rcv_wscale;
        tp->rcv_ssthresh = tp->rcv_wnd;

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);
        tp->snd_wnd = 0;
        tcp_init_wl(tp, 0);
        tp->snd_una = tp->write_seq;
        tp->snd_sml = tp->write_seq;
        tp->snd_up = tp->write_seq;
        tp->rcv_nxt = 0;
        tp->rcv_wup = 0;
        tp->copied_seq = 0;

        inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
        inet_csk(sk)->icsk_retransmits = 0;
        tcp_clear_retrans(tp);
}
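/* Illustrative sketch, not part of the original file: when timestamps
 * are in use every segment carries TCPOLEN_TSTAMP_ALIGNED (12) bytes
 * of options, so tcp_connect_init() above feeds the window chooser an
 * MSS reduced by that overhead rather than the raw advertised MSS.
 */
static int sketch_effective_mss(int advmss, int timestamps_on)
{
        return timestamps_on ? advmss - 12 : advmss; /* 12 == TCPOLEN_TSTAMP_ALIGNED */
}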
/* Build a SYN and send it off. */
int tcp_connect(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;

        tcp_connect_init(sk);

        buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
        if (unlikely(buff == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(buff, MAX_TCP_HEADER);

        tp->snd_nxt = tp->write_seq;
        tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
        TCP_ECN_send_syn(sk, buff);

        /* Send it off. */
        TCP_SKB_CB(buff)->when = tcp_time_stamp;
        tp->retrans_stamp = TCP_SKB_CB(buff)->when;
        skb_header_release(buff);
        __tcp_add_write_queue_tail(sk, buff);
        sk->sk_wmem_queued += buff->truesize;
        sk_mem_charge(sk, buff->truesize);
        tp->packets_out += tcp_skb_pcount(buff);
        tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);

        /* We change tp->snd_nxt after the tcp_transmit_skb() call
         * in order to make this packet get counted in tcpOutSegs.
         */
        tp->snd_nxt = tp->write_seq;
        tp->pushed_seq = tp->write_seq;
        TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);

        /* Timer for repeating the SYN until an answer. */
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
        return 0;
}
EXPORT_SYMBOL(tcp_connect);

/* Send out a delayed ack, the caller does the policy checking
 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
 * for details.
 */
void tcp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int ato = icsk->icsk_ack.ato;
        unsigned long timeout;

        if (ato > TCP_DELACK_MIN) {
                const struct tcp_sock *tp = tcp_sk(sk);
                int max_ato = HZ / 2;

                if (icsk->icsk_ack.pingpong ||
                    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
                        max_ato = TCP_DELACK_MAX;

                /* Slow path, intersegment interval is "high". */

                /* If some rtt estimate is known, use it to bound delayed ack.
                 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt
                 * measurements directly.
                 */
                if (tp->srtt) {
                        int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);

                        if (rtt < max_ato)
                                max_ato = rtt;
                }

                ato = min(ato, max_ato);
        }

        /* Stay within the limit we were given */
        timeout = jiffies + ato;

        /* Use new timeout only if there wasn't an older one earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /* If delack timer was blocked or is about to expire,
                 * send ACK now.
                 */
                if (icsk->icsk_ack.blocked ||
                    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
                        tcp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
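/* Illustrative sketch, not part of the original file: the timeout
 * policy of tcp_send_delayed_ack() above.  An armed timer is never
 * pushed later; if the pending expiry is within a quarter of the new
 * ato the ACK goes out immediately (returned here as "now"), otherwise
 * the earlier of the two expiries wins.  The wrap-safe compares mirror
 * time_before()/time_before_eq().
 */
static unsigned long sketch_ack_deadline(unsigned long now, unsigned long ato,
                                         unsigned long pending, int timer_armed)
{
        unsigned long timeout = now + ato;

        if (!timer_armed)
                return timeout;
        if ((long)(pending - (now + (ato >> 2))) <= 0)
                return now;                             /* about to fire: ACK now */
        return (long)(timeout - pending) < 0 ? timeout : pending;
}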
/* This routine sends an ack and also updates the window. */
void tcp_send_ack(struct sock *sk)
{
        struct sk_buff *buff;

        /* If we have been reset, we may not send again. */
        if (sk->sk_state == TCP_CLOSE)
                return;

        /* We are not putting this on the write queue, so
         * tcp_transmit_skb() will set the ownership to this
         * sock.
         */
        buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
        if (buff == NULL) {
                inet_csk_schedule_ack(sk);
                inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                          TCP_DELACK_MAX, TCP_RTO_MAX);
                return;
        }

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(buff, MAX_TCP_HEADER);
        tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);

        /* Send it off, this clears delayed acks for us. */
        TCP_SKB_CB(buff)->when = tcp_time_stamp;
        tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
}

/* This routine sends a packet with an out of date sequence
 * number.  It assumes the other end will try to ack it.
 *
 * Question: what should we do in urgent mode?
 * 4.4BSD forces sending single byte of data.  We cannot send
 * out of window data, because we have SND.NXT==SND.MAX...
 *
 * Current solution: to send TWO zero-length segments in urgent mode:
 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
 * out-of-date with SND.UNA-1 to probe window.
 */
static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;

        /* We don't queue it, tcp_transmit_skb() sets ownership. */
        skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
        if (skb == NULL)
                return -1;

        /* Reserve space for headers and set control bits. */
        skb_reserve(skb, MAX_TCP_HEADER);
        /* Use a previous sequence.  This should cause the other
         * end to send an ack.  Don't queue or clone SKB, just
         * send it.
         */
        tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
        TCP_SKB_CB(skb)->when = tcp_time_stamp;
        return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}

/* Initiate keepalive or window probe from timer. */
int tcp_write_wakeup(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;

        if (sk->sk_state == TCP_CLOSE)
                return -1;

        if ((skb = tcp_send_head(sk)) != NULL &&
            before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
                int err;
                unsigned int mss = tcp_current_mss(sk);
                unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

                if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
                        tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;

                /* We are probing the opening of a window
                 * but the window size is != 0;
                 * this must have been the result of sender-side SWS avoidance.
                 */
                if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
                    skb->len > mss) {
                        seg_size = min(seg_size, mss);
                        TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
                        if (tcp_fragment(sk, skb, seg_size, mss))
                                return -1;
                } else if (!tcp_skb_pcount(skb))
                        tcp_set_skb_tso_segs(sk, skb, mss);

                TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
                TCP_SKB_CB(skb)->when = tcp_time_stamp;
                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
                if (!err)
                        tcp_event_new_data_sent(sk, skb);
                return err;
        } else {
                if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
                        tcp_xmit_probe_skb(sk, 1);
                return tcp_xmit_probe_skb(sk, 0);
        }
}

/* A window probe timeout has occurred.  If the window is not closed,
 * send a partial packet, else send a zero-window probe.
 */
void tcp_send_probe0(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int err;

        err = tcp_write_wakeup(sk);

        if (tp->packets_out || !tcp_send_head(sk)) {
                /* Cancel probe timer, if it is not required. */
                icsk->icsk_probes_out = 0;
                icsk->icsk_backoff = 0;
                return;
        }

        if (err <= 0) {
                if (icsk->icsk_backoff < sysctl_tcp_retries2)
                        icsk->icsk_backoff++;
                icsk->icsk_probes_out++;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
                                          TCP_RTO_MAX);
        } else {
                /* If packet was not sent due to local congestion,
                 * do not backoff and do not remember icsk_probes_out.
                 * Let local senders fight for local resources.
                 *
                 * But use the accumulated backoff.
                 */
                if (!icsk->icsk_probes_out)
                        icsk->icsk_probes_out = 1;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          min(icsk->icsk_rto << icsk->icsk_backoff,
                                              TCP_RESOURCE_PROBE_INTERVAL),
                                          TCP_RTO_MAX);
        }
}
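/* Illustrative sketch, not part of the original file: the zero-window
 * probe timer in tcp_send_probe0() backs off exponentially, doubling
 * the RTO per unanswered probe and saturating at a ceiling
 * (TCP_RTO_MAX in the code above).
 */
static unsigned long sketch_probe0_timeout(unsigned long rto,
                                           unsigned int backoff,
                                           unsigned long max_rto)
{
        unsigned long when = rto << backoff;

        return when < max_rto ? when : max_rto;
}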