/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);


/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	/* Don't override Nagle indefinitely with F-RTO */
	if (tp->frto_counter == 2)
		tp->frto_counter = 3;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of the cwnd validation mechanism. */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible.
	 */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to a value enough for senders starting with
	 * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
	 * a limit on the initial window when mss is larger than 1460.
	 */
	if (mss > (1 << *rcv_wscale)) {
		int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
		if (mss > 1460)
			init_cwnd =
			max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
		/* when initializing use the value from init_rcv_wnd
		 * rather than the default from above
		 */
		if (init_rcv_wnd)
			*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
		else
			*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN.
 */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline int tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_COOKIE_EXTENSION	(1 << 4)

struct tcp_out_options {
	u8 options;		/* bit field of OPTION_* */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	u16 mss;		/* 0 to disable */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	__u8 *hash_location;	/* temporary pointer, overloaded */
};

/* The sysctl int routines are generic, so check consistency here.
 */
static u8 tcp_cookie_size_check(u8 desired)
{
	int cookie_size;

	if (desired > 0)
		/* previously specified */
		return desired;

	cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
	if (cookie_size <= 0)
		/* no default specified */
		return 0;

	if (cookie_size <= TCP_COOKIE_MIN)
		/* value too small, specify minimum */
		return TCP_COOKIE_MIN;

	if (cookie_size >= TCP_COOKIE_MAX)
		/* value too large, specify maximum */
		return TCP_COOKIE_MAX;

	if (cookie_size & 1)
		/* 8-bit multiple, illegal, fix it */
		cookie_size++;

	return (u8)cookie_size;
}

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options, we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u8 options = opts->options;	/* mungable copy */

	/* Having both authentication and cookies for security is redundant,
	 * and there's certainly not enough room.  Instead, the cookie-less
	 * extension variant is proposed.
	 *
	 * Consider the pessimal case with authentication.  The options
	 * could look like:
	 *   COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40
	 */
	if (unlikely(OPTION_MD5 & options)) {
		if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
			*ptr++ = htonl((TCPOPT_COOKIE << 24) |
				       (TCPOLEN_COOKIE_BASE << 16) |
				       (TCPOPT_MD5SIG << 8) |
				       TCPOLEN_MD5SIG);
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_MD5SIG << 8) |
				       TCPOLEN_MD5SIG);
		}
		options &= ~OPTION_COOKIE_EXTENSION;
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	/* The specification requires the cookie to follow the timestamp,
	 * so do it now.
	 *
	 * Consider the pessimal case without authentication.  The options
	 * could look like:
	 *   MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40
	 */
	if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
		__u8 *cookie_copy = opts->hash_location;
		u8 cookie_size = opts->hash_size;

		/* 8-bit multiple handled in tcp_cookie_size_check() above,
		 * and elsewhere.
		 */
		if (0x2 & cookie_size) {
			__u8 *p = (__u8 *)ptr;

			/* 16-bit multiple */
			*p++ = TCPOPT_COOKIE;
			*p++ = TCPOLEN_COOKIE_BASE + cookie_size;
			*p++ = *cookie_copy++;
			*p++ = *cookie_copy++;
			ptr++;
			cookie_size -= 2;
		} else {
			/* 32-bit multiple */
			*ptr++ = htonl(((TCPOPT_NOP << 24) |
					(TCPOPT_NOP << 16) |
					(TCPOPT_COOKIE << 8) |
					TCPOLEN_COOKIE_BASE) +
				       cookie_size);
		}

		if (cookie_size > 0) {
			memcpy(ptr, cookie_copy, cookie_size);
			ptr += (cookie_size / 4);
		}
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}
}

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5) {
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_cookie_values *cvp = tp->cookie_values;
	unsigned remaining = MAX_TCP_OPTION_SPACE;
	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
			 tcp_cookie_size_check(cvp->cookie_desired) :
			 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets, should timestamps be used, must be included in
	 * the MSS advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out. */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	/* Note that timestamps are required by the specification.
	 *
	 * Odd numbers of bytes are prohibited by the specification, ensuring
	 * that the cookie is 16-bit aligned, and the resulting cookie pair is
	 * 32-bit aligned.
	 */
	if (*md5 == NULL &&
	    (OPTION_TS & opts->options) &&
	    cookie_size > 0) {
		int need = TCPOLEN_COOKIE_BASE + cookie_size;

		if (0x2 & need) {
			/* 32-bit multiple */
			need += 2; /* NOPs */

			if (need > remaining) {
				/* try shrinking cookie to fit */
				cookie_size -= 2;
				need -= 4;
			}
		}
		while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
			cookie_size -= 4;
			need -= 4;
		}
		if (TCP_COOKIE_MIN <= cookie_size) {
			opts->options |= OPTION_COOKIE_EXTENSION;
			opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
			opts->hash_size = cookie_size;

			/* Remember for future incarnations. */
			cvp->cookie_desired = cookie_size;

			if (cvp->cookie_desired != cvp->cookie_pair_size) {
				/* Currently use random bytes as a nonce,
				 * assuming these are completely unpredictable
				 * by hostile users of the same system.
				 */
				get_random_bytes(&cvp->cookie_pair[0],
						 cookie_size);
				cvp->cookie_pair_size = cookie_size;
			}

			remaining -= need;
		}
	}
	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Set up TCP options for SYN-ACKs. */
static unsigned tcp_synack_options(struct sock *sk,
				   struct request_sock *req,
				   unsigned mss, struct sk_buff *skb,
				   struct tcp_out_options *opts,
				   struct tcp_md5sig_key **md5,
				   struct tcp_extend_values *xvp)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned remaining = MAX_TCP_OPTION_SPACE;
	u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
			 xvp->cookie_plus :
			 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#else
	*md5 = NULL;
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	/* Similar rationale to tcp_syn_options() applies here, too.
	 * If the <SYN> options fit, the same options should fit now!
	 */
	if (*md5 == NULL &&
	    ireq->tstamp_ok &&
	    cookie_plus > TCPOLEN_COOKIE_BASE) {
		int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */

		if (0x2 & need) {
			/* 32-bit multiple */
			need += 2; /* NOPs */
		}
		if (need <= remaining) {
			opts->options |= OPTION_COOKIE_EXTENSION;
			opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
			remaining -= need;
		} else {
			/* There's no error return, so flag it.
			 */
			xvp->cookie_out_never = 1; /* true */
			opts->hash_size = 0;
		}
	}
	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5) {
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}

/* This routine actually transmits TCP packets queued by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless. It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0) {
		tcp_ca_event(sk, CA_EVENT_TX_START);
		skb->ooo_okay = 1;
	} else
		skb->ooo_okay = 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it.
	 */
	th = tcp_hdr(skb);
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->flags);

	if (unlikely(tcb->flags & TCPHDR_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a window probe below snd_una */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	if (likely((tcb->flags & TCPHDR_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	err = icsk->icsk_af_ops->queue_xmit(skb);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}

/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk) ||
	    skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}
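
/* Worked example for tcp_set_skb_tso_segs() above (illustrative numbers
 * only, not taken from any particular trace): a 4380-byte payload with
 * mss_now == 1460 gets gso_segs = DIV_ROUND_UP(4380, 1460) = 3 and
 * gso_size = 1460, so the device emits three full-sized segments; a
 * 1000-byte payload takes the cheap branch and is accounted as a single
 * segment with gso_size = 0.
 */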

/* When a modification to fackets_out becomes necessary, we need to check
 * whether skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix counters
 */
static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}

/* Function to create two new TCP segments. Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list. This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u8 flags;

	BUG_ON(len > skb->len);

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the 'when' of skbs
	 * which it has never sent before.
	 * --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb_reset_tail_pointer(skb);
	skb->data_len -= len;
	skb->len = skb->data_len;
}

/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
	if (unlikely(len < skb_headlen(skb)))
		__skb_pull(skb, len);
	else
		__pskb_trim_head(skb, len - skb_headlen(skb));

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize	     -= len;
	sk->sk_wmem_queued   -= len;
	sk_mem_uncharge(sk, len);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));

	return 0;
}
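
/* Worked example for tcp_mtu_to_mss() below (a typical IPv4 case, purely
 * illustrative): with a path MTU of 1500, no extension headers and no
 * extra option bytes accounted in tp->tcp_header_len, the result is
 * 1500 - 20 (IP) - 20 (TCP) = 1460.  If timestamps were negotiated,
 * tp->tcp_header_len is 12 bytes larger and the effective value drops
 * to 1448.
 */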

/* Calculate MSS. Not accounting for SACKs here.
 */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	   It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}

/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}

/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}
EXPORT_SYMBOL(tcp_mtup_init);

/* This function synchronizes snd mss to current pmtu/exthdr set.

   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
   for TCP options, but includes only bare TCP header.

   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
   It is minimum of user_mss and mss received with SYN.
   It also does not include TCP options.

   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.

   tp->mss_cache is current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
   are READ ONLY outside this function.		--ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);
	mss_now = tcp_bound_to_half_wnd(tp, mss_now);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}
EXPORT_SYMBOL(tcp_sync_mss);
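
/* Usage sketch for tcp_sync_mss() above (illustrative, assuming IPv4 with
 * no IP options and timestamps negotiated): tcp_sync_mss(sk, 1500) caches
 * icsk_pmtu_cookie = 1500 and sets tp->mss_cache to 1448 via
 * tcp_mtu_to_mss(), possibly lowered further by tcp_bound_to_half_wnd()
 * or, when MTU probing is enabled, by the current search_low.
 */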

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
unsigned int tcp_current_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	unsigned header_len;
	struct tcp_out_options opts;
	struct tcp_md5sig_key *md5;

	mss_now = tp->mss_cache;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
		     sizeof(struct tcphdr);
	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
	 * some common options. If this is an odd packet (because we have SACK
	 * blocks etc) then our calculated header_len will be different, and
	 * we have to adjust mss_now correspondingly */
	if (header_len != tp->tcp_header_len) {
		int delta = (int) header_len - tp->tcp_header_len;
		mss_now -= delta;
	}

	return mss_now;
}

/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

/* Returns the portion of skb which can be sent right away without
 * introducing MSS oddities to segment boundaries. In rare cases where
 * mss_now != mss_cache, we will request caller to create a small skb
 * per input skb which could be mostly avoided here (if desired).
 *
 * We explicitly want to create a request for splitting write queue tail
 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
 * thus all the complexity (cwnd_len is always MSS multiple which we
 * return whenever allowed by the other factors). Basically we need the
 * modulo only when the receiver window alone is the limiting factor or
 * when we would be allowed to send the split-due-to-Nagle skb fully.
 */
static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
					unsigned int mss_now, unsigned int cwnd)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 needed, window, cwnd_len;

	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	cwnd_len = mss_now * cwnd;

	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
		return cwnd_len;

	needed = min(skb->len, window);

	if (cwnd_len <= needed)
		return cwnd_len;

	return needed - needed % mss_now;
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
					 struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN.
	 */
	if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* Initialize TSO state of a skb.
 * This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
			     unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

/* Minshall's variant of the Nagle send check. */
static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0, if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return skb->len < mss_now &&
		((nonagle & TCP_NAGLE_CORK) ||
		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames, which sit in the middle of the
	 * write_queue (they have no chance to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN).
	 * Nagle can be ignored during F-RTO too (see RFC4138).
	 */
	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
				   unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tcp_wnd_end(tp));
}
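
/* Illustration of the Nagle/Minshall check above (example numbers only):
 * with cur_mss == 1460, a 500-byte segment at the tail of the write queue
 * is held back by tcp_nagle_test() while an earlier sub-MSS segment is
 * still unacknowledged (tp->snd_sml beyond tp->snd_una), unless TCP_NODELAY
 * or an explicit push (TCP_NAGLE_PUSH) forces it out.  tcp_snd_test() below
 * combines this check with the congestion and send window tests.
 */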

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

/* Test if sending is allowed right now. */
int tcp_may_send_now(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);

	return skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk),
			     (tcp_skb_is_last(sk, skb) ?
			      tp->nonagle : TCP_NAGLE_PUSH));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
			unsigned int mss_now, gfp_t gfp)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u8 flags;

	/* All of a TSO frame must be composed of paged data.  */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_skb(sk, 0, gfp);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;
	int win_divisor;

	if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
		goto send_now;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		goto send_now;

	/* Defer for less than two clock ticks.
	 */
	if (tp->tso_deferred &&
	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
		goto send_now;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));

	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

	/* From in_flight test above, we know that cwnd > in_flight.  */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	/* If a full-sized TSO skb can be sent, do it. */
	if (limit >= sk->sk_gso_max_size)
		goto send_now;

	/* Middle in queue won't get any more data, full sendable already? */
	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
		goto send_now;

	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
	if (win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= win_divisor;
		if (limit >= chunk)
			goto send_now;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK.  Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_burst(tp) * tp->mss_cache)
			goto send_now;
	}

	/* Ok, it looks like it is advisable to defer.  */
	tp->tso_deferred = 1 | (jiffies << 1);

	return 1;

send_now:
	tp->tso_deferred = 0;
	return 0;
}

/* Create a new MTU probe if we are ready.
 * MTU probing regularly attempts to increase the path MTU by
 * deliberately sending larger packets.  This discovers routing
 * changes resulting in larger path MTUs.
 *
 * Returns 0 if we should wait to probe (no cwnd available),
 *         1 if a probe was sent,
 *         -1 otherwise
 */
static int tcp_mtu_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb, *nskb, *next;
	int len;
	int probe_size;
	int size_needed;
	int copy;
	int mss_now;

	/* Not currently probing/verifying,
	 * not in recovery,
	 * have enough cwnd, and
	 * not SACKing (the variable headers throw things off) */
	if (!icsk->icsk_mtup.enabled ||
	    icsk->icsk_mtup.probe_size ||
	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
	    tp->snd_cwnd < 11 ||
	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
		return -1;

	/* Very simple search strategy: just double the MSS. */
	mss_now = tcp_current_mss(sk);
	probe_size = 2 * tp->mss_cache;
	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
		/* TODO: set timer for probe_converge_event */
		return -1;
	}

	/* Have enough data in the send queue to probe? */
	if (tp->write_seq - tp->snd_nxt < size_needed)
		return -1;

	if (tp->snd_wnd < size_needed)
		return -1;
	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
		return 0;

	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
		if (!tcp_packets_in_flight(tp))
			return -1;
		else
			return 0;
	}

	/* We're allowed to probe.  Build it now.
	 */
	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
		return -1;
	sk->sk_wmem_queued += nskb->truesize;
	sk_mem_charge(sk, nskb->truesize);

	skb = tcp_send_head(sk);

	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
	TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
	TCP_SKB_CB(nskb)->sacked = 0;
	nskb->csum = 0;
	nskb->ip_summed = skb->ip_summed;

	tcp_insert_write_queue_before(nskb, skb, sk);

	len = 0;
	tcp_for_write_queue_from_safe(skb, next, sk) {
		copy = min_t(int, skb->len, probe_size - len);
		if (nskb->ip_summed)
			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
		else
			nskb->csum = skb_copy_and_csum_bits(skb, 0,
							    skb_put(nskb, copy),
							    copy, nskb->csum);

		if (skb->len <= copy) {
			/* We've eaten all the data from this skb.
			 * Throw it away. */
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
			tcp_unlink_write_queue(skb, sk);
			sk_wmem_free_skb(sk, skb);
		} else {
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
						   ~(TCPHDR_FIN|TCPHDR_PSH);
			if (!skb_shinfo(skb)->nr_frags) {
				skb_pull(skb, copy);
				if (skb->ip_summed != CHECKSUM_PARTIAL)
					skb->csum = csum_partial(skb->data,
								 skb->len, 0);
			} else {
				__pskb_trim_head(skb, copy);
				tcp_set_skb_tso_segs(sk, skb, mss_now);
			}
			TCP_SKB_CB(skb)->seq += copy;
		}

		len += copy;

		if (len >= probe_size)
			break;
	}
	tcp_init_tso_segs(sk, nskb, nskb->len);

	/* We're ready to send.  If this fails, the probe will
	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
		/* Decrement cwnd here because we are sending
		 * effectively two packets. */
		tp->snd_cwnd--;
		tcp_event_new_data_sent(sk, nskb);

		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;

		return 1;
	}

	return -1;
}

/* This routine writes packets to the network.  It advances the
 * send_head.  This happens as incoming acks open up the remote
 * window for us.
 *
 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
 * account rare use of URG, this is not a big flaw.
 *
 * Returns 1, if no segments are in flight and we have queued segments, but
 * cannot send anything now because of SWS or another problem.
 */
static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			  int push_one, gfp_t gfp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;
	int result;

	sent_pkts = 0;

	if (!push_one) {
		/* Do MTU probing.
		 */
		result = tcp_mtu_probe(sk);
		if (!result) {
			return 0;
		} else if (result > 0) {
			sent_pkts = 1;
		}
	}

	while ((skb = tcp_send_head(sk))) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota)
			break;

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (!push_one && tcp_tso_should_defer(sk, skb))
				break;
		}

		limit = mss_now;
		if (tso_segs > 1 && !tcp_urg_mode(tp))
			limit = tcp_mss_split_point(sk, skb, mss_now,
						    cwnd_quota);

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
			break;

		/* Advance the send_head.  This one is sent out.
		 * This call will increment packets_out.
		 */
		tcp_event_new_data_sent(sk, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts++;

		if (push_one)
			break;
	}

	if (likely(sent_pkts)) {
		tcp_cwnd_validate(sk);
		return 0;
	}
	return !tp->packets_out && tcp_send_head(sk);
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle)
{
	/* If we are closed, the bytes will have to remain here.
	 * In time closedown will finish, we empty the write queue and
	 * all will be happy.
	 */
	if (unlikely(sk->sk_state == TCP_CLOSE))
		return;

	if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
		tcp_check_probe_timer(sk);
}

/* Send _single_ skb sitting at the send head. This function requires
 * true push pending frames to setup probe timer etc.
 */
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
	struct sk_buff *skb = tcp_send_head(sk);

	BUG_ON(!skb || skb->len < mss_now);

	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
}

/* This function returns the amount that we can raise the
 * usable window based on the following constraints
 *
 * 1. The window can never be shrunk once it is offered (RFC 793)
 * 2. We limit memory per socket
 *
 * RFC 1122:
 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
 *  RECV.NEXT + RCV.WIN fixed until:
 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
 *
 * i.e. don't raise the right edge of the window until you can raise
 * it at least MSS bytes.
 *
 * Unfortunately, the recommended algorithm breaks header prediction,
 * since header prediction assumes th->window stays fixed.
 *
 * Strictly speaking, keeping th->window fixed violates the receiver
 * side SWS prevention criteria. The problem is that under this rule
 * a stream of single byte packets will cause the right side of the
 * window to always advance by a single byte.
 *
 * Of course, if the sender implements sender side SWS prevention
 * then this will not be a problem.
1865 * 1866 * BSD seems to make the following compromise: 1867 * 1868 * If the free space is less than the 1/4 of the maximum 1869 * space available and the free space is less than 1/2 mss, 1870 * then set the window to 0. 1871 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 1872 * Otherwise, just prevent the window from shrinking 1873 * and from being larger than the largest representable value. 1874 * 1875 * This prevents incremental opening of the window in the regime 1876 * where TCP is limited by the speed of the reader side taking 1877 * data out of the TCP receive queue. It does nothing about 1878 * those cases where the window is constrained on the sender side 1879 * because the pipeline is full. 1880 * 1881 * BSD also seems to "accidentally" limit itself to windows that are a 1882 * multiple of MSS, at least until the free space gets quite small. 1883 * This would appear to be a side effect of the mbuf implementation. 1884 * Combining these two algorithms results in the observed behavior 1885 * of having a fixed window size at almost all times. 1886 * 1887 * Below we obtain similar behavior by forcing the offered window to 1888 * a multiple of the mss when it is feasible to do so. 1889 * 1890 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 1891 * Regular options like TIMESTAMP are taken into account. 1892 */ 1893 u32 __tcp_select_window(struct sock *sk) 1894 { 1895 struct inet_connection_sock *icsk = inet_csk(sk); 1896 struct tcp_sock *tp = tcp_sk(sk); 1897 /* MSS for the peer's data. Previous versions used mss_clamp 1898 * here. I don't know if the value based on our guesses 1899 * of peer's MSS is better for the performance. It's more correct 1900 * but may be worse for the performance because of rcv_mss 1901 * fluctuations. --SAW 1998/11/1 1902 */ 1903 int mss = icsk->icsk_ack.rcv_mss; 1904 int free_space = tcp_space(sk); 1905 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); 1906 int window; 1907 1908 if (mss > full_space) 1909 mss = full_space; 1910 1911 if (free_space < (full_space >> 1)) { 1912 icsk->icsk_ack.quick = 0; 1913 1914 if (tcp_memory_pressure) 1915 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 1916 4U * tp->advmss); 1917 1918 if (free_space < mss) 1919 return 0; 1920 } 1921 1922 if (free_space > tp->rcv_ssthresh) 1923 free_space = tp->rcv_ssthresh; 1924 1925 /* Don't do rounding if we are using window scaling, since the 1926 * scaled window will not line up with the MSS boundary anyway. 1927 */ 1928 window = tp->rcv_wnd; 1929 if (tp->rx_opt.rcv_wscale) { 1930 window = free_space; 1931 1932 /* Advertise enough space so that it won't get scaled away. 1933 * Important case: prevent zero window announcement if 1934 * 1<<rcv_wscale > mss. 1935 */ 1936 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) 1937 window = (((window >> tp->rx_opt.rcv_wscale) + 1) 1938 << tp->rx_opt.rcv_wscale); 1939 } else { 1940 /* Get the largest window that is a nice multiple of mss. 1941 * Window clamp already applied above. 1942 * If our current window offering is within 1 mss of the 1943 * free space we just keep it. This prevents the divide 1944 * and multiply from happening most of the time. 1945 * We also don't do any window rounding when the free space 1946 * is too small.
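 * For example, with mss = 1460 and free_space = 5000, a stale window of 3000 is rounded to 4380 (3 * mss), while a window of 4400 is kept as is because it is already within one mss of the free space.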
1947 */ 1948 if (window <= free_space - mss || window > free_space) 1949 window = (free_space / mss) * mss; 1950 else if (mss == full_space && 1951 free_space > window + (full_space >> 1)) 1952 window = free_space; 1953 } 1954 1955 return window; 1956 } 1957 1958 /* Collapses two adjacent SKB's during retransmission. */ 1959 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 1960 { 1961 struct tcp_sock *tp = tcp_sk(sk); 1962 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 1963 int skb_size, next_skb_size; 1964 1965 skb_size = skb->len; 1966 next_skb_size = next_skb->len; 1967 1968 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 1969 1970 tcp_highest_sack_combine(sk, next_skb, skb); 1971 1972 tcp_unlink_write_queue(next_skb, sk); 1973 1974 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), 1975 next_skb_size); 1976 1977 if (next_skb->ip_summed == CHECKSUM_PARTIAL) 1978 skb->ip_summed = CHECKSUM_PARTIAL; 1979 1980 if (skb->ip_summed != CHECKSUM_PARTIAL) 1981 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 1982 1983 /* Update sequence range on original skb. */ 1984 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 1985 1986 /* Merge over control information. This moves PSH/FIN etc. over */ 1987 TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags; 1988 1989 /* All done, get rid of second SKB and account for it so 1990 * packet counting does not break. 1991 */ 1992 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; 1993 1994 /* changed transmit queue under us so clear hints */ 1995 tcp_clear_retrans_hints_partial(tp); 1996 if (next_skb == tp->retransmit_skb_hint) 1997 tp->retransmit_skb_hint = skb; 1998 1999 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2000 2001 sk_wmem_free_skb(sk, next_skb); 2002 } 2003 2004 /* Check if coalescing SKBs is legal. */ 2005 static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb) 2006 { 2007 if (tcp_skb_pcount(skb) > 1) 2008 return 0; 2009 /* TODO: SACK collapsing could be used to remove this condition */ 2010 if (skb_shinfo(skb)->nr_frags != 0) 2011 return 0; 2012 if (skb_cloned(skb)) 2013 return 0; 2014 if (skb == tcp_send_head(sk)) 2015 return 0; 2016 /* Some heurestics for collapsing over SACK'd could be invented */ 2017 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 2018 return 0; 2019 2020 return 1; 2021 } 2022 2023 /* Collapse packets in the retransmit queue to make to create 2024 * less packets on the wire. This is only done on retransmission. 2025 */ 2026 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, 2027 int space) 2028 { 2029 struct tcp_sock *tp = tcp_sk(sk); 2030 struct sk_buff *skb = to, *tmp; 2031 int first = 1; 2032 2033 if (!sysctl_tcp_retrans_collapse) 2034 return; 2035 if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN) 2036 return; 2037 2038 tcp_for_write_queue_from_safe(skb, tmp, sk) { 2039 if (!tcp_can_collapse(sk, skb)) 2040 break; 2041 2042 space -= skb->len; 2043 2044 if (first) { 2045 first = 0; 2046 continue; 2047 } 2048 2049 if (space < 0) 2050 break; 2051 /* Punt if not enough space exists in the first SKB for 2052 * the data in the second 2053 */ 2054 if (skb->len > skb_tailroom(to)) 2055 break; 2056 2057 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 2058 break; 2059 2060 tcp_collapse_retrans(sk, to); 2061 } 2062 } 2063 2064 /* This retransmits one SKB. Policy decisions and retransmit queue 2065 * state updates are done by the caller. 
Returns non-zero if an 2066 * error occurred which prevented the send. 2067 */ 2068 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 2069 { 2070 struct tcp_sock *tp = tcp_sk(sk); 2071 struct inet_connection_sock *icsk = inet_csk(sk); 2072 unsigned int cur_mss; 2073 int err; 2074 2075 /* Inconclusive MTU probe */ 2076 if (icsk->icsk_mtup.probe_size) { 2077 icsk->icsk_mtup.probe_size = 0; 2078 } 2079 2080 /* Do not send more than we queued. 1/4 is reserved for possible 2081 * copying overhead: fragmentation, tunneling, mangling etc. 2082 */ 2083 if (atomic_read(&sk->sk_wmem_alloc) > 2084 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 2085 return -EAGAIN; 2086 2087 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 2088 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 2089 BUG(); 2090 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 2091 return -ENOMEM; 2092 } 2093 2094 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 2095 return -EHOSTUNREACH; /* Routing failure or similar. */ 2096 2097 cur_mss = tcp_current_mss(sk); 2098 2099 /* If receiver has shrunk his window, and skb is out of 2100 * new window, do not retransmit it. The exception is the 2101 * case, when window is shrunk to zero. In this case 2102 * our retransmit serves as a zero window probe. 2103 */ 2104 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && 2105 TCP_SKB_CB(skb)->seq != tp->snd_una) 2106 return -EAGAIN; 2107 2108 if (skb->len > cur_mss) { 2109 if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 2110 return -ENOMEM; /* We'll try again later. */ 2111 } else { 2112 int oldpcount = tcp_skb_pcount(skb); 2113 2114 if (unlikely(oldpcount > 1)) { 2115 tcp_init_tso_segs(sk, skb, cur_mss); 2116 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); 2117 } 2118 } 2119 2120 tcp_retrans_try_collapse(sk, skb, cur_mss); 2121 2122 /* Some Solaris stacks overoptimize and ignore the FIN on a 2123 * retransmit when old data is attached. So strip it off 2124 * since it is cheap to do so and saves bytes on the network. 2125 */ 2126 if (skb->len > 0 && 2127 (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && 2128 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 2129 if (!pskb_trim(skb, 0)) { 2130 /* Reuse, even though it does some unnecessary work */ 2131 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1, 2132 TCP_SKB_CB(skb)->flags); 2133 skb->ip_summed = CHECKSUM_NONE; 2134 } 2135 } 2136 2137 /* Make a copy, if the first transmission SKB clone we made 2138 * is still in somebody's hands, else make a clone. 2139 */ 2140 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2141 2142 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2143 2144 if (err == 0) { 2145 /* Update global TCP statistics. */ 2146 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 2147 2148 tp->total_retrans++; 2149 2150 #if FASTRETRANS_DEBUG > 0 2151 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2152 if (net_ratelimit()) 2153 printk(KERN_DEBUG "retrans_out leaked.\n"); 2154 } 2155 #endif 2156 if (!tp->retrans_out) 2157 tp->lost_retrans_low = tp->snd_nxt; 2158 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 2159 tp->retrans_out += tcp_skb_pcount(skb); 2160 2161 /* Save stamp of the first retransmit. */ 2162 if (!tp->retrans_stamp) 2163 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 2164 2165 tp->undo_retrans += tcp_skb_pcount(skb); 2166 2167 /* snd_nxt is stored to detect loss of retransmitted segment, 2168 * see tcp_input.c tcp_sacktag_write_queue().
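 * (ack_seq is not otherwise used for skbs sitting on the write queue, so it is free to carry this snapshot of snd_nxt.)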
2169 */ 2170 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 2171 } 2172 return err; 2173 } 2174 2175 /* Check if we forward retransmits are possible in the current 2176 * window/congestion state. 2177 */ 2178 static int tcp_can_forward_retransmit(struct sock *sk) 2179 { 2180 const struct inet_connection_sock *icsk = inet_csk(sk); 2181 struct tcp_sock *tp = tcp_sk(sk); 2182 2183 /* Forward retransmissions are possible only during Recovery. */ 2184 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2185 return 0; 2186 2187 /* No forward retransmissions in Reno are possible. */ 2188 if (tcp_is_reno(tp)) 2189 return 0; 2190 2191 /* Yeah, we have to make difficult choice between forward transmission 2192 * and retransmission... Both ways have their merits... 2193 * 2194 * For now we do not retransmit anything, while we have some new 2195 * segments to send. In the other cases, follow rule 3 for 2196 * NextSeg() specified in RFC3517. 2197 */ 2198 2199 if (tcp_may_send_now(sk)) 2200 return 0; 2201 2202 return 1; 2203 } 2204 2205 /* This gets called after a retransmit timeout, and the initially 2206 * retransmitted data is acknowledged. It tries to continue 2207 * resending the rest of the retransmit queue, until either 2208 * we've sent it all or the congestion window limit is reached. 2209 * If doing SACK, the first ACK which comes back for a timeout 2210 * based retransmit packet might feed us FACK information again. 2211 * If so, we use it to avoid unnecessarily retransmissions. 2212 */ 2213 void tcp_xmit_retransmit_queue(struct sock *sk) 2214 { 2215 const struct inet_connection_sock *icsk = inet_csk(sk); 2216 struct tcp_sock *tp = tcp_sk(sk); 2217 struct sk_buff *skb; 2218 struct sk_buff *hole = NULL; 2219 u32 last_lost; 2220 int mib_idx; 2221 int fwd_rexmitting = 0; 2222 2223 if (!tp->packets_out) 2224 return; 2225 2226 if (!tp->lost_out) 2227 tp->retransmit_high = tp->snd_una; 2228 2229 if (tp->retransmit_skb_hint) { 2230 skb = tp->retransmit_skb_hint; 2231 last_lost = TCP_SKB_CB(skb)->end_seq; 2232 if (after(last_lost, tp->retransmit_high)) 2233 last_lost = tp->retransmit_high; 2234 } else { 2235 skb = tcp_write_queue_head(sk); 2236 last_lost = tp->snd_una; 2237 } 2238 2239 tcp_for_write_queue_from(skb, sk) { 2240 __u8 sacked = TCP_SKB_CB(skb)->sacked; 2241 2242 if (skb == tcp_send_head(sk)) 2243 break; 2244 /* we could do better than to assign each time */ 2245 if (hole == NULL) 2246 tp->retransmit_skb_hint = skb; 2247 2248 /* Assume this retransmit will generate 2249 * only one packet for congestion window 2250 * calculation purposes. This works because 2251 * tcp_retransmit_skb() will chop up the 2252 * packet to be MSS sized and all the 2253 * packet counting works out. 
2254 */ 2255 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2256 return; 2257 2258 if (fwd_rexmitting) { 2259 begin_fwd: 2260 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2261 break; 2262 mib_idx = LINUX_MIB_TCPFORWARDRETRANS; 2263 2264 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { 2265 tp->retransmit_high = last_lost; 2266 if (!tcp_can_forward_retransmit(sk)) 2267 break; 2268 /* Backtrack if necessary to non-L'ed skb */ 2269 if (hole != NULL) { 2270 skb = hole; 2271 hole = NULL; 2272 } 2273 fwd_rexmitting = 1; 2274 goto begin_fwd; 2275 2276 } else if (!(sacked & TCPCB_LOST)) { 2277 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 2278 hole = skb; 2279 continue; 2280 2281 } else { 2282 last_lost = TCP_SKB_CB(skb)->end_seq; 2283 if (icsk->icsk_ca_state != TCP_CA_Loss) 2284 mib_idx = LINUX_MIB_TCPFASTRETRANS; 2285 else 2286 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 2287 } 2288 2289 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 2290 continue; 2291 2292 if (tcp_retransmit_skb(sk, skb)) 2293 return; 2294 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2295 2296 if (skb == tcp_write_queue_head(sk)) 2297 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2298 inet_csk(sk)->icsk_rto, 2299 TCP_RTO_MAX); 2300 } 2301 } 2302 2303 /* Send a fin. The caller locks the socket for us. This cannot be 2304 * allowed to fail queueing a FIN frame under any circumstances. 2305 */ 2306 void tcp_send_fin(struct sock *sk) 2307 { 2308 struct tcp_sock *tp = tcp_sk(sk); 2309 struct sk_buff *skb = tcp_write_queue_tail(sk); 2310 int mss_now; 2311 2312 /* Optimization, tack on the FIN if we have a queue of 2313 * unsent frames. But be careful about outgoing SACKS 2314 * and IP options. 2315 */ 2316 mss_now = tcp_current_mss(sk); 2317 2318 if (tcp_send_head(sk) != NULL) { 2319 TCP_SKB_CB(skb)->flags |= TCPHDR_FIN; 2320 TCP_SKB_CB(skb)->end_seq++; 2321 tp->write_seq++; 2322 } else { 2323 /* Socket is locked, keep trying until memory is available. */ 2324 for (;;) { 2325 skb = alloc_skb_fclone(MAX_TCP_HEADER, 2326 sk->sk_allocation); 2327 if (skb) 2328 break; 2329 yield(); 2330 } 2331 2332 /* Reserve space for headers and prepare control bits. */ 2333 skb_reserve(skb, MAX_TCP_HEADER); 2334 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2335 tcp_init_nondata_skb(skb, tp->write_seq, 2336 TCPHDR_ACK | TCPHDR_FIN); 2337 tcp_queue_skb(sk, skb); 2338 } 2339 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 2340 } 2341 2342 /* We get here when a process closes a file descriptor (either due to 2343 * an explicit close() or as a byproduct of exit()'ing) and there 2344 * was unread data in the receive queue. This behavior is recommended 2345 * by RFC 2525, section 2.17. -DaveM 2346 */ 2347 void tcp_send_active_reset(struct sock *sk, gfp_t priority) 2348 { 2349 struct sk_buff *skb; 2350 2351 /* NOTE: No TCP options attached and we never retransmit this. */ 2352 skb = alloc_skb(MAX_TCP_HEADER, priority); 2353 if (!skb) { 2354 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2355 return; 2356 } 2357 2358 /* Reserve space for headers and prepare control bits. */ 2359 skb_reserve(skb, MAX_TCP_HEADER); 2360 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2361 TCPHDR_ACK | TCPHDR_RST); 2362 /* Send it off. 
*/ 2363 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2364 if (tcp_transmit_skb(sk, skb, 0, priority)) 2365 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2366 2367 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 2368 } 2369 2370 /* Send a crossed SYN-ACK during socket establishment. 2371 * WARNING: This routine must only be called when we have already sent 2372 * a SYN packet that crossed the incoming SYN that caused this routine 2373 * to get called. If this assumption fails then the initial rcv_wnd 2374 * and rcv_wscale values will not be correct. 2375 */ 2376 int tcp_send_synack(struct sock *sk) 2377 { 2378 struct sk_buff *skb; 2379 2380 skb = tcp_write_queue_head(sk); 2381 if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) { 2382 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 2383 return -EFAULT; 2384 } 2385 if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) { 2386 if (skb_cloned(skb)) { 2387 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2388 if (nskb == NULL) 2389 return -ENOMEM; 2390 tcp_unlink_write_queue(skb, sk); 2391 skb_header_release(nskb); 2392 __tcp_add_write_queue_head(sk, nskb); 2393 sk_wmem_free_skb(sk, skb); 2394 sk->sk_wmem_queued += nskb->truesize; 2395 sk_mem_charge(sk, nskb->truesize); 2396 skb = nskb; 2397 } 2398 2399 TCP_SKB_CB(skb)->flags |= TCPHDR_ACK; 2400 TCP_ECN_send_synack(tcp_sk(sk), skb); 2401 } 2402 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2403 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2404 } 2405 2406 /* Prepare a SYN-ACK. */ 2407 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2408 struct request_sock *req, 2409 struct request_values *rvp) 2410 { 2411 struct tcp_out_options opts; 2412 struct tcp_extend_values *xvp = tcp_xv(rvp); 2413 struct inet_request_sock *ireq = inet_rsk(req); 2414 struct tcp_sock *tp = tcp_sk(sk); 2415 const struct tcp_cookie_values *cvp = tp->cookie_values; 2416 struct tcphdr *th; 2417 struct sk_buff *skb; 2418 struct tcp_md5sig_key *md5; 2419 int tcp_header_size; 2420 int mss; 2421 int s_data_desired = 0; 2422 2423 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired) 2424 s_data_desired = cvp->s_data_desired; 2425 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC); 2426 if (skb == NULL) 2427 return NULL; 2428 2429 /* Reserve space for headers. */ 2430 skb_reserve(skb, MAX_TCP_HEADER); 2431 2432 skb_dst_set(skb, dst_clone(dst)); 2433 2434 mss = dst_metric_advmss(dst); 2435 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2436 mss = tp->rx_opt.user_mss; 2437 2438 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 2439 __u8 rcv_wscale; 2440 /* Set this up on the first call only */ 2441 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2442 2443 /* limit the window selection if the user enforce a smaller rx buffer */ 2444 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 2445 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0)) 2446 req->window_clamp = tcp_full_space(sk); 2447 2448 /* tcp_full_space because it is guaranteed to be the first packet */ 2449 tcp_select_initial_window(tcp_full_space(sk), 2450 mss - (ireq->tstamp_ok ? 
TCPOLEN_TSTAMP_ALIGNED : 0), 2451 &req->rcv_wnd, 2452 &req->window_clamp, 2453 ireq->wscale_ok, 2454 &rcv_wscale, 2455 dst_metric(dst, RTAX_INITRWND)); 2456 ireq->rcv_wscale = rcv_wscale; 2457 } 2458 2459 memset(&opts, 0, sizeof(opts)); 2460 #ifdef CONFIG_SYN_COOKIES 2461 if (unlikely(req->cookie_ts)) 2462 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); 2463 else 2464 #endif 2465 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2466 tcp_header_size = tcp_synack_options(sk, req, mss, 2467 skb, &opts, &md5, xvp) 2468 + sizeof(*th); 2469 2470 skb_push(skb, tcp_header_size); 2471 skb_reset_transport_header(skb); 2472 2473 th = tcp_hdr(skb); 2474 memset(th, 0, sizeof(struct tcphdr)); 2475 th->syn = 1; 2476 th->ack = 1; 2477 TCP_ECN_make_synack(req, th); 2478 th->source = ireq->loc_port; 2479 th->dest = ireq->rmt_port; 2480 /* Setting of flags are superfluous here for callers (and ECE is 2481 * not even correctly set) 2482 */ 2483 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 2484 TCPHDR_SYN | TCPHDR_ACK); 2485 2486 if (OPTION_COOKIE_EXTENSION & opts.options) { 2487 if (s_data_desired) { 2488 u8 *buf = skb_put(skb, s_data_desired); 2489 2490 /* copy data directly from the listening socket. */ 2491 memcpy(buf, cvp->s_data_payload, s_data_desired); 2492 TCP_SKB_CB(skb)->end_seq += s_data_desired; 2493 } 2494 2495 if (opts.hash_size > 0) { 2496 __u32 workspace[SHA_WORKSPACE_WORDS]; 2497 u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS]; 2498 u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1]; 2499 2500 /* Secret recipe depends on the Timestamp, (future) 2501 * Sequence and Acknowledgment Numbers, Initiator 2502 * Cookie, and others handled by IP variant caller. 2503 */ 2504 *tail-- ^= opts.tsval; 2505 *tail-- ^= tcp_rsk(req)->rcv_isn + 1; 2506 *tail-- ^= TCP_SKB_CB(skb)->seq + 1; 2507 2508 /* recommended */ 2509 *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source); 2510 *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ 2511 2512 sha_transform((__u32 *)&xvp->cookie_bakery[0], 2513 (char *)mess, 2514 &workspace[0]); 2515 opts.hash_location = 2516 (__u8 *)&xvp->cookie_bakery[0]; 2517 } 2518 } 2519 2520 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2521 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 2522 2523 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2524 th->window = htons(min(req->rcv_wnd, 65535U)); 2525 tcp_options_write((__be32 *)(th + 1), tp, &opts); 2526 th->doff = (tcp_header_size >> 2); 2527 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); 2528 2529 #ifdef CONFIG_TCP_MD5SIG 2530 /* Okay, we have all we need - do the md5 hash if needed */ 2531 if (md5) { 2532 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, 2533 md5, NULL, req, skb); 2534 } 2535 #endif 2536 2537 return skb; 2538 } 2539 EXPORT_SYMBOL(tcp_make_synack); 2540 2541 /* Do all connect socket setups that can be done AF independent. */ 2542 static void tcp_connect_init(struct sock *sk) 2543 { 2544 struct dst_entry *dst = __sk_dst_get(sk); 2545 struct tcp_sock *tp = tcp_sk(sk); 2546 __u8 rcv_wscale; 2547 2548 /* We'll fix this up when we get a response from the other end. 2549 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 2550 */ 2551 tp->tcp_header_len = sizeof(struct tcphdr) + 2552 (sysctl_tcp_timestamps ? 
TCPOLEN_TSTAMP_ALIGNED : 0); 2553 2554 #ifdef CONFIG_TCP_MD5SIG 2555 if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2556 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 2557 #endif 2558 2559 /* If user gave his TCP_MAXSEG, record it to clamp */ 2560 if (tp->rx_opt.user_mss) 2561 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 2562 tp->max_window = 0; 2563 tcp_mtup_init(sk); 2564 tcp_sync_mss(sk, dst_mtu(dst)); 2565 2566 if (!tp->window_clamp) 2567 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 2568 tp->advmss = dst_metric_advmss(dst); 2569 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) 2570 tp->advmss = tp->rx_opt.user_mss; 2571 2572 tcp_initialize_rcv_mss(sk); 2573 2574 /* limit the window selection if the user enforce a smaller rx buffer */ 2575 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 2576 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) 2577 tp->window_clamp = tcp_full_space(sk); 2578 2579 tcp_select_initial_window(tcp_full_space(sk), 2580 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2581 &tp->rcv_wnd, 2582 &tp->window_clamp, 2583 sysctl_tcp_window_scaling, 2584 &rcv_wscale, 2585 dst_metric(dst, RTAX_INITRWND)); 2586 2587 tp->rx_opt.rcv_wscale = rcv_wscale; 2588 tp->rcv_ssthresh = tp->rcv_wnd; 2589 2590 sk->sk_err = 0; 2591 sock_reset_flag(sk, SOCK_DONE); 2592 tp->snd_wnd = 0; 2593 tcp_init_wl(tp, 0); 2594 tp->snd_una = tp->write_seq; 2595 tp->snd_sml = tp->write_seq; 2596 tp->snd_up = tp->write_seq; 2597 tp->rcv_nxt = 0; 2598 tp->rcv_wup = 0; 2599 tp->copied_seq = 0; 2600 2601 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2602 inet_csk(sk)->icsk_retransmits = 0; 2603 tcp_clear_retrans(tp); 2604 } 2605 2606 /* Build a SYN and send it off. */ 2607 int tcp_connect(struct sock *sk) 2608 { 2609 struct tcp_sock *tp = tcp_sk(sk); 2610 struct sk_buff *buff; 2611 int err; 2612 2613 tcp_connect_init(sk); 2614 2615 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 2616 if (unlikely(buff == NULL)) 2617 return -ENOBUFS; 2618 2619 /* Reserve space for headers. */ 2620 skb_reserve(buff, MAX_TCP_HEADER); 2621 2622 tp->snd_nxt = tp->write_seq; 2623 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); 2624 TCP_ECN_send_syn(sk, buff); 2625 2626 /* Send it off. */ 2627 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2628 tp->retrans_stamp = TCP_SKB_CB(buff)->when; 2629 skb_header_release(buff); 2630 __tcp_add_write_queue_tail(sk, buff); 2631 sk->sk_wmem_queued += buff->truesize; 2632 sk_mem_charge(sk, buff->truesize); 2633 tp->packets_out += tcp_skb_pcount(buff); 2634 err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 2635 if (err == -ECONNREFUSED) 2636 return err; 2637 2638 /* We change tp->snd_nxt after the tcp_transmit_skb() call 2639 * in order to make this packet get counted in tcpOutSegs. 2640 */ 2641 tp->snd_nxt = tp->write_seq; 2642 tp->pushed_seq = tp->write_seq; 2643 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); 2644 2645 /* Timer for repeating the SYN until an answer. */ 2646 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2647 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 2648 return 0; 2649 } 2650 EXPORT_SYMBOL(tcp_connect); 2651 2652 /* Send out a delayed ack, the caller does the policy checking 2653 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 2654 * for details. 
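 * The delayed-ACK timeout (ato) is only ever tightened here: it is capped at HZ/2, at TCP_DELACK_MAX in pingpong mode or when an ACK has been pushed, and additionally at the smoothed RTT estimate when one is available.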
2655 */ 2656 void tcp_send_delayed_ack(struct sock *sk) 2657 { 2658 struct inet_connection_sock *icsk = inet_csk(sk); 2659 int ato = icsk->icsk_ack.ato; 2660 unsigned long timeout; 2661 2662 if (ato > TCP_DELACK_MIN) { 2663 const struct tcp_sock *tp = tcp_sk(sk); 2664 int max_ato = HZ / 2; 2665 2666 if (icsk->icsk_ack.pingpong || 2667 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 2668 max_ato = TCP_DELACK_MAX; 2669 2670 /* Slow path, intersegment interval is "high". */ 2671 2672 /* If some rtt estimate is known, use it to bound delayed ack. 2673 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 2674 * directly. 2675 */ 2676 if (tp->srtt) { 2677 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN); 2678 2679 if (rtt < max_ato) 2680 max_ato = rtt; 2681 } 2682 2683 ato = min(ato, max_ato); 2684 } 2685 2686 /* Stay within the limit we were given */ 2687 timeout = jiffies + ato; 2688 2689 /* Use new timeout only if there wasn't an older one earlier. */ 2690 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 2691 /* If delack timer was blocked or is about to expire, 2692 * send ACK now. 2693 */ 2694 if (icsk->icsk_ack.blocked || 2695 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 2696 tcp_send_ack(sk); 2697 return; 2698 } 2699 2700 if (!time_before(timeout, icsk->icsk_ack.timeout)) 2701 timeout = icsk->icsk_ack.timeout; 2702 } 2703 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 2704 icsk->icsk_ack.timeout = timeout; 2705 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 2706 } 2707 2708 /* This routine sends an ack and also updates the window. */ 2709 void tcp_send_ack(struct sock *sk) 2710 { 2711 struct sk_buff *buff; 2712 2713 /* If we have been reset, we may not send again. */ 2714 if (sk->sk_state == TCP_CLOSE) 2715 return; 2716 2717 /* We are not putting this on the write queue, so 2718 * tcp_transmit_skb() will set the ownership to this 2719 * sock. 2720 */ 2721 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2722 if (buff == NULL) { 2723 inet_csk_schedule_ack(sk); 2724 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 2725 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 2726 TCP_DELACK_MAX, TCP_RTO_MAX); 2727 return; 2728 } 2729 2730 /* Reserve space for headers and prepare control bits. */ 2731 skb_reserve(buff, MAX_TCP_HEADER); 2732 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); 2733 2734 /* Send it off, this clears delayed acks for us. */ 2735 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2736 tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); 2737 } 2738 2739 /* This routine sends a packet with an out of date sequence 2740 * number. It assumes the other end will try to ack it. 2741 * 2742 * Question: what should we do in urgent mode? 2743 * 4.4BSD forces sending single byte of data. We cannot send 2744 * out of window data, because we have SND.NXT==SND.MAX... 2745 * 2746 * Current solution: to send TWO zero-length segments in urgent mode: 2747 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, the other is 2748 * out-of-date with SND.UNA-1 to probe window. 2749 */ 2750 static int tcp_xmit_probe_skb(struct sock *sk, int urgent) 2751 { 2752 struct tcp_sock *tp = tcp_sk(sk); 2753 struct sk_buff *skb; 2754 2755 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 2756 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2757 if (skb == NULL) 2758 return -1; 2759 2760 /* Reserve space for headers and set control bits. */ 2761 skb_reserve(skb, MAX_TCP_HEADER); 2762 /* Use a previous sequence. This should cause the other 2763 * end to send an ack.
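 * (tp->snd_una - !urgent below selects between the two cases described above: snd_una itself in urgent mode, snd_una - 1 for a plain window probe.)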
Don't queue or clone SKB, just 2764 * send it. 2765 */ 2766 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); 2767 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2768 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 2769 } 2770 2771 /* Initiate keepalive or window probe from timer. */ 2772 int tcp_write_wakeup(struct sock *sk) 2773 { 2774 struct tcp_sock *tp = tcp_sk(sk); 2775 struct sk_buff *skb; 2776 2777 if (sk->sk_state == TCP_CLOSE) 2778 return -1; 2779 2780 if ((skb = tcp_send_head(sk)) != NULL && 2781 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 2782 int err; 2783 unsigned int mss = tcp_current_mss(sk); 2784 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 2785 2786 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 2787 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 2788 2789 /* We are probing the opening of a window 2790 * but the window size is != 0; 2791 * this must have been a result of SWS avoidance (sender side). 2792 */ 2793 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 2794 skb->len > mss) { 2795 seg_size = min(seg_size, mss); 2796 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH; 2797 if (tcp_fragment(sk, skb, seg_size, mss)) 2798 return -1; 2799 } else if (!tcp_skb_pcount(skb)) 2800 tcp_set_skb_tso_segs(sk, skb, mss); 2801 2802 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH; 2803 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2804 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2805 if (!err) 2806 tcp_event_new_data_sent(sk, skb); 2807 return err; 2808 } else { 2809 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 2810 tcp_xmit_probe_skb(sk, 1); 2811 return tcp_xmit_probe_skb(sk, 0); 2812 } 2813 } 2814 2815 /* A window probe timeout has occurred. If the window is not closed, 2816 * send a partial packet; otherwise send a zero-window probe. 2817 */ 2818 void tcp_send_probe0(struct sock *sk) 2819 { 2820 struct inet_connection_sock *icsk = inet_csk(sk); 2821 struct tcp_sock *tp = tcp_sk(sk); 2822 int err; 2823 2824 err = tcp_write_wakeup(sk); 2825 2826 if (tp->packets_out || !tcp_send_head(sk)) { 2827 /* Cancel probe timer, if it is not required. */ 2828 icsk->icsk_probes_out = 0; 2829 icsk->icsk_backoff = 0; 2830 return; 2831 } 2832 2833 if (err <= 0) { 2834 if (icsk->icsk_backoff < sysctl_tcp_retries2) 2835 icsk->icsk_backoff++; 2836 icsk->icsk_probes_out++; 2837 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2838 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 2839 TCP_RTO_MAX); 2840 } else { 2841 /* If packet was not sent due to local congestion, 2842 * do not back off and do not remember icsk_probes_out. 2843 * Let local senders fight for local resources. 2844 * 2845 * Still use the accumulated backoff, though. 2846 */ 2847 if (!icsk->icsk_probes_out) 2848 icsk->icsk_probes_out = 1; 2849 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2850 min(icsk->icsk_rto << icsk->icsk_backoff, 2851 TCP_RESOURCE_PROBE_INTERVAL), 2852 TCP_RTO_MAX); 2853 } 2854 } 2855