/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = 512;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	/* Don't override Nagle indefinitely with F-RTO */
	if (tp->frto_counter == 2)
		tp->frto_counter = 3;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we do? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND can already be
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not follow point 3; we advertise an MSS calculated from the
 *    first hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

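/* Worked example (illustrative, not from the original source): on a plain
 * IPv4 Ethernet path with a 1500-byte first-hop MTU, tp->advmss typically
 * starts at 1500 - 20 (IP) - 20 (TCP) = 1460, and a smaller RTAX_ADVMSS
 * route metric, if one is configured, lowers the value advertised in the
 * SYN.
 */
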
/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of the cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set the initial window to a value enough for senders
	 * following RFC2414. Senders not following this RFC
	 * will be satisfied with 2.
	 */
	if (mss > (1 << *rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460 * 3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd * mss)
			*rcv_wnd = init_cwnd * mss;
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}

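/* Worked example (illustrative): with sysctl_tcp_rmem[2] = 4 MB and no
 * tighter window clamp, the loop above halves 4194304 until it fits in
 * 16 bits: 4194304 >> 7 = 32768 <= 65535, so rcv_wscale ends up as 7 and
 * every window value we later advertise is interpreted by the peer in
 * units of 128 bytes.
 */
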
/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
}

static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sysctl_tcp_ecn) {
		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->csum = 0;

	TCP_SKB_CB(skb)->flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline int tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)

struct tcp_out_options {
	u8 options;		/* bit field of OPTION_* */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u16 mss;		/* 0 to disable */
	__u32 tsval, tsecr;	/* only used when OPTION_TS is set */
};

/* Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options; we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * any particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      const struct tcp_out_options *opts,
			      __u8 **md5_hash)
{
	if (unlikely(OPTION_MD5 & opts->options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		*md5_hash = (__u8 *)ptr;
		ptr += 4;
	} else {
		*md5_hash = NULL;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & opts->options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & opts->options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & opts->options &&
		     !(OPTION_TS & opts->options))) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(opts->ws)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}
}

static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned size = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option. If timestamps are used, the option
	 * bytes that will be seen in normal data packets must be counted in
	 * the MSS advertised. But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc. So account for
	 * this fact here if necessary. If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when
	 * we should, and thus we won't abide by the delayed ACK rules
	 * correctly. SACKs don't matter, we never delay an ACK when we have
	 * any of those going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	size += TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		if (likely(opts->ws))
			size += TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			size += TCPOLEN_SACKPERM_ALIGNED;
	}

	return size;
}

static unsigned tcp_synack_options(struct sock *sk,
				   struct request_sock *req,
				   unsigned mss, struct sk_buff *skb,
				   struct tcp_out_options *opts,
				   struct tcp_md5sig_key **md5)
{
	unsigned size = 0;
	struct inet_request_sock *ireq = inet_rsk(req);
	char doing_ts;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
	if (*md5) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We can't fit any SACK blocks in a packet with MD5 + TS options.
	 * There was discussion about disabling SACK rather than TS in order
	 * to fit in better with old, buggy kernels, but that was deemed to
	 * be unnecessary.
	 */
	doing_ts = ireq->tstamp_ok && !(*md5 && ireq->sack_ok);

	opts->mss = mss;
	size += TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		if (likely(opts->ws))
			size += TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(doing_ts)) {
		opts->options |= OPTION_TS;
		opts->tsval = TCP_SKB_CB(skb)->when;
		opts->tsecr = req->ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!doing_ts))
			size += TCPOLEN_SACKPERM_ALIGNED;
	}

	return size;
}

static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned size = 0;
	unsigned int eff_sacks;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcb ? tcb->when : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}

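/* Worked example (illustrative, assuming the usual option sizes:
 * MAX_TCP_OPTION_SPACE = 40, TCPOLEN_TSTAMP_ALIGNED = 12,
 * TCPOLEN_SACK_BASE_ALIGNED = 4, TCPOLEN_SACK_PERBLOCK = 8): with
 * timestamps on and no MD5, remaining = 40 - 12 = 28 bytes, so at most
 * (28 - 4) / 8 = 3 SACK blocks fit into one segment's option space.
 */
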
/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	__u8 *md5_hash_location;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));

	/* If congestion control is doing timestamping, we must
	 * take such a timestamp before we potentially clone/copy.
	 */
	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
		__net_timestamp(skb);

	if (likely(clone_it)) {
		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tp = tcp_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);
	skb_set_owner_w(skb, sk);

	/* Build TCP header and checksum it. */
	th = tcp_hdr(skb);
	th->source	= inet->sport;
	th->dest	= inet->dport;
	th->seq		= htonl(tcb->seq);
	th->ack_seq	= htonl(tp->rcv_nxt);

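	/* The next store fills the 16-bit word at byte offset 12 of the
	 * header (data offset, reserved bits and flags) in one go.
	 * Illustrative example: a 32-byte header (20 bytes base + 12 bytes
	 * of timestamp option) gives doff = 32 >> 2 = 8 in the top four
	 * bits, with tcb->flags in the low bits.
	 */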
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->flags);

	if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	} else {
		th->window	= htons(tcp_select_window(sk));
	}
	th->check	= 0;
	th->urg_ptr	= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = 0xFFFF;
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
	if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0))
		TCP_ECN_send(sk, skb, tcp_header_size);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		tp->af_specific->calc_md5_hash(md5_hash_location,
					       md5, sk, NULL, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb->len, skb);

	if (likely(tcb->flags & TCPCB_FLAG_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size)
		tcp_event_data_sent(tp, skb, sk);

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);

	err = icsk->icsk_af_ops->queue_xmit(skb, 0);
	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk, 1);

	return net_xmit_eval(err);
}

/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
				 unsigned int mss_now)
{
	if (skb->len <= mss_now || !sk_can_gso(sk)) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
		skb_shinfo(skb)->gso_type = 0;
	} else {
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
		skb_shinfo(skb)->gso_size = mss_now;
		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	}
}

/* When a modification to fackets_out becomes necessary, we need to check
 * whether skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do
 * various tweaks to fix the counters.
 */
static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || TCP_SKB_CB(skb)->sacked))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u8 flags;

	BUG_ON(len > skb->len);

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the 'when' of skbs
	 * it has never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			   tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

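/* Worked example (illustrative): splitting an skb that covers sequence
 * range [1000, 2000) at len = 500 leaves the original covering
 * [1000, 1500) and the new buff covering [1500, 2000); PSH and FIN, if
 * set, travel with buff, since only the final piece may end the stream
 * or push the data.
 */
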
/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb_reset_tail_pointer(skb);
	skb->data_len -= len;
	skb->len = skb->data_len;
}

int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
	if (unlikely(len < skb_headlen(skb)))
		__skb_pull(skb, len);
	else
		__pskb_trim_head(skb, len - skb_headlen(skb));

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize	   -= len;
	sk->sk_wmem_queued -= len;
	sk_mem_uncharge(sk, len);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));

	return 0;
}

/* Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	 * it is MMS_S - sizeof(tcphdr) of rfc1122.
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}

/* Inverse of the above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}

void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}

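/* Worked example (illustrative): for IPv4 with no extension headers and a
 * 1500-byte PMTU, tcp_mtu_to_mss() yields 1500 - 20 - 20 = 1460, and with
 * timestamps negotiated (tcp_header_len 12 bytes above the base header)
 * the effective payload per segment drops to 1448. tcp_mss_to_mtu()
 * reverses the same arithmetic.
 */
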
/* This function synchronizes snd mss to current pmtu/exthdr set.
 *
 * tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT
 * account for TCP options, but includes only bare TCP header.
 *
 * tp->rx_opt.mss_clamp is mss negotiated at connection setup.
 * It is minimum of user_mss and mss received with SYN.
 * It also does not include TCP options.
 *
 * inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
 *
 * tp->mss_cache is current effective sending mss, including
 * all tcp options except for SACKs. It is evaluated,
 * taking into account current pmtu, but never exceeds
 * tp->rx_opt.mss_clamp.
 *
 * NOTE1. rfc1122 clearly states that advertised MSS
 * DOES NOT include either tcp or ip options.
 *
 * NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
 * are READ ONLY outside this function. --ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);
	mss_now = tcp_bound_to_half_wnd(tp, mss_now);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
unsigned int tcp_current_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	unsigned header_len;
	struct tcp_out_options opts;
	struct tcp_md5sig_key *md5;

	mss_now = tp->mss_cache;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
		     sizeof(struct tcphdr);
	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
	 * some common options. If this is an odd packet (because we have
	 * SACK blocks etc) then our calculated header_len will be different,
	 * and we have to adjust mss_now correspondingly.
	 */
	if (header_len != tp->tcp_header_len) {
		int delta = (int) header_len - tp->tcp_header_len;
		mss_now -= delta;
	}

	return mss_now;
}

/* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

Basically we need the 1101 * modulo only when the receiver window alone is the limiting factor or 1102 * when we would be allowed to send the split-due-to-Nagle skb fully. 1103 */ 1104 static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, 1105 unsigned int mss_now, unsigned int cwnd) 1106 { 1107 struct tcp_sock *tp = tcp_sk(sk); 1108 u32 needed, window, cwnd_len; 1109 1110 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1111 cwnd_len = mss_now * cwnd; 1112 1113 if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) 1114 return cwnd_len; 1115 1116 needed = min(skb->len, window); 1117 1118 if (cwnd_len <= needed) 1119 return cwnd_len; 1120 1121 return needed - needed % mss_now; 1122 } 1123 1124 /* Can at least one segment of SKB be sent right now, according to the 1125 * congestion window rules? If so, return how many segments are allowed. 1126 */ 1127 static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, 1128 struct sk_buff *skb) 1129 { 1130 u32 in_flight, cwnd; 1131 1132 /* Don't be strict about the congestion window for the final FIN. */ 1133 if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 1134 tcp_skb_pcount(skb) == 1) 1135 return 1; 1136 1137 in_flight = tcp_packets_in_flight(tp); 1138 cwnd = tp->snd_cwnd; 1139 if (in_flight < cwnd) 1140 return (cwnd - in_flight); 1141 1142 return 0; 1143 } 1144 1145 /* This must be invoked the first time we consider transmitting 1146 * SKB onto the wire. 1147 */ 1148 static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, 1149 unsigned int mss_now) 1150 { 1151 int tso_segs = tcp_skb_pcount(skb); 1152 1153 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 1154 tcp_set_skb_tso_segs(sk, skb, mss_now); 1155 tso_segs = tcp_skb_pcount(skb); 1156 } 1157 return tso_segs; 1158 } 1159 1160 static inline int tcp_minshall_check(const struct tcp_sock *tp) 1161 { 1162 return after(tp->snd_sml, tp->snd_una) && 1163 !after(tp->snd_sml, tp->snd_nxt); 1164 } 1165 1166 /* Return 0, if packet can be sent now without violation Nagle's rules: 1167 * 1. It is full sized. 1168 * 2. Or it contains FIN. (already checked by caller) 1169 * 3. Or TCP_NODELAY was set. 1170 * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1171 * With Minshall's modification: all sent small packets are ACKed. 1172 */ 1173 static inline int tcp_nagle_check(const struct tcp_sock *tp, 1174 const struct sk_buff *skb, 1175 unsigned mss_now, int nonagle) 1176 { 1177 return (skb->len < mss_now && 1178 ((nonagle & TCP_NAGLE_CORK) || 1179 (!nonagle && tp->packets_out && tcp_minshall_check(tp)))); 1180 } 1181 1182 /* Return non-zero if the Nagle test allows this packet to be 1183 * sent now. 1184 */ 1185 static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, 1186 unsigned int cur_mss, int nonagle) 1187 { 1188 /* Nagle rule does not apply to frames, which sit in the middle of the 1189 * write_queue (they have no chances to get new data). 1190 * 1191 * This is implemented in the callers, where they modify the 'nonagle' 1192 * argument based upon the location of SKB in the send queue. 1193 */ 1194 if (nonagle & TCP_NAGLE_PUSH) 1195 return 1; 1196 1197 /* Don't use the nagle rule for urgent data (or for the final FIN). 1198 * Nagle can be ignored during F-RTO too (see RFC4138). 
/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames which sit in the middle of
	 * the write_queue (they have no chance of getting new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the Nagle rule for urgent data (or for the final FIN).
	 * Nagle can be ignored during F-RTO too (see RFC4138).
	 */
	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
				   unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tcp_wnd_end(tp));
}

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

int tcp_may_send_now(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk),
			     (tcp_skb_is_last(sk, skb) ?
			      tp->nonagle : TCP_NAGLE_PUSH)));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
			unsigned int mss_now)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u8 flags;

	/* All of a TSO frame must be composed of paged data. */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;

	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		goto send_now;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		goto send_now;

	/* Defer for less than two clock ticks. */
	if (tp->tso_deferred &&
	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
		goto send_now;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));

	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

	/* From in_flight test above, we know that cwnd > in_flight. */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	/* If a full-sized TSO skb can be sent, do it. */
	if (limit >= sk->sk_gso_max_size)
		goto send_now;

	/* Middle in queue won't get any more data, full sendable already? */
	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
		goto send_now;

	if (sysctl_tcp_tso_win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= sysctl_tcp_tso_win_divisor;
		if (limit >= chunk)
			goto send_now;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK.  Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_burst(tp) * tp->mss_cache)
			goto send_now;
	}

	/* Ok, it looks like it is advisable to defer. */
	tp->tso_deferred = 1 | (jiffies << 1);

	return 1;

send_now:
	tp->tso_deferred = 0;
	return 0;
}

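/* Note on the encoding above (as the code reads): tp->tso_deferred stores
 * jiffies shifted left by one with bit 0 forced to 1, so the value is
 * never 0 while a deferral is active (even when jiffies itself is 0),
 * and the "defer for less than two clock ticks" test recovers the
 * elapsed ticks by comparing the shifted values.
 */
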
/* Create a new MTU probe if we are ready.
 * Returns 0 if we should wait to probe (no cwnd available),
 *         1 if a probe was sent,
 *         -1 otherwise
 */
static int tcp_mtu_probe(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb, *nskb, *next;
	int len;
	int probe_size;
	int size_needed;
	int copy;
	int mss_now;

	/* Not currently probing/verifying,
	 * not in recovery,
	 * have enough cwnd, and
	 * not SACKing (the variable headers throw things off)
	 */
	if (!icsk->icsk_mtup.enabled ||
	    icsk->icsk_mtup.probe_size ||
	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
	    tp->snd_cwnd < 11 ||
	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
		return -1;

	/* Very simple search strategy: just double the MSS. */
	mss_now = tcp_current_mss(sk);
	probe_size = 2 * tp->mss_cache;
	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
		/* TODO: set timer for probe_converge_event */
		return -1;
	}

	/* Have enough data in the send queue to probe? */
	if (tp->write_seq - tp->snd_nxt < size_needed)
		return -1;

	if (tp->snd_wnd < size_needed)
		return -1;
	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
		return 0;

	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
		if (!tcp_packets_in_flight(tp))
			return -1;
		else
			return 0;
	}

	/* We're allowed to probe.  Build it now. */
	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
		return -1;
	sk->sk_wmem_queued += nskb->truesize;
	sk_mem_charge(sk, nskb->truesize);

	skb = tcp_send_head(sk);

	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
	TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(nskb)->sacked = 0;
	nskb->csum = 0;
	nskb->ip_summed = skb->ip_summed;

	tcp_insert_write_queue_before(nskb, skb, sk);

	len = 0;
	tcp_for_write_queue_from_safe(skb, next, sk) {
		copy = min_t(int, skb->len, probe_size - len);
		if (nskb->ip_summed)
			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
		else
			nskb->csum = skb_copy_and_csum_bits(skb, 0,
							    skb_put(nskb, copy),
							    copy, nskb->csum);

		if (skb->len <= copy) {
			/* We've eaten all the data from this skb.
			 * Throw it away.
			 */
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
			tcp_unlink_write_queue(skb, sk);
			sk_wmem_free_skb(sk, skb);
		} else {
			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
						   ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
			if (!skb_shinfo(skb)->nr_frags) {
				skb_pull(skb, copy);
				if (skb->ip_summed != CHECKSUM_PARTIAL)
					skb->csum = csum_partial(skb->data,
								 skb->len, 0);
			} else {
				__pskb_trim_head(skb, copy);
				tcp_set_skb_tso_segs(sk, skb, mss_now);
			}
			TCP_SKB_CB(skb)->seq += copy;
		}

		len += copy;

		if (len >= probe_size)
			break;
	}
	tcp_init_tso_segs(sk, nskb, nskb->len);

	/* We're ready to send.  If this fails, the probe will
	 * be resegmented into mss-sized pieces by tcp_write_xmit().
	 */
	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
		/* Decrement cwnd here because we are sending
		 * effectively two packets.
		 */
		tp->snd_cwnd--;
		tcp_event_new_data_sent(sk, nskb);

		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;

		return 1;
	}

	return -1;
}

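/* Worked example (illustrative, assuming tp->mss_cache = 1460 and
 * tp->reordering = 3): the probe is built as probe_size = 2920 bytes and
 * requires size_needed = 2920 + 4 * 1460 = 8760 bytes of queued data and
 * window, so probes are only attempted on reasonably busy connections.
 */
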
/* This routine writes packets to the network.  It advances the
 * send_head.  This happens as incoming acks open up the remote
 * window for us.
 *
 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
 * account rare use of URG, this is not a big flaw.
 *
 * Returns 1 if no segments are in flight and we have queued segments,
 * but cannot send anything now because of SWS or another problem.
 */
static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			  int push_one, gfp_t gfp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;
	int result;

	sent_pkts = 0;

	if (!push_one) {
		/* Do MTU probing. */
		result = tcp_mtu_probe(sk);
		if (!result) {
			return 0;
		} else if (result > 0) {
			sent_pkts = 1;
		}
	}

	while ((skb = tcp_send_head(sk))) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota)
			break;

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (!push_one && tcp_tso_should_defer(sk, skb))
				break;
		}

		limit = mss_now;
		if (tso_segs > 1 && !tcp_urg_mode(tp))
			limit = tcp_mss_split_point(sk, skb, mss_now,
						    cwnd_quota);

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
			break;

		/* Advance the send_head.  This one is sent out.
		 * This call will increment packets_out.
		 */
		tcp_event_new_data_sent(sk, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts++;

		if (push_one)
			break;
	}

	if (likely(sent_pkts)) {
		tcp_cwnd_validate(sk);
		return 0;
	}
	return !tp->packets_out && tcp_send_head(sk);
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle)
{
	struct sk_buff *skb = tcp_send_head(sk);

	if (!skb)
		return;

	/* If we are closed, the bytes will have to remain here.
	 * In time closedown will finish, we empty the write queue and
	 * all will be happy.
	 */
	if (unlikely(sk->sk_state == TCP_CLOSE))
		return;

	if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
		tcp_check_probe_timer(sk);
}

/* Send _single_ skb sitting at the send head. This function requires
 * true push pending frames to setup probe timer etc.
 */
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
	struct sk_buff *skb = tcp_send_head(sk);

	BUG_ON(!skb || skb->len < mss_now);

	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
}

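/* Usage note (as the code above reads): __tcp_push_pending_frames() walks
 * the whole queue and arms the zero-window probe timer when nothing could
 * be sent, while tcp_push_one() passes push_one = 1 so exactly one segment
 * at the send head is transmitted and neither MTU probing nor TSO deferral
 * is attempted.
 */
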
/* This function returns the amount that we can raise the
 * usable window based on the following constraints
 *
 * 1. The window can never be shrunk once it is offered (RFC 793)
 * 2. We limit memory per socket
 *
 * RFC 1122:
 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
 *  RECV.NEXT + RCV.WIN fixed until:
 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
 *
 * i.e. don't raise the right edge of the window until you can raise
 * it at least MSS bytes.
 *
 * Unfortunately, the recommended algorithm breaks header prediction,
 * since header prediction assumes th->window stays fixed.
 *
 * Strictly speaking, keeping th->window fixed violates the receiver
 * side SWS prevention criteria. The problem is that under this rule
 * a stream of single byte packets will cause the right side of the
 * window to always advance by a single byte.
 *
 * Of course, if the sender implements sender side SWS prevention
 * then this will not be a problem.
 *
 * BSD seems to make the following compromise:
 *
 *	If the free space is less than the 1/4 of the maximum
 *	space available and the free space is less than 1/2 mss,
 *	then set the window to 0.
 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
 *	Otherwise, just prevent the window from shrinking
 *	and from being larger than the largest representable value.
 *
 * This prevents incremental opening of the window in the regime
 * where TCP is limited by the speed of the reader side taking
 * data out of the TCP receive queue. It does nothing about
 * those cases where the window is constrained on the sender side
 * because the pipeline is full.
 *
 * BSD also seems to "accidentally" limit itself to windows that are a
 * multiple of MSS, at least until the free space gets quite small.
 * This would appear to be a side effect of the mbuf implementation.
 * Combining these two algorithms results in the observed behavior
 * of having a fixed window size at almost all times.
 *
 * Below we obtain similar behavior by forcing the offered window to
 * a multiple of the mss when it is feasible to do so.
 *
 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
 * Regular options like TIMESTAMP are taken into account.
 */
u32 __tcp_select_window(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	/* MSS for the peer's data.  Previous versions used mss_clamp
	 * here.  I don't know if the value based on our guesses
	 * of peer's MSS is better for the performance.  It's more correct
	 * but may be worse for the performance because of rcv_mss
	 * fluctuations.  --SAW  1998/11/1
	 */
	int mss = icsk->icsk_ack.rcv_mss;
	int free_space = tcp_space(sk);
	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
	int window;

	if (mss > full_space)
		mss = full_space;

	if (free_space < (full_space >> 1)) {
		icsk->icsk_ack.quick = 0;

		if (tcp_memory_pressure)
			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
					       4U * tp->advmss);

		if (free_space < mss)
			return 0;
	}

	if (free_space > tp->rcv_ssthresh)
		free_space = tp->rcv_ssthresh;

	/* Don't do rounding if we are using window scaling, since the
	 * scaled window will not line up with the MSS boundary anyway.
	 */
	window = tp->rcv_wnd;
	if (tp->rx_opt.rcv_wscale) {
		window = free_space;

		/* Advertise enough space so that it won't get scaled away.
		 * Important case: prevent zero window announcement if
		 * 1<<rcv_wscale > mss.
		 */
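		/* Worked example (illustrative): with rcv_wscale = 7 the
		 * advertised value has a granularity of 128 bytes, so a
		 * free space of, say, 1000 bytes is rounded up to 1024
		 * below rather than being truncated to 896 by the scaling
		 * shift.
		 */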
		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
				  << tp->rx_opt.rcv_wscale);
	} else {
		/* Get the largest window that is a nice multiple of mss.
		 * Window clamp already applied above.
		 * If our current window offering is within 1 mss of the
		 * free space we just keep it. This prevents the divide
		 * and multiply from happening most of the time.
		 * We also don't do any window rounding when the free space
		 * is too small.
		 */
		if (window <= free_space - mss || window > free_space)
			window = (free_space / mss) * mss;
		else if (mss == full_space &&
			 free_space > window + (full_space >> 1))
			window = free_space;
	}

	return window;
}

/* Collapses two adjacent SKB's during retransmission. */
static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
	int skb_size, next_skb_size;

	skb_size = skb->len;
	next_skb_size = next_skb->len;

	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);

	tcp_highest_sack_combine(sk, next_skb, skb);

	tcp_unlink_write_queue(next_skb, sk);

	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
				  next_skb_size);

	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_PARTIAL;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);

	/* Update sequence range on original skb. */
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;

	/* Merge over control information. This moves PSH/FIN etc. over */
	TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags;

	/* All done, get rid of second SKB and account for it so
	 * packet counting does not break.
	 */
	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;

	/* changed transmit queue under us so clear hints */
	tcp_clear_retrans_hints_partial(tp);
	if (next_skb == tp->retransmit_skb_hint)
		tp->retransmit_skb_hint = skb;

	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));

	sk_wmem_free_skb(sk, next_skb);
}

static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_skb_pcount(skb) > 1)
		return 0;
	/* TODO: SACK collapsing could be used to remove this condition */
	if (skb_shinfo(skb)->nr_frags != 0)
		return 0;
	if (skb_cloned(skb))
		return 0;
	if (skb == tcp_send_head(sk))
		return 0;
	/* Some heuristics for collapsing over SACK'd could be invented */
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		return 0;

	return 1;
}

static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
				     int space)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = to, *tmp;
	int first = 1;

	if (!sysctl_tcp_retrans_collapse)
		return;
	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)
		return;

	tcp_for_write_queue_from_safe(skb, tmp, sk) {
		if (!tcp_can_collapse(sk, skb))
			break;

		space -= skb->len;

		if (first) {
			first = 0;
			continue;
		}

		if (space < 0)
			break;
		/* Punt if not enough space exists in the first SKB for
		 * the data in the second
		 */
		if (skb->len > skb_tailroom(to))
			break;

		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
			break;

		tcp_collapse_retrans(sk, to);
	}
}

1848 /* This retransmits one SKB. Policy decisions and retransmit queue
1849  * state updates are done by the caller. Returns non-zero if an
1850  * error occurred which prevented the send.
1851  */
1852 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1853 {
1854	struct tcp_sock *tp = tcp_sk(sk);
1855	struct inet_connection_sock *icsk = inet_csk(sk);
1856	unsigned int cur_mss;
1857	int err;
1858
1859	/* Inconclusive MTU probe */
1860	if (icsk->icsk_mtup.probe_size) {
1861		icsk->icsk_mtup.probe_size = 0;
1862	}
1863
1864	/* Do not send more than we have queued. 1/4 is reserved for possible
1865	 * copying overhead: fragmentation, tunneling, mangling etc.
1866	 */
1867	if (atomic_read(&sk->sk_wmem_alloc) >
1868	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
1869		return -EAGAIN;
1870
1871	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
1872		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1873			BUG();
1874		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
1875			return -ENOMEM;
1876	}
1877
1878	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
1879		return -EHOSTUNREACH; /* Routing failure or similar. */
1880
1881	cur_mss = tcp_current_mss(sk);
1882
1883	/* If the receiver has shrunk its window, and skb is out of
1884	 * the new window, do not retransmit it. The exception is the
1885	 * case when the window is shrunk to zero; in that case
1886	 * our retransmit serves as a zero window probe.
1887	 */
1888	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))
1889	    && TCP_SKB_CB(skb)->seq != tp->snd_una)
1890		return -EAGAIN;
1891
1892	if (skb->len > cur_mss) {
1893		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
1894			return -ENOMEM; /* We'll try again later. */
1895	} else {
1896		int oldpcount = tcp_skb_pcount(skb);
1897
1898		if (unlikely(oldpcount > 1)) {
1899			tcp_init_tso_segs(sk, skb, cur_mss);
1900			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
1901		}
1902	}
1903
1904	tcp_retrans_try_collapse(sk, skb, cur_mss);
1905
1906	/* Some Solaris stacks overoptimize and ignore the FIN on a
1907	 * retransmit when old data is attached. So strip it off
1908	 * since it is cheap to do so and saves bytes on the network.
1909	 */
1910	if (skb->len > 0 &&
1911	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1912	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
1913		if (!pskb_trim(skb, 0)) {
1914			/* Reuse, even though it does some unnecessary work */
1915			tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
1916					     TCP_SKB_CB(skb)->flags);
1917			skb->ip_summed = CHECKSUM_NONE;
1918		}
1919	}
1920
1921	/* Make a copy if the first-transmission SKB clone we made
1922	 * is still in somebody's hands; else make a clone.
1923	 */
1924	TCP_SKB_CB(skb)->when = tcp_time_stamp;
1925
1926	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
1927
1928	if (err == 0) {
1929		/* Update global TCP statistics. */
1930		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
1931
1932		tp->total_retrans++;
1933
1934 #if FASTRETRANS_DEBUG > 0
1935		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
1936			if (net_ratelimit())
1937				printk(KERN_DEBUG "retrans_out leaked.\n");
1938		}
1939 #endif
1940		if (!tp->retrans_out)
1941			tp->lost_retrans_low = tp->snd_nxt;
1942		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
1943		tp->retrans_out += tcp_skb_pcount(skb);
1944
1945		/* Save stamp of the first retransmit. */
1946		if (!tp->retrans_stamp)
1947			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
1948
1949		tp->undo_retrans++;
1950
1951		/* snd_nxt is stored to detect loss of retransmitted segment,
1952		 * see tcp_input.c tcp_sacktag_write_queue().
1953		 */
1954		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
1955	}
1956	return err;
1957 }
1958
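/* Editor's note: the "1/4 reserved for copying overhead" admission test
 * above, restated as a hypothetical standalone predicate. Worked
 * numbers: with 100000 bytes queued and a 256000 byte send buffer, the
 * retransmit is allowed while total allocations stay at or below
 * min(125000, 256000) = 125000 bytes. Not kernel code.
 */
static int example_retrans_mem_ok(unsigned int wmem_alloc,
				  unsigned int wmem_queued,
				  unsigned int sndbuf)
{
	unsigned int limit = wmem_queued + (wmem_queued >> 2);	/* +25% */

	if (limit > sndbuf)
		limit = sndbuf;
	return wmem_alloc <= limit;
}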
1959 static int tcp_can_forward_retransmit(struct sock *sk)
1960 {
1961	const struct inet_connection_sock *icsk = inet_csk(sk);
1962	struct tcp_sock *tp = tcp_sk(sk);
1963
1964	/* Forward retransmissions are possible only during Recovery. */
1965	if (icsk->icsk_ca_state != TCP_CA_Recovery)
1966		return 0;
1967
1968	/* No forward retransmissions in Reno are possible. */
1969	if (tcp_is_reno(tp))
1970		return 0;
1971
1972	/* Yeah, we have to make a difficult choice between forward transmission
1973	 * and retransmission... Both ways have their merits...
1974	 *
1975	 * For now we do not retransmit anything, while we have some new
1976	 * segments to send. In the other cases, follow rule 3 for
1977	 * NextSeg() specified in RFC3517.
1978	 */
1979
1980	if (tcp_may_send_now(sk))
1981		return 0;
1982
1983	return 1;
1984 }
1985
1986 /* This gets called after a retransmit timeout, and the initially
1987  * retransmitted data is acknowledged. It tries to continue
1988  * resending the rest of the retransmit queue, until either
1989  * we've sent it all or the congestion window limit is reached.
1990  * If doing SACK, the first ACK which comes back for a timeout
1991  * based retransmit packet might feed us FACK information again.
1992  * If so, we use it to avoid unnecessary retransmissions.
1993  */
1994 void tcp_xmit_retransmit_queue(struct sock *sk)
1995 {
1996	const struct inet_connection_sock *icsk = inet_csk(sk);
1997	struct tcp_sock *tp = tcp_sk(sk);
1998	struct sk_buff *skb;
1999	struct sk_buff *hole = NULL;
2000	u32 last_lost;
2001	int mib_idx;
2002	int fwd_rexmitting = 0;
2003
2004	if (!tp->lost_out)
2005		tp->retransmit_high = tp->snd_una;
2006
2007	if (tp->retransmit_skb_hint) {
2008		skb = tp->retransmit_skb_hint;
2009		last_lost = TCP_SKB_CB(skb)->end_seq;
2010		if (after(last_lost, tp->retransmit_high))
2011			last_lost = tp->retransmit_high;
2012	} else {
2013		skb = tcp_write_queue_head(sk);
2014		last_lost = tp->snd_una;
2015	}
2016
2017	tcp_for_write_queue_from(skb, sk) {
2018		__u8 sacked = TCP_SKB_CB(skb)->sacked;
2019
2020		if (skb == tcp_send_head(sk))
2021			break;
2022		/* we could do better than to assign each time */
2023		if (hole == NULL)
2024			tp->retransmit_skb_hint = skb;
2025
2026		/* Assume this retransmit will generate
2027		 * only one packet for congestion window
2028		 * calculation purposes. This works because
2029		 * tcp_retransmit_skb() will chop up the
2030		 * packet to be MSS sized and all the
2031		 * packet counting works out.
2032		 */
2033		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2034			return;
2035
2036		if (fwd_rexmitting) {
2037 begin_fwd:
2038			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2039				break;
2040			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2041
2042		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2043			tp->retransmit_high = last_lost;
2044			if (!tcp_can_forward_retransmit(sk))
2045				break;
2046			/* Backtrack if necessary to a non-L'ed skb */
2047			if (hole != NULL) {
2048				skb = hole;
2049				hole = NULL;
2050			}
2051			fwd_rexmitting = 1;
2052			goto begin_fwd;
2053
2054		} else if (!(sacked & TCPCB_LOST)) {
2055			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
2056				hole = skb;
2057			continue;
2058
2059		} else {
2060			last_lost = TCP_SKB_CB(skb)->end_seq;
2061			if (icsk->icsk_ca_state != TCP_CA_Loss)
2062				mib_idx = LINUX_MIB_TCPFASTRETRANS;
2063			else
2064				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2065		}
2066
2067		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2068			continue;
2069
2070		if (tcp_retransmit_skb(sk, skb))
2071			return;
2072		NET_INC_STATS_BH(sock_net(sk), mib_idx);
2073
2074		if (skb == tcp_write_queue_head(sk))
2075			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2076						  inet_csk(sk)->icsk_rto,
2077						  TCP_RTO_MAX);
2078	}
2079 }
2080
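/* Editor's note: a hypothetical classifier summarizing, with some
 * simplification, what the walk in tcp_xmit_retransmit_queue() above
 * does with each segment once it is within the congestion window.
 * Illustrative only, not kernel code.
 */
enum example_action {
	EXAMPLE_FORWARD,	/* past retransmit_high: forward-retransmit mode */
	EXAMPLE_HOLE,		/* not marked lost: remember as backtrack point */
	EXAMPLE_SKIP,		/* already SACKed or already retransmitted */
	EXAMPLE_RETRANSMIT	/* marked lost and not yet handled: resend */
};

static enum example_action example_classify(int lost, int past_high,
					    int sacked, int retransmitted)
{
	if (past_high)
		return EXAMPLE_FORWARD;
	if (!lost)
		return EXAMPLE_HOLE;
	if (sacked || retransmitted)
		return EXAMPLE_SKIP;
	return EXAMPLE_RETRANSMIT;
}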
2081 /* Send a FIN. The caller locks the socket for us. This cannot be
2082  * allowed to fail queueing a FIN frame under any circumstances.
2083  */
2084 void tcp_send_fin(struct sock *sk)
2085 {
2086	struct tcp_sock *tp = tcp_sk(sk);
2087	struct sk_buff *skb = tcp_write_queue_tail(sk);
2088	int mss_now;
2089
2090	/* Optimization: tack on the FIN if we have a queue of
2091	 * unsent frames. But be careful about outgoing SACKs
2092	 * and IP options.
2093	 */
2094	mss_now = tcp_current_mss(sk);
2095
2096	if (tcp_send_head(sk) != NULL) {
2097		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
2098		TCP_SKB_CB(skb)->end_seq++;
2099		tp->write_seq++;
2100	} else {
2101		/* Socket is locked, keep trying until memory is available. */
2102		for (;;) {
2103			skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
2104			if (skb)
2105				break;
2106			yield();
2107		}
2108
2109		/* Reserve space for headers and prepare control bits. */
2110		skb_reserve(skb, MAX_TCP_HEADER);
2111		/* FIN eats a sequence byte; write_seq is advanced by tcp_queue_skb(). */
2112		tcp_init_nondata_skb(skb, tp->write_seq,
2113				     TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
2114		tcp_queue_skb(sk, skb);
2115	}
2116	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
2117 }
2118
2119 /* We get here when a process closes a file descriptor (either due to
2120  * an explicit close() or as a byproduct of exit()'ing) and there
2121  * was unread data in the receive queue. This behavior is recommended
2122  * by RFC 2525, section 2.17. -DaveM
2123  */
2124 void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2125 {
2126	struct sk_buff *skb;
2127
2128	/* NOTE: No TCP options attached and we never retransmit this. */
2129	skb = alloc_skb(MAX_TCP_HEADER, priority);
2130	if (!skb) {
2131		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2132		return;
2133	}
2134
2135	/* Reserve space for headers and prepare control bits. */
2136	skb_reserve(skb, MAX_TCP_HEADER);
2137	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2138			     TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
2139	/* Send it off. */
2140	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2141	if (tcp_transmit_skb(sk, skb, 0, priority))
2142		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2143
2144	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
2145 }
2146
2147 /* WARNING: This routine must only be called when we have already sent
2148  * a SYN packet that crossed the incoming SYN that caused this routine
2149  * to get called. If this assumption fails then the initial rcv_wnd
2150  * and rcv_wscale values will not be correct.
2151  */
2152 int tcp_send_synack(struct sock *sk)
2153 {
2154	struct sk_buff *skb;
2155
2156	skb = tcp_write_queue_head(sk);
2157	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) {
2158		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
2159		return -EFAULT;
2160	}
2161	if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) {
2162		if (skb_cloned(skb)) {
2163			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2164			if (nskb == NULL)
2165				return -ENOMEM;
2166			tcp_unlink_write_queue(skb, sk);
2167			skb_header_release(nskb);
2168			__tcp_add_write_queue_head(sk, nskb);
2169			sk_wmem_free_skb(sk, skb);
2170			sk->sk_wmem_queued += nskb->truesize;
2171			sk_mem_charge(sk, nskb->truesize);
2172			skb = nskb;
2173		}
2174
2175		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK;
2176		TCP_ECN_send_synack(tcp_sk(sk), skb);
2177	}
2178	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2179	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2180 }
2181
2182 /*
2183  * Prepare a SYN-ACK.
2184  */
2185 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2186				struct request_sock *req)
2187 {
2188	struct inet_request_sock *ireq = inet_rsk(req);
2189	struct tcp_sock *tp = tcp_sk(sk);
2190	struct tcphdr *th;
2191	int tcp_header_size;
2192	struct tcp_out_options opts;
2193	struct sk_buff *skb;
2194	struct tcp_md5sig_key *md5;
2195	__u8 *md5_hash_location;
2196	int mss;
2197
2198	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2199	if (skb == NULL)
2200		return NULL;
2201
2202	/* Reserve space for headers. */
2203	skb_reserve(skb, MAX_TCP_HEADER);
2204
2205	skb->dst = dst_clone(dst);
2206
2207	mss = dst_metric(dst, RTAX_ADVMSS);
2208	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2209		mss = tp->rx_opt.user_mss;
2210
2211	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2212		__u8 rcv_wscale;
2213		/* Set this up on the first call only */
2214		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2215		/* tcp_full_space because it is guaranteed to be the first packet */
2216		tcp_select_initial_window(tcp_full_space(sk),
2217					  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2218					  &req->rcv_wnd,
2219					  &req->window_clamp,
2220					  ireq->wscale_ok,
2221					  &rcv_wscale);
2222		ireq->rcv_wscale = rcv_wscale;
2223	}
2224
2225	memset(&opts, 0, sizeof(opts));
2226 #ifdef CONFIG_SYN_COOKIES
2227	if (unlikely(req->cookie_ts))
2228		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2229	else
2230 #endif
2231	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2232	tcp_header_size = tcp_synack_options(sk, req, mss,
2233					     skb, &opts, &md5) +
2234			  sizeof(struct tcphdr);
2235
2236	skb_push(skb, tcp_header_size);
2237	skb_reset_transport_header(skb);
2238
2239	th = tcp_hdr(skb);
2240	memset(th, 0, sizeof(struct tcphdr));
2241	th->syn = 1;
2242	th->ack = 1;
2243	TCP_ECN_make_synack(req, th);
2244	th->source = ireq->loc_port;
2245	th->dest = ireq->rmt_port;
2246	/* Setting of flags is superfluous here for callers (and ECE is
2247	 * not even correctly set)
2248	 */
2249	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2250			     TCPCB_FLAG_SYN | TCPCB_FLAG_ACK);
2251	th->seq = htonl(TCP_SKB_CB(skb)->seq);
2252	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2253
2254	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2255	th->window = htons(min(req->rcv_wnd, 65535U));
2256	tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
2257	th->doff = (tcp_header_size >> 2);
2258	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
2259
2260 #ifdef CONFIG_TCP_MD5SIG
2261	/* Okay, we have all we need - do the md5 hash if needed */
2262	if (md5) {
2263		tp->af_specific->calc_md5_hash(md5_hash_location,
2264					       md5, NULL, req, skb);
2265	}
2266 #endif
2267
2268	return skb;
2269 }
2270
2271 /*
2272  * Do all connect socket setups that can be done AF independent.
2273  */
2274 static void tcp_connect_init(struct sock *sk)
2275 {
2276	struct dst_entry *dst = __sk_dst_get(sk);
2277	struct tcp_sock *tp = tcp_sk(sk);
2278	__u8 rcv_wscale;
2279
2280	/* We'll fix this up when we get a response from the other end.
2281	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2282	 */
2283	tp->tcp_header_len = sizeof(struct tcphdr) +
2284		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2285
2286 #ifdef CONFIG_TCP_MD5SIG
2287	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2288		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2289 #endif
2290
2291	/* If the user gave us TCP_MAXSEG, record it as the clamp. */
2292	if (tp->rx_opt.user_mss)
2293		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2294	tp->max_window = 0;
2295	tcp_mtup_init(sk);
2296	tcp_sync_mss(sk, dst_mtu(dst));
2297
2298	if (!tp->window_clamp)
2299		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2300	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
2301	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2302		tp->advmss = tp->rx_opt.user_mss;
2303
2304	tcp_initialize_rcv_mss(sk);
2305
2306	tcp_select_initial_window(tcp_full_space(sk),
2307				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2308				  &tp->rcv_wnd,
2309				  &tp->window_clamp,
2310				  sysctl_tcp_window_scaling,
2311				  &rcv_wscale);
2312
2313	tp->rx_opt.rcv_wscale = rcv_wscale;
2314	tp->rcv_ssthresh = tp->rcv_wnd;
2315
2316	sk->sk_err = 0;
2317	sock_reset_flag(sk, SOCK_DONE);
2318	tp->snd_wnd = 0;
2319	tcp_init_wl(tp, 0);
2320	tp->snd_una = tp->write_seq;
2321	tp->snd_sml = tp->write_seq;
2322	tp->snd_up = tp->write_seq;
2323	tp->rcv_nxt = 0;
2324	tp->rcv_wup = 0;
2325	tp->copied_seq = 0;
2326
2327	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2328	inet_csk(sk)->icsk_retransmits = 0;
2329	tcp_clear_retrans(tp);
2330 }
2331
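/* Editor's note: the MSS clamping pattern used by tcp_make_synack()
 * and tcp_connect_init() above, as a hypothetical standalone helper:
 * the route's advertised-MSS metric is used unless the application set
 * a smaller TCP_MAXSEG. E.g. dst_advmss == 1460 with user_mss == 1200
 * yields 1200. Not kernel code.
 */
static unsigned int example_effective_advmss(unsigned int dst_advmss,
					     unsigned int user_mss)
{
	if (user_mss && user_mss < dst_advmss)
		return user_mss;	/* the user's clamp wins when smaller */
	return dst_advmss;
}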
2332 /*
2333  * Build a SYN and send it off.
2334  */
2335 int tcp_connect(struct sock *sk)
2336 {
2337	struct tcp_sock *tp = tcp_sk(sk);
2338	struct sk_buff *buff;
2339
2340	tcp_connect_init(sk);
2341
2342	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2343	if (unlikely(buff == NULL))
2344		return -ENOBUFS;
2345
2346	/* Reserve space for headers. */
2347	skb_reserve(buff, MAX_TCP_HEADER);
2348
2349	tp->snd_nxt = tp->write_seq;
2350	tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN);
2351	TCP_ECN_send_syn(sk, buff);
2352
2353	/* Send it off. */
2354	TCP_SKB_CB(buff)->when = tcp_time_stamp;
2355	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
2356	skb_header_release(buff);
2357	__tcp_add_write_queue_tail(sk, buff);
2358	sk->sk_wmem_queued += buff->truesize;
2359	sk_mem_charge(sk, buff->truesize);
2360	tp->packets_out += tcp_skb_pcount(buff);
2361	tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
2362
2363	/* We change tp->snd_nxt after the tcp_transmit_skb() call
2364	 * in order to make this packet get counted in tcpOutSegs.
2365	 */
2366	tp->snd_nxt = tp->write_seq;
2367	tp->pushed_seq = tp->write_seq;
2368	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
2369
2370	/* Timer for repeating the SYN until an answer. */
2371	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2372				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2373	return 0;
2374 }
2375
2376 /* Send out a delayed ack; the caller does the policy checking
2377  * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
2378  * for details.
2379  */
2380 void tcp_send_delayed_ack(struct sock *sk)
2381 {
2382	struct inet_connection_sock *icsk = inet_csk(sk);
2383	int ato = icsk->icsk_ack.ato;
2384	unsigned long timeout;
2385
2386	if (ato > TCP_DELACK_MIN) {
2387		const struct tcp_sock *tp = tcp_sk(sk);
2388		int max_ato = HZ / 2;
2389
2390		if (icsk->icsk_ack.pingpong ||
2391		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
2392			max_ato = TCP_DELACK_MAX;
2393
2394		/* Slow path, intersegment interval is "high". */
2395
2396		/* If some rtt estimate is known, use it to bound the delayed ack.
2397		 * Do not use inet_csk(sk)->icsk_rto here; use the results of rtt
2398		 * measurements directly.
2399		 */
2400		if (tp->srtt) {
2401			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
2402
2403			if (rtt < max_ato)
2404				max_ato = rtt;
2405		}
2406
2407		ato = min(ato, max_ato);
2408	}
2409
2410	/* Stay within the limit we were given */
2411	timeout = jiffies + ato;
2412
2413	/* Use the new timeout only if there wasn't an older one already. */
2414	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
2415		/* If the delack timer was blocked or is about to expire,
2416		 * send the ACK now.
2417		 */
2418		if (icsk->icsk_ack.blocked ||
2419		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
2420			tcp_send_ack(sk);
2421			return;
2422		}
2423
2424		if (!time_before(timeout, icsk->icsk_ack.timeout))
2425			timeout = icsk->icsk_ack.timeout;
2426	}
2427	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2428	icsk->icsk_ack.timeout = timeout;
2429	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
2430 }
2431
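/* Editor's note: a hypothetical restatement of the delayed-ACK clamp in
 * tcp_send_delayed_ack() above: the delay is bounded by max_ato (HZ/2,
 * or TCP_DELACK_MAX in pingpong mode) and additionally by the smoothed
 * RTT when one is known, so an ACK is not held much longer than one
 * round trip. Values are in jiffies; illustrative only, not kernel code.
 */
static unsigned long example_delack_expiry(unsigned long now, int ato,
					   int max_ato, int rtt)
{
	if (rtt && rtt < max_ato)
		max_ato = rtt;		/* bound by the measured RTT */
	if (ato > max_ato)
		ato = max_ato;
	return now + ato;		/* absolute expiry time */
}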
2432 /* This routine sends an ack and also updates the window. */
2433 void tcp_send_ack(struct sock *sk)
2434 {
2435	struct sk_buff *buff;
2436
2437	/* If we have been reset, we may not send again. */
2438	if (sk->sk_state == TCP_CLOSE)
2439		return;
2440
2441	/* We are not putting this on the write queue, so
2442	 * tcp_transmit_skb() will set the ownership to this
2443	 * sock.
2444	 */
2445	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2446	if (buff == NULL) {
2447		inet_csk_schedule_ack(sk);
2448		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
2449		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
2450					  TCP_DELACK_MAX, TCP_RTO_MAX);
2451		return;
2452	}
2453
2454	/* Reserve space for headers and prepare control bits. */
2455	skb_reserve(buff, MAX_TCP_HEADER);
2456	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK);
2457
2458	/* Send it off; this clears delayed acks for us. */
2459	TCP_SKB_CB(buff)->when = tcp_time_stamp;
2460	tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
2461 }
2462
2463 /* This routine sends a packet with an out-of-date sequence
2464  * number. It assumes the other end will try to ack it.
2465  *
2466  * Question: what should we do in urgent mode?
2467  * 4.4BSD forces sending a single byte of data. We cannot send
2468  * out-of-window data, because we have SND.NXT==SND.MAX...
2469  *
2470  * Current solution: send TWO zero-length segments in urgent mode:
2471  * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and the
2472  * other out-of-date, with SND.UNA-1, to probe the window.
2473  */
2474 static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2475 {
2476	struct tcp_sock *tp = tcp_sk(sk);
2477	struct sk_buff *skb;
2478
2479	/* We don't queue it, tcp_transmit_skb() sets ownership. */
2480	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2481	if (skb == NULL)
2482		return -1;
2483
2484	/* Reserve space for headers and set control bits. */
2485	skb_reserve(skb, MAX_TCP_HEADER);
2486	/* Use a previous sequence. This should cause the other
2487	 * end to send an ack. Don't queue or clone the SKB, just
2488	 * send it.
2489	 */
2490	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK);
2491	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2492	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2493 }
2494
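/* Editor's note: the sequence-number choice in tcp_xmit_probe_skb()
 * above, as a hypothetical helper. A plain window probe uses
 * SND.UNA - 1: that byte was already ACKed, so the peer cannot accept
 * the segment and must answer with a pure ACK carrying its current
 * window. The urgent-mode probe uses SND.UNA itself so that the
 * urgent pointer is delivered. Illustrative only, not kernel code.
 */
static unsigned int example_probe_seq(unsigned int snd_una, int urgent)
{
	return urgent ? snd_una : snd_una - 1;
}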
2489 */ 2490 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK); 2491 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2492 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 2493 } 2494 2495 int tcp_write_wakeup(struct sock *sk) 2496 { 2497 struct tcp_sock *tp = tcp_sk(sk); 2498 struct sk_buff *skb; 2499 2500 if (sk->sk_state == TCP_CLOSE) 2501 return -1; 2502 2503 if ((skb = tcp_send_head(sk)) != NULL && 2504 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 2505 int err; 2506 unsigned int mss = tcp_current_mss(sk); 2507 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 2508 2509 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 2510 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 2511 2512 /* We are probing the opening of a window 2513 * but the window size is != 0 2514 * must have been a result SWS avoidance ( sender ) 2515 */ 2516 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 2517 skb->len > mss) { 2518 seg_size = min(seg_size, mss); 2519 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2520 if (tcp_fragment(sk, skb, seg_size, mss)) 2521 return -1; 2522 } else if (!tcp_skb_pcount(skb)) 2523 tcp_set_skb_tso_segs(sk, skb, mss); 2524 2525 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2526 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2527 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2528 if (!err) 2529 tcp_event_new_data_sent(sk, skb); 2530 return err; 2531 } else { 2532 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 2533 tcp_xmit_probe_skb(sk, 1); 2534 return tcp_xmit_probe_skb(sk, 0); 2535 } 2536 } 2537 2538 /* A window probe timeout has occurred. If window is not closed send 2539 * a partial packet else a zero probe. 2540 */ 2541 void tcp_send_probe0(struct sock *sk) 2542 { 2543 struct inet_connection_sock *icsk = inet_csk(sk); 2544 struct tcp_sock *tp = tcp_sk(sk); 2545 int err; 2546 2547 err = tcp_write_wakeup(sk); 2548 2549 if (tp->packets_out || !tcp_send_head(sk)) { 2550 /* Cancel probe timer, if it is not required. */ 2551 icsk->icsk_probes_out = 0; 2552 icsk->icsk_backoff = 0; 2553 return; 2554 } 2555 2556 if (err <= 0) { 2557 if (icsk->icsk_backoff < sysctl_tcp_retries2) 2558 icsk->icsk_backoff++; 2559 icsk->icsk_probes_out++; 2560 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2561 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 2562 TCP_RTO_MAX); 2563 } else { 2564 /* If packet was not sent due to local congestion, 2565 * do not backoff and do not remember icsk_probes_out. 2566 * Let local senders to fight for local resources. 2567 * 2568 * Use accumulated backoff yet. 2569 */ 2570 if (!icsk->icsk_probes_out) 2571 icsk->icsk_probes_out = 1; 2572 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2573 min(icsk->icsk_rto << icsk->icsk_backoff, 2574 TCP_RESOURCE_PROBE_INTERVAL), 2575 TCP_RTO_MAX); 2576 } 2577 } 2578 2579 EXPORT_SYMBOL(tcp_select_initial_window); 2580 EXPORT_SYMBOL(tcp_connect); 2581 EXPORT_SYMBOL(tcp_make_synack); 2582 EXPORT_SYMBOL(tcp_simple_retransmit); 2583 EXPORT_SYMBOL(tcp_sync_mss); 2584 EXPORT_SYMBOL(tcp_mtup_init); 2585