1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Authors: Ross Biro 9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 10 * Mark Evans, <evansmp@uhura.aston.ac.uk> 11 * Corey Minyard <wf-rch!minyard@relay.EU.net> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 14 * Linus Torvalds, <torvalds@cs.helsinki.fi> 15 * Alan Cox, <gw4pts@gw4pts.ampr.org> 16 * Matthew Dillon, <dillon@apollo.west.oic.com> 17 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 18 * Jorge Cwik, <jorge@laser.satlink.net> 19 */ 20 21 /* 22 * Changes: Pedro Roque : Retransmit queue handled by TCP. 23 * : Fragmentation on mtu decrease 24 * : Segment collapse on retransmit 25 * : AF independence 26 * 27 * Linus Torvalds : send_delayed_ack 28 * David S. Miller : Charge memory using the right skb 29 * during syn/ack processing. 30 * David S. Miller : Output engine completely rewritten. 31 * Andrea Arcangeli: SYNACK carry ts_recent in tsecr. 32 * Cacophonix Gaul : draft-minshall-nagle-01 33 * J Hadi Salim : ECN support 34 * 35 */ 36 37 #include <net/tcp.h> 38 39 #include <linux/compiler.h> 40 #include <linux/module.h> 41 42 /* People can turn this off for buggy TCP's found in printers etc. */ 43 int sysctl_tcp_retrans_collapse __read_mostly = 1; 44 45 /* People can turn this on to work with those rare, broken TCPs that 46 * interpret the window field as a signed quantity. 47 */ 48 int sysctl_tcp_workaround_signed_windows __read_mostly = 0; 49 50 /* This limits the percentage of the congestion window which we 51 * will allow a single TSO frame to consume. Building TSO frames 52 * which are too large can cause TCP streams to be bursty. 53 */ 54 int sysctl_tcp_tso_win_divisor __read_mostly = 3; 55 56 int sysctl_tcp_mtu_probing __read_mostly = 0; 57 int sysctl_tcp_base_mss __read_mostly = 512; 58 59 /* By default, RFC2861 behavior. */ 60 int sysctl_tcp_slow_start_after_idle __read_mostly = 1; 61 62 /* Account for new data that has been sent to the network. */ 63 static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) 64 { 65 struct tcp_sock *tp = tcp_sk(sk); 66 unsigned int prior_packets = tp->packets_out; 67 68 tcp_advance_send_head(sk, skb); 69 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; 70 71 /* Don't override Nagle indefinately with F-RTO */ 72 if (tp->frto_counter == 2) 73 tp->frto_counter = 3; 74 75 tp->packets_out += tcp_skb_pcount(skb); 76 if (!prior_packets) 77 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 78 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 79 } 80 81 /* SND.NXT, if window was not shrunk. 82 * If window has been shrunk, what should we make? It is not clear at all. 83 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-( 84 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already 85 * invalid. OK, let's make this for now: 86 */ 87 static inline __u32 tcp_acceptable_seq(struct sock *sk) 88 { 89 struct tcp_sock *tp = tcp_sk(sk); 90 91 if (!before(tcp_wnd_end(tp), tp->snd_nxt)) 92 return tp->snd_nxt; 93 else 94 return tcp_wnd_end(tp); 95 } 96 97 /* Calculate mss to advertise in SYN segment. 98 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that: 99 * 100 * 1. It is independent of path mtu. 101 * 2. Ideally, it is maximal possible segment size i.e. 65535-40. 102 * 3. 
For IPv4 it is reasonable to calculate it from maximal MTU of 103 * attached devices, because some buggy hosts are confused by 104 * large MSS. 105 * 4. We do not make 3, we advertise MSS, calculated from first 106 * hop device mtu, but allow to raise it to ip_rt_min_advmss. 107 * This may be overridden via information stored in routing table. 108 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible, 109 * probably even Jumbo". 110 */ 111 static __u16 tcp_advertise_mss(struct sock *sk) 112 { 113 struct tcp_sock *tp = tcp_sk(sk); 114 struct dst_entry *dst = __sk_dst_get(sk); 115 int mss = tp->advmss; 116 117 if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) { 118 mss = dst_metric(dst, RTAX_ADVMSS); 119 tp->advmss = mss; 120 } 121 122 return (__u16)mss; 123 } 124 125 /* RFC2861. Reset CWND after idle period longer RTO to "restart window". 126 * This is the first part of cwnd validation mechanism. */ 127 static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst) 128 { 129 struct tcp_sock *tp = tcp_sk(sk); 130 s32 delta = tcp_time_stamp - tp->lsndtime; 131 u32 restart_cwnd = tcp_init_cwnd(tp, dst); 132 u32 cwnd = tp->snd_cwnd; 133 134 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); 135 136 tp->snd_ssthresh = tcp_current_ssthresh(sk); 137 restart_cwnd = min(restart_cwnd, cwnd); 138 139 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) 140 cwnd >>= 1; 141 tp->snd_cwnd = max(cwnd, restart_cwnd); 142 tp->snd_cwnd_stamp = tcp_time_stamp; 143 tp->snd_cwnd_used = 0; 144 } 145 146 /* Congestion state accounting after a packet has been sent. */ 147 static void tcp_event_data_sent(struct tcp_sock *tp, 148 struct sk_buff *skb, struct sock *sk) 149 { 150 struct inet_connection_sock *icsk = inet_csk(sk); 151 const u32 now = tcp_time_stamp; 152 153 if (sysctl_tcp_slow_start_after_idle && 154 (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)) 155 tcp_cwnd_restart(sk, __sk_dst_get(sk)); 156 157 tp->lsndtime = now; 158 159 /* If it is a reply for ato after last received 160 * packet, enter pingpong mode. 161 */ 162 if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) 163 icsk->icsk_ack.pingpong = 1; 164 } 165 166 /* Account for an ACK we sent. */ 167 static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) 168 { 169 tcp_dec_quickack_mode(sk, pkts); 170 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 171 } 172 173 /* Determine a window scaling and initial window to offer. 174 * Based on the assumption that the given amount of space 175 * will be offered. Store the results in the tp structure. 176 * NOTE: for smooth operation initial space offering should 177 * be a multiple of mss if possible. We assume here that mss >= 1. 178 * This MUST be enforced by all callers. 179 */ 180 void tcp_select_initial_window(int __space, __u32 mss, 181 __u32 *rcv_wnd, __u32 *window_clamp, 182 int wscale_ok, __u8 *rcv_wscale) 183 { 184 unsigned int space = (__space < 0 ? 0 : __space); 185 186 /* If no clamp set the clamp to the max possible scaled window */ 187 if (*window_clamp == 0) 188 (*window_clamp) = (65535 << 14); 189 space = min(*window_clamp, space); 190 191 /* Quantize space offering to a multiple of mss if possible. */ 192 if (space > mss) 193 space = (space / mss) * mss; 194 195 /* NOTE: offering an initial window larger than 32767 196 * will break some buggy TCP stacks. 
If the admin tells us 197 * it is likely we could be speaking with such a buggy stack 198 * we will truncate our initial window offering to 32K-1 199 * unless the remote has sent us a window scaling option, 200 * which we interpret as a sign the remote TCP is not 201 * misinterpreting the window field as a signed quantity. 202 */ 203 if (sysctl_tcp_workaround_signed_windows) 204 (*rcv_wnd) = min(space, MAX_TCP_WINDOW); 205 else 206 (*rcv_wnd) = space; 207 208 (*rcv_wscale) = 0; 209 if (wscale_ok) { 210 /* Set window scaling on max possible window 211 * See RFC1323 for an explanation of the limit to 14 212 */ 213 space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); 214 space = min_t(u32, space, *window_clamp); 215 while (space > 65535 && (*rcv_wscale) < 14) { 216 space >>= 1; 217 (*rcv_wscale)++; 218 } 219 } 220 221 /* Set initial window to value enough for senders, 222 * following RFC2414. Senders, not following this RFC, 223 * will be satisfied with 2. 224 */ 225 if (mss > (1 << *rcv_wscale)) { 226 int init_cwnd = 4; 227 if (mss > 1460 * 3) 228 init_cwnd = 2; 229 else if (mss > 1460) 230 init_cwnd = 3; 231 if (*rcv_wnd > init_cwnd * mss) 232 *rcv_wnd = init_cwnd * mss; 233 } 234 235 /* Set the clamp no higher than max representable value */ 236 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp); 237 } 238 239 /* Chose a new window to advertise, update state in tcp_sock for the 240 * socket, and return result with RFC1323 scaling applied. The return 241 * value can be stuffed directly into th->window for an outgoing 242 * frame. 243 */ 244 static u16 tcp_select_window(struct sock *sk) 245 { 246 struct tcp_sock *tp = tcp_sk(sk); 247 u32 cur_win = tcp_receive_window(tp); 248 u32 new_win = __tcp_select_window(sk); 249 250 /* Never shrink the offered window */ 251 if (new_win < cur_win) { 252 /* Danger Will Robinson! 253 * Don't update rcv_wup/rcv_wnd here or else 254 * we will not be able to advertise a zero 255 * window in time. --DaveM 256 * 257 * Relax Will Robinson. 258 */ 259 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); 260 } 261 tp->rcv_wnd = new_win; 262 tp->rcv_wup = tp->rcv_nxt; 263 264 /* Make sure we do not exceed the maximum possible 265 * scaled window. 266 */ 267 if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows) 268 new_win = min(new_win, MAX_TCP_WINDOW); 269 else 270 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); 271 272 /* RFC1323 scaling applied */ 273 new_win >>= tp->rx_opt.rcv_wscale; 274 275 /* If we advertise zero window, disable fast path. */ 276 if (new_win == 0) 277 tp->pred_flags = 0; 278 279 return new_win; 280 } 281 282 /* Packet ECN state for a SYN-ACK */ 283 static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb) 284 { 285 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR; 286 if (!(tp->ecn_flags & TCP_ECN_OK)) 287 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; 288 } 289 290 /* Packet ECN state for a SYN. */ 291 static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) 292 { 293 struct tcp_sock *tp = tcp_sk(sk); 294 295 tp->ecn_flags = 0; 296 if (sysctl_tcp_ecn == 1) { 297 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR; 298 tp->ecn_flags = TCP_ECN_OK; 299 } 300 } 301 302 static __inline__ void 303 TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) 304 { 305 if (inet_rsk(req)->ecn_ok) 306 th->ece = 1; 307 } 308 309 /* Set up ECN state for a packet on a ESTABLISHED socket that is about to 310 * be sent. 
311 */ 312 static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, 313 int tcp_header_len) 314 { 315 struct tcp_sock *tp = tcp_sk(sk); 316 317 if (tp->ecn_flags & TCP_ECN_OK) { 318 /* Not-retransmitted data segment: set ECT and inject CWR. */ 319 if (skb->len != tcp_header_len && 320 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { 321 INET_ECN_xmit(sk); 322 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { 323 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; 324 tcp_hdr(skb)->cwr = 1; 325 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 326 } 327 } else { 328 /* ACK or retransmitted segment: clear ECT|CE */ 329 INET_ECN_dontxmit(sk); 330 } 331 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) 332 tcp_hdr(skb)->ece = 1; 333 } 334 } 335 336 /* Constructs common control bits of non-data skb. If SYN/FIN is present, 337 * auto increment end seqno. 338 */ 339 static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) 340 { 341 skb->csum = 0; 342 343 TCP_SKB_CB(skb)->flags = flags; 344 TCP_SKB_CB(skb)->sacked = 0; 345 346 skb_shinfo(skb)->gso_segs = 1; 347 skb_shinfo(skb)->gso_size = 0; 348 skb_shinfo(skb)->gso_type = 0; 349 350 TCP_SKB_CB(skb)->seq = seq; 351 if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN)) 352 seq++; 353 TCP_SKB_CB(skb)->end_seq = seq; 354 } 355 356 static inline int tcp_urg_mode(const struct tcp_sock *tp) 357 { 358 return tp->snd_una != tp->snd_up; 359 } 360 361 #define OPTION_SACK_ADVERTISE (1 << 0) 362 #define OPTION_TS (1 << 1) 363 #define OPTION_MD5 (1 << 2) 364 365 struct tcp_out_options { 366 u8 options; /* bit field of OPTION_* */ 367 u8 ws; /* window scale, 0 to disable */ 368 u8 num_sack_blocks; /* number of SACK blocks to include */ 369 u16 mss; /* 0 to disable */ 370 __u32 tsval, tsecr; /* need to include OPTION_TS */ 371 }; 372 373 /* Write previously computed TCP options to the packet. 374 * 375 * Beware: Something in the Internet is very sensitive to the ordering of 376 * TCP options, we learned this through the hard way, so be careful here. 377 * Luckily we can at least blame others for their non-compliance but from 378 * inter-operatibility perspective it seems that we're somewhat stuck with 379 * the ordering which we have been using if we want to keep working with 380 * those broken things (not that it currently hurts anybody as there isn't 381 * particular reason why the ordering would need to be changed). 382 * 383 * At least SACK_PERM as the first option is known to lead to a disaster 384 * (but it may well be that other scenarios fail similarly). 
385 */ 386 static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, 387 const struct tcp_out_options *opts, 388 __u8 **md5_hash) { 389 if (unlikely(OPTION_MD5 & opts->options)) { 390 *ptr++ = htonl((TCPOPT_NOP << 24) | 391 (TCPOPT_NOP << 16) | 392 (TCPOPT_MD5SIG << 8) | 393 TCPOLEN_MD5SIG); 394 *md5_hash = (__u8 *)ptr; 395 ptr += 4; 396 } else { 397 *md5_hash = NULL; 398 } 399 400 if (unlikely(opts->mss)) { 401 *ptr++ = htonl((TCPOPT_MSS << 24) | 402 (TCPOLEN_MSS << 16) | 403 opts->mss); 404 } 405 406 if (likely(OPTION_TS & opts->options)) { 407 if (unlikely(OPTION_SACK_ADVERTISE & opts->options)) { 408 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) | 409 (TCPOLEN_SACK_PERM << 16) | 410 (TCPOPT_TIMESTAMP << 8) | 411 TCPOLEN_TIMESTAMP); 412 } else { 413 *ptr++ = htonl((TCPOPT_NOP << 24) | 414 (TCPOPT_NOP << 16) | 415 (TCPOPT_TIMESTAMP << 8) | 416 TCPOLEN_TIMESTAMP); 417 } 418 *ptr++ = htonl(opts->tsval); 419 *ptr++ = htonl(opts->tsecr); 420 } 421 422 if (unlikely(OPTION_SACK_ADVERTISE & opts->options && 423 !(OPTION_TS & opts->options))) { 424 *ptr++ = htonl((TCPOPT_NOP << 24) | 425 (TCPOPT_NOP << 16) | 426 (TCPOPT_SACK_PERM << 8) | 427 TCPOLEN_SACK_PERM); 428 } 429 430 if (unlikely(opts->ws)) { 431 *ptr++ = htonl((TCPOPT_NOP << 24) | 432 (TCPOPT_WINDOW << 16) | 433 (TCPOLEN_WINDOW << 8) | 434 opts->ws); 435 } 436 437 if (unlikely(opts->num_sack_blocks)) { 438 struct tcp_sack_block *sp = tp->rx_opt.dsack ? 439 tp->duplicate_sack : tp->selective_acks; 440 int this_sack; 441 442 *ptr++ = htonl((TCPOPT_NOP << 24) | 443 (TCPOPT_NOP << 16) | 444 (TCPOPT_SACK << 8) | 445 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks * 446 TCPOLEN_SACK_PERBLOCK))); 447 448 for (this_sack = 0; this_sack < opts->num_sack_blocks; 449 ++this_sack) { 450 *ptr++ = htonl(sp[this_sack].start_seq); 451 *ptr++ = htonl(sp[this_sack].end_seq); 452 } 453 454 tp->rx_opt.dsack = 0; 455 } 456 } 457 458 /* Compute TCP options for SYN packets. This is not the final 459 * network wire format yet. 460 */ 461 static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, 462 struct tcp_out_options *opts, 463 struct tcp_md5sig_key **md5) { 464 struct tcp_sock *tp = tcp_sk(sk); 465 unsigned size = 0; 466 467 #ifdef CONFIG_TCP_MD5SIG 468 *md5 = tp->af_specific->md5_lookup(sk, sk); 469 if (*md5) { 470 opts->options |= OPTION_MD5; 471 size += TCPOLEN_MD5SIG_ALIGNED; 472 } 473 #else 474 *md5 = NULL; 475 #endif 476 477 /* We always get an MSS option. The option bytes which will be seen in 478 * normal data packets should timestamps be used, must be in the MSS 479 * advertised. But we subtract them from tp->mss_cache so that 480 * calculations in tcp_sendmsg are simpler etc. So account for this 481 * fact here if necessary. If we don't do this correctly, as a 482 * receiver we won't recognize data packets as being full sized when we 483 * should, and thus we won't abide by the delayed ACK rules correctly. 484 * SACKs don't matter, we never delay an ACK when we have any of those 485 * going out. 
*/ 486 opts->mss = tcp_advertise_mss(sk); 487 size += TCPOLEN_MSS_ALIGNED; 488 489 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { 490 opts->options |= OPTION_TS; 491 opts->tsval = TCP_SKB_CB(skb)->when; 492 opts->tsecr = tp->rx_opt.ts_recent; 493 size += TCPOLEN_TSTAMP_ALIGNED; 494 } 495 if (likely(sysctl_tcp_window_scaling)) { 496 opts->ws = tp->rx_opt.rcv_wscale; 497 if (likely(opts->ws)) 498 size += TCPOLEN_WSCALE_ALIGNED; 499 } 500 if (likely(sysctl_tcp_sack)) { 501 opts->options |= OPTION_SACK_ADVERTISE; 502 if (unlikely(!(OPTION_TS & opts->options))) 503 size += TCPOLEN_SACKPERM_ALIGNED; 504 } 505 506 return size; 507 } 508 509 /* Set up TCP options for SYN-ACKs. */ 510 static unsigned tcp_synack_options(struct sock *sk, 511 struct request_sock *req, 512 unsigned mss, struct sk_buff *skb, 513 struct tcp_out_options *opts, 514 struct tcp_md5sig_key **md5) { 515 unsigned size = 0; 516 struct inet_request_sock *ireq = inet_rsk(req); 517 char doing_ts; 518 519 #ifdef CONFIG_TCP_MD5SIG 520 *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); 521 if (*md5) { 522 opts->options |= OPTION_MD5; 523 size += TCPOLEN_MD5SIG_ALIGNED; 524 } 525 #else 526 *md5 = NULL; 527 #endif 528 529 /* we can't fit any SACK blocks in a packet with MD5 + TS 530 options. There was discussion about disabling SACK rather than TS in 531 order to fit in better with old, buggy kernels, but that was deemed 532 to be unnecessary. */ 533 doing_ts = ireq->tstamp_ok && !(*md5 && ireq->sack_ok); 534 535 opts->mss = mss; 536 size += TCPOLEN_MSS_ALIGNED; 537 538 if (likely(ireq->wscale_ok)) { 539 opts->ws = ireq->rcv_wscale; 540 if (likely(opts->ws)) 541 size += TCPOLEN_WSCALE_ALIGNED; 542 } 543 if (likely(doing_ts)) { 544 opts->options |= OPTION_TS; 545 opts->tsval = TCP_SKB_CB(skb)->when; 546 opts->tsecr = req->ts_recent; 547 size += TCPOLEN_TSTAMP_ALIGNED; 548 } 549 if (likely(ireq->sack_ok)) { 550 opts->options |= OPTION_SACK_ADVERTISE; 551 if (unlikely(!doing_ts)) 552 size += TCPOLEN_SACKPERM_ALIGNED; 553 } 554 555 return size; 556 } 557 558 /* Compute TCP options for ESTABLISHED sockets. This is not the 559 * final wire format yet. 560 */ 561 static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, 562 struct tcp_out_options *opts, 563 struct tcp_md5sig_key **md5) { 564 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; 565 struct tcp_sock *tp = tcp_sk(sk); 566 unsigned size = 0; 567 unsigned int eff_sacks; 568 569 #ifdef CONFIG_TCP_MD5SIG 570 *md5 = tp->af_specific->md5_lookup(sk, sk); 571 if (unlikely(*md5)) { 572 opts->options |= OPTION_MD5; 573 size += TCPOLEN_MD5SIG_ALIGNED; 574 } 575 #else 576 *md5 = NULL; 577 #endif 578 579 if (likely(tp->rx_opt.tstamp_ok)) { 580 opts->options |= OPTION_TS; 581 opts->tsval = tcb ? tcb->when : 0; 582 opts->tsecr = tp->rx_opt.ts_recent; 583 size += TCPOLEN_TSTAMP_ALIGNED; 584 } 585 586 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; 587 if (unlikely(eff_sacks)) { 588 const unsigned remaining = MAX_TCP_OPTION_SPACE - size; 589 opts->num_sack_blocks = 590 min_t(unsigned, eff_sacks, 591 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / 592 TCPOLEN_SACK_PERBLOCK); 593 size += TCPOLEN_SACK_BASE_ALIGNED + 594 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; 595 } 596 597 return size; 598 } 599 600 /* This routine actually transmits TCP packets queued in by 601 * tcp_do_sendmsg(). This is used by both the initial 602 * transmission and possible later retransmissions. 603 * All SKB's seen here are completely headerless. 
It is our 604 * job to build the TCP header, and pass the packet down to 605 * IP so it can do the same plus pass the packet off to the 606 * device. 607 * 608 * We are working here with either a clone of the original 609 * SKB, or a fresh unique copy made by the retransmit engine. 610 */ 611 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, 612 gfp_t gfp_mask) 613 { 614 const struct inet_connection_sock *icsk = inet_csk(sk); 615 struct inet_sock *inet; 616 struct tcp_sock *tp; 617 struct tcp_skb_cb *tcb; 618 struct tcp_out_options opts; 619 unsigned tcp_options_size, tcp_header_size; 620 struct tcp_md5sig_key *md5; 621 __u8 *md5_hash_location; 622 struct tcphdr *th; 623 int err; 624 625 BUG_ON(!skb || !tcp_skb_pcount(skb)); 626 627 /* If congestion control is doing timestamping, we must 628 * take such a timestamp before we potentially clone/copy. 629 */ 630 if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP) 631 __net_timestamp(skb); 632 633 if (likely(clone_it)) { 634 if (unlikely(skb_cloned(skb))) 635 skb = pskb_copy(skb, gfp_mask); 636 else 637 skb = skb_clone(skb, gfp_mask); 638 if (unlikely(!skb)) 639 return -ENOBUFS; 640 } 641 642 inet = inet_sk(sk); 643 tp = tcp_sk(sk); 644 tcb = TCP_SKB_CB(skb); 645 memset(&opts, 0, sizeof(opts)); 646 647 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) 648 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); 649 else 650 tcp_options_size = tcp_established_options(sk, skb, &opts, 651 &md5); 652 tcp_header_size = tcp_options_size + sizeof(struct tcphdr); 653 654 if (tcp_packets_in_flight(tp) == 0) 655 tcp_ca_event(sk, CA_EVENT_TX_START); 656 657 skb_push(skb, tcp_header_size); 658 skb_reset_transport_header(skb); 659 skb_set_owner_w(skb, sk); 660 661 /* Build TCP header and checksum it. */ 662 th = tcp_hdr(skb); 663 th->source = inet->sport; 664 th->dest = inet->dport; 665 th->seq = htonl(tcb->seq); 666 th->ack_seq = htonl(tp->rcv_nxt); 667 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 668 tcb->flags); 669 670 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) { 671 /* RFC1323: The window in SYN & SYN/ACK segments 672 * is never scaled. 
673 */ 674 th->window = htons(min(tp->rcv_wnd, 65535U)); 675 } else { 676 th->window = htons(tcp_select_window(sk)); 677 } 678 th->check = 0; 679 th->urg_ptr = 0; 680 681 /* The urg_mode check is necessary during a below snd_una win probe */ 682 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { 683 if (before(tp->snd_up, tcb->seq + 0x10000)) { 684 th->urg_ptr = htons(tp->snd_up - tcb->seq); 685 th->urg = 1; 686 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { 687 th->urg_ptr = 0xFFFF; 688 th->urg = 1; 689 } 690 } 691 692 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); 693 if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0)) 694 TCP_ECN_send(sk, skb, tcp_header_size); 695 696 #ifdef CONFIG_TCP_MD5SIG 697 /* Calculate the MD5 hash, as we have all we need now */ 698 if (md5) { 699 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 700 tp->af_specific->calc_md5_hash(md5_hash_location, 701 md5, sk, NULL, skb); 702 } 703 #endif 704 705 icsk->icsk_af_ops->send_check(sk, skb->len, skb); 706 707 if (likely(tcb->flags & TCPCB_FLAG_ACK)) 708 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 709 710 if (skb->len != tcp_header_size) 711 tcp_event_data_sent(tp, skb, sk); 712 713 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 714 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 715 716 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 717 if (likely(err <= 0)) 718 return err; 719 720 tcp_enter_cwr(sk, 1); 721 722 return net_xmit_eval(err); 723 } 724 725 /* This routine just queues the buffer for sending. 726 * 727 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, 728 * otherwise socket can stall. 729 */ 730 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) 731 { 732 struct tcp_sock *tp = tcp_sk(sk); 733 734 /* Advance write_seq and place onto the write_queue. */ 735 tp->write_seq = TCP_SKB_CB(skb)->end_seq; 736 skb_header_release(skb); 737 tcp_add_write_queue_tail(sk, skb); 738 sk->sk_wmem_queued += skb->truesize; 739 sk_mem_charge(sk, skb->truesize); 740 } 741 742 /* Initialize TSO segments for a packet. */ 743 static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, 744 unsigned int mss_now) 745 { 746 if (skb->len <= mss_now || !sk_can_gso(sk) || 747 skb->ip_summed == CHECKSUM_NONE) { 748 /* Avoid the costly divide in the normal 749 * non-TSO case. 750 */ 751 skb_shinfo(skb)->gso_segs = 1; 752 skb_shinfo(skb)->gso_size = 0; 753 skb_shinfo(skb)->gso_type = 0; 754 } else { 755 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now); 756 skb_shinfo(skb)->gso_size = mss_now; 757 skb_shinfo(skb)->gso_type = sk->sk_gso_type; 758 } 759 } 760 761 /* When a modification to fackets out becomes necessary, we need to check 762 * skb is counted to fackets_out or not. 
763 */ 764 static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb, 765 int decr) 766 { 767 struct tcp_sock *tp = tcp_sk(sk); 768 769 if (!tp->sacked_out || tcp_is_reno(tp)) 770 return; 771 772 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) 773 tp->fackets_out -= decr; 774 } 775 776 /* Pcount in the middle of the write queue got changed, we need to do various 777 * tweaks to fix counters 778 */ 779 static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr) 780 { 781 struct tcp_sock *tp = tcp_sk(sk); 782 783 tp->packets_out -= decr; 784 785 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 786 tp->sacked_out -= decr; 787 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) 788 tp->retrans_out -= decr; 789 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) 790 tp->lost_out -= decr; 791 792 /* Reno case is special. Sigh... */ 793 if (tcp_is_reno(tp) && decr > 0) 794 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); 795 796 tcp_adjust_fackets_out(sk, skb, decr); 797 798 if (tp->lost_skb_hint && 799 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && 800 (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) 801 tp->lost_cnt_hint -= decr; 802 803 tcp_verify_left_out(tp); 804 } 805 806 /* Function to create two new TCP segments. Shrinks the given segment 807 * to the specified size and appends a new segment with the rest of the 808 * packet to the list. This won't be called frequently, I hope. 809 * Remember, these are still headerless SKBs at this point. 810 */ 811 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, 812 unsigned int mss_now) 813 { 814 struct tcp_sock *tp = tcp_sk(sk); 815 struct sk_buff *buff; 816 int nsize, old_factor; 817 int nlen; 818 u8 flags; 819 820 BUG_ON(len > skb->len); 821 822 nsize = skb_headlen(skb) - len; 823 if (nsize < 0) 824 nsize = 0; 825 826 if (skb_cloned(skb) && 827 skb_is_nonlinear(skb) && 828 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 829 return -ENOMEM; 830 831 /* Get a new skb... force flag on. */ 832 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC); 833 if (buff == NULL) 834 return -ENOMEM; /* We'll just try again later. */ 835 836 sk->sk_wmem_queued += buff->truesize; 837 sk_mem_charge(sk, buff->truesize); 838 nlen = skb->len - len - nsize; 839 buff->truesize += nlen; 840 skb->truesize -= nlen; 841 842 /* Correct the sequence numbers. */ 843 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 844 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 845 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 846 847 /* PSH and FIN should only be set in the second packet. */ 848 flags = TCP_SKB_CB(skb)->flags; 849 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH); 850 TCP_SKB_CB(buff)->flags = flags; 851 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 852 853 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { 854 /* Copy and checksum data tail into the new buffer. */ 855 buff->csum = csum_partial_copy_nocheck(skb->data + len, 856 skb_put(buff, nsize), 857 nsize, 0); 858 859 skb_trim(skb, len); 860 861 skb->csum = csum_block_sub(skb->csum, buff->csum, len); 862 } else { 863 skb->ip_summed = CHECKSUM_PARTIAL; 864 skb_split(skb, buff, len); 865 } 866 867 buff->ip_summed = skb->ip_summed; 868 869 /* Looks stupid, but our code really uses when of 870 * skbs, which it never sent before. 
--ANK 871 */ 872 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; 873 buff->tstamp = skb->tstamp; 874 875 old_factor = tcp_skb_pcount(skb); 876 877 /* Fix up tso_factor for both original and new SKB. */ 878 tcp_set_skb_tso_segs(sk, skb, mss_now); 879 tcp_set_skb_tso_segs(sk, buff, mss_now); 880 881 /* If this packet has been sent out already, we must 882 * adjust the various packet counters. 883 */ 884 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { 885 int diff = old_factor - tcp_skb_pcount(skb) - 886 tcp_skb_pcount(buff); 887 888 if (diff) 889 tcp_adjust_pcount(sk, skb, diff); 890 } 891 892 /* Link BUFF into the send queue. */ 893 skb_header_release(buff); 894 tcp_insert_write_queue_after(skb, buff, sk); 895 896 return 0; 897 } 898 899 /* This is similar to __pskb_pull_head() (it will go to core/skbuff.c 900 * eventually). The difference is that pulled data not copied, but 901 * immediately discarded. 902 */ 903 static void __pskb_trim_head(struct sk_buff *skb, int len) 904 { 905 int i, k, eat; 906 907 eat = len; 908 k = 0; 909 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 910 if (skb_shinfo(skb)->frags[i].size <= eat) { 911 put_page(skb_shinfo(skb)->frags[i].page); 912 eat -= skb_shinfo(skb)->frags[i].size; 913 } else { 914 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 915 if (eat) { 916 skb_shinfo(skb)->frags[k].page_offset += eat; 917 skb_shinfo(skb)->frags[k].size -= eat; 918 eat = 0; 919 } 920 k++; 921 } 922 } 923 skb_shinfo(skb)->nr_frags = k; 924 925 skb_reset_tail_pointer(skb); 926 skb->data_len -= len; 927 skb->len = skb->data_len; 928 } 929 930 /* Remove acked data from a packet in the transmit queue. */ 931 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) 932 { 933 if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 934 return -ENOMEM; 935 936 /* If len == headlen, we avoid __skb_pull to preserve alignment. */ 937 if (unlikely(len < skb_headlen(skb))) 938 __skb_pull(skb, len); 939 else 940 __pskb_trim_head(skb, len - skb_headlen(skb)); 941 942 TCP_SKB_CB(skb)->seq += len; 943 skb->ip_summed = CHECKSUM_PARTIAL; 944 945 skb->truesize -= len; 946 sk->sk_wmem_queued -= len; 947 sk_mem_uncharge(sk, len); 948 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 949 950 /* Any change of skb->len requires recalculation of tso 951 * factor and mss. 952 */ 953 if (tcp_skb_pcount(skb) > 1) 954 tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk)); 955 956 return 0; 957 } 958 959 /* Calculate MSS. Not accounting for SACKs here. 
*/ 960 int tcp_mtu_to_mss(struct sock *sk, int pmtu) 961 { 962 struct tcp_sock *tp = tcp_sk(sk); 963 struct inet_connection_sock *icsk = inet_csk(sk); 964 int mss_now; 965 966 /* Calculate base mss without TCP options: 967 It is MMS_S - sizeof(tcphdr) of rfc1122 968 */ 969 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 970 971 /* Clamp it (mss_clamp does not include tcp options) */ 972 if (mss_now > tp->rx_opt.mss_clamp) 973 mss_now = tp->rx_opt.mss_clamp; 974 975 /* Now subtract optional transport overhead */ 976 mss_now -= icsk->icsk_ext_hdr_len; 977 978 /* Then reserve room for full set of TCP options and 8 bytes of data */ 979 if (mss_now < 48) 980 mss_now = 48; 981 982 /* Now subtract TCP options size, not including SACKs */ 983 mss_now -= tp->tcp_header_len - sizeof(struct tcphdr); 984 985 return mss_now; 986 } 987 988 /* Inverse of above */ 989 int tcp_mss_to_mtu(struct sock *sk, int mss) 990 { 991 struct tcp_sock *tp = tcp_sk(sk); 992 struct inet_connection_sock *icsk = inet_csk(sk); 993 int mtu; 994 995 mtu = mss + 996 tp->tcp_header_len + 997 icsk->icsk_ext_hdr_len + 998 icsk->icsk_af_ops->net_header_len; 999 1000 return mtu; 1001 } 1002 1003 /* MTU probing init per socket */ 1004 void tcp_mtup_init(struct sock *sk) 1005 { 1006 struct tcp_sock *tp = tcp_sk(sk); 1007 struct inet_connection_sock *icsk = inet_csk(sk); 1008 1009 icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; 1010 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + 1011 icsk->icsk_af_ops->net_header_len; 1012 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); 1013 icsk->icsk_mtup.probe_size = 0; 1014 } 1015 1016 /* This function synchronize snd mss to current pmtu/exthdr set. 1017 1018 tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts 1019 for TCP options, but includes only bare TCP header. 1020 1021 tp->rx_opt.mss_clamp is mss negotiated at connection setup. 1022 It is minimum of user_mss and mss received with SYN. 1023 It also does not include TCP options. 1024 1025 inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function. 1026 1027 tp->mss_cache is current effective sending mss, including 1028 all tcp options except for SACKs. It is evaluated, 1029 taking into account current pmtu, but never exceeds 1030 tp->rx_opt.mss_clamp. 1031 1032 NOTE1. rfc1122 clearly states that advertised MSS 1033 DOES NOT include either tcp or ip options. 1034 1035 NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache 1036 are READ ONLY outside this function. --ANK (980731) 1037 */ 1038 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) 1039 { 1040 struct tcp_sock *tp = tcp_sk(sk); 1041 struct inet_connection_sock *icsk = inet_csk(sk); 1042 int mss_now; 1043 1044 if (icsk->icsk_mtup.search_high > pmtu) 1045 icsk->icsk_mtup.search_high = pmtu; 1046 1047 mss_now = tcp_mtu_to_mss(sk, pmtu); 1048 mss_now = tcp_bound_to_half_wnd(tp, mss_now); 1049 1050 /* And store cached results */ 1051 icsk->icsk_pmtu_cookie = pmtu; 1052 if (icsk->icsk_mtup.enabled) 1053 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); 1054 tp->mss_cache = mss_now; 1055 1056 return mss_now; 1057 } 1058 1059 /* Compute the current effective MSS, taking SACKs and IP options, 1060 * and even PMTU discovery events into account. 
1061 */ 1062 unsigned int tcp_current_mss(struct sock *sk) 1063 { 1064 struct tcp_sock *tp = tcp_sk(sk); 1065 struct dst_entry *dst = __sk_dst_get(sk); 1066 u32 mss_now; 1067 unsigned header_len; 1068 struct tcp_out_options opts; 1069 struct tcp_md5sig_key *md5; 1070 1071 mss_now = tp->mss_cache; 1072 1073 if (dst) { 1074 u32 mtu = dst_mtu(dst); 1075 if (mtu != inet_csk(sk)->icsk_pmtu_cookie) 1076 mss_now = tcp_sync_mss(sk, mtu); 1077 } 1078 1079 header_len = tcp_established_options(sk, NULL, &opts, &md5) + 1080 sizeof(struct tcphdr); 1081 /* The mss_cache is sized based on tp->tcp_header_len, which assumes 1082 * some common options. If this is an odd packet (because we have SACK 1083 * blocks etc) then our calculated header_len will be different, and 1084 * we have to adjust mss_now correspondingly */ 1085 if (header_len != tp->tcp_header_len) { 1086 int delta = (int) header_len - tp->tcp_header_len; 1087 mss_now -= delta; 1088 } 1089 1090 return mss_now; 1091 } 1092 1093 /* Congestion window validation. (RFC2861) */ 1094 static void tcp_cwnd_validate(struct sock *sk) 1095 { 1096 struct tcp_sock *tp = tcp_sk(sk); 1097 1098 if (tp->packets_out >= tp->snd_cwnd) { 1099 /* Network is feed fully. */ 1100 tp->snd_cwnd_used = 0; 1101 tp->snd_cwnd_stamp = tcp_time_stamp; 1102 } else { 1103 /* Network starves. */ 1104 if (tp->packets_out > tp->snd_cwnd_used) 1105 tp->snd_cwnd_used = tp->packets_out; 1106 1107 if (sysctl_tcp_slow_start_after_idle && 1108 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) 1109 tcp_cwnd_application_limited(sk); 1110 } 1111 } 1112 1113 /* Returns the portion of skb which can be sent right away without 1114 * introducing MSS oddities to segment boundaries. In rare cases where 1115 * mss_now != mss_cache, we will request caller to create a small skb 1116 * per input skb which could be mostly avoided here (if desired). 1117 * 1118 * We explicitly want to create a request for splitting write queue tail 1119 * to a small skb for Nagle purposes while avoiding unnecessary modulos, 1120 * thus all the complexity (cwnd_len is always MSS multiple which we 1121 * return whenever allowed by the other factors). Basically we need the 1122 * modulo only when the receiver window alone is the limiting factor or 1123 * when we would be allowed to send the split-due-to-Nagle skb fully. 1124 */ 1125 static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, 1126 unsigned int mss_now, unsigned int cwnd) 1127 { 1128 struct tcp_sock *tp = tcp_sk(sk); 1129 u32 needed, window, cwnd_len; 1130 1131 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1132 cwnd_len = mss_now * cwnd; 1133 1134 if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) 1135 return cwnd_len; 1136 1137 needed = min(skb->len, window); 1138 1139 if (cwnd_len <= needed) 1140 return cwnd_len; 1141 1142 return needed - needed % mss_now; 1143 } 1144 1145 /* Can at least one segment of SKB be sent right now, according to the 1146 * congestion window rules? If so, return how many segments are allowed. 1147 */ 1148 static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, 1149 struct sk_buff *skb) 1150 { 1151 u32 in_flight, cwnd; 1152 1153 /* Don't be strict about the congestion window for the final FIN. 
*/ 1154 if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 1155 tcp_skb_pcount(skb) == 1) 1156 return 1; 1157 1158 in_flight = tcp_packets_in_flight(tp); 1159 cwnd = tp->snd_cwnd; 1160 if (in_flight < cwnd) 1161 return (cwnd - in_flight); 1162 1163 return 0; 1164 } 1165 1166 /* Intialize TSO state of a skb. 1167 * This must be invoked the first time we consider transmitting 1168 * SKB onto the wire. 1169 */ 1170 static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, 1171 unsigned int mss_now) 1172 { 1173 int tso_segs = tcp_skb_pcount(skb); 1174 1175 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { 1176 tcp_set_skb_tso_segs(sk, skb, mss_now); 1177 tso_segs = tcp_skb_pcount(skb); 1178 } 1179 return tso_segs; 1180 } 1181 1182 /* Minshall's variant of the Nagle send check. */ 1183 static inline int tcp_minshall_check(const struct tcp_sock *tp) 1184 { 1185 return after(tp->snd_sml, tp->snd_una) && 1186 !after(tp->snd_sml, tp->snd_nxt); 1187 } 1188 1189 /* Return 0, if packet can be sent now without violation Nagle's rules: 1190 * 1. It is full sized. 1191 * 2. Or it contains FIN. (already checked by caller) 1192 * 3. Or TCP_NODELAY was set. 1193 * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1194 * With Minshall's modification: all sent small packets are ACKed. 1195 */ 1196 static inline int tcp_nagle_check(const struct tcp_sock *tp, 1197 const struct sk_buff *skb, 1198 unsigned mss_now, int nonagle) 1199 { 1200 return (skb->len < mss_now && 1201 ((nonagle & TCP_NAGLE_CORK) || 1202 (!nonagle && tp->packets_out && tcp_minshall_check(tp)))); 1203 } 1204 1205 /* Return non-zero if the Nagle test allows this packet to be 1206 * sent now. 1207 */ 1208 static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, 1209 unsigned int cur_mss, int nonagle) 1210 { 1211 /* Nagle rule does not apply to frames, which sit in the middle of the 1212 * write_queue (they have no chances to get new data). 1213 * 1214 * This is implemented in the callers, where they modify the 'nonagle' 1215 * argument based upon the location of SKB in the send queue. 1216 */ 1217 if (nonagle & TCP_NAGLE_PUSH) 1218 return 1; 1219 1220 /* Don't use the nagle rule for urgent data (or for the final FIN). 1221 * Nagle can be ignored during F-RTO too (see RFC4138). 1222 */ 1223 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || 1224 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) 1225 return 1; 1226 1227 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1228 return 1; 1229 1230 return 0; 1231 } 1232 1233 /* Does at least the first segment of SKB fit into the send window? */ 1234 static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, 1235 unsigned int cur_mss) 1236 { 1237 u32 end_seq = TCP_SKB_CB(skb)->end_seq; 1238 1239 if (skb->len > cur_mss) 1240 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; 1241 1242 return !after(end_seq, tcp_wnd_end(tp)); 1243 } 1244 1245 /* This checks if the data bearing packet SKB (usually tcp_send_head(sk)) 1246 * should be put on the wire right now. If so, it returns the number of 1247 * packets allowed by the congestion window. 
1248 */ 1249 static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, 1250 unsigned int cur_mss, int nonagle) 1251 { 1252 struct tcp_sock *tp = tcp_sk(sk); 1253 unsigned int cwnd_quota; 1254 1255 tcp_init_tso_segs(sk, skb, cur_mss); 1256 1257 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) 1258 return 0; 1259 1260 cwnd_quota = tcp_cwnd_test(tp, skb); 1261 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) 1262 cwnd_quota = 0; 1263 1264 return cwnd_quota; 1265 } 1266 1267 /* Test if sending is allowed right now. */ 1268 int tcp_may_send_now(struct sock *sk) 1269 { 1270 struct tcp_sock *tp = tcp_sk(sk); 1271 struct sk_buff *skb = tcp_send_head(sk); 1272 1273 return (skb && 1274 tcp_snd_test(sk, skb, tcp_current_mss(sk), 1275 (tcp_skb_is_last(sk, skb) ? 1276 tp->nonagle : TCP_NAGLE_PUSH))); 1277 } 1278 1279 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet 1280 * which is put after SKB on the list. It is very much like 1281 * tcp_fragment() except that it may make several kinds of assumptions 1282 * in order to speed up the splitting operation. In particular, we 1283 * know that all the data is in scatter-gather pages, and that the 1284 * packet has never been sent out before (and thus is not cloned). 1285 */ 1286 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 1287 unsigned int mss_now) 1288 { 1289 struct sk_buff *buff; 1290 int nlen = skb->len - len; 1291 u8 flags; 1292 1293 /* All of a TSO frame must be composed of paged data. */ 1294 if (skb->len != skb->data_len) 1295 return tcp_fragment(sk, skb, len, mss_now); 1296 1297 buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC); 1298 if (unlikely(buff == NULL)) 1299 return -ENOMEM; 1300 1301 sk->sk_wmem_queued += buff->truesize; 1302 sk_mem_charge(sk, buff->truesize); 1303 buff->truesize += nlen; 1304 skb->truesize -= nlen; 1305 1306 /* Correct the sequence numbers. */ 1307 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; 1308 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 1309 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 1310 1311 /* PSH and FIN should only be set in the second packet. */ 1312 flags = TCP_SKB_CB(skb)->flags; 1313 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH); 1314 TCP_SKB_CB(buff)->flags = flags; 1315 1316 /* This packet was never sent out yet, so no SACK bits. */ 1317 TCP_SKB_CB(buff)->sacked = 0; 1318 1319 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; 1320 skb_split(skb, buff, len); 1321 1322 /* Fix up tso_factor for both original and new SKB. */ 1323 tcp_set_skb_tso_segs(sk, skb, mss_now); 1324 tcp_set_skb_tso_segs(sk, buff, mss_now); 1325 1326 /* Link BUFF into the send queue. */ 1327 skb_header_release(buff); 1328 tcp_insert_write_queue_after(skb, buff, sk); 1329 1330 return 0; 1331 } 1332 1333 /* Try to defer sending, if possible, in order to minimize the amount 1334 * of TSO splitting we do. View it as a kind of TSO Nagle test. 1335 * 1336 * This algorithm is from John Heffner. 1337 */ 1338 static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) 1339 { 1340 struct tcp_sock *tp = tcp_sk(sk); 1341 const struct inet_connection_sock *icsk = inet_csk(sk); 1342 u32 send_win, cong_win, limit, in_flight; 1343 1344 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 1345 goto send_now; 1346 1347 if (icsk->icsk_ca_state != TCP_CA_Open) 1348 goto send_now; 1349 1350 /* Defer for less than two clock ticks. 
*/ 1351 if (tp->tso_deferred && 1352 (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1) 1353 goto send_now; 1354 1355 in_flight = tcp_packets_in_flight(tp); 1356 1357 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); 1358 1359 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 1360 1361 /* From in_flight test above, we know that cwnd > in_flight. */ 1362 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 1363 1364 limit = min(send_win, cong_win); 1365 1366 /* If a full-sized TSO skb can be sent, do it. */ 1367 if (limit >= sk->sk_gso_max_size) 1368 goto send_now; 1369 1370 /* Middle in queue won't get any more data, full sendable already? */ 1371 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 1372 goto send_now; 1373 1374 if (sysctl_tcp_tso_win_divisor) { 1375 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1376 1377 /* If at least some fraction of a window is available, 1378 * just use it. 1379 */ 1380 chunk /= sysctl_tcp_tso_win_divisor; 1381 if (limit >= chunk) 1382 goto send_now; 1383 } else { 1384 /* Different approach, try not to defer past a single 1385 * ACK. Receiver should ACK every other full sized 1386 * frame, so if we have space for more than 3 frames 1387 * then send now. 1388 */ 1389 if (limit > tcp_max_burst(tp) * tp->mss_cache) 1390 goto send_now; 1391 } 1392 1393 /* Ok, it looks like it is advisable to defer. */ 1394 tp->tso_deferred = 1 | (jiffies << 1); 1395 1396 return 1; 1397 1398 send_now: 1399 tp->tso_deferred = 0; 1400 return 0; 1401 } 1402 1403 /* Create a new MTU probe if we are ready. 1404 * MTU probe is regularly attempting to increase the path MTU by 1405 * deliberately sending larger packets. This discovers routing 1406 * changes resulting in larger path MTUs. 1407 * 1408 * Returns 0 if we should wait to probe (no cwnd available), 1409 * 1 if a probe was sent, 1410 * -1 otherwise 1411 */ 1412 static int tcp_mtu_probe(struct sock *sk) 1413 { 1414 struct tcp_sock *tp = tcp_sk(sk); 1415 struct inet_connection_sock *icsk = inet_csk(sk); 1416 struct sk_buff *skb, *nskb, *next; 1417 int len; 1418 int probe_size; 1419 int size_needed; 1420 int copy; 1421 int mss_now; 1422 1423 /* Not currently probing/verifying, 1424 * not in recovery, 1425 * have enough cwnd, and 1426 * not SACKing (the variable headers throw things off) */ 1427 if (!icsk->icsk_mtup.enabled || 1428 icsk->icsk_mtup.probe_size || 1429 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 1430 tp->snd_cwnd < 11 || 1431 tp->rx_opt.num_sacks || tp->rx_opt.dsack) 1432 return -1; 1433 1434 /* Very simple search strategy: just double the MSS. */ 1435 mss_now = tcp_current_mss(sk); 1436 probe_size = 2 * tp->mss_cache; 1437 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; 1438 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { 1439 /* TODO: set timer for probe_converge_event */ 1440 return -1; 1441 } 1442 1443 /* Have enough data in the send queue to probe? */ 1444 if (tp->write_seq - tp->snd_nxt < size_needed) 1445 return -1; 1446 1447 if (tp->snd_wnd < size_needed) 1448 return -1; 1449 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) 1450 return 0; 1451 1452 /* Do we need to wait to drain cwnd? With none in flight, don't stall */ 1453 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { 1454 if (!tcp_packets_in_flight(tp)) 1455 return -1; 1456 else 1457 return 0; 1458 } 1459 1460 /* We're allowed to probe. Build it now. 
*/ 1461 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) 1462 return -1; 1463 sk->sk_wmem_queued += nskb->truesize; 1464 sk_mem_charge(sk, nskb->truesize); 1465 1466 skb = tcp_send_head(sk); 1467 1468 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 1469 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 1470 TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK; 1471 TCP_SKB_CB(nskb)->sacked = 0; 1472 nskb->csum = 0; 1473 nskb->ip_summed = skb->ip_summed; 1474 1475 tcp_insert_write_queue_before(nskb, skb, sk); 1476 1477 len = 0; 1478 tcp_for_write_queue_from_safe(skb, next, sk) { 1479 copy = min_t(int, skb->len, probe_size - len); 1480 if (nskb->ip_summed) 1481 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 1482 else 1483 nskb->csum = skb_copy_and_csum_bits(skb, 0, 1484 skb_put(nskb, copy), 1485 copy, nskb->csum); 1486 1487 if (skb->len <= copy) { 1488 /* We've eaten all the data from this skb. 1489 * Throw it away. */ 1490 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags; 1491 tcp_unlink_write_queue(skb, sk); 1492 sk_wmem_free_skb(sk, skb); 1493 } else { 1494 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & 1495 ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); 1496 if (!skb_shinfo(skb)->nr_frags) { 1497 skb_pull(skb, copy); 1498 if (skb->ip_summed != CHECKSUM_PARTIAL) 1499 skb->csum = csum_partial(skb->data, 1500 skb->len, 0); 1501 } else { 1502 __pskb_trim_head(skb, copy); 1503 tcp_set_skb_tso_segs(sk, skb, mss_now); 1504 } 1505 TCP_SKB_CB(skb)->seq += copy; 1506 } 1507 1508 len += copy; 1509 1510 if (len >= probe_size) 1511 break; 1512 } 1513 tcp_init_tso_segs(sk, nskb, nskb->len); 1514 1515 /* We're ready to send. If this fails, the probe will 1516 * be resegmented into mss-sized pieces by tcp_write_xmit(). */ 1517 TCP_SKB_CB(nskb)->when = tcp_time_stamp; 1518 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 1519 /* Decrement cwnd here because we are sending 1520 * effectively two packets. */ 1521 tp->snd_cwnd--; 1522 tcp_event_new_data_sent(sk, nskb); 1523 1524 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 1525 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 1526 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 1527 1528 return 1; 1529 } 1530 1531 return -1; 1532 } 1533 1534 /* This routine writes packets to the network. It advances the 1535 * send_head. This happens as incoming acks open up the remote 1536 * window for us. 1537 * 1538 * LARGESEND note: !tcp_urg_mode is overkill, only frames between 1539 * snd_up-64k-mss .. snd_up cannot be large. However, taking into 1540 * account rare use of URG, this is not a big flaw. 1541 * 1542 * Returns 1, if no segments are in flight and we have queued segments, but 1543 * cannot send anything now because of SWS or another problem. 1544 */ 1545 static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 1546 int push_one, gfp_t gfp) 1547 { 1548 struct tcp_sock *tp = tcp_sk(sk); 1549 struct sk_buff *skb; 1550 unsigned int tso_segs, sent_pkts; 1551 int cwnd_quota; 1552 int result; 1553 1554 sent_pkts = 0; 1555 1556 if (!push_one) { 1557 /* Do MTU probing. 
*/ 1558 result = tcp_mtu_probe(sk); 1559 if (!result) { 1560 return 0; 1561 } else if (result > 0) { 1562 sent_pkts = 1; 1563 } 1564 } 1565 1566 while ((skb = tcp_send_head(sk))) { 1567 unsigned int limit; 1568 1569 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1570 BUG_ON(!tso_segs); 1571 1572 cwnd_quota = tcp_cwnd_test(tp, skb); 1573 if (!cwnd_quota) 1574 break; 1575 1576 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 1577 break; 1578 1579 if (tso_segs == 1) { 1580 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 1581 (tcp_skb_is_last(sk, skb) ? 1582 nonagle : TCP_NAGLE_PUSH)))) 1583 break; 1584 } else { 1585 if (!push_one && tcp_tso_should_defer(sk, skb)) 1586 break; 1587 } 1588 1589 limit = mss_now; 1590 if (tso_segs > 1 && !tcp_urg_mode(tp)) 1591 limit = tcp_mss_split_point(sk, skb, mss_now, 1592 cwnd_quota); 1593 1594 if (skb->len > limit && 1595 unlikely(tso_fragment(sk, skb, limit, mss_now))) 1596 break; 1597 1598 TCP_SKB_CB(skb)->when = tcp_time_stamp; 1599 1600 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 1601 break; 1602 1603 /* Advance the send_head. This one is sent out. 1604 * This call will increment packets_out. 1605 */ 1606 tcp_event_new_data_sent(sk, skb); 1607 1608 tcp_minshall_update(tp, mss_now, skb); 1609 sent_pkts++; 1610 1611 if (push_one) 1612 break; 1613 } 1614 1615 if (likely(sent_pkts)) { 1616 tcp_cwnd_validate(sk); 1617 return 0; 1618 } 1619 return !tp->packets_out && tcp_send_head(sk); 1620 } 1621 1622 /* Push out any pending frames which were held back due to 1623 * TCP_CORK or attempt at coalescing tiny packets. 1624 * The socket must be locked by the caller. 1625 */ 1626 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 1627 int nonagle) 1628 { 1629 struct sk_buff *skb = tcp_send_head(sk); 1630 1631 if (!skb) 1632 return; 1633 1634 /* If we are closed, the bytes will have to remain here. 1635 * In time closedown will finish, we empty the write queue and 1636 * all will be happy. 1637 */ 1638 if (unlikely(sk->sk_state == TCP_CLOSE)) 1639 return; 1640 1641 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC)) 1642 tcp_check_probe_timer(sk); 1643 } 1644 1645 /* Send _single_ skb sitting at the send head. This function requires 1646 * true push pending frames to setup probe timer etc. 1647 */ 1648 void tcp_push_one(struct sock *sk, unsigned int mss_now) 1649 { 1650 struct sk_buff *skb = tcp_send_head(sk); 1651 1652 BUG_ON(!skb || skb->len < mss_now); 1653 1654 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 1655 } 1656 1657 /* This function returns the amount that we can raise the 1658 * usable window based on the following constraints 1659 * 1660 * 1. The window can never be shrunk once it is offered (RFC 793) 1661 * 2. We limit memory per socket 1662 * 1663 * RFC 1122: 1664 * "the suggested [SWS] avoidance algorithm for the receiver is to keep 1665 * RECV.NEXT + RCV.WIN fixed until: 1666 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 1667 * 1668 * i.e. don't raise the right edge of the window until you can raise 1669 * it at least MSS bytes. 1670 * 1671 * Unfortunately, the recommended algorithm breaks header prediction, 1672 * since header prediction assumes th->window stays fixed. 1673 * 1674 * Strictly speaking, keeping th->window fixed violates the receiver 1675 * side SWS prevention criteria. The problem is that under this rule 1676 * a stream of single byte packets will cause the right side of the 1677 * window to always advance by a single byte. 
1678 * 1679 * Of course, if the sender implements sender side SWS prevention 1680 * then this will not be a problem. 1681 * 1682 * BSD seems to make the following compromise: 1683 * 1684 * If the free space is less than the 1/4 of the maximum 1685 * space available and the free space is less than 1/2 mss, 1686 * then set the window to 0. 1687 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 1688 * Otherwise, just prevent the window from shrinking 1689 * and from being larger than the largest representable value. 1690 * 1691 * This prevents incremental opening of the window in the regime 1692 * where TCP is limited by the speed of the reader side taking 1693 * data out of the TCP receive queue. It does nothing about 1694 * those cases where the window is constrained on the sender side 1695 * because the pipeline is full. 1696 * 1697 * BSD also seems to "accidentally" limit itself to windows that are a 1698 * multiple of MSS, at least until the free space gets quite small. 1699 * This would appear to be a side effect of the mbuf implementation. 1700 * Combining these two algorithms results in the observed behavior 1701 * of having a fixed window size at almost all times. 1702 * 1703 * Below we obtain similar behavior by forcing the offered window to 1704 * a multiple of the mss when it is feasible to do so. 1705 * 1706 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 1707 * Regular options like TIMESTAMP are taken into account. 1708 */ 1709 u32 __tcp_select_window(struct sock *sk) 1710 { 1711 struct inet_connection_sock *icsk = inet_csk(sk); 1712 struct tcp_sock *tp = tcp_sk(sk); 1713 /* MSS for the peer's data. Previous versions used mss_clamp 1714 * here. I don't know if the value based on our guesses 1715 * of peer's MSS is better for the performance. It's more correct 1716 * but may be worse for the performance because of rcv_mss 1717 * fluctuations. --SAW 1998/11/1 1718 */ 1719 int mss = icsk->icsk_ack.rcv_mss; 1720 int free_space = tcp_space(sk); 1721 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); 1722 int window; 1723 1724 if (mss > full_space) 1725 mss = full_space; 1726 1727 if (free_space < (full_space >> 1)) { 1728 icsk->icsk_ack.quick = 0; 1729 1730 if (tcp_memory_pressure) 1731 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 1732 4U * tp->advmss); 1733 1734 if (free_space < mss) 1735 return 0; 1736 } 1737 1738 if (free_space > tp->rcv_ssthresh) 1739 free_space = tp->rcv_ssthresh; 1740 1741 /* Don't do rounding if we are using window scaling, since the 1742 * scaled window will not line up with the MSS boundary anyway. 1743 */ 1744 window = tp->rcv_wnd; 1745 if (tp->rx_opt.rcv_wscale) { 1746 window = free_space; 1747 1748 /* Advertise enough space so that it won't get scaled away. 1749 * Import case: prevent zero window announcement if 1750 * 1<<rcv_wscale > mss. 1751 */ 1752 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) 1753 window = (((window >> tp->rx_opt.rcv_wscale) + 1) 1754 << tp->rx_opt.rcv_wscale); 1755 } else { 1756 /* Get the largest window that is a nice multiple of mss. 1757 * Window clamp already applied above. 1758 * If our current window offering is within 1 mss of the 1759 * free space we just keep it. This prevents the divide 1760 * and multiply from happening most of the time. 1761 * We also don't do any window rounding when the free space 1762 * is too small. 
1763 */ 1764 if (window <= free_space - mss || window > free_space) 1765 window = (free_space / mss) * mss; 1766 else if (mss == full_space && 1767 free_space > window + (full_space >> 1)) 1768 window = free_space; 1769 } 1770 1771 return window; 1772 } 1773 1774 /* Collapses two adjacent SKB's during retransmission. */ 1775 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 1776 { 1777 struct tcp_sock *tp = tcp_sk(sk); 1778 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 1779 int skb_size, next_skb_size; 1780 1781 skb_size = skb->len; 1782 next_skb_size = next_skb->len; 1783 1784 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 1785 1786 tcp_highest_sack_combine(sk, next_skb, skb); 1787 1788 tcp_unlink_write_queue(next_skb, sk); 1789 1790 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), 1791 next_skb_size); 1792 1793 if (next_skb->ip_summed == CHECKSUM_PARTIAL) 1794 skb->ip_summed = CHECKSUM_PARTIAL; 1795 1796 if (skb->ip_summed != CHECKSUM_PARTIAL) 1797 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 1798 1799 /* Update sequence range on original skb. */ 1800 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 1801 1802 /* Merge over control information. This moves PSH/FIN etc. over */ 1803 TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags; 1804 1805 /* All done, get rid of second SKB and account for it so 1806 * packet counting does not break. 1807 */ 1808 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; 1809 1810 /* changed transmit queue under us so clear hints */ 1811 tcp_clear_retrans_hints_partial(tp); 1812 if (next_skb == tp->retransmit_skb_hint) 1813 tp->retransmit_skb_hint = skb; 1814 1815 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 1816 1817 sk_wmem_free_skb(sk, next_skb); 1818 } 1819 1820 /* Check if coalescing SKBs is legal. */ 1821 static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb) 1822 { 1823 if (tcp_skb_pcount(skb) > 1) 1824 return 0; 1825 /* TODO: SACK collapsing could be used to remove this condition */ 1826 if (skb_shinfo(skb)->nr_frags != 0) 1827 return 0; 1828 if (skb_cloned(skb)) 1829 return 0; 1830 if (skb == tcp_send_head(sk)) 1831 return 0; 1832 /* Some heurestics for collapsing over SACK'd could be invented */ 1833 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1834 return 0; 1835 1836 return 1; 1837 } 1838 1839 /* Collapse packets in the retransmit queue to make to create 1840 * less packets on the wire. This is only done on retransmission. 1841 */ 1842 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, 1843 int space) 1844 { 1845 struct tcp_sock *tp = tcp_sk(sk); 1846 struct sk_buff *skb = to, *tmp; 1847 int first = 1; 1848 1849 if (!sysctl_tcp_retrans_collapse) 1850 return; 1851 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) 1852 return; 1853 1854 tcp_for_write_queue_from_safe(skb, tmp, sk) { 1855 if (!tcp_can_collapse(sk, skb)) 1856 break; 1857 1858 space -= skb->len; 1859 1860 if (first) { 1861 first = 0; 1862 continue; 1863 } 1864 1865 if (space < 0) 1866 break; 1867 /* Punt if not enough space exists in the first SKB for 1868 * the data in the second 1869 */ 1870 if (skb->len > skb_tailroom(to)) 1871 break; 1872 1873 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 1874 break; 1875 1876 tcp_collapse_retrans(sk, to); 1877 } 1878 } 1879 1880 /* This retransmits one SKB. Policy decisions and retransmit queue 1881 * state updates are done by the caller. 
Returns non-zero if an 1882 * error occurred which prevented the send. 1883 */ 1884 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) 1885 { 1886 struct tcp_sock *tp = tcp_sk(sk); 1887 struct inet_connection_sock *icsk = inet_csk(sk); 1888 unsigned int cur_mss; 1889 int err; 1890 1891 /* Inconslusive MTU probe */ 1892 if (icsk->icsk_mtup.probe_size) { 1893 icsk->icsk_mtup.probe_size = 0; 1894 } 1895 1896 /* Do not sent more than we queued. 1/4 is reserved for possible 1897 * copying overhead: fragmentation, tunneling, mangling etc. 1898 */ 1899 if (atomic_read(&sk->sk_wmem_alloc) > 1900 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 1901 return -EAGAIN; 1902 1903 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 1904 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 1905 BUG(); 1906 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 1907 return -ENOMEM; 1908 } 1909 1910 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 1911 return -EHOSTUNREACH; /* Routing failure or similar. */ 1912 1913 cur_mss = tcp_current_mss(sk); 1914 1915 /* If receiver has shrunk his window, and skb is out of 1916 * new window, do not retransmit it. The exception is the 1917 * case, when window is shrunk to zero. In this case 1918 * our retransmit serves as a zero window probe. 1919 */ 1920 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) 1921 && TCP_SKB_CB(skb)->seq != tp->snd_una) 1922 return -EAGAIN; 1923 1924 if (skb->len > cur_mss) { 1925 if (tcp_fragment(sk, skb, cur_mss, cur_mss)) 1926 return -ENOMEM; /* We'll try again later. */ 1927 } else { 1928 int oldpcount = tcp_skb_pcount(skb); 1929 1930 if (unlikely(oldpcount > 1)) { 1931 tcp_init_tso_segs(sk, skb, cur_mss); 1932 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); 1933 } 1934 } 1935 1936 tcp_retrans_try_collapse(sk, skb, cur_mss); 1937 1938 /* Some Solaris stacks overoptimize and ignore the FIN on a 1939 * retransmit when old data is attached. So strip it off 1940 * since it is cheap to do so and saves bytes on the network. 1941 */ 1942 if (skb->len > 0 && 1943 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 1944 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 1945 if (!pskb_trim(skb, 0)) { 1946 /* Reuse, even though it does some unnecessary work */ 1947 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1, 1948 TCP_SKB_CB(skb)->flags); 1949 skb->ip_summed = CHECKSUM_NONE; 1950 } 1951 } 1952 1953 /* Make a copy, if the first transmission SKB clone we made 1954 * is still in somebody's hands, else make a clone. 1955 */ 1956 TCP_SKB_CB(skb)->when = tcp_time_stamp; 1957 1958 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 1959 1960 if (err == 0) { 1961 /* Update global TCP statistics. */ 1962 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 1963 1964 tp->total_retrans++; 1965 1966 #if FASTRETRANS_DEBUG > 0 1967 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 1968 if (net_ratelimit()) 1969 printk(KERN_DEBUG "retrans_out leaked.\n"); 1970 } 1971 #endif 1972 if (!tp->retrans_out) 1973 tp->lost_retrans_low = tp->snd_nxt; 1974 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 1975 tp->retrans_out += tcp_skb_pcount(skb); 1976 1977 /* Save stamp of the first retransmit. */ 1978 if (!tp->retrans_stamp) 1979 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 1980 1981 tp->undo_retrans++; 1982 1983 /* snd_nxt is stored to detect loss of retransmitted segment, 1984 * see tcp_input.c tcp_sacktag_write_queue(). 
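 *
 * Roughly, and with made-up numbers (the matching logic lives in
 * tcp_input.c): if the segment covering 12001-13000 is retransmitted
 * while snd_nxt is 20001, that 20001 is remembered in ack_seq below.
 * Should later SACK blocks acknowledge data beyond 20001 while 12001
 * is still neither ACKed nor SACKed, the retransmission itself is
 * presumed lost and the segment becomes eligible to be sent again.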
1985 */ 1986 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; 1987 } 1988 return err; 1989 } 1990 1991 /* Check if we forward retransmits are possible in the current 1992 * window/congestion state. 1993 */ 1994 static int tcp_can_forward_retransmit(struct sock *sk) 1995 { 1996 const struct inet_connection_sock *icsk = inet_csk(sk); 1997 struct tcp_sock *tp = tcp_sk(sk); 1998 1999 /* Forward retransmissions are possible only during Recovery. */ 2000 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2001 return 0; 2002 2003 /* No forward retransmissions in Reno are possible. */ 2004 if (tcp_is_reno(tp)) 2005 return 0; 2006 2007 /* Yeah, we have to make difficult choice between forward transmission 2008 * and retransmission... Both ways have their merits... 2009 * 2010 * For now we do not retransmit anything, while we have some new 2011 * segments to send. In the other cases, follow rule 3 for 2012 * NextSeg() specified in RFC3517. 2013 */ 2014 2015 if (tcp_may_send_now(sk)) 2016 return 0; 2017 2018 return 1; 2019 } 2020 2021 /* This gets called after a retransmit timeout, and the initially 2022 * retransmitted data is acknowledged. It tries to continue 2023 * resending the rest of the retransmit queue, until either 2024 * we've sent it all or the congestion window limit is reached. 2025 * If doing SACK, the first ACK which comes back for a timeout 2026 * based retransmit packet might feed us FACK information again. 2027 * If so, we use it to avoid unnecessarily retransmissions. 2028 */ 2029 void tcp_xmit_retransmit_queue(struct sock *sk) 2030 { 2031 const struct inet_connection_sock *icsk = inet_csk(sk); 2032 struct tcp_sock *tp = tcp_sk(sk); 2033 struct sk_buff *skb; 2034 struct sk_buff *hole = NULL; 2035 u32 last_lost; 2036 int mib_idx; 2037 int fwd_rexmitting = 0; 2038 2039 if (!tp->lost_out) 2040 tp->retransmit_high = tp->snd_una; 2041 2042 if (tp->retransmit_skb_hint) { 2043 skb = tp->retransmit_skb_hint; 2044 last_lost = TCP_SKB_CB(skb)->end_seq; 2045 if (after(last_lost, tp->retransmit_high)) 2046 last_lost = tp->retransmit_high; 2047 } else { 2048 skb = tcp_write_queue_head(sk); 2049 last_lost = tp->snd_una; 2050 } 2051 2052 tcp_for_write_queue_from(skb, sk) { 2053 __u8 sacked = TCP_SKB_CB(skb)->sacked; 2054 2055 if (skb == tcp_send_head(sk)) 2056 break; 2057 /* we could do better than to assign each time */ 2058 if (hole == NULL) 2059 tp->retransmit_skb_hint = skb; 2060 2061 /* Assume this retransmit will generate 2062 * only one packet for congestion window 2063 * calculation purposes. This works because 2064 * tcp_retransmit_skb() will chop up the 2065 * packet to be MSS sized and all the 2066 * packet counting works out. 
2067 */ 2068 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2069 return; 2070 2071 if (fwd_rexmitting) { 2072 begin_fwd: 2073 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2074 break; 2075 mib_idx = LINUX_MIB_TCPFORWARDRETRANS; 2076 2077 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { 2078 tp->retransmit_high = last_lost; 2079 if (!tcp_can_forward_retransmit(sk)) 2080 break; 2081 /* Backtrack if necessary to non-L'ed skb */ 2082 if (hole != NULL) { 2083 skb = hole; 2084 hole = NULL; 2085 } 2086 fwd_rexmitting = 1; 2087 goto begin_fwd; 2088 2089 } else if (!(sacked & TCPCB_LOST)) { 2090 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 2091 hole = skb; 2092 continue; 2093 2094 } else { 2095 last_lost = TCP_SKB_CB(skb)->end_seq; 2096 if (icsk->icsk_ca_state != TCP_CA_Loss) 2097 mib_idx = LINUX_MIB_TCPFASTRETRANS; 2098 else 2099 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 2100 } 2101 2102 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 2103 continue; 2104 2105 if (tcp_retransmit_skb(sk, skb)) 2106 return; 2107 NET_INC_STATS_BH(sock_net(sk), mib_idx); 2108 2109 if (skb == tcp_write_queue_head(sk)) 2110 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2111 inet_csk(sk)->icsk_rto, 2112 TCP_RTO_MAX); 2113 } 2114 } 2115 2116 /* Send a fin. The caller locks the socket for us. This cannot be 2117 * allowed to fail queueing a FIN frame under any circumstances. 2118 */ 2119 void tcp_send_fin(struct sock *sk) 2120 { 2121 struct tcp_sock *tp = tcp_sk(sk); 2122 struct sk_buff *skb = tcp_write_queue_tail(sk); 2123 int mss_now; 2124 2125 /* Optimization, tack on the FIN if we have a queue of 2126 * unsent frames. But be careful about outgoing SACKS 2127 * and IP options. 2128 */ 2129 mss_now = tcp_current_mss(sk); 2130 2131 if (tcp_send_head(sk) != NULL) { 2132 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; 2133 TCP_SKB_CB(skb)->end_seq++; 2134 tp->write_seq++; 2135 } else { 2136 /* Socket is locked, keep trying until memory is available. */ 2137 for (;;) { 2138 skb = alloc_skb_fclone(MAX_TCP_HEADER, 2139 sk->sk_allocation); 2140 if (skb) 2141 break; 2142 yield(); 2143 } 2144 2145 /* Reserve space for headers and prepare control bits. */ 2146 skb_reserve(skb, MAX_TCP_HEADER); 2147 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2148 tcp_init_nondata_skb(skb, tp->write_seq, 2149 TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); 2150 tcp_queue_skb(sk, skb); 2151 } 2152 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 2153 } 2154 2155 /* We get here when a process closes a file descriptor (either due to 2156 * an explicit close() or as a byproduct of exit()'ing) and there 2157 * was unread data in the receive queue. This behavior is recommended 2158 * by RFC 2525, section 2.17. -DaveM 2159 */ 2160 void tcp_send_active_reset(struct sock *sk, gfp_t priority) 2161 { 2162 struct sk_buff *skb; 2163 2164 /* NOTE: No TCP options attached and we never retransmit this. */ 2165 skb = alloc_skb(MAX_TCP_HEADER, priority); 2166 if (!skb) { 2167 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2168 return; 2169 } 2170 2171 /* Reserve space for headers and prepare control bits. */ 2172 skb_reserve(skb, MAX_TCP_HEADER); 2173 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2174 TCPCB_FLAG_ACK | TCPCB_FLAG_RST); 2175 /* Send it off. 
*/ 2176 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2177 if (tcp_transmit_skb(sk, skb, 0, priority)) 2178 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2179 2180 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 2181 } 2182 2183 /* Send a crossed SYN-ACK during socket establishment. 2184 * WARNING: This routine must only be called when we have already sent 2185 * a SYN packet that crossed the incoming SYN that caused this routine 2186 * to get called. If this assumption fails then the initial rcv_wnd 2187 * and rcv_wscale values will not be correct. 2188 */ 2189 int tcp_send_synack(struct sock *sk) 2190 { 2191 struct sk_buff *skb; 2192 2193 skb = tcp_write_queue_head(sk); 2194 if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) { 2195 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 2196 return -EFAULT; 2197 } 2198 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) { 2199 if (skb_cloned(skb)) { 2200 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2201 if (nskb == NULL) 2202 return -ENOMEM; 2203 tcp_unlink_write_queue(skb, sk); 2204 skb_header_release(nskb); 2205 __tcp_add_write_queue_head(sk, nskb); 2206 sk_wmem_free_skb(sk, skb); 2207 sk->sk_wmem_queued += nskb->truesize; 2208 sk_mem_charge(sk, nskb->truesize); 2209 skb = nskb; 2210 } 2211 2212 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK; 2213 TCP_ECN_send_synack(tcp_sk(sk), skb); 2214 } 2215 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2216 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2217 } 2218 2219 /* Prepare a SYN-ACK. */ 2220 struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2221 struct request_sock *req) 2222 { 2223 struct inet_request_sock *ireq = inet_rsk(req); 2224 struct tcp_sock *tp = tcp_sk(sk); 2225 struct tcphdr *th; 2226 int tcp_header_size; 2227 struct tcp_out_options opts; 2228 struct sk_buff *skb; 2229 struct tcp_md5sig_key *md5; 2230 __u8 *md5_hash_location; 2231 int mss; 2232 2233 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 2234 if (skb == NULL) 2235 return NULL; 2236 2237 /* Reserve space for headers. */ 2238 skb_reserve(skb, MAX_TCP_HEADER); 2239 2240 skb_dst_set(skb, dst_clone(dst)); 2241 2242 mss = dst_metric(dst, RTAX_ADVMSS); 2243 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) 2244 mss = tp->rx_opt.user_mss; 2245 2246 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 2247 __u8 rcv_wscale; 2248 /* Set this up on the first call only */ 2249 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2250 /* tcp_full_space because it is guaranteed to be the first packet */ 2251 tcp_select_initial_window(tcp_full_space(sk), 2252 mss - (ireq->tstamp_ok ? 
TCPOLEN_TSTAMP_ALIGNED : 0), 2253 &req->rcv_wnd, 2254 &req->window_clamp, 2255 ireq->wscale_ok, 2256 &rcv_wscale); 2257 ireq->rcv_wscale = rcv_wscale; 2258 } 2259 2260 memset(&opts, 0, sizeof(opts)); 2261 #ifdef CONFIG_SYN_COOKIES 2262 if (unlikely(req->cookie_ts)) 2263 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); 2264 else 2265 #endif 2266 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2267 tcp_header_size = tcp_synack_options(sk, req, mss, 2268 skb, &opts, &md5) + 2269 sizeof(struct tcphdr); 2270 2271 skb_push(skb, tcp_header_size); 2272 skb_reset_transport_header(skb); 2273 2274 th = tcp_hdr(skb); 2275 memset(th, 0, sizeof(struct tcphdr)); 2276 th->syn = 1; 2277 th->ack = 1; 2278 TCP_ECN_make_synack(req, th); 2279 th->source = ireq->loc_port; 2280 th->dest = ireq->rmt_port; 2281 /* Setting of flags are superfluous here for callers (and ECE is 2282 * not even correctly set) 2283 */ 2284 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 2285 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK); 2286 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2287 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); 2288 2289 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 2290 th->window = htons(min(req->rcv_wnd, 65535U)); 2291 tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); 2292 th->doff = (tcp_header_size >> 2); 2293 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 2294 2295 #ifdef CONFIG_TCP_MD5SIG 2296 /* Okay, we have all we need - do the md5 hash if needed */ 2297 if (md5) { 2298 tcp_rsk(req)->af_specific->calc_md5_hash(md5_hash_location, 2299 md5, NULL, req, skb); 2300 } 2301 #endif 2302 2303 return skb; 2304 } 2305 2306 /* Do all connect socket setups that can be done AF independent. */ 2307 static void tcp_connect_init(struct sock *sk) 2308 { 2309 struct dst_entry *dst = __sk_dst_get(sk); 2310 struct tcp_sock *tp = tcp_sk(sk); 2311 __u8 rcv_wscale; 2312 2313 /* We'll fix this up when we get a response from the other end. 2314 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 2315 */ 2316 tp->tcp_header_len = sizeof(struct tcphdr) + 2317 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); 2318 2319 #ifdef CONFIG_TCP_MD5SIG 2320 if (tp->af_specific->md5_lookup(sk, sk) != NULL) 2321 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 2322 #endif 2323 2324 /* If user gave his TCP_MAXSEG, record it to clamp */ 2325 if (tp->rx_opt.user_mss) 2326 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 2327 tp->max_window = 0; 2328 tcp_mtup_init(sk); 2329 tcp_sync_mss(sk, dst_mtu(dst)); 2330 2331 if (!tp->window_clamp) 2332 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 2333 tp->advmss = dst_metric(dst, RTAX_ADVMSS); 2334 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) 2335 tp->advmss = tp->rx_opt.user_mss; 2336 2337 tcp_initialize_rcv_mss(sk); 2338 2339 tcp_select_initial_window(tcp_full_space(sk), 2340 tp->advmss - (tp->rx_opt.ts_recent_stamp ? 
tp->tcp_header_len - sizeof(struct tcphdr) : 0), 2341 &tp->rcv_wnd, 2342 &tp->window_clamp, 2343 sysctl_tcp_window_scaling, 2344 &rcv_wscale); 2345 2346 tp->rx_opt.rcv_wscale = rcv_wscale; 2347 tp->rcv_ssthresh = tp->rcv_wnd; 2348 2349 sk->sk_err = 0; 2350 sock_reset_flag(sk, SOCK_DONE); 2351 tp->snd_wnd = 0; 2352 tcp_init_wl(tp, 0); 2353 tp->snd_una = tp->write_seq; 2354 tp->snd_sml = tp->write_seq; 2355 tp->snd_up = tp->write_seq; 2356 tp->rcv_nxt = 0; 2357 tp->rcv_wup = 0; 2358 tp->copied_seq = 0; 2359 2360 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2361 inet_csk(sk)->icsk_retransmits = 0; 2362 tcp_clear_retrans(tp); 2363 } 2364 2365 /* Build a SYN and send it off. */ 2366 int tcp_connect(struct sock *sk) 2367 { 2368 struct tcp_sock *tp = tcp_sk(sk); 2369 struct sk_buff *buff; 2370 2371 tcp_connect_init(sk); 2372 2373 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); 2374 if (unlikely(buff == NULL)) 2375 return -ENOBUFS; 2376 2377 /* Reserve space for headers. */ 2378 skb_reserve(buff, MAX_TCP_HEADER); 2379 2380 tp->snd_nxt = tp->write_seq; 2381 tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN); 2382 TCP_ECN_send_syn(sk, buff); 2383 2384 /* Send it off. */ 2385 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2386 tp->retrans_stamp = TCP_SKB_CB(buff)->when; 2387 skb_header_release(buff); 2388 __tcp_add_write_queue_tail(sk, buff); 2389 sk->sk_wmem_queued += buff->truesize; 2390 sk_mem_charge(sk, buff->truesize); 2391 tp->packets_out += tcp_skb_pcount(buff); 2392 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 2393 2394 /* We change tp->snd_nxt after the tcp_transmit_skb() call 2395 * in order to make this packet get counted in tcpOutSegs. 2396 */ 2397 tp->snd_nxt = tp->write_seq; 2398 tp->pushed_seq = tp->write_seq; 2399 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); 2400 2401 /* Timer for repeating the SYN until an answer. */ 2402 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2403 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 2404 return 0; 2405 } 2406 2407 /* Send out a delayed ack, the caller does the policy checking 2408 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 2409 * for details. 2410 */ 2411 void tcp_send_delayed_ack(struct sock *sk) 2412 { 2413 struct inet_connection_sock *icsk = inet_csk(sk); 2414 int ato = icsk->icsk_ack.ato; 2415 unsigned long timeout; 2416 2417 if (ato > TCP_DELACK_MIN) { 2418 const struct tcp_sock *tp = tcp_sk(sk); 2419 int max_ato = HZ / 2; 2420 2421 if (icsk->icsk_ack.pingpong || 2422 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 2423 max_ato = TCP_DELACK_MAX; 2424 2425 /* Slow path, intersegment interval is "high". */ 2426 2427 /* If some rtt estimate is known, use it to bound delayed ack. 2428 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 2429 * directly. 2430 */ 2431 if (tp->srtt) { 2432 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN); 2433 2434 if (rtt < max_ato) 2435 max_ato = rtt; 2436 } 2437 2438 ato = min(ato, max_ato); 2439 } 2440 2441 /* Stay within the limit we were given */ 2442 timeout = jiffies + ato; 2443 2444 /* Use new timeout only if there wasn't a older one earlier. */ 2445 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 2446 /* If delack timer was blocked or is about to expire, 2447 * send ACK now. 
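 *
 * For instance (made-up timings): with ato = 40 ms and an existing
 * delack timer due in 5 ms, that deadline is within ato/4 = 10 ms of
 * now, so the ACK goes out immediately instead of being re-armed.
 * Were the old timer due in 30 ms instead, the earlier 30 ms deadline
 * would be kept rather than pushed out to the new 40 ms one.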
2448 */ 2449 if (icsk->icsk_ack.blocked || 2450 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 2451 tcp_send_ack(sk); 2452 return; 2453 } 2454 2455 if (!time_before(timeout, icsk->icsk_ack.timeout)) 2456 timeout = icsk->icsk_ack.timeout; 2457 } 2458 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 2459 icsk->icsk_ack.timeout = timeout; 2460 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 2461 } 2462 2463 /* This routine sends an ack and also updates the window. */ 2464 void tcp_send_ack(struct sock *sk) 2465 { 2466 struct sk_buff *buff; 2467 2468 /* If we have been reset, we may not send again. */ 2469 if (sk->sk_state == TCP_CLOSE) 2470 return; 2471 2472 /* We are not putting this on the write queue, so 2473 * tcp_transmit_skb() will set the ownership to this 2474 * sock. 2475 */ 2476 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2477 if (buff == NULL) { 2478 inet_csk_schedule_ack(sk); 2479 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 2480 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 2481 TCP_DELACK_MAX, TCP_RTO_MAX); 2482 return; 2483 } 2484 2485 /* Reserve space for headers and prepare control bits. */ 2486 skb_reserve(buff, MAX_TCP_HEADER); 2487 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK); 2488 2489 /* Send it off, this clears delayed acks for us. */ 2490 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2491 tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); 2492 } 2493 2494 /* This routine sends a packet with an out of date sequence 2495 * number. It assumes the other end will try to ack it. 2496 * 2497 * Question: what should we make while urgent mode? 2498 * 4.4BSD forces sending single byte of data. We cannot send 2499 * out of window data, because we have SND.NXT==SND.MAX... 2500 * 2501 * Current solution: to send TWO zero-length segments in urgent mode: 2502 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is 2503 * out-of-date with SND.UNA-1 to probe window. 2504 */ 2505 static int tcp_xmit_probe_skb(struct sock *sk, int urgent) 2506 { 2507 struct tcp_sock *tp = tcp_sk(sk); 2508 struct sk_buff *skb; 2509 2510 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 2511 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2512 if (skb == NULL) 2513 return -1; 2514 2515 /* Reserve space for headers and set control bits. */ 2516 skb_reserve(skb, MAX_TCP_HEADER); 2517 /* Use a previous sequence. This should cause the other 2518 * end to send an ack. Don't queue or clone SKB, just 2519 * send it. 2520 */ 2521 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK); 2522 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2523 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 2524 } 2525 2526 /* Initiate keepalive or window probe from timer. 
*/ 2527 int tcp_write_wakeup(struct sock *sk) 2528 { 2529 struct tcp_sock *tp = tcp_sk(sk); 2530 struct sk_buff *skb; 2531 2532 if (sk->sk_state == TCP_CLOSE) 2533 return -1; 2534 2535 if ((skb = tcp_send_head(sk)) != NULL && 2536 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 2537 int err; 2538 unsigned int mss = tcp_current_mss(sk); 2539 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 2540 2541 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 2542 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 2543 2544 /* We are probing the opening of a window 2545 * but the window size is != 0 2546 * must have been a result SWS avoidance ( sender ) 2547 */ 2548 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 2549 skb->len > mss) { 2550 seg_size = min(seg_size, mss); 2551 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2552 if (tcp_fragment(sk, skb, seg_size, mss)) 2553 return -1; 2554 } else if (!tcp_skb_pcount(skb)) 2555 tcp_set_skb_tso_segs(sk, skb, mss); 2556 2557 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2558 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2559 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2560 if (!err) 2561 tcp_event_new_data_sent(sk, skb); 2562 return err; 2563 } else { 2564 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 2565 tcp_xmit_probe_skb(sk, 1); 2566 return tcp_xmit_probe_skb(sk, 0); 2567 } 2568 } 2569 2570 /* A window probe timeout has occurred. If window is not closed send 2571 * a partial packet else a zero probe. 2572 */ 2573 void tcp_send_probe0(struct sock *sk) 2574 { 2575 struct inet_connection_sock *icsk = inet_csk(sk); 2576 struct tcp_sock *tp = tcp_sk(sk); 2577 int err; 2578 2579 err = tcp_write_wakeup(sk); 2580 2581 if (tp->packets_out || !tcp_send_head(sk)) { 2582 /* Cancel probe timer, if it is not required. */ 2583 icsk->icsk_probes_out = 0; 2584 icsk->icsk_backoff = 0; 2585 return; 2586 } 2587 2588 if (err <= 0) { 2589 if (icsk->icsk_backoff < sysctl_tcp_retries2) 2590 icsk->icsk_backoff++; 2591 icsk->icsk_probes_out++; 2592 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2593 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 2594 TCP_RTO_MAX); 2595 } else { 2596 /* If packet was not sent due to local congestion, 2597 * do not backoff and do not remember icsk_probes_out. 2598 * Let local senders to fight for local resources. 2599 * 2600 * Use accumulated backoff yet. 2601 */ 2602 if (!icsk->icsk_probes_out) 2603 icsk->icsk_probes_out = 1; 2604 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2605 min(icsk->icsk_rto << icsk->icsk_backoff, 2606 TCP_RESOURCE_PROBE_INTERVAL), 2607 TCP_RTO_MAX); 2608 } 2609 } 2610 2611 EXPORT_SYMBOL(tcp_select_initial_window); 2612 EXPORT_SYMBOL(tcp_connect); 2613 EXPORT_SYMBOL(tcp_make_synack); 2614 EXPORT_SYMBOL(tcp_simple_retransmit); 2615 EXPORT_SYMBOL(tcp_sync_mss); 2616 EXPORT_SYMBOL(tcp_mtup_init); 2617
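/*
 * Purely illustrative, standalone userspace sketch (not part of this
 * file or its build): it mimics the probe0 rearming policy used by
 * tcp_send_probe0() above, where each unanswered zero-window probe
 * doubles the delay, icsk_rto << icsk_backoff, until it is clamped at
 * TCP_RTO_MAX.  The names and millisecond constants below are made up
 * for the demo; the kernel works in jiffies.
 */
#include <stdio.h>

#define DEMO_RTO_MAX_MS	(120 * 1000)	/* stand-in for TCP_RTO_MAX */

static unsigned long demo_probe0_delay_ms(unsigned long rto_ms,
					  unsigned int backoff)
{
	unsigned long when = rto_ms << backoff;

	return when < DEMO_RTO_MAX_MS ? when : DEMO_RTO_MAX_MS;
}

int main(void)
{
	unsigned int backoff;

	/* With a 200 ms RTO the probes fire 200, 400, 800, ... ms apart
	 * and saturate at DEMO_RTO_MAX_MS once the shift overtakes the
	 * clamp (here from backoff 10 onwards).
	 */
	for (backoff = 0; backoff <= 12; backoff++)
		printf("backoff %2u -> next probe in %lu ms\n",
		       backoff, demo_probe0_delay_ms(200, backoff));
	return 0;
}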