/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCPs found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* Default TSQ limit of four TSO segments */
int sysctl_tcp_limit_output_bytes __read_mostly = 262144;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

/* By default, RFC2861 behavior.  */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		tcp_rearm_rto(sk);
	}

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}
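
/* A note on the rearm above (descriptive only): when prior_packets is
 * zero this is the first packet put in flight, so the retransmit timer
 * is not yet running and must be started; a pending early-retransmit
 * or loss-probe timer was computed against the previous write queue
 * state, so it is re-armed as well.
 */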

/* SND.NXT, if window was not shrunk.
 * If the window has been shrunk, what should we do? It is not clear at all.
 * Using SND.UNA we will fail to open the window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of the cwnd validation mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	tp->lsndtime = now;

	/* If it is a reply within ato of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}


u32 tcp_default_init_rwnd(u32 mss)
{
	/* Initial receive window should be twice TCP_INIT_CWND to
	 * enable proper sending of new unsent data during fast recovery
	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
	 * limit when mss is larger than 1460.
	 */
	u32 init_rwnd = TCP_INIT_CWND * 2;

	if (mss > 1460)
		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
	return init_rwnd;
}
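
/* Worked example for tcp_default_init_rwnd() (illustrative numbers):
 * with TCP_INIT_CWND = 10, init_rwnd starts at 20 segments.  For a
 * jumbo mss of 9000 the byte budget is kept roughly constant:
 * (1460 * 20) / 9000 = 3, clamped to at least 2, so 3 segments
 * (~27 KB) are offered instead of 20 (~180 KB).
 */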

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	if (mss > (1 << *rcv_wscale)) {
		if (!init_rcv_wnd) /* Use default unless specified otherwise */
			init_rcv_wnd = tcp_default_init_rwnd(mss);
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
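
/* Worked example for the scaling loop above (illustrative numbers):
 * with sysctl_tcp_rmem[2] = 6291456 (6 MB) and a large enough clamp,
 * the loop halves 6291456 until it fits in 16 bits:
 * 6291456 >> 7 = 49152 <= 65535, so rcv_wscale = 7 is chosen and the
 * peer must interpret our advertised window shifted left by 7 bits.
 */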

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		       tcp_ca_needs_ecn(sk);

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk))
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN ACK is ultimately being received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
};
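
/* Illustrative wire layout produced from tcp_out_options by
 * tcp_options_write() below, for a data segment carrying only a
 * timestamp: one 32-bit word of padding/kind/length
 * (NOP, NOP, TIMESTAMP, 10) followed by two words holding tsval and
 * tsecr, 12 bytes in total.  All option writes below keep this
 * 32-bit alignment.
 */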

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options, we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * a particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK << 8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}
}

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				    struct tcp_out_options *opts,
				    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (*md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	/* We always get an MSS option. The option bytes which will be seen in
	 * normal data packets, should timestamps be used, must be counted in
	 * the MSS advertised. But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc. So account for this
	 * fact here if necessary. If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.
	 */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}
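
/* Worked budget for a typical SYN built above (illustrative, no MD5):
 * MAX_TCP_OPTION_SPACE = 40 bytes.  MSS takes 4, timestamps take 12
 * (aligned), window scale takes 4 (aligned), and SACK_PERM folds into
 * the timestamp word for free, leaving 20 bytes.  An 8-byte Fast Open
 * cookie then needs 10 bytes rounded up to 12, which still fits.
 */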

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb);
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					    struct tcp_out_options *opts,
					    struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

#ifdef CONFIG_TCP_MD5SIG
	*md5 = tp->af_specific->md5_lookup(sk, sk);
	if (unlikely(*md5)) {
		opts->options |= OPTION_MD5;
		size += TCPOLEN_MD5SIG_ALIGNED;
	}
#else
	*md5 = NULL;
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);
		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	return size;
}
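
/* Worked sizing for tcp_established_options() (illustrative numbers):
 * with timestamps on, size starts at 12 of the 40 option bytes.
 * The SACK clause can then fit (40 - 12 - 4) / 8 = 3 blocks, so even
 * if four ranges are pending only three SACK blocks (4 + 3*8 = 28
 * bytes) go on the wire, for 40 option bytes in total.
 */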

/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small amount of skbs per tcp flow in tx queues
 * (qdisc+dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event an skb needs to be reallocated in a driver.
 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_handler(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
		tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
			       0, GFP_ATOMIC);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(unsigned long data)
{
	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		bh_lock_sock(sk);

		if (!sock_owned_by_user(sk)) {
			tcp_tsq_handler(sk);
		} else {
			/* defer the work to tcp_release_cb() */
			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
		}
		bh_unlock_sock(sk);

		clear_bit(TSQ_QUEUED, &tp->tsq_flags);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
			  (1UL << TCP_DELACK_TIMER_DEFERRED) |	\
			  (1UL << TCP_MTU_REDUCED_DEFERRED))
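
/* Sketch of the TSQ flow implemented above and in tcp_release_cb()
 * below (summary, no additional machinery): tcp_wfree() marks the
 * socket TSQ_QUEUED and schedules this tasklet; the tasklet either
 * transmits directly, or, if the socket is owned by a process, sets
 * TCP_TSQ_DEFERRED so that release_sock() finishes the job via
 * tcp_release_cb().
 */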

/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = tp->tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);

	if (flags & (1UL << TCP_TSQ_DEFERRED))
		tcp_tsq_handler(sk);

	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_init(&tsq->tasklet,
			     tcp_tasklet_func,
			     (unsigned long)tsq);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	int wmem;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives :
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - chance for incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will be eventually set)
	 */
	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
	    !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
		unsigned long flags;
		struct tsq_tasklet *tsq;

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		list_add(&tp->tsq_node, &tsq->head);
		tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));
	tp = tcp_sk(sk);

	if (clone_it) {
		skb_mstamp_get(&skb->skb_mstamp);
		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
			- tp->snd_una;

		if (unlikely(skb_cloned(skb)))
			skb = pskb_copy(skb, gfp_mask);
		else
			skb = skb_clone(skb, gfp_mask);
		if (unlikely(!skb))
			return -ENOBUFS;
	}

	inet = inet_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	else
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue. We can be called from tcp_tsq_handler()
	 * which holds one reference to sk_wmem_alloc.
	 *
	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
	 * One way to get this would be to set skb->truesize = 2 on them.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
	skb_set_hash_from_sk(skb, sk);
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);

	/* Build TCP header and checksum it. */
	th = (struct tcphdr *)skb->data;
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(tp->rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);

	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);
	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
		th->window      = htons(tcp_select_window(sk));
		tcp_ecn_send(sk, skb, th, tcp_header_size);
	} else {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	}
#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, skb);
	}
#endif

	icsk->icsk_af_ops->send_check(sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

	if (skb->len != tcp_header_size) {
		tcp_event_data_sent(tp, sk);
		tp->data_segs_out += tcp_skb_pcount(skb);
	}

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	tp->segs_out += tcp_skb_pcount(skb);
	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);

	/* Our usage of tstamp should remain private */
	skb->tstamp.tv64 = 0;

	/* Cleanup our debris for IP stacks */
	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
			       sizeof(struct inet6_skb_parm)));

	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);

	if (likely(err <= 0))
		return err;

	tcp_enter_cwr(sk);

	return net_xmit_eval(err);
}

/* This routine just queues the buffer for sending.
 *
 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
{
	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		tcp_skb_pcount_set(skb, 1);
		TCP_SKB_CB(skb)->tcp_gso_size = 0;
	} else {
		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
	}
}
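
/* Worked example for tcp_set_skb_tso_segs() (illustrative numbers):
 * an skb of 4380 payload bytes with mss_now = 1460 gets
 * pcount = DIV_ROUND_UP(4380, 1460) = 3 and tcp_gso_size = 1460, i.e.
 * it is accounted as three packets even though it sits in one skb.
 */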

/* When a modification to fackets_out becomes necessary, we need to check
 * whether skb is counted in fackets_out or not.
 */
static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
				   int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->sacked_out || tcp_is_reno(tp))
		return;

	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
		tp->fackets_out -= decr;
}

/* Pcount in the middle of the write queue got changed, we need to do various
 * tweaks to fix the counters.
 */
static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	tcp_adjust_fackets_out(sk, skb, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}

static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->txstamp_ack ||
		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
}

static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (unlikely(tcp_has_tx_tstamp(skb)) &&
	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;

		shinfo->tx_flags &= ~tsflags;
		shinfo2->tx_flags |= tsflags;
		swap(shinfo->tskey, shinfo2->tskey);
		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
		TCP_SKB_CB(skb)->txstamp_ack = 0;
	}
}

static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
{
	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
	TCP_SKB_CB(skb)->eor = 0;
}
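
/* Illustrative sequence bookkeeping for tcp_fragment() below: splitting
 * an skb covering [1000, 4000) at len = 1460 leaves the original skb
 * covering [1000, 2460) and the new buff covering [2460, 4000), with
 * PSH/FIN moved to buff and pcounts recomputed for both halves.
 */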

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	int nlen;
	u8 flags;

	if (WARN_ON(len > skb->len))
		return -EINVAL;

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_unclone(skb, gfp))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
	if (!buff)
		return -ENOMEM; /* We'll just try again later. */

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
	tcp_skb_fragment_eor(skb, buff);

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len,
						       skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	buff->tstamp = skb->tstamp;
	tcp_fragment_tstamp(skb, buff);

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(skb, mss_now);
	tcp_set_skb_tso_segs(buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	__skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}

/* This is similar to __pskb_pull_tail() (it will go to core/skbuff.c
 * eventually). The difference is that pulled data is not copied, but
 * immediately discarded.
 */
static void __pskb_trim_head(struct sk_buff *skb, int len)
{
	struct skb_shared_info *shinfo;
	int i, k, eat;

	eat = min_t(int, len, skb_headlen(skb));
	if (eat) {
		__skb_pull(skb, eat);
		len -= eat;
		if (!len)
			return;
	}
	eat = len;
	k = 0;
	shinfo = skb_shinfo(skb);
	for (i = 0; i < shinfo->nr_frags; i++) {
		int size = skb_frag_size(&shinfo->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			shinfo->frags[k] = shinfo->frags[i];
			if (eat) {
				shinfo->frags[k].page_offset += eat;
				skb_frag_size_sub(&shinfo->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	shinfo->nr_frags = k;

	skb_reset_tail_pointer(skb);
	skb->data_len -= len;
	skb->len = skb->data_len;
}

/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_unclone(skb, GFP_ATOMIC))
		return -ENOMEM;

	__pskb_trim_head(skb, len);

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->truesize	   -= len;
	sk->sk_wmem_queued -= len;
	sk_mem_uncharge(sk, len);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso factor. */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));

	return 0;
}

/* Calculate MSS not accounting for any TCP options. */
static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	 * It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
	if (icsk->icsk_af_ops->net_frag_header_len) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
	}

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;
	return mss_now;
}

/* Calculate MSS. Not accounting for SACKs here.  */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	/* Subtract TCP options size, not including SACKs */
	return __tcp_mtu_to_mss(sk, pmtu) -
	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
}

/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
	if (icsk->icsk_af_ops->net_frag_header_len) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_allfrag(dst))
			mtu += icsk->icsk_af_ops->net_frag_header_len;
	}
	return mtu;
}

/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct net *net = sock_net(sk);

	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
				      icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
	if (icsk->icsk_mtup.enabled)
		icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
}
EXPORT_SYMBOL(tcp_mtup_init);
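
/* Worked example for tcp_mtu_to_mss()/tcp_mss_to_mtu() above
 * (illustrative, IPv4, timestamps on): for pmtu = 1500,
 * __tcp_mtu_to_mss() gives 1500 - 20 - 20 = 1460; with
 * tcp_header_len = 32 (20 + 12 for timestamps), tcp_mtu_to_mss()
 * returns 1460 - 12 = 1448 bytes of payload per segment, and
 * tcp_mss_to_mtu(sk, 1448) maps back to 1500.
 */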

/* This function synchronizes snd mss to current pmtu/exthdr set.
 *
 * tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
 * for TCP options, but includes only the bare TCP header.
 *
 * tp->rx_opt.mss_clamp is mss negotiated at connection setup.
 * It is the minimum of user_mss and mss received with SYN.
 * It also does not include TCP options.
 *
 * inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
 *
 * tp->mss_cache is the current effective sending mss, including
 * all tcp options except for SACKs. It is evaluated,
 * taking into account current pmtu, but never exceeds
 * tp->rx_opt.mss_clamp.
 *
 * NOTE1. rfc1122 clearly states that advertised MSS
 * DOES NOT include either tcp or ip options.
 *
 * NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
 * are READ ONLY outside this function.		--ANK (980731)
 */
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	if (icsk->icsk_mtup.search_high > pmtu)
		icsk->icsk_mtup.search_high = pmtu;

	mss_now = tcp_mtu_to_mss(sk, pmtu);
	mss_now = tcp_bound_to_half_wnd(tp, mss_now);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	if (icsk->icsk_mtup.enabled)
		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
	tp->mss_cache = mss_now;

	return mss_now;
}
EXPORT_SYMBOL(tcp_sync_mss);

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
unsigned int tcp_current_mss(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	unsigned int header_len;
	struct tcp_out_options opts;
	struct tcp_md5sig_key *md5;

	mss_now = tp->mss_cache;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
		     sizeof(struct tcphdr);
	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
	 * some common options. If this is an odd packet (because we have SACK
	 * blocks etc) then our calculated header_len will be different, and
	 * we have to adjust mss_now correspondingly.
	 */
	if (header_len != tp->tcp_header_len) {
		int delta = (int) header_len - tp->tcp_header_len;
		mss_now -= delta;
	}

	return mss_now;
}

/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
 * As additional protections, we do not touch cwnd in retransmission phases,
 * and if application hit its sndbuf limit recently.
 */
static void tcp_cwnd_application_limited(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		/* Limited by application or receiver window. */
		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
		u32 win_used = max(tp->snd_cwnd_used, init_win);
		if (win_used < tp->snd_cwnd) {
			tp->snd_ssthresh = tcp_current_ssthresh(sk);
			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
		}
		tp->snd_cwnd_used = 0;
	}
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
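
/* Worked example for tcp_cwnd_application_limited() above (illustrative
 * numbers): if snd_cwnd has grown to 40 but the application never had
 * more than snd_cwnd_used = 12 packets outstanding (and the initial
 * window is 10), cwnd decays to (40 + 12) / 2 = 26 while ssthresh
 * remembers the old operating point.
 */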

static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Track the maximum number of outstanding packets in each
	 * window, and remember whether we were cwnd-limited then.
	 */
	if (!before(tp->snd_una, tp->max_packets_seq) ||
	    tp->packets_out > tp->max_packets_out) {
		tp->max_packets_out = tp->packets_out;
		tp->max_packets_seq = tp->snd_nxt;
		tp->is_cwnd_limited = is_cwnd_limited;
	}

	if (tcp_is_cwnd_limited(sk)) {
		/* Network is fully fed. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if (sysctl_tcp_slow_start_after_idle &&
		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

/* Minshall's variant of the Nagle send check. */
static bool tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Update snd_sml if this skb is under mss.
 * Note that a TSO packet might end with a sub-mss segment.
 * The test is really :
 * if ((skb->len % mss) != 0)
 *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
 * But we can avoid doing the divide again given we already have
 * skb_pcount = skb->len / mss_now
 */
static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
				const struct sk_buff *skb)
{
	if (skb->len < tcp_skb_pcount(skb) * mss_now)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

/* Return false, if packet can be sent now without violating Nagle's rules:
 * 1. It is full sized. (provided by caller in %partial bool)
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
			    int nonagle)
{
	return partial &&
		((nonagle & TCP_NAGLE_CORK) ||
		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}

/* Return how many segs we'd like on a TSO packet,
 * to send one TSO packet per ms
 */
static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now)
{
	u32 bytes, segs;

	bytes = min(sk->sk_pacing_rate >> 10,
		    sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);

	/* Goal is to send at least one packet per ms,
	 * not one big TSO packet every 100 ms.
	 * This preserves ACK clocking and is consistent
	 * with tcp_tso_should_defer() heuristic.
	 */
	segs = max_t(u32, bytes / mss_now, sysctl_tcp_min_tso_segs);

	return min_t(u32, segs, sk->sk_gso_max_segs);
}

/* Returns the portion of skb which can be sent right away */
static unsigned int tcp_mss_split_point(const struct sock *sk,
					const struct sk_buff *skb,
					unsigned int mss_now,
					unsigned int max_segs,
					int nonagle)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 partial, needed, window, max_len;

	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
	max_len = mss_now * max_segs;

	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
		return max_len;

	needed = min(skb->len, window);

	if (max_len <= needed)
		return max_len;

	partial = needed % mss_now;
	/* If last segment is not a full MSS, check if Nagle rules allow us
	 * to include this last segment in this skb.
	 * Otherwise, we'll split the skb at last MSS boundary
	 */
	if (tcp_nagle_check(partial != 0, tp, nonagle))
		return needed - partial;

	return needed;
}
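
/* Worked example for tcp_tso_autosize() above (illustrative numbers):
 * at sk_pacing_rate = 125000000 B/s (~1 Gbit/s), rate >> 10 is ~122 KB
 * per ms, but the sk_gso_max_size clamp caps bytes near 64 KB, so with
 * mss_now = 1460 we get roughly 64 KB / 1460 ~= 44 segments, bounded
 * below by sysctl_tcp_min_tso_segs and above by sk_gso_max_segs.
 */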

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
					 const struct sk_buff *skb)
{
	u32 in_flight, cwnd, halfcwnd;

	/* Don't be strict about the congestion window for the final FIN.  */
	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
	    tcp_skb_pcount(skb) == 1)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight >= cwnd)
		return 0;

	/* For better scheduling, ensure we have at least
	 * 2 GSO packets in flight.
	 */
	halfcwnd = max(cwnd >> 1, 1U);
	return min(halfcwnd, cwnd - in_flight);
}

/* Initialize TSO state of a skb.
 * This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}


/* Return true if the Nagle test allows this packet to be
 * sent now.
 */
static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
				  unsigned int cur_mss, int nonagle)
{
	/* Nagle rule does not apply to frames which sit in the middle of the
	 * write_queue (they have no chance to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return true;

	/* Don't use the nagle rule for urgent data (or for the final FIN). */
	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
		return true;

	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
		return true;

	return false;
}

/* Does at least the first segment of SKB fit into the send window? */
static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
			     const struct sk_buff *skb,
			     unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tcp_wnd_end(tp));
}

/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

/* Test if sending is allowed right now. */
bool tcp_may_send_now(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = tcp_send_head(sk);

	return skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk),
			     (tcp_skb_is_last(sk, skb) ?
			      tp->nonagle : TCP_NAGLE_PUSH));
}
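
/* Worked example for tcp_cwnd_test() above (illustrative numbers):
 * with snd_cwnd = 10 and 7 packets in flight, the quota is
 * min(max(10 >> 1, 1), 10 - 7) = min(5, 3) = 3 segments; the halfcwnd
 * bound spreads a large cwnd over at least two bursts.
 */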

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
			unsigned int mss_now, gfp_t gfp)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u8 flags;

	/* All of a TSO frame must be composed of paged data.  */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now, gfp);

	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
	if (unlikely(!buff))
		return -ENOMEM;

	sk->sk_wmem_queued += buff->truesize;
	sk_mem_charge(sk, buff->truesize);
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	tcp_skb_fragment_eor(skb, buff);

	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);
	tcp_fragment_tstamp(skb, buff);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(skb, mss_now);
	tcp_set_skb_tso_segs(buff, mss_now);

	/* Link BUFF into the send queue. */
	__skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk);

	return 0;
}
*/ 1809 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 1810 goto send_now; 1811 1812 win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); 1813 if (win_divisor) { 1814 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1815 1816 /* If at least some fraction of a window is available, 1817 * just use it. 1818 */ 1819 chunk /= win_divisor; 1820 if (limit >= chunk) 1821 goto send_now; 1822 } else { 1823 /* Different approach, try not to defer past a single 1824 * ACK. Receiver should ACK every other full sized 1825 * frame, so if we have space for more than 3 frames 1826 * then send now. 1827 */ 1828 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) 1829 goto send_now; 1830 } 1831 1832 head = tcp_write_queue_head(sk); 1833 skb_mstamp_get(&now); 1834 age = skb_mstamp_us_delta(&now, &head->skb_mstamp); 1835 /* If next ACK is likely to come too late (half srtt), do not defer */ 1836 if (age < (tp->srtt_us >> 4)) 1837 goto send_now; 1838 1839 /* Ok, it looks like it is advisable to defer. */ 1840 1841 if (cong_win < send_win && cong_win <= skb->len) 1842 *is_cwnd_limited = true; 1843 1844 return true; 1845 1846 send_now: 1847 return false; 1848 } 1849 1850 static inline void tcp_mtu_check_reprobe(struct sock *sk) 1851 { 1852 struct inet_connection_sock *icsk = inet_csk(sk); 1853 struct tcp_sock *tp = tcp_sk(sk); 1854 struct net *net = sock_net(sk); 1855 u32 interval; 1856 s32 delta; 1857 1858 interval = net->ipv4.sysctl_tcp_probe_interval; 1859 delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp; 1860 if (unlikely(delta >= interval * HZ)) { 1861 int mss = tcp_current_mss(sk); 1862 1863 /* Update current search range */ 1864 icsk->icsk_mtup.probe_size = 0; 1865 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + 1866 sizeof(struct tcphdr) + 1867 icsk->icsk_af_ops->net_header_len; 1868 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); 1869 1870 /* Update probe time stamp */ 1871 icsk->icsk_mtup.probe_timestamp = tcp_time_stamp; 1872 } 1873 } 1874 1875 /* Create a new MTU probe if we are ready. 1876 * MTU probe is regularly attempting to increase the path MTU by 1877 * deliberately sending larger packets. This discovers routing 1878 * changes resulting in larger path MTUs. 1879 * 1880 * Returns 0 if we should wait to probe (no cwnd available), 1881 * 1 if a probe was sent, 1882 * -1 otherwise 1883 */ 1884 static int tcp_mtu_probe(struct sock *sk) 1885 { 1886 struct tcp_sock *tp = tcp_sk(sk); 1887 struct inet_connection_sock *icsk = inet_csk(sk); 1888 struct sk_buff *skb, *nskb, *next; 1889 struct net *net = sock_net(sk); 1890 int len; 1891 int probe_size; 1892 int size_needed; 1893 int copy; 1894 int mss_now; 1895 int interval; 1896 1897 /* Not currently probing/verifying, 1898 * not in recovery, 1899 * have enough cwnd, and 1900 * not SACKing (the variable headers throw things off) */ 1901 if (!icsk->icsk_mtup.enabled || 1902 icsk->icsk_mtup.probe_size || 1903 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 1904 tp->snd_cwnd < 11 || 1905 tp->rx_opt.num_sacks || tp->rx_opt.dsack) 1906 return -1; 1907 1908 /* Use binary search for probe_size between tcp_mss_base, 1909 * and current mss_clamp. if (search_high - search_low) 1910 * smaller than a threshold, backoff from probing. 
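 * An illustrative walk-through (hypothetical numbers, not taken from
 * this code): with search_low = 1052 and search_high = 1500, the probe
 * targets an MTU of (1052 + 1500) / 2 = 1276. A confirmed probe raises
 * search_low, a lost probe lowers search_high, and probing backs off
 * once the interval drops below sysctl_tcp_probe_threshold.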
1911 */ 1912 mss_now = tcp_current_mss(sk); 1913 probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high + 1914 icsk->icsk_mtup.search_low) >> 1); 1915 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; 1916 interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low; 1917 /* When misfortune happens, we are reprobing actively, 1918 * and the reprobe timer has expired. We stick with the current 1919 * probing process by not resetting the search range to its original values. 1920 */ 1921 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) || 1922 interval < net->ipv4.sysctl_tcp_probe_threshold) { 1923 /* Check whether enough time has elapsed for 1924 * another round of probing. 1925 */ 1926 tcp_mtu_check_reprobe(sk); 1927 return -1; 1928 } 1929 1930 /* Have enough data in the send queue to probe? */ 1931 if (tp->write_seq - tp->snd_nxt < size_needed) 1932 return -1; 1933 1934 if (tp->snd_wnd < size_needed) 1935 return -1; 1936 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) 1937 return 0; 1938 1939 /* Do we need to wait to drain cwnd? With none in flight, don't stall */ 1940 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { 1941 if (!tcp_packets_in_flight(tp)) 1942 return -1; 1943 else 1944 return 0; 1945 } 1946 1947 /* We're allowed to probe. Build it now. */ 1948 nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); 1949 if (!nskb) 1950 return -1; 1951 sk->sk_wmem_queued += nskb->truesize; 1952 sk_mem_charge(sk, nskb->truesize); 1953 1954 skb = tcp_send_head(sk); 1955 1956 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 1957 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 1958 TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK; 1959 TCP_SKB_CB(nskb)->sacked = 0; 1960 nskb->csum = 0; 1961 nskb->ip_summed = skb->ip_summed; 1962 1963 tcp_insert_write_queue_before(nskb, skb, sk); 1964 1965 len = 0; 1966 tcp_for_write_queue_from_safe(skb, next, sk) { 1967 copy = min_t(int, skb->len, probe_size - len); 1968 if (nskb->ip_summed) 1969 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 1970 else 1971 nskb->csum = skb_copy_and_csum_bits(skb, 0, 1972 skb_put(nskb, copy), 1973 copy, nskb->csum); 1974 1975 if (skb->len <= copy) { 1976 /* We've eaten all the data from this skb. 1977 * Throw it away. */ 1978 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 1979 tcp_unlink_write_queue(skb, sk); 1980 sk_wmem_free_skb(sk, skb); 1981 } else { 1982 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & 1983 ~(TCPHDR_FIN|TCPHDR_PSH); 1984 if (!skb_shinfo(skb)->nr_frags) { 1985 skb_pull(skb, copy); 1986 if (skb->ip_summed != CHECKSUM_PARTIAL) 1987 skb->csum = csum_partial(skb->data, 1988 skb->len, 0); 1989 } else { 1990 __pskb_trim_head(skb, copy); 1991 tcp_set_skb_tso_segs(skb, mss_now); 1992 } 1993 TCP_SKB_CB(skb)->seq += copy; 1994 } 1995 1996 len += copy; 1997 1998 if (len >= probe_size) 1999 break; 2000 } 2001 tcp_init_tso_segs(nskb, nskb->len); 2002 2003 /* We're ready to send. If this fails, the probe will 2004 * be resegmented into mss-sized pieces by tcp_write_xmit(). 2005 */ 2006 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 2007 /* Decrement cwnd here because we are sending 2008 * effectively two packets.
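 * (Informally: the probe is a single skb in packets_out but carries
 * roughly two MSS worth of wire data, so one cwnd slot is handed back
 * to keep the effective congestion window honest.)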
*/ 2009 tp->snd_cwnd--; 2010 tcp_event_new_data_sent(sk, nskb); 2011 2012 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); 2013 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; 2014 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; 2015 2016 return 1; 2017 } 2018 2019 return -1; 2020 } 2021 2022 /* This routine writes packets to the network. It advances the 2023 * send_head. This happens as incoming acks open up the remote 2024 * window for us. 2025 * 2026 * LARGESEND note: !tcp_urg_mode is overkill, only frames between 2027 * snd_up-64k-mss .. snd_up cannot be large. However, taking into 2028 * account rare use of URG, this is not a big flaw. 2029 * 2030 * Send at most one packet when push_one > 0. Temporarily ignore 2031 * cwnd limit to force at most one packet out when push_one == 2. 2032 2033 * Returns true, if no segments are in flight and we have queued segments, 2034 * but cannot send anything now because of SWS or another problem. 2035 */ 2036 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 2037 int push_one, gfp_t gfp) 2038 { 2039 struct tcp_sock *tp = tcp_sk(sk); 2040 struct sk_buff *skb; 2041 unsigned int tso_segs, sent_pkts; 2042 int cwnd_quota; 2043 int result; 2044 bool is_cwnd_limited = false; 2045 u32 max_segs; 2046 2047 sent_pkts = 0; 2048 2049 if (!push_one) { 2050 /* Do MTU probing. */ 2051 result = tcp_mtu_probe(sk); 2052 if (!result) { 2053 return false; 2054 } else if (result > 0) { 2055 sent_pkts = 1; 2056 } 2057 } 2058 2059 max_segs = tcp_tso_autosize(sk, mss_now); 2060 while ((skb = tcp_send_head(sk))) { 2061 unsigned int limit; 2062 2063 tso_segs = tcp_init_tso_segs(skb, mss_now); 2064 BUG_ON(!tso_segs); 2065 2066 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { 2067 /* "skb_mstamp" is used as a start point for the retransmit timer */ 2068 skb_mstamp_get(&skb->skb_mstamp); 2069 goto repair; /* Skip network transmission */ 2070 } 2071 2072 cwnd_quota = tcp_cwnd_test(tp, skb); 2073 if (!cwnd_quota) { 2074 if (push_one == 2) 2075 /* Force out a loss probe pkt. */ 2076 cwnd_quota = 1; 2077 else 2078 break; 2079 } 2080 2081 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 2082 break; 2083 2084 if (tso_segs == 1) { 2085 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, 2086 (tcp_skb_is_last(sk, skb) ? 2087 nonagle : TCP_NAGLE_PUSH)))) 2088 break; 2089 } else { 2090 if (!push_one && 2091 tcp_tso_should_defer(sk, skb, &is_cwnd_limited, 2092 max_segs)) 2093 break; 2094 } 2095 2096 limit = mss_now; 2097 if (tso_segs > 1 && !tcp_urg_mode(tp)) 2098 limit = tcp_mss_split_point(sk, skb, mss_now, 2099 min_t(unsigned int, 2100 cwnd_quota, 2101 max_segs), 2102 nonagle); 2103 2104 if (skb->len > limit && 2105 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) 2106 break; 2107 2108 /* TCP Small Queues : 2109 * Control number of packets in qdisc/devices to two packets / or ~1 ms. 2110 * This allows for : 2111 * - better RTT estimation and ACK scheduling 2112 * - faster recovery 2113 * - high rates 2114 * Alas, some drivers / subsystems require a fair amount 2115 * of queued bytes to ensure line rate. 
2116 * One example is wifi aggregation (802.11 AMPDU) 2117 */ 2118 limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10); 2119 limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes); 2120 2121 if (atomic_read(&sk->sk_wmem_alloc) > limit) { 2122 set_bit(TSQ_THROTTLED, &tp->tsq_flags); 2123 /* It is possible TX completion already happened 2124 * before we set TSQ_THROTTLED, so we must 2125 * test again the condition. 2126 */ 2127 smp_mb__after_atomic(); 2128 if (atomic_read(&sk->sk_wmem_alloc) > limit) 2129 break; 2130 } 2131 2132 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) 2133 break; 2134 2135 repair: 2136 /* Advance the send_head. This one is sent out. 2137 * This call will increment packets_out. 2138 */ 2139 tcp_event_new_data_sent(sk, skb); 2140 2141 tcp_minshall_update(tp, mss_now, skb); 2142 sent_pkts += tcp_skb_pcount(skb); 2143 2144 if (push_one) 2145 break; 2146 } 2147 2148 if (likely(sent_pkts)) { 2149 if (tcp_in_cwnd_reduction(sk)) 2150 tp->prr_out += sent_pkts; 2151 2152 /* Send one loss probe per tail loss episode. */ 2153 if (push_one != 2) 2154 tcp_schedule_loss_probe(sk); 2155 is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); 2156 tcp_cwnd_validate(sk, is_cwnd_limited); 2157 return false; 2158 } 2159 return !tp->packets_out && tcp_send_head(sk); 2160 } 2161 2162 bool tcp_schedule_loss_probe(struct sock *sk) 2163 { 2164 struct inet_connection_sock *icsk = inet_csk(sk); 2165 struct tcp_sock *tp = tcp_sk(sk); 2166 u32 timeout, tlp_time_stamp, rto_time_stamp; 2167 u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3); 2168 2169 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS)) 2170 return false; 2171 /* No consecutive loss probes. */ 2172 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { 2173 tcp_rearm_rto(sk); 2174 return false; 2175 } 2176 /* Don't do any loss probe on a Fast Open connection before 3WHS 2177 * finishes. 2178 */ 2179 if (tp->fastopen_rsk) 2180 return false; 2181 2182 /* TLP is only scheduled when next timer event is RTO. */ 2183 if (icsk->icsk_pending != ICSK_TIME_RETRANS) 2184 return false; 2185 2186 /* Schedule a loss probe in 2*RTT for SACK capable connections 2187 * in Open state, that are either limited by cwnd or application. 2188 */ 2189 if (sysctl_tcp_early_retrans < 3 || !tp->packets_out || 2190 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open) 2191 return false; 2192 2193 if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) && 2194 tcp_send_head(sk)) 2195 return false; 2196 2197 /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account 2198 * for delayed ack when there's one outstanding packet. If no RTT 2199 * sample is available then probe after TCP_TIMEOUT_INIT. 2200 */ 2201 timeout = rtt << 1 ? : TCP_TIMEOUT_INIT; 2202 if (tp->packets_out == 1) 2203 timeout = max_t(u32, timeout, 2204 (rtt + (rtt >> 1) + TCP_DELACK_MAX)); 2205 timeout = max_t(u32, timeout, msecs_to_jiffies(10)); 2206 2207 /* If RTO is shorter, just schedule TLP in its place. */ 2208 tlp_time_stamp = tcp_time_stamp + timeout; 2209 rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout; 2210 if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) { 2211 s32 delta = rto_time_stamp - tcp_time_stamp; 2212 if (delta > 0) 2213 timeout = delta; 2214 } 2215 2216 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, 2217 TCP_RTO_MAX); 2218 return true; 2219 } 2220 2221 /* Thanks to skb fast clones, we can detect if a prior transmit of 2222 * a packet is still in a qdisc or driver queue. 
2223 * In this case, there is very little point in doing a retransmit! 2224 */ 2225 static bool skb_still_in_host_queue(const struct sock *sk, 2226 const struct sk_buff *skb) 2227 { 2228 if (unlikely(skb_fclone_busy(sk, skb))) { 2229 NET_INC_STATS(sock_net(sk), 2230 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); 2231 return true; 2232 } 2233 return false; 2234 } 2235 2236 /* When probe timeout (PTO) fires, try to send a new segment if possible, else 2237 * retransmit the last segment. 2238 */ 2239 void tcp_send_loss_probe(struct sock *sk) 2240 { 2241 struct tcp_sock *tp = tcp_sk(sk); 2242 struct sk_buff *skb; 2243 int pcount; 2244 int mss = tcp_current_mss(sk); 2245 2246 skb = tcp_send_head(sk); 2247 if (skb) { 2248 if (tcp_snd_wnd_test(tp, skb, mss)) { 2249 pcount = tp->packets_out; 2250 tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); 2251 if (tp->packets_out > pcount) 2252 goto probe_sent; 2253 goto rearm_timer; 2254 } 2255 skb = tcp_write_queue_prev(sk, skb); 2256 } else { 2257 skb = tcp_write_queue_tail(sk); 2258 } 2259 2260 /* At most one outstanding TLP retransmission. */ 2261 if (tp->tlp_high_seq) 2262 goto rearm_timer; 2263 2264 /* Retransmit last segment. */ 2265 if (WARN_ON(!skb)) 2266 goto rearm_timer; 2267 2268 if (skb_still_in_host_queue(sk, skb)) 2269 goto rearm_timer; 2270 2271 pcount = tcp_skb_pcount(skb); 2272 if (WARN_ON(!pcount)) 2273 goto rearm_timer; 2274 2275 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { 2276 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss, 2277 GFP_ATOMIC))) 2278 goto rearm_timer; 2279 skb = tcp_write_queue_next(sk, skb); 2280 } 2281 2282 if (WARN_ON(!skb || !tcp_skb_pcount(skb))) 2283 goto rearm_timer; 2284 2285 if (__tcp_retransmit_skb(sk, skb, 1)) 2286 goto rearm_timer; 2287 2288 /* Record snd_nxt for loss detection. */ 2289 tp->tlp_high_seq = tp->snd_nxt; 2290 2291 probe_sent: 2292 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES); 2293 /* Reset s.t. tcp_rearm_rto will restart timer from now */ 2294 inet_csk(sk)->icsk_pending = 0; 2295 rearm_timer: 2296 tcp_rearm_rto(sk); 2297 } 2298 2299 /* Push out any pending frames which were held back due to 2300 * TCP_CORK or an attempt at coalescing tiny packets. 2301 * The socket must be locked by the caller. 2302 */ 2303 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 2304 int nonagle) 2305 { 2306 /* If we are closed, the bytes will have to remain here. 2307 * In time, closedown will finish; we empty the write queue and 2308 * all will be happy. 2309 */ 2310 if (unlikely(sk->sk_state == TCP_CLOSE)) 2311 return; 2312 2313 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, 2314 sk_gfp_mask(sk, GFP_ATOMIC))) 2315 tcp_check_probe_timer(sk); 2316 } 2317 2318 /* Send the _single_ skb sitting at the send head. This function requires a 2319 * true push of pending frames to set up the probe timer etc. 2320 */ 2321 void tcp_push_one(struct sock *sk, unsigned int mss_now) 2322 { 2323 struct sk_buff *skb = tcp_send_head(sk); 2324 2325 BUG_ON(!skb || skb->len < mss_now); 2326 2327 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); 2328 } 2329 2330 /* This function returns the amount that we can raise the 2331 * usable window based on the following constraints: 2332 * 2333 * 1. The window can never be shrunk once it is offered (RFC 793) 2334 * 2.
We limit memory per socket 2335 * 2336 * RFC 1122: 2337 * "the suggested [SWS] avoidance algorithm for the receiver is to keep 2338 * RECV.NEXT + RCV.WIN fixed until: 2339 * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" 2340 * 2341 * i.e. don't raise the right edge of the window until you can raise 2342 * it at least MSS bytes. 2343 * 2344 * Unfortunately, the recommended algorithm breaks header prediction, 2345 * since header prediction assumes th->window stays fixed. 2346 * 2347 * Strictly speaking, keeping th->window fixed violates the receiver 2348 * side SWS prevention criteria. The problem is that under this rule 2349 * a stream of single byte packets will cause the right side of the 2350 * window to always advance by a single byte. 2351 * 2352 * Of course, if the sender implements sender side SWS prevention 2353 * then this will not be a problem. 2354 * 2355 * BSD seems to make the following compromise: 2356 * 2357 * If the free space is less than the 1/4 of the maximum 2358 * space available and the free space is less than 1/2 mss, 2359 * then set the window to 0. 2360 * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] 2361 * Otherwise, just prevent the window from shrinking 2362 * and from being larger than the largest representable value. 2363 * 2364 * This prevents incremental opening of the window in the regime 2365 * where TCP is limited by the speed of the reader side taking 2366 * data out of the TCP receive queue. It does nothing about 2367 * those cases where the window is constrained on the sender side 2368 * because the pipeline is full. 2369 * 2370 * BSD also seems to "accidentally" limit itself to windows that are a 2371 * multiple of MSS, at least until the free space gets quite small. 2372 * This would appear to be a side effect of the mbuf implementation. 2373 * Combining these two algorithms results in the observed behavior 2374 * of having a fixed window size at almost all times. 2375 * 2376 * Below we obtain similar behavior by forcing the offered window to 2377 * a multiple of the mss when it is feasible to do so. 2378 * 2379 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. 2380 * Regular options like TIMESTAMP are taken into account. 2381 */ 2382 u32 __tcp_select_window(struct sock *sk) 2383 { 2384 struct inet_connection_sock *icsk = inet_csk(sk); 2385 struct tcp_sock *tp = tcp_sk(sk); 2386 /* MSS for the peer's data. Previous versions used mss_clamp 2387 * here. I don't know if the value based on our guesses 2388 * of peer's MSS is better for the performance. It's more correct 2389 * but may be worse for the performance because of rcv_mss 2390 * fluctuations. --SAW 1998/11/1 2391 */ 2392 int mss = icsk->icsk_ack.rcv_mss; 2393 int free_space = tcp_space(sk); 2394 int allowed_space = tcp_full_space(sk); 2395 int full_space = min_t(int, tp->window_clamp, allowed_space); 2396 int window; 2397 2398 if (mss > full_space) 2399 mss = full_space; 2400 2401 if (free_space < (full_space >> 1)) { 2402 icsk->icsk_ack.quick = 0; 2403 2404 if (tcp_under_memory_pressure(sk)) 2405 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 2406 4U * tp->advmss); 2407 2408 /* free_space might become our new window, make sure we don't 2409 * increase it due to wscale. 
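 * For example (illustrative values): with rcv_wscale = 7 the advertised
 * window moves in 1 << 7 = 128 byte steps, so free_space is rounded down
 * to a 128-byte multiple below before it may be offered.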
2410 */ 2411 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); 2412 2413 /* if free space is less than mss estimate, or is below 1/16th 2414 * of the maximum allowed, try to move to zero-window, else 2415 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and 2416 * new incoming data is dropped due to memory limits. 2417 * With large window, mss test triggers way too late in order 2418 * to announce zero window in time before rmem limit kicks in. 2419 */ 2420 if (free_space < (allowed_space >> 4) || free_space < mss) 2421 return 0; 2422 } 2423 2424 if (free_space > tp->rcv_ssthresh) 2425 free_space = tp->rcv_ssthresh; 2426 2427 /* Don't do rounding if we are using window scaling, since the 2428 * scaled window will not line up with the MSS boundary anyway. 2429 */ 2430 window = tp->rcv_wnd; 2431 if (tp->rx_opt.rcv_wscale) { 2432 window = free_space; 2433 2434 /* Advertise enough space so that it won't get scaled away. 2435 * Important case: prevent a zero window announcement if 2436 * 1<<rcv_wscale > mss. 2437 */ 2438 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) 2439 window = (((window >> tp->rx_opt.rcv_wscale) + 1) 2440 << tp->rx_opt.rcv_wscale); 2441 } else { 2442 /* Get the largest window that is a nice multiple of mss. 2443 * Window clamp already applied above. 2444 * If our current window offering is within 1 mss of the 2445 * free space we just keep it. This prevents the divide 2446 * and multiply from happening most of the time. 2447 * We also don't do any window rounding when the free space 2448 * is too small. 2449 */ 2450 if (window <= free_space - mss || window > free_space) 2451 window = (free_space / mss) * mss; 2452 else if (mss == full_space && 2453 free_space > window + (full_space >> 1)) 2454 window = free_space; 2455 } 2456 2457 return window; 2458 } 2459 2460 void tcp_skb_collapse_tstamp(struct sk_buff *skb, 2461 const struct sk_buff *next_skb) 2462 { 2463 if (unlikely(tcp_has_tx_tstamp(next_skb))) { 2464 const struct skb_shared_info *next_shinfo = 2465 skb_shinfo(next_skb); 2466 struct skb_shared_info *shinfo = skb_shinfo(skb); 2467 2468 shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP; 2469 shinfo->tskey = next_shinfo->tskey; 2470 TCP_SKB_CB(skb)->txstamp_ack |= 2471 TCP_SKB_CB(next_skb)->txstamp_ack; 2472 } 2473 } 2474 2475 /* Collapses two adjacent SKBs during retransmission. */ 2476 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) 2477 { 2478 struct tcp_sock *tp = tcp_sk(sk); 2479 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); 2480 int skb_size, next_skb_size; 2481 2482 skb_size = skb->len; 2483 next_skb_size = next_skb->len; 2484 2485 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); 2486 2487 tcp_highest_sack_combine(sk, next_skb, skb); 2488 2489 tcp_unlink_write_queue(next_skb, sk); 2490 2491 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), 2492 next_skb_size); 2493 2494 if (next_skb->ip_summed == CHECKSUM_PARTIAL) 2495 skb->ip_summed = CHECKSUM_PARTIAL; 2496 2497 if (skb->ip_summed != CHECKSUM_PARTIAL) 2498 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); 2499 2500 /* Update sequence range on original skb. */ 2501 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; 2502 2503 /* Merge over control information. This moves PSH/FIN etc. over */ 2504 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; 2505 2506 /* All done, get rid of second SKB and account for it so 2507 * packet counting does not break.
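 * (tcp_adjust_pcount() below subtracts next_skb's segment count from
 * packets_out and the related sacked/retrans/lost counters.)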
2508 */ 2509 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; 2510 TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor; 2511 2512 /* changed transmit queue under us so clear hints */ 2513 tcp_clear_retrans_hints_partial(tp); 2514 if (next_skb == tp->retransmit_skb_hint) 2515 tp->retransmit_skb_hint = skb; 2516 2517 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb)); 2518 2519 tcp_skb_collapse_tstamp(skb, next_skb); 2520 2521 sk_wmem_free_skb(sk, next_skb); 2522 } 2523 2524 /* Check if coalescing SKBs is legal. */ 2525 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) 2526 { 2527 if (tcp_skb_pcount(skb) > 1) 2528 return false; 2529 /* TODO: SACK collapsing could be used to remove this condition */ 2530 if (skb_shinfo(skb)->nr_frags != 0) 2531 return false; 2532 if (skb_cloned(skb)) 2533 return false; 2534 if (skb == tcp_send_head(sk)) 2535 return false; 2536 /* Some heuristics for collapsing over SACK'ed data could be invented */ 2537 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 2538 return false; 2539 2540 return true; 2541 } 2542 2543 /* Collapse packets in the retransmit queue to create fewer packets 2544 * on the wire. This is only done on retransmission. 2545 */ 2546 static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to, 2547 int space) 2548 { 2549 struct tcp_sock *tp = tcp_sk(sk); 2550 struct sk_buff *skb = to, *tmp; 2551 bool first = true; 2552 2553 if (!sysctl_tcp_retrans_collapse) 2554 return; 2555 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 2556 return; 2557 2558 tcp_for_write_queue_from_safe(skb, tmp, sk) { 2559 if (!tcp_can_collapse(sk, skb)) 2560 break; 2561 2562 if (!tcp_skb_can_collapse_to(to)) 2563 break; 2564 2565 space -= skb->len; 2566 2567 if (first) { 2568 first = false; 2569 continue; 2570 } 2571 2572 if (space < 0) 2573 break; 2574 /* Punt if not enough space exists in the first SKB for 2575 * the data in the second 2576 */ 2577 if (skb->len > skb_availroom(to)) 2578 break; 2579 2580 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 2581 break; 2582 2583 tcp_collapse_retrans(sk, to); 2584 } 2585 } 2586 2587 /* This retransmits one SKB. Policy decisions and retransmit queue 2588 * state updates are done by the caller. Returns non-zero if an 2589 * error occurred which prevented the send. 2590 */ 2591 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) 2592 { 2593 struct inet_connection_sock *icsk = inet_csk(sk); 2594 struct tcp_sock *tp = tcp_sk(sk); 2595 unsigned int cur_mss; 2596 int diff, len, err; 2597 2598 2599 /* Inconclusive MTU probe */ 2600 if (icsk->icsk_mtup.probe_size) 2601 icsk->icsk_mtup.probe_size = 0; 2602 2603 /* Do not send more than we queued. 1/4 is reserved for possible 2604 * copying overhead: fragmentation, tunneling, mangling etc. 2605 */ 2606 if (atomic_read(&sk->sk_wmem_alloc) > 2607 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 2608 return -EAGAIN; 2609 2610 if (skb_still_in_host_queue(sk, skb)) 2611 return -EBUSY; 2612 2613 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { 2614 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) 2615 BUG(); 2616 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) 2617 return -ENOMEM; 2618 } 2619 2620 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) 2621 return -EHOSTUNREACH; /* Routing failure or similar. */ 2622 2623 cur_mss = tcp_current_mss(sk); 2624 2625 /* If the receiver has shrunk its window, and skb is out of the 2626 * new window, do not retransmit it.
The exception is the 2627 * case when the window is shrunk to zero, where our 2628 * retransmit serves as a zero window probe. 2629 */ 2630 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && 2631 TCP_SKB_CB(skb)->seq != tp->snd_una) 2632 return -EAGAIN; 2633 2634 len = cur_mss * segs; 2635 if (skb->len > len) { 2636 if (tcp_fragment(sk, skb, len, cur_mss, GFP_ATOMIC)) 2637 return -ENOMEM; /* We'll try again later. */ 2638 } else { 2639 if (skb_unclone(skb, GFP_ATOMIC)) 2640 return -ENOMEM; 2641 2642 diff = tcp_skb_pcount(skb); 2643 tcp_set_skb_tso_segs(skb, cur_mss); 2644 diff -= tcp_skb_pcount(skb); 2645 if (diff) 2646 tcp_adjust_pcount(sk, skb, diff); 2647 if (skb->len < cur_mss) 2648 tcp_retrans_try_collapse(sk, skb, cur_mss); 2649 } 2650 2651 /* RFC3168, section 6.1.1.1. ECN fallback */ 2652 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN) 2653 tcp_ecn_clear_syn(sk, skb); 2654 2655 /* make sure skb->data is aligned on arches that require it 2656 * and check if ack-trimming & collapsing extended the headroom 2657 * beyond what csum_start can cover. 2658 */ 2659 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || 2660 skb_headroom(skb) >= 0xFFFF)) { 2661 struct sk_buff *nskb; 2662 2663 skb_mstamp_get(&skb->skb_mstamp); 2664 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); 2665 err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : 2666 -ENOBUFS; 2667 } else { 2668 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2669 } 2670 2671 if (likely(!err)) { 2672 segs = tcp_skb_pcount(skb); 2673 2674 TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; 2675 /* Update global TCP statistics. */ 2676 TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs); 2677 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) 2678 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 2679 tp->total_retrans += segs; 2680 } 2681 return err; 2682 } 2683 2684 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) 2685 { 2686 struct tcp_sock *tp = tcp_sk(sk); 2687 int err = __tcp_retransmit_skb(sk, skb, segs); 2688 2689 if (err == 0) { 2690 #if FASTRETRANS_DEBUG > 0 2691 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { 2692 net_dbg_ratelimited("retrans_out leaked\n"); 2693 } 2694 #endif 2695 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; 2696 tp->retrans_out += tcp_skb_pcount(skb); 2697 2698 /* Save stamp of the first retransmit. */ 2699 if (!tp->retrans_stamp) 2700 tp->retrans_stamp = tcp_skb_timestamp(skb); 2701 2702 } else if (err != -EBUSY) { 2703 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 2704 } 2705 2706 if (tp->undo_retrans < 0) 2707 tp->undo_retrans = 0; 2708 tp->undo_retrans += tcp_skb_pcount(skb); 2709 return err; 2710 } 2711 2712 /* Check if forward retransmits are possible in the current 2713 * window/congestion state. 2714 */ 2715 static bool tcp_can_forward_retransmit(struct sock *sk) 2716 { 2717 const struct inet_connection_sock *icsk = inet_csk(sk); 2718 const struct tcp_sock *tp = tcp_sk(sk); 2719 2720 /* Forward retransmissions are possible only during Recovery. */ 2721 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2722 return false; 2723 2724 /* No forward retransmissions in Reno are possible. */ 2725 if (tcp_is_reno(tp)) 2726 return false; 2727 2728 /* Yeah, we have to make a difficult choice between forward transmission 2729 * and retransmission... Both ways have their merits... 2730 * 2731 * For now we do not retransmit anything, while we have some new 2732 * segments to send.
In the other cases, follow rule 3 for 2733 * NextSeg() specified in RFC3517. 2734 */ 2735 2736 if (tcp_may_send_now(sk)) 2737 return false; 2738 2739 return true; 2740 } 2741 2742 /* This gets called after a retransmit timeout, and the initially 2743 * retransmitted data is acknowledged. It tries to continue 2744 * resending the rest of the retransmit queue, until either 2745 * we've sent it all or the congestion window limit is reached. 2746 * If doing SACK, the first ACK which comes back for a timeout 2747 * based retransmit packet might feed us FACK information again. 2748 * If so, we use it to avoid unnecessary retransmissions. 2749 */ 2750 void tcp_xmit_retransmit_queue(struct sock *sk) 2751 { 2752 const struct inet_connection_sock *icsk = inet_csk(sk); 2753 struct tcp_sock *tp = tcp_sk(sk); 2754 struct sk_buff *skb; 2755 struct sk_buff *hole = NULL; 2756 u32 max_segs, last_lost; 2757 int mib_idx; 2758 int fwd_rexmitting = 0; 2759 2760 if (!tp->packets_out) 2761 return; 2762 2763 if (!tp->lost_out) 2764 tp->retransmit_high = tp->snd_una; 2765 2766 if (tp->retransmit_skb_hint) { 2767 skb = tp->retransmit_skb_hint; 2768 last_lost = TCP_SKB_CB(skb)->end_seq; 2769 if (after(last_lost, tp->retransmit_high)) 2770 last_lost = tp->retransmit_high; 2771 } else { 2772 skb = tcp_write_queue_head(sk); 2773 last_lost = tp->snd_una; 2774 } 2775 2776 max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk)); 2777 tcp_for_write_queue_from(skb, sk) { 2778 __u8 sacked = TCP_SKB_CB(skb)->sacked; 2779 int segs; 2780 2781 if (skb == tcp_send_head(sk)) 2782 break; 2783 /* we could do better than to assign each time */ 2784 if (!hole) 2785 tp->retransmit_skb_hint = skb; 2786 2787 segs = tp->snd_cwnd - tcp_packets_in_flight(tp); 2788 if (segs <= 0) 2789 return; 2790 /* In case tcp_shift_skb_data() has aggregated large skbs, 2791 * we need to make sure we are not sending too big TSO packets 2792 */ 2793 segs = min_t(int, segs, max_segs); 2794 2795 if (fwd_rexmitting) { 2796 begin_fwd: 2797 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2798 break; 2799 mib_idx = LINUX_MIB_TCPFORWARDRETRANS; 2800 2801 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { 2802 tp->retransmit_high = last_lost; 2803 if (!tcp_can_forward_retransmit(sk)) 2804 break; 2805 /* Backtrack if necessary to non-L'ed skb */ 2806 if (hole) { 2807 skb = hole; 2808 hole = NULL; 2809 } 2810 fwd_rexmitting = 1; 2811 goto begin_fwd; 2812 2813 } else if (!(sacked & TCPCB_LOST)) { 2814 if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED))) 2815 hole = skb; 2816 continue; 2817 2818 } else { 2819 last_lost = TCP_SKB_CB(skb)->end_seq; 2820 if (icsk->icsk_ca_state != TCP_CA_Loss) 2821 mib_idx = LINUX_MIB_TCPFASTRETRANS; 2822 else 2823 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS; 2824 } 2825 2826 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) 2827 continue; 2828 2829 if (tcp_retransmit_skb(sk, skb, segs)) 2830 return; 2831 2832 NET_INC_STATS(sock_net(sk), mib_idx); 2833 2834 if (tcp_in_cwnd_reduction(sk)) 2835 tp->prr_out += tcp_skb_pcount(skb); 2836 2837 if (skb == tcp_write_queue_head(sk)) 2838 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2839 inet_csk(sk)->icsk_rto, 2840 TCP_RTO_MAX); 2841 } 2842 } 2843 2844 /* We allow FIN packets to exceed memory limits to expedite 2845 * connection tear down and (memory) recovery. 2846 * Otherwise tcp_send_fin() could be tempted to either delay FIN 2847 * or even be forced to close the flow without any FIN.
2848 * In general, we want to allow one skb per socket to avoid hangs 2849 * with edge trigger epoll() 2850 */ 2851 void sk_forced_mem_schedule(struct sock *sk, int size) 2852 { 2853 int amt; 2854 2855 if (size <= sk->sk_forward_alloc) 2856 return; 2857 amt = sk_mem_pages(size); 2858 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; 2859 sk_memory_allocated_add(sk, amt); 2860 2861 if (mem_cgroup_sockets_enabled && sk->sk_memcg) 2862 mem_cgroup_charge_skmem(sk->sk_memcg, amt); 2863 } 2864 2865 /* Send a FIN. The caller locks the socket for us. 2866 * We should try to send a FIN packet really hard, but eventually give up. 2867 */ 2868 void tcp_send_fin(struct sock *sk) 2869 { 2870 struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); 2871 struct tcp_sock *tp = tcp_sk(sk); 2872 2873 /* Optimization, tack on the FIN if we have one skb in write queue and 2874 * this skb was not yet sent, or we are under memory pressure. 2875 * Note: in the latter case, FIN packet will be sent after a timeout, 2876 * as TCP stack thinks it has already been transmitted. 2877 */ 2878 if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) { 2879 coalesce: 2880 TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; 2881 TCP_SKB_CB(tskb)->end_seq++; 2882 tp->write_seq++; 2883 if (!tcp_send_head(sk)) { 2884 /* This means tskb was already sent. 2885 * Pretend we included the FIN on previous transmit. 2886 * We need to set tp->snd_nxt to the value it would have 2887 * if FIN had been sent. This is because retransmit path 2888 * does not change tp->snd_nxt. 2889 */ 2890 tp->snd_nxt++; 2891 return; 2892 } 2893 } else { 2894 skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); 2895 if (unlikely(!skb)) { 2896 if (tskb) 2897 goto coalesce; 2898 return; 2899 } 2900 skb_reserve(skb, MAX_TCP_HEADER); 2901 sk_forced_mem_schedule(sk, skb->truesize); 2902 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2903 tcp_init_nondata_skb(skb, tp->write_seq, 2904 TCPHDR_ACK | TCPHDR_FIN); 2905 tcp_queue_skb(sk, skb); 2906 } 2907 __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); 2908 } 2909 2910 /* We get here when a process closes a file descriptor (either due to 2911 * an explicit close() or as a byproduct of exit()'ing) and there 2912 * was unread data in the receive queue. This behavior is recommended 2913 * by RFC 2525, section 2.17. -DaveM 2914 */ 2915 void tcp_send_active_reset(struct sock *sk, gfp_t priority) 2916 { 2917 struct sk_buff *skb; 2918 2919 /* NOTE: No TCP options attached and we never retransmit this. */ 2920 skb = alloc_skb(MAX_TCP_HEADER, priority); 2921 if (!skb) { 2922 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2923 return; 2924 } 2925 2926 /* Reserve space for headers and prepare control bits. */ 2927 skb_reserve(skb, MAX_TCP_HEADER); 2928 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2929 TCPHDR_ACK | TCPHDR_RST); 2930 skb_mstamp_get(&skb->skb_mstamp); 2931 /* Send it off. */ 2932 if (tcp_transmit_skb(sk, skb, 0, priority)) 2933 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); 2934 2935 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS); 2936 } 2937 2938 /* Send a crossed SYN-ACK during socket establishment. 2939 * WARNING: This routine must only be called when we have already sent 2940 * a SYN packet that crossed the incoming SYN that caused this routine 2941 * to get called. If this assumption fails then the initial rcv_wnd 2942 * and rcv_wscale values will not be correct. 
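 * (This is the simultaneous-open case: the code below simply adds
 * TCPHDR_ACK to our already-queued SYN, copying the skb first if it is
 * cloned, and transmits it as a SYN-ACK.)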
2943 */ 2944 int tcp_send_synack(struct sock *sk) 2945 { 2946 struct sk_buff *skb; 2947 2948 skb = tcp_write_queue_head(sk); 2949 if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 2950 pr_debug("%s: wrong queue state\n", __func__); 2951 return -EFAULT; 2952 } 2953 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { 2954 if (skb_cloned(skb)) { 2955 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2956 if (!nskb) 2957 return -ENOMEM; 2958 tcp_unlink_write_queue(skb, sk); 2959 __skb_header_release(nskb); 2960 __tcp_add_write_queue_head(sk, nskb); 2961 sk_wmem_free_skb(sk, skb); 2962 sk->sk_wmem_queued += nskb->truesize; 2963 sk_mem_charge(sk, nskb->truesize); 2964 skb = nskb; 2965 } 2966 2967 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; 2968 tcp_ecn_send_synack(sk, skb); 2969 } 2970 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2971 } 2972 2973 /** 2974 * tcp_make_synack - Prepare a SYN-ACK. 2975 * @sk: listener socket 2976 * @dst: dst entry attached to the SYNACK 2977 * @req: request_sock pointer 2978 * 2979 * Allocate one skb and build a SYNACK packet. 2980 * @dst is consumed: the caller should not use it again. 2981 */ 2982 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, 2983 struct request_sock *req, 2984 struct tcp_fastopen_cookie *foc, 2985 enum tcp_synack_type synack_type) 2986 { 2987 struct inet_request_sock *ireq = inet_rsk(req); 2988 const struct tcp_sock *tp = tcp_sk(sk); 2989 struct tcp_md5sig_key *md5 = NULL; 2990 struct tcp_out_options opts; 2991 struct sk_buff *skb; 2992 int tcp_header_size; 2993 struct tcphdr *th; 2994 u16 user_mss; 2995 int mss; 2996 2997 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 2998 if (unlikely(!skb)) { 2999 dst_release(dst); 3000 return NULL; 3001 } 3002 /* Reserve space for headers. */ 3003 skb_reserve(skb, MAX_TCP_HEADER); 3004 3005 switch (synack_type) { 3006 case TCP_SYNACK_NORMAL: 3007 skb_set_owner_w(skb, req_to_sk(req)); 3008 break; 3009 case TCP_SYNACK_COOKIE: 3010 /* Under synflood, we do not attach skb to a socket, 3011 * to avoid false sharing. 3012 */ 3013 break; 3014 case TCP_SYNACK_FASTOPEN: 3015 /* sk is a const pointer, because we want to express that multiple 3016 * cpus might call us concurrently. 3017 * sk->sk_wmem_alloc is an atomic, so we can promote it to rw.
3018 */ 3019 skb_set_owner_w(skb, (struct sock *)sk); 3020 break; 3021 } 3022 skb_dst_set(skb, dst); 3023 3024 mss = dst_metric_advmss(dst); 3025 user_mss = READ_ONCE(tp->rx_opt.user_mss); 3026 if (user_mss && user_mss < mss) 3027 mss = user_mss; 3028 3029 memset(&opts, 0, sizeof(opts)); 3030 #ifdef CONFIG_SYN_COOKIES 3031 if (unlikely(req->cookie_ts)) 3032 skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req); 3033 else 3034 #endif 3035 skb_mstamp_get(&skb->skb_mstamp); 3036 3037 #ifdef CONFIG_TCP_MD5SIG 3038 rcu_read_lock(); 3039 md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req)); 3040 #endif 3041 skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4); 3042 tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) + 3043 sizeof(*th); 3044 3045 skb_push(skb, tcp_header_size); 3046 skb_reset_transport_header(skb); 3047 3048 th = (struct tcphdr *)skb->data; 3049 memset(th, 0, sizeof(struct tcphdr)); 3050 th->syn = 1; 3051 th->ack = 1; 3052 tcp_ecn_make_synack(req, th); 3053 th->source = htons(ireq->ir_num); 3054 th->dest = ireq->ir_rmt_port; 3055 /* Setting of flags are superfluous here for callers (and ECE is 3056 * not even correctly set) 3057 */ 3058 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 3059 TCPHDR_SYN | TCPHDR_ACK); 3060 3061 th->seq = htonl(TCP_SKB_CB(skb)->seq); 3062 /* XXX data is queued and acked as is. No buffer/window check */ 3063 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); 3064 3065 /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ 3066 th->window = htons(min(req->rsk_rcv_wnd, 65535U)); 3067 tcp_options_write((__be32 *)(th + 1), NULL, &opts); 3068 th->doff = (tcp_header_size >> 2); 3069 __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 3070 3071 #ifdef CONFIG_TCP_MD5SIG 3072 /* Okay, we have all we need - do the md5 hash if needed */ 3073 if (md5) 3074 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location, 3075 md5, req_to_sk(req), skb); 3076 rcu_read_unlock(); 3077 #endif 3078 3079 /* Do not fool tcpdump (if any), clean our debris */ 3080 skb->tstamp.tv64 = 0; 3081 return skb; 3082 } 3083 EXPORT_SYMBOL(tcp_make_synack); 3084 3085 static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) 3086 { 3087 struct inet_connection_sock *icsk = inet_csk(sk); 3088 const struct tcp_congestion_ops *ca; 3089 u32 ca_key = dst_metric(dst, RTAX_CC_ALGO); 3090 3091 if (ca_key == TCP_CA_UNSPEC) 3092 return; 3093 3094 rcu_read_lock(); 3095 ca = tcp_ca_find_key(ca_key); 3096 if (likely(ca && try_module_get(ca->owner))) { 3097 module_put(icsk->icsk_ca_ops->owner); 3098 icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); 3099 icsk->icsk_ca_ops = ca; 3100 } 3101 rcu_read_unlock(); 3102 } 3103 3104 /* Do all connect socket setups that can be done AF independent. */ 3105 static void tcp_connect_init(struct sock *sk) 3106 { 3107 const struct dst_entry *dst = __sk_dst_get(sk); 3108 struct tcp_sock *tp = tcp_sk(sk); 3109 __u8 rcv_wscale; 3110 3111 /* We'll fix this up when we get a response from the other end. 3112 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. 3113 */ 3114 tp->tcp_header_len = sizeof(struct tcphdr) + 3115 (sysctl_tcp_timestamps ? 
TCPOLEN_TSTAMP_ALIGNED : 0); 3116 3117 #ifdef CONFIG_TCP_MD5SIG 3118 if (tp->af_specific->md5_lookup(sk, sk)) 3119 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED; 3120 #endif 3121 3122 /* If user gave his TCP_MAXSEG, record it to clamp */ 3123 if (tp->rx_opt.user_mss) 3124 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 3125 tp->max_window = 0; 3126 tcp_mtup_init(sk); 3127 tcp_sync_mss(sk, dst_mtu(dst)); 3128 3129 tcp_ca_dst_init(sk, dst); 3130 3131 if (!tp->window_clamp) 3132 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 3133 tp->advmss = dst_metric_advmss(dst); 3134 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss) 3135 tp->advmss = tp->rx_opt.user_mss; 3136 3137 tcp_initialize_rcv_mss(sk); 3138 3139 /* limit the window selection if the user enforce a smaller rx buffer */ 3140 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && 3141 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) 3142 tp->window_clamp = tcp_full_space(sk); 3143 3144 tcp_select_initial_window(tcp_full_space(sk), 3145 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), 3146 &tp->rcv_wnd, 3147 &tp->window_clamp, 3148 sysctl_tcp_window_scaling, 3149 &rcv_wscale, 3150 dst_metric(dst, RTAX_INITRWND)); 3151 3152 tp->rx_opt.rcv_wscale = rcv_wscale; 3153 tp->rcv_ssthresh = tp->rcv_wnd; 3154 3155 sk->sk_err = 0; 3156 sock_reset_flag(sk, SOCK_DONE); 3157 tp->snd_wnd = 0; 3158 tcp_init_wl(tp, 0); 3159 tp->snd_una = tp->write_seq; 3160 tp->snd_sml = tp->write_seq; 3161 tp->snd_up = tp->write_seq; 3162 tp->snd_nxt = tp->write_seq; 3163 3164 if (likely(!tp->repair)) 3165 tp->rcv_nxt = 0; 3166 else 3167 tp->rcv_tstamp = tcp_time_stamp; 3168 tp->rcv_wup = tp->rcv_nxt; 3169 tp->copied_seq = tp->rcv_nxt; 3170 3171 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 3172 inet_csk(sk)->icsk_retransmits = 0; 3173 tcp_clear_retrans(tp); 3174 } 3175 3176 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) 3177 { 3178 struct tcp_sock *tp = tcp_sk(sk); 3179 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 3180 3181 tcb->end_seq += skb->len; 3182 __skb_header_release(skb); 3183 __tcp_add_write_queue_tail(sk, skb); 3184 sk->sk_wmem_queued += skb->truesize; 3185 sk_mem_charge(sk, skb->truesize); 3186 tp->write_seq = tcb->end_seq; 3187 tp->packets_out += tcp_skb_pcount(skb); 3188 } 3189 3190 /* Build and send a SYN with data and (cached) Fast Open cookie. However, 3191 * queue a data-only packet after the regular SYN, such that regular SYNs 3192 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges 3193 * only the SYN sequence, the data are retransmitted in the first ACK. 3194 * If cookie is not cached or other error occurs, falls back to send a 3195 * regular SYN with Fast Open cookie request option. 
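 * A rough worked example (hypothetical figures): with an IPv4 path MTU
 * of 1500 the effective MSS is about 1460, and after reserving
 * MAX_TCP_OPTION_SPACE (40 bytes) for middlebox-added options roughly
 * 1420 bytes of user data can ride on the SYN itself.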
3196 */ 3197 static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) 3198 { 3199 struct tcp_sock *tp = tcp_sk(sk); 3200 struct tcp_fastopen_request *fo = tp->fastopen_req; 3201 int syn_loss = 0, space, err = 0; 3202 unsigned long last_syn_loss = 0; 3203 struct sk_buff *syn_data; 3204 3205 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ 3206 tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, 3207 &syn_loss, &last_syn_loss); 3208 /* Recurring FO SYN losses: revert to regular handshake temporarily */ 3209 if (syn_loss > 1 && 3210 time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) { 3211 fo->cookie.len = -1; 3212 goto fallback; 3213 } 3214 3215 if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) 3216 fo->cookie.len = -1; 3217 else if (fo->cookie.len <= 0) 3218 goto fallback; 3219 3220 /* MSS for SYN-data is based on cached MSS and bounded by PMTU and 3221 * user-MSS. Reserve maximum option space for middleboxes that add 3222 * private TCP options. The cost is reduced data space in SYN :( 3223 */ 3224 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp) 3225 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; 3226 space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - 3227 MAX_TCP_OPTION_SPACE; 3228 3229 space = min_t(size_t, space, fo->size); 3230 3231 /* limit to order-0 allocations */ 3232 space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER)); 3233 3234 syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false); 3235 if (!syn_data) 3236 goto fallback; 3237 syn_data->ip_summed = CHECKSUM_PARTIAL; 3238 memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); 3239 if (space) { 3240 int copied = copy_from_iter(skb_put(syn_data, space), space, 3241 &fo->data->msg_iter); 3242 if (unlikely(!copied)) { 3243 kfree_skb(syn_data); 3244 goto fallback; 3245 } 3246 if (copied != space) { 3247 skb_trim(syn_data, copied); 3248 space = copied; 3249 } 3250 } 3251 /* No more data pending in inet_wait_for_connect() */ 3252 if (space == fo->size) 3253 fo->data = NULL; 3254 fo->copied = space; 3255 3256 tcp_connect_queue_skb(sk, syn_data); 3257 3258 err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); 3259 3260 syn->skb_mstamp = syn_data->skb_mstamp; 3261 3262 /* Now full SYN+DATA was cloned and sent (or not), 3263 * remove the SYN from the original skb (syn_data) 3264 * we keep in write queue in case of a retransmit, as we 3265 * also have the SYN packet (with no data) in the same queue. 3266 */ 3267 TCP_SKB_CB(syn_data)->seq++; 3268 TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; 3269 if (!err) { 3270 tp->syn_data = (fo->copied > 0); 3271 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); 3272 goto done; 3273 } 3274 3275 fallback: 3276 /* Send a regular SYN with Fast Open cookie request option */ 3277 if (fo->cookie.len > 0) 3278 fo->cookie.len = 0; 3279 err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); 3280 if (err) 3281 tp->syn_fastopen = 0; 3282 done: 3283 fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ 3284 return err; 3285 } 3286 3287 /* Build a SYN and send it off. 
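 * A minimal caller sketch (illustrative only; the real callers are the
 * AF-specific connect paths such as tcp_v4_connect(), which already
 * hold the socket lock):
 *
 *	lock_sock(sk);
 *	err = tcp_connect(sk);	// queues the SYN and arms the RTO timer
 *	release_sock(sk);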
*/ 3288 int tcp_connect(struct sock *sk) 3289 { 3290 struct tcp_sock *tp = tcp_sk(sk); 3291 struct sk_buff *buff; 3292 int err; 3293 3294 tcp_connect_init(sk); 3295 3296 if (unlikely(tp->repair)) { 3297 tcp_finish_connect(sk, NULL); 3298 return 0; 3299 } 3300 3301 buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true); 3302 if (unlikely(!buff)) 3303 return -ENOBUFS; 3304 3305 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); 3306 tp->retrans_stamp = tcp_time_stamp; 3307 tcp_connect_queue_skb(sk, buff); 3308 tcp_ecn_send_syn(sk, buff); 3309 3310 /* Send off SYN; include data in Fast Open. */ 3311 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : 3312 tcp_transmit_skb(sk, buff, 1, sk->sk_allocation); 3313 if (err == -ECONNREFUSED) 3314 return err; 3315 3316 /* We change tp->snd_nxt after the tcp_transmit_skb() call 3317 * in order to make this packet get counted in tcpOutSegs. 3318 */ 3319 tp->snd_nxt = tp->write_seq; 3320 tp->pushed_seq = tp->write_seq; 3321 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); 3322 3323 /* Timer for repeating the SYN until an answer. */ 3324 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 3325 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 3326 return 0; 3327 } 3328 EXPORT_SYMBOL(tcp_connect); 3329 3330 /* Send out a delayed ack, the caller does the policy checking 3331 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 3332 * for details. 3333 */ 3334 void tcp_send_delayed_ack(struct sock *sk) 3335 { 3336 struct inet_connection_sock *icsk = inet_csk(sk); 3337 int ato = icsk->icsk_ack.ato; 3338 unsigned long timeout; 3339 3340 tcp_ca_event(sk, CA_EVENT_DELAYED_ACK); 3341 3342 if (ato > TCP_DELACK_MIN) { 3343 const struct tcp_sock *tp = tcp_sk(sk); 3344 int max_ato = HZ / 2; 3345 3346 if (icsk->icsk_ack.pingpong || 3347 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 3348 max_ato = TCP_DELACK_MAX; 3349 3350 /* Slow path, intersegment interval is "high". */ 3351 3352 /* If some rtt estimate is known, use it to bound delayed ack. 3353 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 3354 * directly. 3355 */ 3356 if (tp->srtt_us) { 3357 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), 3358 TCP_DELACK_MIN); 3359 3360 if (rtt < max_ato) 3361 max_ato = rtt; 3362 } 3363 3364 ato = min(ato, max_ato); 3365 } 3366 3367 /* Stay within the limit we were given */ 3368 timeout = jiffies + ato; 3369 3370 /* Use new timeout only if there wasn't a older one earlier. */ 3371 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 3372 /* If delack timer was blocked or is about to expire, 3373 * send ACK now. 3374 */ 3375 if (icsk->icsk_ack.blocked || 3376 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 3377 tcp_send_ack(sk); 3378 return; 3379 } 3380 3381 if (!time_before(timeout, icsk->icsk_ack.timeout)) 3382 timeout = icsk->icsk_ack.timeout; 3383 } 3384 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 3385 icsk->icsk_ack.timeout = timeout; 3386 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 3387 } 3388 3389 /* This routine sends an ack and also updates the window. */ 3390 void tcp_send_ack(struct sock *sk) 3391 { 3392 struct sk_buff *buff; 3393 3394 /* If we have been reset, we may not send again. */ 3395 if (sk->sk_state == TCP_CLOSE) 3396 return; 3397 3398 tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK); 3399 3400 /* We are not putting this on the write queue, so 3401 * tcp_transmit_skb() will set the ownership to this 3402 * sock. 
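 * (If the allocation below fails, we fall back to arming the
 * delayed-ACK timer, so the ACK is deferred rather than lost.)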
3403 */ 3404 buff = alloc_skb(MAX_TCP_HEADER, 3405 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); 3406 if (unlikely(!buff)) { 3407 inet_csk_schedule_ack(sk); 3408 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 3409 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 3410 TCP_DELACK_MAX, TCP_RTO_MAX); 3411 return; 3412 } 3413 3414 /* Reserve space for headers and prepare control bits. */ 3415 skb_reserve(buff, MAX_TCP_HEADER); 3416 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); 3417 3418 /* We do not want pure acks influencing TCP Small Queues or fq/pacing 3419 * too much. 3420 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784 3421 * We also avoid tcp_wfree() overhead (cache line miss accessing 3422 * tp->tsq_flags) by using regular sock_wfree() 3423 */ 3424 skb_set_tcp_pure_ack(buff); 3425 3426 /* Send it off, this clears delayed acks for us. */ 3427 skb_mstamp_get(&buff->skb_mstamp); 3428 tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0); 3429 } 3430 EXPORT_SYMBOL_GPL(tcp_send_ack); 3431 3432 /* This routine sends a packet with an out of date sequence 3433 * number. It assumes the other end will try to ack it. 3434 * 3435 * Question: what should we do in urgent mode? 3436 * 4.4BSD forces sending a single byte of data. We cannot send 3437 * out of window data, because we have SND.NXT==SND.MAX... 3438 * 3439 * Current solution: to send TWO zero-length segments in urgent mode: 3440 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is 3441 * out-of-date with SND.UNA-1 to probe window. 3442 */ 3443 static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib) 3444 { 3445 struct tcp_sock *tp = tcp_sk(sk); 3446 struct sk_buff *skb; 3447 3448 /* We don't queue it, tcp_transmit_skb() sets ownership. */ 3449 skb = alloc_skb(MAX_TCP_HEADER, 3450 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN)); 3451 if (!skb) 3452 return -1; 3453 3454 /* Reserve space for headers and set control bits. */ 3455 skb_reserve(skb, MAX_TCP_HEADER); 3456 /* Use a previous sequence. This should cause the other 3457 * end to send an ack. Don't queue or clone SKB, just 3458 * send it. 3459 */ 3460 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); 3461 skb_mstamp_get(&skb->skb_mstamp); 3462 NET_INC_STATS(sock_net(sk), mib); 3463 return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0); 3464 } 3465 3466 void tcp_send_window_probe(struct sock *sk) 3467 { 3468 if (sk->sk_state == TCP_ESTABLISHED) { 3469 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; 3470 tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE); 3471 } 3472 } 3473 3474 /* Initiate keepalive or window probe from timer.
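 * As consumed by tcp_send_probe0() below: a return of 0 means a probe
 * segment was transmitted, a negative value is a hard/local error, and
 * a positive value from the transmit path indicates a local-congestion
 * drop, in which case the caller does not back off. (Informal summary
 * of the code, not a documented contract.)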
*/ 3475 int tcp_write_wakeup(struct sock *sk, int mib) 3476 { 3477 struct tcp_sock *tp = tcp_sk(sk); 3478 struct sk_buff *skb; 3479 3480 if (sk->sk_state == TCP_CLOSE) 3481 return -1; 3482 3483 skb = tcp_send_head(sk); 3484 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { 3485 int err; 3486 unsigned int mss = tcp_current_mss(sk); 3487 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 3488 3489 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) 3490 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; 3491 3492 /* We are probing the opening of a window 3493 * but the window size is != 0; this must 3494 * have been the result of SWS avoidance (sender side). 3495 */ 3496 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 3497 skb->len > mss) { 3498 seg_size = min(seg_size, mss); 3499 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 3500 if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC)) 3501 return -1; 3502 } else if (!tcp_skb_pcount(skb)) 3503 tcp_set_skb_tso_segs(skb, mss); 3504 3505 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 3506 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 3507 if (!err) 3508 tcp_event_new_data_sent(sk, skb); 3509 return err; 3510 } else { 3511 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) 3512 tcp_xmit_probe_skb(sk, 1, mib); 3513 return tcp_xmit_probe_skb(sk, 0, mib); 3514 } 3515 } 3516 3517 /* A window probe timeout has occurred. If the window is not closed, send 3518 * a partial packet, else send a zero-window probe. 3519 */ 3520 void tcp_send_probe0(struct sock *sk) 3521 { 3522 struct inet_connection_sock *icsk = inet_csk(sk); 3523 struct tcp_sock *tp = tcp_sk(sk); 3524 struct net *net = sock_net(sk); 3525 unsigned long probe_max; 3526 int err; 3527 3528 err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE); 3529 3530 if (tp->packets_out || !tcp_send_head(sk)) { 3531 /* Cancel probe timer, if it is not required. */ 3532 icsk->icsk_probes_out = 0; 3533 icsk->icsk_backoff = 0; 3534 return; 3535 } 3536 3537 if (err <= 0) { 3538 if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2) 3539 icsk->icsk_backoff++; 3540 icsk->icsk_probes_out++; 3541 probe_max = TCP_RTO_MAX; 3542 } else { 3543 /* If the packet was not sent due to local congestion, 3544 * do not back off and do not remember icsk_probes_out. 3545 * Let local senders fight for local resources. 3546 * 3547 * Still use the accumulated backoff, though. 3548 */ 3549 if (!icsk->icsk_probes_out) 3550 icsk->icsk_probes_out = 1; 3551 probe_max = TCP_RESOURCE_PROBE_INTERVAL; 3552 } 3553 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 3554 tcp_probe0_when(sk, probe_max), 3555 TCP_RTO_MAX); 3556 } 3557 3558 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) 3559 { 3560 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific; 3561 struct flowi fl; 3562 int res; 3563 3564 tcp_rsk(req)->txhash = net_tx_rndhash(); 3565 res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL); 3566 if (!res) { 3567 __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 3568 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 3569 } 3570 return res; 3571 } 3572 EXPORT_SYMBOL(tcp_rtx_synack); 3573