// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 elapsed, start_ts;
	s32 remaining;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (!icsk->icsk_user_timeout)
		return icsk->icsk_rto;
	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
	remaining = icsk->icsk_user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
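
/*
 * Userspace sketch (illustrative, not part of the original file): the clamp
 * above only takes effect once an application arms TCP_USER_TIMEOUT, e.g.
 *
 *	unsigned int tmo_ms = 5000;	// give up ~5s after data goes unacked
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo_ms, sizeof(tmo_ms));
 *
 * If 4200 ms have already elapsed since retrans_stamp, remaining is 800 ms,
 * so the next retransmit timer is min(icsk_rto, msecs_to_jiffies(800)).
 */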

/**
 * tcp_write_err() - close socket and save error info
 * @sk:  The socket the error has appeared on.
 *
 * Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_write_queue_purge(sk);
	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 * tcp_out_of_resources() - Close socket if out of resources
 * @sk:       pointer to current socket
 * @do_reset: send a last packet with reset flag
 *
 * Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * Also close if our net namespace is exiting; in that case there is no
 * hope of ever communicating again since all netns interfaces are already
 * down (or about to be down), and we need to release our dst references,
 * which have been moved to the netns loopback interface, so the namespace
 * can finish exiting. This condition is only possible if we are a kernel
 * socket, as those do not hold references to the namespace.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. The number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. We are under strong memory pressure.
 * 3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 * tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 * @sk:    Pointer to the current socket.
 * @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!net->ipv4.sysctl_tcp_mtu_probing)
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
		mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);
		mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	return jiffies_to_msecs(timeout);
}
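
/*
 * Worked example (derived from the formula above, not in the original file):
 * with rto_base = TCP_RTO_MIN (200 ms) and TCP_RTO_MAX = 120 s,
 * linear_backoff_thresh = ilog2(120000 / 200) = ilog2(600) = 9.  For
 * boundary = 6, timeout = ((2 << 6) - 1) * 200 ms = 127 * 200 ms = 25.4 s,
 * i.e. the sum of the doubled RTOs 200 + 400 + ... + 12800 ms.  Past the
 * threshold the model grows linearly: each extra retransmission adds one
 * full TCP_RTO_MAX.
 */
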
/**
 * retransmits_timed_out() - returns true if this connection has timed out
 * @sk:       The current socket
 * @boundary: max number of retransmissions
 * @timeout:  A custom timeout value.
 *            If set to 0, the default timeout is calculated from
 *            TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	unsigned int start_ts;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits) {
			dst_negative_advice(sk);
		} else {
			sk_rethink_txhash(sk);
			tp->timeout_rehash++;
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPTIMEOUTREHASH);
		}
		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
		expired = icsk->icsk_retransmits >= retry_until;
	} else {
		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			dst_negative_advice(sk);
		} else {
			sk_rethink_txhash(sk);
			tp->timeout_rehash++;
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPTIMEOUTREHASH);
		}

		retry_until = net->ipv4.sysctl_tcp_retries2;
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						icsk->icsk_user_timeout);
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	return 0;
}
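
/*
 * Illustrative numbers (assuming the usual sysctl defaults, not in the
 * original file): with tcp_retries1 = 3, MTU probing kicks in after roughly
 * ((2 << 3) - 1) * 200 ms = 3 s of back-offs; with tcp_retries2 = 15 and
 * TCP_RTO_MIN/TCP_RTO_MAX of 200 ms/120 s, tcp_model_timeout() yields
 * 1023 * 200 ms + 6 * 120 s, i.e. the connection is aborted after roughly
 * 924.6 seconds (~15.4 minutes) without any ACK from the peer.
 */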

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tcp_sk(sk));
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

/**
 * tcp_delack_timer() - The TCP delayed ACK timeout handler
 * @t:  Pointer to the timer. (gets cast to struct sock *)
 *
 * This function gets (indirectly) called when the kernel timer for a TCP packet
 * of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 * Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	if (icsk->icsk_user_timeout) {
		u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
						tcp_probe0_base(sk));

		if (elapsed >= icsk->icsk_user_timeout)
			goto abort;
	}

	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}
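
/*
 * Worked example for the TCP_USER_TIMEOUT branch above (illustrative, not in
 * the original file): zero-window probes back off like retransmissions, so
 * with a probe base of 200 ms and icsk_probes_out = 3, tcp_model_timeout()
 * models ((2 << 3) - 1) * 200 ms = 3 s already spent probing; a
 * TCP_USER_TIMEOUT of 2500 ms would therefore abort at this point.
 */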

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries = icsk->icsk_syn_retries ? :
	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
	struct tcp_sock *tp = tcp_sk(sk);

	req->rsk_ops->syn_ack_timeout(req);

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp(tp);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
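
/*
 * Illustrative note (not in the original file): since TCP_TIMEOUT_INIT is
 * 1 s, the rearm above (after num_timeout++) spaces retransmitted SYNACKs
 * at 2 s, 4 s, 8 s, ... following the initial 1 s timeout armed at
 * connection setup, capped by TCP_RTO_MAX.
 */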


/**
 * tcp_retransmit_timer() - The TCP retransmit timeout handler
 * @sk:  Pointer to the current socket.
 *
 * This function gets called when the kernel timer for a TCP packet
 * of this socket expires.
 *
 * It handles retransmission, timer adjustment and other necessary measures.
 *
 * Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out;
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	icsk->icsk_retransmits++;
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion;
		 * let senders fight for local resources conservatively.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  TCP_RESOURCE_PROBE_INTERVAL,
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;

out_reset_timer:
	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff.  Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
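
	/* Illustrative note (not in the original file): tcp_stream_is_thin()
	 * holds when fewer than 4 packets are in flight and initial slow
	 * start is over, so e.g. an interactive session with one outstanding
	 * segment keeps a freshly computed, un-backed-off RTO for its first
	 * TCP_THIN_LINEAR_RETRIES (6) retransmissions instead of doubling
	 * 200 ms -> 400 ms -> ... towards TCP_RTO_MAX.
	 */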
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
		__sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
   Called by tcp_write_timer() */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);
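
/*
 * Userspace sketch (illustrative, not part of the original file): keepalives
 * are enabled per socket and tuned via the usual socket options, e.g.
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 *
 * With these values an idle connection is probed after 60 s, then every
 * 10 s, and reset after 5 unanswered probes (see tcp_keepalive_timer()
 * below).
 */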

static void tcp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}
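
/*
 * Usage note (not in the original file): tcp_init_xmit_timers() runs once
 * per socket from tcp_init_sock(); the timers themselves are armed lazily,
 * via inet_csk_reset_xmit_timer()/sk_reset_timer() for the classic timers
 * and hrtimer_start() for the pacing and compressed-ACK hrtimers.
 */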