1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Implementation of the Transmission Control Protocol(TCP). 7 * 8 * Version: $Id: tcp_timer.c,v 1.88 2002/02/01 22:01:04 davem Exp $ 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Mark Evans, <evansmp@uhura.aston.ac.uk> 13 * Corey Minyard <wf-rch!minyard@relay.EU.net> 14 * Florian La Roche, <flla@stud.uni-sb.de> 15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 16 * Linus Torvalds, <torvalds@cs.helsinki.fi> 17 * Alan Cox, <gw4pts@gw4pts.ampr.org> 18 * Matthew Dillon, <dillon@apollo.west.oic.com> 19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 20 * Jorge Cwik, <jorge@laser.satlink.net> 21 */ 22 23 #include <linux/module.h> 24 #include <net/tcp.h> 25 26 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; 27 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES; 28 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME; 29 int sysctl_tcp_keepalive_probes __read_mostly = TCP_KEEPALIVE_PROBES; 30 int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL; 31 int sysctl_tcp_retries1 __read_mostly = TCP_RETR1; 32 int sysctl_tcp_retries2 __read_mostly = TCP_RETR2; 33 int sysctl_tcp_orphan_retries __read_mostly; 34 35 static void tcp_write_timer(unsigned long); 36 static void tcp_delack_timer(unsigned long); 37 static void tcp_keepalive_timer (unsigned long data); 38 39 void tcp_init_xmit_timers(struct sock *sk) 40 { 41 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer, 42 &tcp_keepalive_timer); 43 } 44 45 EXPORT_SYMBOL(tcp_init_xmit_timers); 46 47 static void tcp_write_err(struct sock *sk) 48 { 49 sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; 50 sk->sk_error_report(sk); 51 52 tcp_done(sk); 53 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT); 54 } 55 56 /* Do not allow orphaned sockets to eat all our resources. 57 * This is direct violation of TCP specs, but it is required 58 * to prevent DoS attacks. It is called when a retransmission timeout 59 * or zero probe timeout occurs on orphaned socket. 60 * 61 * Criteria is still not confirmed experimentally and may change. 62 * We kill the socket, if: 63 * 1. If number of orphaned sockets exceeds an administratively configured 64 * limit. 65 * 2. If we have strong memory pressure. 66 */ 67 static int tcp_out_of_resources(struct sock *sk, int do_reset) 68 { 69 struct tcp_sock *tp = tcp_sk(sk); 70 int orphans = atomic_read(&tcp_orphan_count); 71 72 /* If peer does not open window for long time, or did not transmit 73 * anything for long time, penalize it. */ 74 if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) 75 orphans <<= 1; 76 77 /* If some dubious ICMP arrived, penalize even more. */ 78 if (sk->sk_err_soft) 79 orphans <<= 1; 80 81 if (tcp_too_many_orphans(sk, orphans)) { 82 if (net_ratelimit()) 83 printk(KERN_INFO "Out of socket memory\n"); 84 85 /* Catch exceptional cases, when connection requires reset. 86 * 1. Last segment was sent recently. */ 87 if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN || 88 /* 2. Window is closed. */ 89 (!tp->snd_wnd && !tp->packets_out)) 90 do_reset = 1; 91 if (do_reset) 92 tcp_send_active_reset(sk, GFP_ATOMIC); 93 tcp_done(sk); 94 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY); 95 return 1; 96 } 97 return 0; 98 } 99 100 /* Calculate maximal number or retries on an orphaned socket. */ 101 static int tcp_orphan_retries(struct sock *sk, int alive) 102 { 103 int retries = sysctl_tcp_orphan_retries; /* May be zero. */ 104 105 /* We know from an ICMP that something is wrong. */ 106 if (sk->sk_err_soft && !alive) 107 retries = 0; 108 109 /* However, if socket sent something recently, select some safe 110 * number of retries. 8 corresponds to >100 seconds with minimal 111 * RTO of 200msec. */ 112 if (retries == 0 && alive) 113 retries = 8; 114 return retries; 115 } 116 117 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) 118 { 119 /* Black hole detection */ 120 if (sysctl_tcp_mtu_probing) { 121 if (!icsk->icsk_mtup.enabled) { 122 icsk->icsk_mtup.enabled = 1; 123 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 124 } else { 125 struct tcp_sock *tp = tcp_sk(sk); 126 int mss; 127 128 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; 129 mss = min(sysctl_tcp_base_mss, mss); 130 mss = max(mss, 68 - tp->tcp_header_len); 131 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); 132 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 133 } 134 } 135 } 136 137 /* A write timeout has occurred. Process the after effects. */ 138 static int tcp_write_timeout(struct sock *sk) 139 { 140 struct inet_connection_sock *icsk = inet_csk(sk); 141 int retry_until; 142 143 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 144 if (icsk->icsk_retransmits) 145 dst_negative_advice(&sk->sk_dst_cache); 146 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 147 } else { 148 if (icsk->icsk_retransmits >= sysctl_tcp_retries1) { 149 /* Black hole detection */ 150 tcp_mtu_probing(icsk, sk); 151 152 dst_negative_advice(&sk->sk_dst_cache); 153 } 154 155 retry_until = sysctl_tcp_retries2; 156 if (sock_flag(sk, SOCK_DEAD)) { 157 const int alive = (icsk->icsk_rto < TCP_RTO_MAX); 158 159 retry_until = tcp_orphan_retries(sk, alive); 160 161 if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until)) 162 return 1; 163 } 164 } 165 166 if (icsk->icsk_retransmits >= retry_until) { 167 /* Has it gone just too far? */ 168 tcp_write_err(sk); 169 return 1; 170 } 171 return 0; 172 } 173 174 static void tcp_delack_timer(unsigned long data) 175 { 176 struct sock *sk = (struct sock*)data; 177 struct tcp_sock *tp = tcp_sk(sk); 178 struct inet_connection_sock *icsk = inet_csk(sk); 179 180 bh_lock_sock(sk); 181 if (sock_owned_by_user(sk)) { 182 /* Try again later. */ 183 icsk->icsk_ack.blocked = 1; 184 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); 185 sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); 186 goto out_unlock; 187 } 188 189 sk_mem_reclaim_partial(sk); 190 191 if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) 192 goto out; 193 194 if (time_after(icsk->icsk_ack.timeout, jiffies)) { 195 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); 196 goto out; 197 } 198 icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; 199 200 if (!skb_queue_empty(&tp->ucopy.prequeue)) { 201 struct sk_buff *skb; 202 203 NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED); 204 205 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) 206 sk->sk_backlog_rcv(sk, skb); 207 208 tp->ucopy.memory = 0; 209 } 210 211 if (inet_csk_ack_scheduled(sk)) { 212 if (!icsk->icsk_ack.pingpong) { 213 /* Delayed ACK missed: inflate ATO. */ 214 icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); 215 } else { 216 /* Delayed ACK missed: leave pingpong mode and 217 * deflate ATO. 218 */ 219 icsk->icsk_ack.pingpong = 0; 220 icsk->icsk_ack.ato = TCP_ATO_MIN; 221 } 222 tcp_send_ack(sk); 223 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); 224 } 225 TCP_CHECK_TIMER(sk); 226 227 out: 228 if (tcp_memory_pressure) 229 sk_mem_reclaim(sk); 230 out_unlock: 231 bh_unlock_sock(sk); 232 sock_put(sk); 233 } 234 235 static void tcp_probe_timer(struct sock *sk) 236 { 237 struct inet_connection_sock *icsk = inet_csk(sk); 238 struct tcp_sock *tp = tcp_sk(sk); 239 int max_probes; 240 241 if (tp->packets_out || !tcp_send_head(sk)) { 242 icsk->icsk_probes_out = 0; 243 return; 244 } 245 246 /* *WARNING* RFC 1122 forbids this 247 * 248 * It doesn't AFAIK, because we kill the retransmit timer -AK 249 * 250 * FIXME: We ought not to do it, Solaris 2.5 actually has fixing 251 * this behaviour in Solaris down as a bug fix. [AC] 252 * 253 * Let me to explain. icsk_probes_out is zeroed by incoming ACKs 254 * even if they advertise zero window. Hence, connection is killed only 255 * if we received no ACKs for normal connection timeout. It is not killed 256 * only because window stays zero for some time, window may be zero 257 * until armageddon and even later. We are in full accordance 258 * with RFCs, only probe timer combines both retransmission timeout 259 * and probe timeout in one bottle. --ANK 260 */ 261 max_probes = sysctl_tcp_retries2; 262 263 if (sock_flag(sk, SOCK_DEAD)) { 264 const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); 265 266 max_probes = tcp_orphan_retries(sk, alive); 267 268 if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes)) 269 return; 270 } 271 272 if (icsk->icsk_probes_out > max_probes) { 273 tcp_write_err(sk); 274 } else { 275 /* Only send another probe if we didn't close things up. */ 276 tcp_send_probe0(sk); 277 } 278 } 279 280 /* 281 * The TCP retransmit timer. 282 */ 283 284 static void tcp_retransmit_timer(struct sock *sk) 285 { 286 struct tcp_sock *tp = tcp_sk(sk); 287 struct inet_connection_sock *icsk = inet_csk(sk); 288 289 if (!tp->packets_out) 290 goto out; 291 292 BUG_TRAP(!tcp_write_queue_empty(sk)); 293 294 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && 295 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { 296 /* Receiver dastardly shrinks window. Our retransmits 297 * become zero probes, but we should not timeout this 298 * connection. If the socket is an orphan, time it out, 299 * we cannot allow such beasts to hang infinitely. 300 */ 301 #ifdef TCP_DEBUG 302 if (1) { 303 struct inet_sock *inet = inet_sk(sk); 304 LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n", 305 NIPQUAD(inet->daddr), ntohs(inet->dport), 306 inet->num, tp->snd_una, tp->snd_nxt); 307 } 308 #endif 309 if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) { 310 tcp_write_err(sk); 311 goto out; 312 } 313 tcp_enter_loss(sk, 0); 314 tcp_retransmit_skb(sk, tcp_write_queue_head(sk)); 315 __sk_dst_reset(sk); 316 goto out_reset_timer; 317 } 318 319 if (tcp_write_timeout(sk)) 320 goto out; 321 322 if (icsk->icsk_retransmits == 0) { 323 if (icsk->icsk_ca_state == TCP_CA_Disorder || 324 icsk->icsk_ca_state == TCP_CA_Recovery) { 325 if (tcp_is_sack(tp)) { 326 if (icsk->icsk_ca_state == TCP_CA_Recovery) 327 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL); 328 else 329 NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES); 330 } else { 331 if (icsk->icsk_ca_state == TCP_CA_Recovery) 332 NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL); 333 else 334 NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES); 335 } 336 } else if (icsk->icsk_ca_state == TCP_CA_Loss) { 337 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES); 338 } else { 339 NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS); 340 } 341 } 342 343 if (tcp_use_frto(sk)) { 344 tcp_enter_frto(sk); 345 } else { 346 tcp_enter_loss(sk, 0); 347 } 348 349 if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) { 350 /* Retransmission failed because of local congestion, 351 * do not backoff. 352 */ 353 if (!icsk->icsk_retransmits) 354 icsk->icsk_retransmits = 1; 355 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 356 min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL), 357 TCP_RTO_MAX); 358 goto out; 359 } 360 361 /* Increase the timeout each time we retransmit. Note that 362 * we do not increase the rtt estimate. rto is initialized 363 * from rtt, but increases here. Jacobson (SIGCOMM 88) suggests 364 * that doubling rto each time is the least we can get away with. 365 * In KA9Q, Karn uses this for the first few times, and then 366 * goes to quadratic. netBSD doubles, but only goes up to *64, 367 * and clamps at 1 to 64 sec afterwards. Note that 120 sec is 368 * defined in the protocol as the maximum possible RTT. I guess 369 * we'll have to use something other than TCP to talk to the 370 * University of Mars. 371 * 372 * PAWS allows us longer timeouts and large windows, so once 373 * implemented ftp to mars will work nicely. We will have to fix 374 * the 120 second clamps though! 375 */ 376 icsk->icsk_backoff++; 377 icsk->icsk_retransmits++; 378 379 out_reset_timer: 380 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 381 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); 382 if (icsk->icsk_retransmits > sysctl_tcp_retries1) 383 __sk_dst_reset(sk); 384 385 out:; 386 } 387 388 static void tcp_write_timer(unsigned long data) 389 { 390 struct sock *sk = (struct sock*)data; 391 struct inet_connection_sock *icsk = inet_csk(sk); 392 int event; 393 394 bh_lock_sock(sk); 395 if (sock_owned_by_user(sk)) { 396 /* Try again later */ 397 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20)); 398 goto out_unlock; 399 } 400 401 if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) 402 goto out; 403 404 if (time_after(icsk->icsk_timeout, jiffies)) { 405 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); 406 goto out; 407 } 408 409 event = icsk->icsk_pending; 410 icsk->icsk_pending = 0; 411 412 switch (event) { 413 case ICSK_TIME_RETRANS: 414 tcp_retransmit_timer(sk); 415 break; 416 case ICSK_TIME_PROBE0: 417 tcp_probe_timer(sk); 418 break; 419 } 420 TCP_CHECK_TIMER(sk); 421 422 out: 423 sk_mem_reclaim(sk); 424 out_unlock: 425 bh_unlock_sock(sk); 426 sock_put(sk); 427 } 428 429 /* 430 * Timer for listening sockets 431 */ 432 433 static void tcp_synack_timer(struct sock *sk) 434 { 435 inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, 436 TCP_TIMEOUT_INIT, TCP_RTO_MAX); 437 } 438 439 void tcp_set_keepalive(struct sock *sk, int val) 440 { 441 if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) 442 return; 443 444 if (val && !sock_flag(sk, SOCK_KEEPOPEN)) 445 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); 446 else if (!val) 447 inet_csk_delete_keepalive_timer(sk); 448 } 449 450 451 static void tcp_keepalive_timer (unsigned long data) 452 { 453 struct sock *sk = (struct sock *) data; 454 struct inet_connection_sock *icsk = inet_csk(sk); 455 struct tcp_sock *tp = tcp_sk(sk); 456 __u32 elapsed; 457 458 /* Only process if socket is not in use. */ 459 bh_lock_sock(sk); 460 if (sock_owned_by_user(sk)) { 461 /* Try again later. */ 462 inet_csk_reset_keepalive_timer (sk, HZ/20); 463 goto out; 464 } 465 466 if (sk->sk_state == TCP_LISTEN) { 467 tcp_synack_timer(sk); 468 goto out; 469 } 470 471 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { 472 if (tp->linger2 >= 0) { 473 const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; 474 475 if (tmo > 0) { 476 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 477 goto out; 478 } 479 } 480 tcp_send_active_reset(sk, GFP_ATOMIC); 481 goto death; 482 } 483 484 if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) 485 goto out; 486 487 elapsed = keepalive_time_when(tp); 488 489 /* It is alive without keepalive 8) */ 490 if (tp->packets_out || tcp_send_head(sk)) 491 goto resched; 492 493 elapsed = tcp_time_stamp - tp->rcv_tstamp; 494 495 if (elapsed >= keepalive_time_when(tp)) { 496 if ((!tp->keepalive_probes && icsk->icsk_probes_out >= sysctl_tcp_keepalive_probes) || 497 (tp->keepalive_probes && icsk->icsk_probes_out >= tp->keepalive_probes)) { 498 tcp_send_active_reset(sk, GFP_ATOMIC); 499 tcp_write_err(sk); 500 goto out; 501 } 502 if (tcp_write_wakeup(sk) <= 0) { 503 icsk->icsk_probes_out++; 504 elapsed = keepalive_intvl_when(tp); 505 } else { 506 /* If keepalive was lost due to local congestion, 507 * try harder. 508 */ 509 elapsed = TCP_RESOURCE_PROBE_INTERVAL; 510 } 511 } else { 512 /* It is tp->rcv_tstamp + keepalive_time_when(tp) */ 513 elapsed = keepalive_time_when(tp) - elapsed; 514 } 515 516 TCP_CHECK_TIMER(sk); 517 sk_mem_reclaim(sk); 518 519 resched: 520 inet_csk_reset_keepalive_timer (sk, elapsed); 521 goto out; 522 523 death: 524 tcp_done(sk); 525 526 out: 527 bh_unlock_sock(sk); 528 sock_put(sk); 529 } 530