/* xref: /openbmc/linux/net/ipv4/tcp_timer.c (revision e5242c5f) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 elapsed, start_ts, user_timeout;
	s32 remaining;

	start_ts = tcp_sk(sk)->retrans_stamp;
	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout)
		return icsk->icsk_rto;
	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
	remaining = user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
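
/* Editorial example (not part of the upstream file): the clamp above is only
 * active once an application arms TCP_USER_TIMEOUT. A minimal userspace
 * sketch, assuming a connected TCP socket fd and the standard socket API:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *
 *	unsigned int timeout_ms = 10000;  // abort after ~10s without progress
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
 *		   &timeout_ms, sizeof(timeout_ms));
 *
 * With this set, tcp_clamp_rto_to_user_timeout() never schedules the next
 * retransmission later than the remaining user budget, and returns 1 jiffy
 * once the budget is exhausted so the timer fires (and fails) promptly.
 */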

u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining, user_timeout;
	s32 elapsed;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}
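
/* Editorial worked example (assuming HZ=1000, so 1 jiffy == 1ms): with
 * TCP_USER_TIMEOUT = 4000ms and a zero-window probe armed 3000 jiffies ago,
 *
 *	elapsed   = 3000;
 *	remaining = msecs_to_jiffies(4000) - 3000;	// 1000 jiffies
 *	remaining = max_t(u32, 1000, TCP_TIMEOUT_MIN);	// still 1000
 *
 * so a caller-supplied @when of 10*HZ is clamped down to one second,
 * ensuring the probe0 timer cannot overshoot the user-imposed deadline.
 */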

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is a direct violation of the TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  These criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for a long time, or did not transmit
	 * anything for a long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (READ_ONCE(sk->sk_err_soft))
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}
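
/* Editorial note, assuming the tcp_too_many_orphans() helper of this kernel
 * vintage: the @shift accumulated above weights the orphan accounting, not
 * the memory accounting, roughly as
 *
 *	too_many = (tcp_orphan_count << shift) > sysctl_tcp_max_orphans;
 *
 * so each penalty point (long-stale send window, dubious ICMP) halves the
 * orphan budget this socket may survive under before being reset and torn
 * down.
 */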

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, true if the connection still counts as alive
 *          (its RTO backoff has not reached TCP_RTO_MAX)
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (READ_ONCE(sk->sk_err_soft) && !alive)
		retries = 0;

	/* However, if the socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with a minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
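
/* Editorial arithmetic behind the "8 corresponds to >100 seconds" comment:
 * with exponential backoff from a 200ms minimal RTO, the total wait after
 * 8 retries is
 *
 *	200ms * (2^9 - 1) = 102200ms  (~102s)
 *
 * i.e. what retransmits_timed_out() later evaluates via
 * tcp_model_timeout(sk, 8, TCP_RTO_MIN), just over the 100 second mark.
 */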

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
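
/* Editorial worked example: each timeout with probing already enabled halves
 * the low end of the MTU search range, clamped by the documented sysctl
 * defaults (tcp_base_mss = 1024, tcp_mtu_probe_floor = 48,
 * tcp_min_snd_mss = 48). E.g. with a search_low equivalent to a 1460-byte
 * MSS:
 *
 *	mss = 1460 >> 1;	// 730
 *	mss = min(1024, 730);	// 730
 *	mss = max(730, 48);	// floors do not bite yet
 *
 * so search_low decays geometrically toward the floor instead of jumping
 * straight to it, and tcp_sync_mss() then propagates the new bound.
 */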

static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	return jiffies_to_msecs(timeout);
}
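
/* Editorial worked examples, assuming TCP_RTO_MIN = 200ms and
 * TCP_RTO_MAX = 120s, so linear_backoff_thresh = ilog2(120000/200) = 9:
 *
 *	// below the threshold: pure exponential backoff
 *	tcp_model_timeout(sk, 6, TCP_RTO_MIN)
 *		= ((2 << 6) - 1) * 200ms = 25.4s
 *
 *	// above it: exponential part capped, then linear 120s steps
 *	tcp_model_timeout(sk, 15, TCP_RTO_MIN)
 *		= ((2 << 9) - 1) * 200ms + (15 - 9) * 120s = 924.6s
 *
 * The second case is the documented ~15.4 minute lifetime of a connection
 * with the default net.ipv4.tcp_retries2 = 15.
 */
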
/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0, the default timeout is calculated and used,
 *             based on TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	unsigned int start_ts;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}
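
/* Editorial note on the final comparison above: both tcp_time_stamp() and
 * start_ts are u32 millisecond clocks that wrap, so the subtraction is done
 * in u32 and only the result is reinterpreted as s32. A sketch of why that
 * is wrap-safe:
 *
 *	u32 start = 0xFFFFF000;		// shortly before wraparound
 *	u32 now   = 0x00000500;		// shortly after
 *	s32 delta = (s32)(now - start);	// 0x1500 = 5376ms elapsed
 *
 * A direct `now >= start + timeout` comparison would get this case wrong.
 */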

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until, max_retransmits;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		/* Paired with WRITE_ONCE() in tcp_sock_set_syncnt() */
		retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);

		max_retransmits = retry_until;
		if (sk->sk_state == TCP_SYN_SENT)
			max_retransmits += READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts);

		expired = icsk->icsk_retransmits >= max_retransmits;
	} else {
		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						READ_ONCE(icsk->icsk_user_timeout));
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		tp->timeout_rehash++;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}
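
/* Editorial note on the two knobs above, using the defaults documented in
 * ip-sysctl.rst: net.ipv4.tcp_retries1 (default 3) only triggers recovery
 * measures, while net.ipv4.tcp_retries2 (default 15) actually expires the
 * connection. Schematically:
 *
 *	if (retransmits_timed_out(sk, 3, 0))	// ~3 RTOs: probe MTU, re-route
 *		...
 *	retry_until = 15;			// ~924.6s total: tcp_write_err()
 *
 * TCP_USER_TIMEOUT, when set, replaces the modelled retries2 deadline.
 */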

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	/* Handle the SACK compression case */
	if (tp->compressed_ack) {
		tcp_mstamp_refresh(tp);
		tcp_sack_compress_send_ack(sk);
		return;
	}

	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		return;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		return;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato      = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tp);
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
}

/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @t:  Pointer to the timer. (gets cast to struct sock *)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count or the elapsed time exceeds the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	if (!icsk->icsk_probes_tstamp) {
		icsk->icsk_probes_tstamp = tcp_jiffies32;
	} else {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		if (user_timeout &&
		    (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		     msecs_to_jiffies(user_timeout))
			goto abort;
	}
	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

static void tcp_update_rto_stats(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (!icsk->icsk_retransmits) {
		tp->total_rto_recoveries++;
		tp->rto_stamp = tcp_time_stamp(tp);
	}
	icsk->icsk_retransmits++;
	tp->total_rto++;
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_retries;

	req->rsk_ops->syn_ack_timeout(req);

	/* Add one more retry for fastopen.
	 * Paired with WRITE_ONCE() in tcp_sock_set_syncnt()
	 */
	max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	tcp_update_rto_stats(sk);
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp(tp);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  req->timeout << req->num_timeout, TCP_RTO_MAX);
}
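
/* Editorial example: the rearm above shifts the base SYN-ACK timeout by the
 * number of expiries so far, rather than doubling the previous value.
 * Assuming req->timeout starts at the usual TCP_TIMEOUT_INIT of 1 second,
 * the schedule after each timeout is
 *
 *	1s << 1 = 2s,  1s << 2 = 4s,  1s << 3 = 8s, ...
 *
 * with inet_csk_reset_xmit_timer() capping the result at TCP_RTO_MAX.
 */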

static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
				     const struct sk_buff *skb)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	const struct tcp_sock *tp = tcp_sk(sk);
	int timeout = TCP_RTO_MAX * 2;
	u32 rtx_delta;
	s32 rcv_delta;

	rtx_delta = (u32)msecs_to_jiffies(tcp_time_stamp(tp) -
			(tp->retrans_stamp ?: tcp_skb_timestamp(skb)));

	if (user_timeout) {
		/* If the user application specified a TCP_USER_TIMEOUT,
		 * it does not want win 0 packets to 'reset the timer'
		 * while retransmits are not making progress.
		 */
		if (rtx_delta > user_timeout)
			return true;
		timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
	}

	/* Note: timer interrupt might have been delayed by at least one jiffy,
	 * and tp->rcv_tstamp might very well have been written recently.
	 * rcv_delta can thus be negative.
	 */
	rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp;
	if (rcv_delta <= timeout)
		return false;

	return rtx_delta > timeout;
}

/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive the ACK for our SYN-ACK, don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		u32 rtx_delta;

		rtx_delta = tcp_time_stamp(tp) - (tp->retrans_stamp ?: tcp_skb_timestamp(skb));
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
				&inet->inet_daddr, ntohs(inet->inet_dport),
				inet->inet_num, tp->snd_una, tp->snd_nxt,
				jiffies_to_msecs(jiffies - tp->rcv_tstamp),
				rtx_delta);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Probing zero-window on %pI6:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
				&sk->sk_v6_daddr, ntohs(inet->inet_dport),
				inet->inet_num, tp->snd_una, tp->snd_nxt,
				jiffies_to_msecs(jiffies - tp->rcv_tstamp),
				rtx_delta);
		}
#endif
		if (tcp_rtx_probe0_timed_out(sk, skb)) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	tcp_update_rto_stats(sk);
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion;
		 * let senders fight for local resources conservatively.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  TCP_RESOURCE_PROBE_INTERVAL,
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;

out_reset_timer:
	/* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick;
	 * the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
				       tcp_rto_min(sk),
				       TCP_RTO_MAX);
	} else if (sk->sk_state != TCP_SYN_SENT ||
		   icsk->icsk_backoff >
		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
		/* Use normal (exponential) backoff unless linear timeouts are
		 * activated.
		 */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
		__sk_dst_reset(sk);

out:;
}
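
/* Editorial example: the thin-stream branch above can also be enabled
 * per-socket rather than via net.ipv4.tcp_thin_linear_timeouts. A minimal
 * userspace sketch:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
 *		   &one, sizeof(one));
 *
 * A stream counts as thin while fewer than 4 packets are in flight (outside
 * initial slow start); such a socket gets up to TCP_THIN_LINEAR_RETRIES (6)
 * retransmissions at a flat, freshly recomputed RTO before the usual
 * exponential backoff takes over.
 */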

/* Called with bottom-half processing disabled.
   Called by tcp_write_timer() */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		return;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		return;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}
}

static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);
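
/* Editorial example: the timer managed above is driven entirely by socket
 * options. A typical userspace setup, using the standard knobs (idle time,
 * probe interval, probe count):
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));	// seconds idle before probing
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));	// seconds between probes
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));	// unanswered probes before reset
 */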

static void tcp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (READ_ONCE(tp->linger2) >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((user_timeout != 0 &&
		    elapsed >= msecs_to_jiffies(user_timeout) &&
		    icsk->icsk_probes_out > 0) ||
		    (user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
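
/* Editorial arithmetic: with the documented defaults
 * (tcp_keepalive_time = 7200s, tcp_keepalive_intvl = 75s,
 * tcp_keepalive_probes = 9), a silently vanished peer is detected after
 *
 *	7200s + 9 * 75s = 7875s  (~2h11m)
 *
 * which is why latency-sensitive deployments shrink these via the socket
 * options shown after tcp_set_keepalive() above.
 */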

static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}
866