// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 elapsed, start_ts, user_timeout;
	s32 remaining;

	start_ts = tcp_sk(sk)->retrans_stamp;
	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout)
		return icsk->icsk_rto;
	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
	remaining = user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
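
/*
 * Worked example of the clamp above, assuming TCP_USER_TIMEOUT is set to
 * 10000 ms: if 9600 ms have already elapsed since retrans_stamp, roughly
 * 400 ms remain, so the next retransmission timer is armed for
 * min(icsk_rto, 400 ms) even when the backed-off icsk_rto is much larger.
 */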

u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining, user_timeout;
	s32 elapsed;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}

/**
 * tcp_write_err() - close socket and save error info
 * @sk:  The socket the error has appeared on.
 *
 * Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 * tcp_out_of_resources() - Close socket if out of resources
 * @sk:        pointer to current socket
 * @do_reset:  send a last packet with reset flag
 *
 * Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * Also close if our net namespace is exiting; in that case there is no
 * hope of ever communicating again since all netns interfaces are already
 * down (or about to be down), and we need to release our dst references,
 * which have been moved to the netns loopback interface, so the namespace
 * can finish exiting.  This condition is only possible if we are a kernel
 * socket, as those do not hold references to the namespace.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. The number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. We are under strong memory pressure.
 * 3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (READ_ONCE(sk->sk_err_soft))
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 * tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 * @sk:    Pointer to the current socket.
 * @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (READ_ONCE(sk->sk_err_soft) && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
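
/*
 * Worked arithmetic behind the "8 corresponds to >100 seconds" comment above,
 * assuming the minimal RTO of 200 ms and plain exponential doubling: the total
 * time covered by 8 backed-off retransmissions is ((2 << 8) - 1) * 200 ms =
 * 511 * 200 ms, roughly 102 seconds (see tcp_model_timeout() below).
 */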

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	return jiffies_to_msecs(timeout);
}
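
/*
 * Worked example of the model above, assuming the defaults rto_base =
 * TCP_RTO_MIN (200 ms) and TCP_RTO_MAX = 120 s:
 *
 *	linear_backoff_thresh = ilog2(120 s / 200 ms) = ilog2(600) = 9
 *
 * so for boundary = net.ipv4.tcp_retries2 = 15 (the default) the modelled
 * timeout is
 *
 *	((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s = 204.6 s + 720 s ~= 924.6 s
 *
 * i.e. roughly 15.4 minutes before retransmits_timed_out() reports expiry.
 */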

/**
 * retransmits_timed_out() - returns true if this connection has timed out
 * @sk:       The current socket
 * @boundary: max number of retransmissions
 * @timeout:  A custom timeout value.  If set to 0, the default timeout is
 *            calculated from TCP_RTO_MIN and the number of unsuccessful
 *            retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	unsigned int start_ts;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until, max_retransmits;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		/* Paired with WRITE_ONCE() in tcp_sock_set_syncnt() */
		retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);

		max_retransmits = retry_until;
		if (sk->sk_state == TCP_SYN_SENT)
			max_retransmits += READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts);

		expired = icsk->icsk_retransmits >= max_retransmits;
	} else {
		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						READ_ONCE(icsk->icsk_user_timeout));
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		tp->timeout_rehash++;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	/* Handling the sack compression case */
	if (tp->compressed_ack) {
		tcp_mstamp_refresh(tp);
		tcp_sack_compress_send_ack(sk);
		return;
	}

	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		return;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		return;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tp);
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
}

/**
 * tcp_delack_timer() - The TCP delayed ACK timeout handler
 * @t:  Pointer to the timer. (gets cast to struct sock *)
 *
 * This function gets (indirectly) called when the kernel timer for a TCP packet
 * of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 * Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	if (!icsk->icsk_probes_tstamp) {
		icsk->icsk_probes_tstamp = tcp_jiffies32;
	} else {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		if (user_timeout &&
		    (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		    msecs_to_jiffies(user_timeout))
			goto abort;
	}
	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}
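
/*
 * Illustrative bound, assuming the default net.ipv4.tcp_retries2 = 15, no
 * TCP_USER_TIMEOUT and a non-orphaned socket: up to 15 unanswered zero-window
 * probes are sent before icsk_probes_out reaches max_probes and the
 * connection is aborted via tcp_write_err().
 */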

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_retries;

	req->rsk_ops->syn_ack_timeout(req);

	/* Add one more retry for fastopen.
	 * Paired with WRITE_ONCE() in tcp_sock_set_syncnt()
	 */
	max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp(tp);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  req->timeout << req->num_timeout, TCP_RTO_MAX);
}
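
/*
 * Illustrative backoff, assuming req->timeout starts at TCP_TIMEOUT_INIT (1 s)
 * and TCP_SYNCNT is not set: the SYN-ACK is retransmitted after roughly 1 s,
 * 2 s, 4 s, ... (req->timeout << req->num_timeout, capped at TCP_RTO_MAX),
 * for up to net.ipv4.tcp_synack_retries + 1 attempts (6 with the default of 5).
 */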

static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
				     const struct sk_buff *skb)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	const struct tcp_sock *tp = tcp_sk(sk);
	int timeout = TCP_RTO_MAX * 2;
	u32 rtx_delta;
	s32 rcv_delta;

	rtx_delta = (u32)msecs_to_jiffies(tcp_time_stamp(tp) -
			(tp->retrans_stamp ?: tcp_skb_timestamp(skb)));

	if (user_timeout) {
		/* If user application specified a TCP_USER_TIMEOUT,
		 * it does not want win 0 packets to 'reset the timer'
		 * while retransmits are not making progress.
		 */
		if (rtx_delta > user_timeout)
			return true;
		timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
	}

	/* Note: timer interrupt might have been delayed by at least one jiffy,
	 * and tp->rcv_tstamp might very well have been written recently.
	 * rcv_delta can thus be negative.
	 */
	rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp;
	if (rcv_delta <= timeout)
		return false;

	return rtx_delta > timeout;
}
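
/*
 * Descriptive note: the zero-window probe phase entered from the retransmit
 * timer is only declared dead once neither incoming data (tp->rcv_tstamp) nor
 * retransmission progress has been seen within the timeout window, which
 * defaults to 2 * TCP_RTO_MAX (about four minutes) and is clamped by
 * TCP_USER_TIMEOUT when one is set.
 */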

/**
 * tcp_retransmit_timer() - The TCP retransmit timeout handler
 * @sk:  Pointer to the current socket.
 *
 * This function gets called when the kernel timer for a TCP packet
 * of this socket expires.
 *
 * It handles retransmission, timer adjustment and other necessary measures.
 *
 * Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		u32 rtx_delta;

		rtx_delta = tcp_time_stamp(tp) - (tp->retrans_stamp ?: tcp_skb_timestamp(skb));
		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
					    &inet->inet_daddr, ntohs(inet->inet_dport),
					    inet->inet_num, tp->snd_una, tp->snd_nxt,
					    jiffies_to_msecs(jiffies - tp->rcv_tstamp),
					    rtx_delta);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Probing zero-window on %pI6:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
					    &sk->sk_v6_daddr, ntohs(inet->inet_dport),
					    inet->inet_num, tp->snd_una, tp->snd_nxt,
					    jiffies_to_msecs(jiffies - tp->rcv_tstamp),
					    rtx_delta);
		}
#endif
		if (tcp_rtx_probe0_timed_out(sk, skb)) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	icsk->icsk_retransmits++;
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion,
		 * Let senders fight for local resources conservatively.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  TCP_RESOURCE_PROBE_INTERVAL,
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
				       tcp_rto_min(sk),
				       TCP_RTO_MAX);
	} else if (sk->sk_state != TCP_SYN_SENT ||
		   icsk->icsk_backoff >
		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
		/* Use normal (exponential) backoff unless linear timeouts are
		 * activated.
		 */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
		__sk_dst_reset(sk);

out:;
}
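
/*
 * Illustrative timeline, assuming an established flow whose RTO sits at the
 * 200 ms minimum and that is not "thin": successive unanswered retransmissions
 * are spaced roughly 200 ms, 400 ms, 800 ms, ... apart, with icsk_rto capped
 * at TCP_RTO_MAX (120 s) and each armed timer further clamped by
 * TCP_USER_TIMEOUT through tcp_clamp_rto_to_user_timeout() above.
 */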

/* Called with bottom-half processing disabled.
   Called by tcp_write_timer() */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		return;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		return;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}
}

static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);

static void tcp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (READ_ONCE(tp->linger2) >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((user_timeout != 0 &&
		     elapsed >= msecs_to_jiffies(user_timeout) &&
		     icsk->icsk_probes_out > 0) ||
		    (user_timeout == 0 &&
		     icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
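
/*
 * Illustrative timeline, assuming the defaults net.ipv4.tcp_keepalive_time =
 * 7200 s, tcp_keepalive_intvl = 75 s, tcp_keepalive_probes = 9 and no
 * TCP_USER_TIMEOUT: an idle connection gets its first keepalive probe after
 * two hours, then one probe every 75 s; once nine probes have gone unanswered
 * (roughly 11 minutes after the first probe) the connection is reset and
 * tcp_write_err() is called.
 */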

static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}