1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Implementation of the Transmission Control Protocol(TCP).
8 *
9 * Authors: Ross Biro
10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11 * Mark Evans, <evansmp@uhura.aston.ac.uk>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Florian La Roche, <flla@stud.uni-sb.de>
14 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
15 * Linus Torvalds, <torvalds@cs.helsinki.fi>
16 * Alan Cox, <gw4pts@gw4pts.ampr.org>
17 * Matthew Dillon, <dillon@apollo.west.oic.com>
18 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
19 * Jorge Cwik, <jorge@laser.satlink.net>
20 */
21
22 /*
23 * Changes:
24 * Pedro Roque : Fast Retransmit/Recovery.
25 * Two receive queues.
26 * Retransmit queue handled by TCP.
27 * Better retransmit timer handling.
28 * New congestion avoidance.
29 * Header prediction.
30 * Variable renaming.
31 *
32 * Eric : Fast Retransmit.
33 * Randy Scott : MSS option defines.
34 * Eric Schenk : Fixes to slow start algorithm.
35 * Eric Schenk : Yet another double ACK bug.
36 * Eric Schenk : Delayed ACK bug fixes.
37 * Eric Schenk : Floyd style fast retrans war avoidance.
38 * David S. Miller : Don't allow zero congestion window.
39 * Eric Schenk : Fix retransmitter so that it sends
40 * next packet on ack of previous packet.
41 * Andi Kleen : Moved open_request checking here
42 * and process RSTs for open_requests.
43 * Andi Kleen : Better prune_queue, and other fixes.
44 * Andrey Savochkin: Fix RTT measurements in the presence of
45 * timestamps.
46 * Andrey Savochkin: Check sequence numbers correctly when
47 * removing SACKs due to in sequence incoming
48 * data segments.
49 * Andi Kleen: Make sure we never ack data there is not
50 * enough room for. Also make this condition
51 * a fatal error if it might still happen.
52 * Andi Kleen: Add tcp_measure_rcv_mss to make
53 * connections with MSS<min(MTU,ann. MSS)
54 * work without delayed acks.
55 * Andi Kleen: Process packets with PSH set in the
56 * fast path.
57 * J Hadi Salim: ECN support
58 * Andrei Gurtov,
59 * Pasi Sarolahti,
60 * Panu Kuhlberg: Experimental audit of TCP (re)transmission
61 * engine. Lots of bugs are found.
62 * Pasi Sarolahti: F-RTO for dealing with spurious RTOs
63 */
64
65 #define pr_fmt(fmt) "TCP: " fmt
66
67 #include <linux/mm.h>
68 #include <linux/slab.h>
69 #include <linux/module.h>
70 #include <linux/sysctl.h>
71 #include <linux/kernel.h>
72 #include <linux/prefetch.h>
73 #include <net/dst.h>
74 #include <net/tcp.h>
75 #include <net/inet_common.h>
76 #include <linux/ipsec.h>
77 #include <asm/unaligned.h>
78 #include <linux/errqueue.h>
79 #include <trace/events/tcp.h>
80 #include <linux/jump_label_ratelimit.h>
81 #include <net/busy_poll.h>
82 #include <net/mptcp.h>
83
84 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
85
86 #define FLAG_DATA 0x01 /* Incoming frame contained data. */
87 #define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
88 #define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */
89 #define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */
90 #define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
91 #define FLAG_DATA_SACKED 0x20 /* New SACK. */
92 #define FLAG_ECE 0x40 /* ECE in this ACK */
93 #define FLAG_LOST_RETRANS 0x80 /* This ACK marks some retransmission lost */
94 #define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
95 #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
96 #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
97 #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
98 #define FLAG_SET_XMIT_TIMER 0x1000 /* Set TLP or RTO timer */
99 #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
100 #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */
101 #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */
102 #define FLAG_ACK_MAYBE_DELAYED 0x10000 /* Likely a delayed ACK */
103 #define FLAG_DSACK_TLP 0x20000 /* DSACK for tail loss probe */
104
105 #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
106 #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
107 #define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
108 #define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
109
110 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
111 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
112
113 #define REXMIT_NONE 0 /* no loss recovery to do */
114 #define REXMIT_LOST 1 /* retransmit packets marked lost */
115 #define REXMIT_NEW 2 /* FRTO-style transmit of unsent/new packets */
116
117 #if IS_ENABLED(CONFIG_TLS_DEVICE)
118 static DEFINE_STATIC_KEY_DEFERRED_FALSE(clean_acked_data_enabled, HZ);
119
120 void clean_acked_data_enable(struct inet_connection_sock *icsk,
121 void (*cad)(struct sock *sk, u32 ack_seq))
122 {
123 icsk->icsk_clean_acked = cad;
124 static_branch_deferred_inc(&clean_acked_data_enabled);
125 }
126 EXPORT_SYMBOL_GPL(clean_acked_data_enable);
127
128 void clean_acked_data_disable(struct inet_connection_sock *icsk)
129 {
130 static_branch_slow_dec_deferred(&clean_acked_data_enabled);
131 icsk->icsk_clean_acked = NULL;
132 }
133 EXPORT_SYMBOL_GPL(clean_acked_data_disable);
134
135 void clean_acked_data_flush(void)
136 {
137 static_key_deferred_flush(&clean_acked_data_enabled);
138 }
139 EXPORT_SYMBOL_GPL(clean_acked_data_flush);
140 #endif
141
142 #ifdef CONFIG_CGROUP_BPF
143 static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
144 {
145 bool unknown_opt = tcp_sk(sk)->rx_opt.saw_unknown &&
146 BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
147 BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG);
148 bool parse_all_opt = BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
149 BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
150 struct bpf_sock_ops_kern sock_ops;
151
152 if (likely(!unknown_opt && !parse_all_opt))
153 return;
154
155 /* The skb will be handled in the
156 * bpf_skops_established() or
157 * bpf_skops_write_hdr_opt().
158 */
159 switch (sk->sk_state) {
160 case TCP_SYN_RECV:
161 case TCP_SYN_SENT:
162 case TCP_LISTEN:
163 return;
164 }
165
166 sock_owned_by_me(sk);
167
168 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
169 sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
170 sock_ops.is_fullsock = 1;
171 sock_ops.sk = sk;
172 bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
173
174 BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
175 }
176
177 static void bpf_skops_established(struct sock *sk, int bpf_op,
178 struct sk_buff *skb)
179 {
180 struct bpf_sock_ops_kern sock_ops;
181
182 sock_owned_by_me(sk);
183
184 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
185 sock_ops.op = bpf_op;
186 sock_ops.is_fullsock = 1;
187 sock_ops.sk = sk;
188 /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
189 if (skb)
190 bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
191
192 BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
193 }
194 #else
195 static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
196 {
197 }
198
199 static void bpf_skops_established(struct sock *sk, int bpf_op,
200 struct sk_buff *skb)
201 {
202 }
203 #endif
204
205 static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
206 unsigned int len)
207 {
208 static bool __once __read_mostly;
209
210 if (!__once) {
211 struct net_device *dev;
212
213 __once = true;
214
215 rcu_read_lock();
216 dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
217 if (!dev || len >= dev->mtu)
218 pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
219 dev ? dev->name : "Unknown driver");
220 rcu_read_unlock();
221 }
222 }
223
224 /* Adapt the MSS value used to make delayed ack decision to the
225 * real world.
226 */
227 static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
228 {
229 struct inet_connection_sock *icsk = inet_csk(sk);
230 const unsigned int lss = icsk->icsk_ack.last_seg_size;
231 unsigned int len;
232
233 icsk->icsk_ack.last_seg_size = 0;
234
235 /* skb->len may jitter because of SACKs, even if peer
236 * sends good full-sized frames.
237 */
238 len = skb_shinfo(skb)->gso_size ? : skb->len;
239 if (len >= icsk->icsk_ack.rcv_mss) {
240 /* Note: divides are still a bit expensive.
241 * For the moment, only adjust scaling_ratio
242 * when we update icsk_ack.rcv_mss.
243 */
244 if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
245 u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE;
246 u8 old_ratio = tcp_sk(sk)->scaling_ratio;
247
248 do_div(val, skb->truesize);
249 tcp_sk(sk)->scaling_ratio = val ? val : 1;
250
251 if (old_ratio != tcp_sk(sk)->scaling_ratio)
252 WRITE_ONCE(tcp_sk(sk)->window_clamp,
253 tcp_win_from_space(sk, sk->sk_rcvbuf));
254 }
255 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
256 tcp_sk(sk)->advmss);
257 /* Account for possibly-removed options */
258 if (unlikely(len > icsk->icsk_ack.rcv_mss +
259 MAX_TCP_OPTION_SPACE))
260 tcp_gro_dev_warn(sk, skb, len);
261 /* If the skb has a len of exactly 1*MSS and has the PSH bit
262 * set then it is likely the end of an application write. So
263 * more data may not be arriving soon, and yet the data sender
264 * may be waiting for an ACK if cwnd-bound or using TX zero
265 * copy. So we set ICSK_ACK_PUSHED here so that
266 * tcp_cleanup_rbuf() will send an ACK immediately if the app
267 * reads all of the data and is not ping-pong. If len > MSS
268 * then this logic does not matter (and does not hurt) because
269 * tcp_cleanup_rbuf() will always ACK immediately if the app
270 * reads data and there is more than an MSS of unACKed data.
271 */
272 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
273 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
274 } else {
275 /* Otherwise, we make a more careful check, taking into account
276 * that the size of the SACK block is variable.
277 *
278 * "len" is invariant segment length, including TCP header.
279 */
280 len += skb->data - skb_transport_header(skb);
281 if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
282 /* If PSH is not set, packet should be
283 * full sized, provided peer TCP is not badly broken.
284 * This observation (if it is correct 8)) allows us
285 * to handle super-low MTU links fairly.
286 */
287 (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
288 !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
289 /* Subtract also invariant (if peer is RFC compliant),
290 * tcp header plus fixed timestamp option length.
291 * Resulting "len" is MSS free of SACK jitter.
292 */
293 len -= tcp_sk(sk)->tcp_header_len;
294 icsk->icsk_ack.last_seg_size = len;
295 if (len == lss) {
296 icsk->icsk_ack.rcv_mss = len;
297 return;
298 }
299 }
300 if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
301 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
302 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
303 }
304 }
305
306 static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
307 {
308 struct inet_connection_sock *icsk = inet_csk(sk);
309 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
310
311 if (quickacks == 0)
312 quickacks = 2;
313 quickacks = min(quickacks, max_quickacks);
314 if (quickacks > icsk->icsk_ack.quick)
315 icsk->icsk_ack.quick = quickacks;
316 }
317
318 static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
319 {
320 struct inet_connection_sock *icsk = inet_csk(sk);
321
322 tcp_incr_quickack(sk, max_quickacks);
323 inet_csk_exit_pingpong_mode(sk);
324 icsk->icsk_ack.ato = TCP_ATO_MIN;
325 }
326
327 /* Send ACKs quickly, if "quick" count is not exhausted
328 * and the session is not interactive.
329 */
330
331 static bool tcp_in_quickack_mode(struct sock *sk)
332 {
333 const struct inet_connection_sock *icsk = inet_csk(sk);
334 const struct dst_entry *dst = __sk_dst_get(sk);
335
336 return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
337 (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
338 }
339
340 static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
341 {
342 if (tp->ecn_flags & TCP_ECN_OK)
343 tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
344 }
345
346 static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
347 {
348 if (tcp_hdr(skb)->cwr) {
349 tcp_sk(sk)->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
350
351 /* If the sender is telling us it has entered CWR, then its
352 * cwnd may be very low (even just 1 packet), so we should ACK
353 * immediately.
354 */
355 if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
356 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
357 }
358 }
359
360 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
361 {
362 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
363 }
364
365 static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
366 {
367 struct tcp_sock *tp = tcp_sk(sk);
368
369 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
370 case INET_ECN_NOT_ECT:
371 /* Funny extension: if ECT is not set on a segment,
372 * and we have already seen ECT on a previous segment,
373 * it is probably a retransmit.
374 */
375 if (tp->ecn_flags & TCP_ECN_SEEN)
376 tcp_enter_quickack_mode(sk, 2);
377 break;
378 case INET_ECN_CE:
379 if (tcp_ca_needs_ecn(sk))
380 tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
381
382 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
383 /* Better not delay acks, sender can have a very low cwnd */
384 tcp_enter_quickack_mode(sk, 2);
385 tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
386 }
387 tp->ecn_flags |= TCP_ECN_SEEN;
388 break;
389 default:
390 if (tcp_ca_needs_ecn(sk))
391 tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
392 tp->ecn_flags |= TCP_ECN_SEEN;
393 break;
394 }
395 }
396
397 static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
398 {
399 if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
400 __tcp_ecn_check_ce(sk, skb);
401 }
402
403 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
404 {
405 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
406 tp->ecn_flags &= ~TCP_ECN_OK;
407 }
408
409 static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
410 {
411 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
412 tp->ecn_flags &= ~TCP_ECN_OK;
413 }
414
415 static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
416 {
417 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
418 return true;
419 return false;
420 }
421
422 /* Buffer size and advertised window tuning.
423 *
424 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
425 */
426
427 static void tcp_sndbuf_expand(struct sock *sk)
428 {
429 const struct tcp_sock *tp = tcp_sk(sk);
430 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
431 int sndmem, per_mss;
432 u32 nr_segs;
433
434 /* Worst case is non-GSO/TSO: each frame consumes one skb
435 * and skb->head is kmalloced using a power-of-two area of memory
436 */
437 per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
438 MAX_TCP_HEADER +
439 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
440
441 per_mss = roundup_pow_of_two(per_mss) +
442 SKB_DATA_ALIGN(sizeof(struct sk_buff));
443
444 nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp));
445 nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
446
447 /* Fast Recovery (RFC 5681 3.2) :
448 * Cubic needs 1.7 factor, rounded to 2 to include
449 * extra cushion (application might react slowly to EPOLLOUT)
450 */
451 sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
452 sndmem *= nr_segs * per_mss;
453
454 if (sk->sk_sndbuf < sndmem)
455 WRITE_ONCE(sk->sk_sndbuf,
456 min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));
457 }
458
459 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
460 *
461 * All of tcp_full_space() is split into two parts: the "network" buffer, allocated
462 * forward and advertised in the receiver window (tp->rcv_wnd), and the
463 * "application buffer", required to isolate scheduling/application
464 * latencies from the network.
465 * window_clamp is the maximal advertised window. It can be less than
466 * tcp_full_space(); in this case tcp_full_space() - window_clamp
467 * is reserved for the "application" buffer. The smaller window_clamp is,
468 * the smoother our behaviour from the viewpoint of the network, but the lower
469 * the throughput and the higher the sensitivity of the connection to losses. 8)
470 *
471 * rcv_ssthresh is a stricter window_clamp used during the "slow start"
472 * phase to predict the further behaviour of this connection.
473 * It is used for two goals:
474 * - to enforce header prediction at the sender, even when the application
475 * requires some significant "application buffer". This is check #1.
476 * - to prevent pruning of the receive queue because of misprediction
477 * of the receiver window. This is check #2.
478 *
479 * The scheme does not work when sender sends good segments opening
480 * window and then starts to feed us spaghetti. But it should work
481 * in common situations. Otherwise, we have to rely on queue collapsing.
482 */
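/* Illustrative example of the split described above (hypothetical numbers):
 * with tcp_full_space() of 1 MB and window_clamp of 768 KB, at most 768 KB
 * is ever advertised to the peer and the remaining 256 KB serves as the
 * "application buffer", absorbing scheduling/read latencies. rcv_ssthresh
 * starts below window_clamp and is raised by tcp_grow_window() only when
 * the socket is not under memory pressure and incoming skbs keep a sane
 * len/truesize ratio.
 */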
483
484 /* Slow part of check#2. */
485 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
486 unsigned int skbtruesize)
487 {
488 const struct tcp_sock *tp = tcp_sk(sk);
489 /* Optimize this! */
490 int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
491 int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;
492
493 while (tp->rcv_ssthresh <= window) {
494 if (truesize <= skb->len)
495 return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
496
497 truesize >>= 1;
498 window >>= 1;
499 }
500 return 0;
501 }
502
503 /* Even if skb appears to have a bad len/truesize ratio, TCP coalescing
504 * can play nice with us, as sk_buff and skb->head might be either
505 * freed or shared with up to MAX_SKB_FRAGS segments.
506 * Only give a boost to drivers using page frag(s) to hold the frame(s),
507 * and if no payload was pulled in skb->head before reaching us.
508 */
509 static u32 truesize_adjust(bool adjust, const struct sk_buff *skb)
510 {
511 u32 truesize = skb->truesize;
512
513 if (adjust && !skb_headlen(skb)) {
514 truesize -= SKB_TRUESIZE(skb_end_offset(skb));
515 /* paranoid check, some drivers might be buggy */
516 if (unlikely((int)truesize < (int)skb->len))
517 truesize = skb->truesize;
518 }
519 return truesize;
520 }
521
522 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
523 bool adjust)
524 {
525 struct tcp_sock *tp = tcp_sk(sk);
526 int room;
527
528 room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
529
530 if (room <= 0)
531 return;
532
533 /* Check #1 */
534 if (!tcp_under_memory_pressure(sk)) {
535 unsigned int truesize = truesize_adjust(adjust, skb);
536 int incr;
537
538 /* Check #2. Increase window, if skb with such overhead
539 * will fit to rcvbuf in future.
540 */
541 if (tcp_win_from_space(sk, truesize) <= skb->len)
542 incr = 2 * tp->advmss;
543 else
544 incr = __tcp_grow_window(sk, skb, truesize);
545
546 if (incr) {
547 incr = max_t(int, incr, 2 * skb->len);
548 tp->rcv_ssthresh += min(room, incr);
549 inet_csk(sk)->icsk_ack.quick |= 1;
550 }
551 } else {
552 /* Under pressure:
553 * Adjust rcv_ssthresh according to reserved mem
554 */
555 tcp_adjust_rcv_ssthresh(sk);
556 }
557 }
558
559 /* 3. Try to fix everything up. This is done immediately after the connection
560 * enters the established state.
561 */
562 static void tcp_init_buffer_space(struct sock *sk)
563 {
564 int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
565 struct tcp_sock *tp = tcp_sk(sk);
566 int maxwin;
567
568 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
569 tcp_sndbuf_expand(sk);
570
571 tcp_mstamp_refresh(tp);
572 tp->rcvq_space.time = tp->tcp_mstamp;
573 tp->rcvq_space.seq = tp->copied_seq;
574
575 maxwin = tcp_full_space(sk);
576
577 if (tp->window_clamp >= maxwin) {
578 WRITE_ONCE(tp->window_clamp, maxwin);
579
580 if (tcp_app_win && maxwin > 4 * tp->advmss)
581 WRITE_ONCE(tp->window_clamp,
582 max(maxwin - (maxwin >> tcp_app_win),
583 4 * tp->advmss));
584 }
585
586 /* Force reservation of one segment. */
587 if (tcp_app_win &&
588 tp->window_clamp > 2 * tp->advmss &&
589 tp->window_clamp + tp->advmss > maxwin)
590 WRITE_ONCE(tp->window_clamp,
591 max(2 * tp->advmss, maxwin - tp->advmss));
592
593 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
594 tp->snd_cwnd_stamp = tcp_jiffies32;
595 tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
596 (u32)TCP_INIT_CWND * tp->advmss);
597 }
598
599 /* 4. Recalculate window clamp after socket hit its memory bounds. */
600 static void tcp_clamp_window(struct sock *sk)
601 {
602 struct tcp_sock *tp = tcp_sk(sk);
603 struct inet_connection_sock *icsk = inet_csk(sk);
604 struct net *net = sock_net(sk);
605 int rmem2;
606
607 icsk->icsk_ack.quick = 0;
608 rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
609
610 if (sk->sk_rcvbuf < rmem2 &&
611 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
612 !tcp_under_memory_pressure(sk) &&
613 sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
614 WRITE_ONCE(sk->sk_rcvbuf,
615 min(atomic_read(&sk->sk_rmem_alloc), rmem2));
616 }
617 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
618 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
619 }
620
621 /* Initialize RCV_MSS value.
622 * RCV_MSS is our guess about the MSS used by the peer.
623 * We don't have any direct information about the MSS.
624 * It's better to underestimate RCV_MSS than to overestimate it.
625 * Overestimations make us ACK less frequently than needed.
626 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
627 */
628 void tcp_initialize_rcv_mss(struct sock *sk)
629 {
630 const struct tcp_sock *tp = tcp_sk(sk);
631 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
632
633 hint = min(hint, tp->rcv_wnd / 2);
634 hint = min(hint, TCP_MSS_DEFAULT);
635 hint = max(hint, TCP_MIN_MSS);
636
637 inet_csk(sk)->icsk_ack.rcv_mss = hint;
638 }
639 EXPORT_SYMBOL(tcp_initialize_rcv_mss);
640
641 /* Receiver "autotuning" code.
642 *
643 * The algorithm for RTT estimation w/o timestamps is based on
644 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
645 * <https://public.lanl.gov/radiant/pubs.html#DRS>
646 *
647 * More detail on this code can be found at
648 * <http://staff.psc.edu/jheffner/>,
649 * though this reference is out of date. A new paper
650 * is pending.
651 */
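/* Rough sketch of tcp_rcv_rtt_update() below (rtt_us is kept scaled by 8):
 * - win_dep == 0 (timestamp-based samples): EWMA with gain 1/8,
 *   new = old + (sample - old/8).
 * - win_dep == 1 (per-window DRS samples): running minimum,
 *   new = min(old, 8 * sample), so a single inflated sample cannot raise it.
 * E.g. (illustrative numbers) old = 8 * 40ms and a 48ms timestamp sample
 * give 8 * 41ms, while the same sample in win_dep mode leaves the estimate
 * at 40ms.
 */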
652 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
653 {
654 u32 new_sample = tp->rcv_rtt_est.rtt_us;
655 long m = sample;
656
657 if (new_sample != 0) {
658 /* If we sampled over larger intervals in the non-timestamp
659 * case, we could grossly overestimate the RTT, especially
660 * with chatty applications or bulk transfer apps which
661 * are stalled on filesystem I/O.
662 *
663 * Also, since we are only going for a minimum in the
664 * non-timestamp case, we do not smooth things out;
665 * otherwise, with timestamps disabled, convergence takes
666 * too long.
667 */
668 if (!win_dep) {
669 m -= (new_sample >> 3);
670 new_sample += m;
671 } else {
672 m <<= 3;
673 if (m < new_sample)
674 new_sample = m;
675 }
676 } else {
677 /* No previous measure. */
678 new_sample = m << 3;
679 }
680
681 tp->rcv_rtt_est.rtt_us = new_sample;
682 }
683
684 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
685 {
686 u32 delta_us;
687
688 if (tp->rcv_rtt_est.time == 0)
689 goto new_measure;
690 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
691 return;
692 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
693 if (!delta_us)
694 delta_us = 1;
695 tcp_rcv_rtt_update(tp, delta_us, 1);
696
697 new_measure:
698 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
699 tp->rcv_rtt_est.time = tp->tcp_mstamp;
700 }
701
702 static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
703 const struct sk_buff *skb)
704 {
705 struct tcp_sock *tp = tcp_sk(sk);
706
707 if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr)
708 return;
709 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
710
711 if (TCP_SKB_CB(skb)->end_seq -
712 TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
713 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
714 u32 delta_us;
715
716 if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
717 if (!delta)
718 delta = 1;
719 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
720 tcp_rcv_rtt_update(tp, delta_us, 0);
721 }
722 }
723 }
724
725 /*
726 * This function should be called every time data is copied to user space.
727 * It calculates the appropriate TCP receive buffer space.
728 */
729 void tcp_rcv_space_adjust(struct sock *sk)
730 {
731 struct tcp_sock *tp = tcp_sk(sk);
732 u32 copied;
733 int time;
734
735 trace_tcp_rcv_space_adjust(sk);
736
737 tcp_mstamp_refresh(tp);
738 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
739 if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
740 return;
741
742 /* Number of bytes copied to user in last RTT */
743 copied = tp->copied_seq - tp->rcvq_space.seq;
744 if (copied <= tp->rcvq_space.space)
745 goto new_measure;
746
747 /* A bit of theory :
748 * copied = bytes received in previous RTT, our base window
749 * To cope with packet losses, we need a 2x factor
750 * To cope with slow start, and sender growing its cwin by 100 %
751 * every RTT, we need a 4x factor, because the ACK we are sending
752 * now is for the next RTT, not the current one :
753 * <prev RTT . ><current RTT .. ><next RTT .... >
754 */
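/* Worked example (illustrative numbers): copied = 100000 bytes in the last
 * RTT, previous rcvq_space.space = 50000, advmss = 1460:
 *   rcvwin = 2 * 100000 + 16 * 1460            = 223360
 *   grow   = 223360 * (100000 - 50000) / 50000 = 223360
 *   rcvwin + 2 * grow                          = 670080
 * rcvwin is then converted to a buffer size, capped by tcp_rmem[2], and
 * applied only if it is larger than the current sk_rcvbuf.
 */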
755
756 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
757 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
758 u64 rcvwin, grow;
759 int rcvbuf;
760
761 /* minimal window to cope with packet losses, assuming
762 * steady state. Add some cushion because of small variations.
763 */
764 rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
765
766 /* Accommodate the sender's rate increase (e.g. slow start) */
767 grow = rcvwin * (copied - tp->rcvq_space.space);
768 do_div(grow, tp->rcvq_space.space);
769 rcvwin += (grow << 1);
770
771 rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin),
772 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
773 if (rcvbuf > sk->sk_rcvbuf) {
774 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
775
776 /* Make the window clamp follow along. */
777 WRITE_ONCE(tp->window_clamp,
778 tcp_win_from_space(sk, rcvbuf));
779 }
780 }
781 tp->rcvq_space.space = copied;
782
783 new_measure:
784 tp->rcvq_space.seq = tp->copied_seq;
785 tp->rcvq_space.time = tp->tcp_mstamp;
786 }
787
788 /* There is something which you must keep in mind when you analyze the
789 * behavior of the tp->ato delayed ack timeout interval. When a
790 * connection starts up, we want to ack as quickly as possible. The
791 * problem is that "good" TCP's do slow start at the beginning of data
792 * transmission. The means that until we send the first few ACK's the
793 * sender will sit on his end and only queue most of his data, because
794 * he can only send snd_cwnd unacked packets at any given time. For
795 * each ACK we send, he increments snd_cwnd and transmits more of his
796 * queue. -DaveM
797 */
798 static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
799 {
800 struct tcp_sock *tp = tcp_sk(sk);
801 struct inet_connection_sock *icsk = inet_csk(sk);
802 u32 now;
803
804 inet_csk_schedule_ack(sk);
805
806 tcp_measure_rcv_mss(sk, skb);
807
808 tcp_rcv_rtt_measure(tp);
809
810 now = tcp_jiffies32;
811
812 if (!icsk->icsk_ack.ato) {
813 /* The _first_ data packet received, initialize
814 * delayed ACK engine.
815 */
816 tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
817 icsk->icsk_ack.ato = TCP_ATO_MIN;
818 } else {
819 int m = now - icsk->icsk_ack.lrcvtime;
820
821 if (m <= TCP_ATO_MIN / 2) {
822 /* The fastest case is the first. */
823 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
824 } else if (m < icsk->icsk_ack.ato) {
825 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
826 if (icsk->icsk_ack.ato > icsk->icsk_rto)
827 icsk->icsk_ack.ato = icsk->icsk_rto;
828 } else if (m > icsk->icsk_rto) {
829 /* Too long gap. Apparently sender failed to
830 * restart window, so that we send ACKs quickly.
831 */
832 tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
833 }
834 }
835 icsk->icsk_ack.lrcvtime = now;
836
837 tcp_ecn_check_ce(sk, skb);
838
839 if (skb->len >= 128)
840 tcp_grow_window(sk, skb, true);
841 }
842
843 /* Called to compute a smoothed rtt estimate. The data fed to this
844 * routine either comes from timestamps, or from segments that were
845 * known _not_ to have been retransmitted [see Karn/Partridge
846 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
847 * piece by Van Jacobson.
848 * NOTE: the next three routines used to be one big routine.
849 * To save cycles in the RFC 1323 implementation it was better to break
850 * it up into three procedures. -- erics
851 */
852 static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
853 {
854 struct tcp_sock *tp = tcp_sk(sk);
855 long m = mrtt_us; /* RTT */
856 u32 srtt = tp->srtt_us;
857
858 /* The following amusing code comes from Jacobson's
859 * article in SIGCOMM '88. Note that rtt and mdev
860 * are scaled versions of rtt and mean deviation.
861 * This is designed to be as fast as possible
862 * m stands for "measurement".
863 *
864 * In a 1990 paper the rto value is changed to:
865 * RTO = rtt + 4 * mdev
866 *
867 * Funny. This algorithm seems to be very broken.
868 * These formulae increase RTO, when it should be decreased, increase
869 * too slowly, when it should be increased quickly, decrease too quickly
870 * etc. I guess in BSD RTO takes ONE value, so it absolutely
871 * does not matter how to _calculate_ it. Seems it was a trap
872 * that VJ failed to avoid. 8)
873 */
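/* In the code below srtt_us is kept scaled by 8 and mdev_us by 4, so the
 * updates roughly correspond to the classic formulas (the m < srtt case
 * uses a finer gain, as noted further down):
 *   srtt <- 7/8 * srtt + 1/8 * m
 *   mdev <- 3/4 * mdev + 1/4 * |m - srtt|
 * E.g. (illustrative) srtt = 80ms, mdev = 10ms, m = 104ms gives
 * srtt = 83ms and mdev = 13.5ms.
 */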
874 if (srtt != 0) {
875 m -= (srtt >> 3); /* m is now error in rtt est */
876 srtt += m; /* rtt = 7/8 rtt + 1/8 new */
877 if (m < 0) {
878 m = -m; /* m is now abs(error) */
879 m -= (tp->mdev_us >> 2); /* similar update on mdev */
880 /* This is similar to one of Eifel findings.
881 * Eifel blocks mdev updates when rtt decreases.
882 * This solution is a bit different: we use finer gain
883 * for mdev in this case (alpha*beta).
884 * Like Eifel it also prevents growth of rto,
885 * but also it limits too fast rto decreases,
886 * happening in pure Eifel.
887 */
888 if (m > 0)
889 m >>= 3;
890 } else {
891 m -= (tp->mdev_us >> 2); /* similar update on mdev */
892 }
893 tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */
894 if (tp->mdev_us > tp->mdev_max_us) {
895 tp->mdev_max_us = tp->mdev_us;
896 if (tp->mdev_max_us > tp->rttvar_us)
897 tp->rttvar_us = tp->mdev_max_us;
898 }
899 if (after(tp->snd_una, tp->rtt_seq)) {
900 if (tp->mdev_max_us < tp->rttvar_us)
901 tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
902 tp->rtt_seq = tp->snd_nxt;
903 tp->mdev_max_us = tcp_rto_min_us(sk);
904
905 tcp_bpf_rtt(sk);
906 }
907 } else {
908 /* no previous measure. */
909 srtt = m << 3; /* take the measured time to be rtt */
910 tp->mdev_us = m << 1; /* make sure rto = 3*rtt */
911 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
912 tp->mdev_max_us = tp->rttvar_us;
913 tp->rtt_seq = tp->snd_nxt;
914
915 tcp_bpf_rtt(sk);
916 }
917 tp->srtt_us = max(1U, srtt);
918 }
919
920 static void tcp_update_pacing_rate(struct sock *sk)
921 {
922 const struct tcp_sock *tp = tcp_sk(sk);
923 u64 rate;
924
925 /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
926 rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);
927
928 /* current rate is (cwnd * mss) / srtt
929 * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.
930 * In Congestion Avoidance phase, set it to 120 % the current rate.
931 *
932 * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
933 * If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
934 * end of slow start and should slow down.
935 */
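/* Illustrative example: mss_cache = 1448, cwnd = 10, srtt = 20 ms and the
 * default 200% slow-start ratio give
 *   rate = 2.0 * (10 * 1448) / 0.020 s ~= 1.45 MB/s,
 * i.e. the pacer may push one cwnd worth of data in about half an RTT.
 */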
936 if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
937 rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);
938 else
939 rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);
940
941 rate *= max(tcp_snd_cwnd(tp), tp->packets_out);
942
943 if (likely(tp->srtt_us))
944 do_div(rate, tp->srtt_us);
945
946 /* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate
947 * without any lock. We want to make sure the compiler won't store
948 * intermediate values in this location.
949 */
950 WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,
951 sk->sk_max_pacing_rate));
952 }
953
954 /* Calculate rto without backoff. This is the second half of Van Jacobson's
955 * routine referred to above.
956 */
957 static void tcp_set_rto(struct sock *sk)
958 {
959 const struct tcp_sock *tp = tcp_sk(sk);
960 /* Old crap is replaced with new one. 8)
961 *
962 * More seriously:
963 * 1. If the rtt variance happened to be less than 50 msec, it is a hallucination.
964 * It cannot be less due to utterly erratic ACK generation made
965 * at least by solaris and freebsd. "Erratic ACKs" have _nothing_
966 * to do with delayed acks, because at cwnd>2 true delack timeout
967 * is invisible. Actually, Linux-2.4 also generates erratic
968 * ACKs in some circumstances.
969 */
970 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
971
972 /* 2. Fixups made earlier cannot be right.
973 * If we do not estimate RTO correctly without them,
974 * all the algo is pure shit and should be replaced
975 * with a correct one. That is exactly what we pretend to do.
976 */
977
978 /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
979 * guarantees that rto is higher.
980 */
981 tcp_bound_rto(sk);
982 }
983
984 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
985 {
986 __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
987
988 if (!cwnd)
989 cwnd = TCP_INIT_CWND;
990 return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
991 }
992
993 struct tcp_sacktag_state {
994 /* Timestamps for earliest and latest never-retransmitted segment
995 * that was SACKed. RTO needs the earliest RTT to stay conservative,
996 * but congestion control should still get an accurate delay signal.
997 */
998 u64 first_sackt;
999 u64 last_sackt;
1000 u32 reord;
1001 u32 sack_delivered;
1002 int flag;
1003 unsigned int mss_now;
1004 struct rate_sample *rate;
1005 };
1006
1007 /* Take note that the peer is sending D-SACKs. Skip the update of data delivery
1008 * and spurious retransmission information if this DSACK was unlikely caused by
1009 * the sender's action:
1010 * - DSACKed sequence range is larger than the maximum receiver window.
1011 * - Total no. of DSACKed segments exceeds the total no. of retransmitted segs.
1012 */
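/* Illustrative examples: with tp->max_window = 65535, a DSACK covering a
 * 70000-byte range is treated as dubious and ignored (returns 0); a
 * 3000-byte DSACK with mss_cache = 1460 counts as
 * DIV_ROUND_UP(3000, 1460) = 3 duplicate segments.
 */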
1013 static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq,
1014 u32 end_seq, struct tcp_sacktag_state *state)
1015 {
1016 u32 seq_len, dup_segs = 1;
1017
1018 if (!before(start_seq, end_seq))
1019 return 0;
1020
1021 seq_len = end_seq - start_seq;
1022 /* Dubious DSACK: DSACKed range greater than maximum advertised rwnd */
1023 if (seq_len > tp->max_window)
1024 return 0;
1025 if (seq_len > tp->mss_cache)
1026 dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache);
1027 else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq)
1028 state->flag |= FLAG_DSACK_TLP;
1029
1030 tp->dsack_dups += dup_segs;
1031 /* Skip the DSACK if dup segs weren't retransmitted by sender */
1032 if (tp->dsack_dups > tp->total_retrans)
1033 return 0;
1034
1035 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
1036 /* We increase the RACK ordering window in rounds where we receive
1037 * DSACKs that may have been due to reordering causing RACK to trigger
1038 * a spurious fast recovery. Thus RACK ignores DSACKs that happen
1039 * without having seen reordering, or that match TLP probes (TLP
1040 * is timer-driven, not triggered by RACK).
1041 */
1042 if (tp->reord_seen && !(state->flag & FLAG_DSACK_TLP))
1043 tp->rack.dsack_seen = 1;
1044
1045 state->flag |= FLAG_DSACKING_ACK;
1046 /* A spurious retransmission is delivered */
1047 state->sack_delivered += dup_segs;
1048
1049 return dup_segs;
1050 }
1051
1052 /* It's reordering when a higher sequence was delivered (i.e. sacked) before
1053 * some lower never-retransmitted sequence ("low_seq"). The maximum reordering
1054 * distance is approximated in full-mss packet distance ("reordering").
1055 */
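/* Illustrative example: if the highest SACKed sequence (fack) is 30000 and
 * a never-retransmitted segment starting at low_seq = 10000 is only now
 * delivered, metric = 20000; with mss = 1000 that is a reordering distance
 * of 20 packets, so tp->reordering would be set to 20 (capped by the
 * tcp_max_reordering sysctl).
 */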
1056 static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
1057 const int ts)
1058 {
1059 struct tcp_sock *tp = tcp_sk(sk);
1060 const u32 mss = tp->mss_cache;
1061 u32 fack, metric;
1062
1063 fack = tcp_highest_sack_seq(tp);
1064 if (!before(low_seq, fack))
1065 return;
1066
1067 metric = fack - low_seq;
1068 if ((metric > tp->reordering * mss) && mss) {
1069 #if FASTRETRANS_DEBUG > 1
1070 pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
1071 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
1072 tp->reordering,
1073 0,
1074 tp->sacked_out,
1075 tp->undo_marker ? tp->undo_retrans : 0);
1076 #endif
1077 tp->reordering = min_t(u32, (metric + mss - 1) / mss,
1078 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
1079 }
1080
1081 /* This exciting event is worth remembering. 8) */
1082 tp->reord_seen++;
1083 NET_INC_STATS(sock_net(sk),
1084 ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
1085 }
1086
1087 /* This must be called before lost_out or retrans_out are updated
1088 * on a new loss, because we want to know if all skbs previously
1089 * known to be lost have already been retransmitted, indicating
1090 * that this newly lost skb is our next skb to retransmit.
1091 */
1092 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
1093 {
1094 if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
1095 (tp->retransmit_skb_hint &&
1096 before(TCP_SKB_CB(skb)->seq,
1097 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))
1098 tp->retransmit_skb_hint = skb;
1099 }
1100
1101 /* Sum the number of packets on the wire we have marked as lost, and
1102 * notify the congestion control module that the given skb was marked lost.
1103 */
1104 static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
1105 {
1106 tp->lost += tcp_skb_pcount(skb);
1107 }
1108
1109 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
1110 {
1111 __u8 sacked = TCP_SKB_CB(skb)->sacked;
1112 struct tcp_sock *tp = tcp_sk(sk);
1113
1114 if (sacked & TCPCB_SACKED_ACKED)
1115 return;
1116
1117 tcp_verify_retransmit_hint(tp, skb);
1118 if (sacked & TCPCB_LOST) {
1119 if (sacked & TCPCB_SACKED_RETRANS) {
1120 /* Account for retransmits that are lost again */
1121 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1122 tp->retrans_out -= tcp_skb_pcount(skb);
1123 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
1124 tcp_skb_pcount(skb));
1125 tcp_notify_skb_loss_event(tp, skb);
1126 }
1127 } else {
1128 tp->lost_out += tcp_skb_pcount(skb);
1129 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1130 tcp_notify_skb_loss_event(tp, skb);
1131 }
1132 }
1133
1134 /* Updates the delivered and delivered_ce counts */
1135 static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
1136 bool ece_ack)
1137 {
1138 tp->delivered += delivered;
1139 if (ece_ack)
1140 tp->delivered_ce += delivered;
1141 }
1142
1143 /* This procedure tags the retransmission queue when SACKs arrive.
1144 *
1145 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
1146 * Packets in queue with these bits set are counted in variables
1147 * sacked_out, retrans_out and lost_out, correspondingly.
1148 *
1149 * Valid combinations are:
1150 * Tag InFlight Description
1151 * 0 1 - orig segment is in flight.
1152 * S 0 - nothing flies, orig reached receiver.
1153 * L 0 - nothing flies, orig lost by net.
1154 * R 2 - both orig and retransmit are in flight.
1155 * L|R 1 - orig is lost, retransmit is in flight.
1156 * S|R 1 - orig reached receiver, retrans is still in flight.
1157 * (L|S|R is logically valid, it could occur when L|R is sacked,
1158 * but it is equivalent to plain S and the code short-circuits it to S.
1159 * L|S is logically invalid, it would mean -1 packet in flight 8))
1160 *
1161 * These 6 states form finite state machine, controlled by the following events:
1162 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
1163 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
1164 * 3. Loss detection event of two flavors:
1165 * A. Scoreboard estimator decided the packet is lost.
1166 * A'. Reno "three dupacks" marks head of queue lost.
1167 * B. SACK arrives sacking SND.NXT at the moment, when the
1168 * segment was retransmitted.
1169 * 4. D-SACK added new rule: D-SACK changes any tag to S.
1170 *
1171 * It is pleasant to note, that state diagram turns out to be commutative,
1172 * so that we are allowed not to be bothered by order of our actions,
1173 * when multiple events arrive simultaneously. (see the function below).
1174 *
1175 * Reordering detection.
1176 * --------------------
1177 * Reordering metric is maximal distance, which a packet can be displaced
1178 * in packet stream. With SACKs we can estimate it:
1179 *
1180 * 1. SACK fills old hole and the corresponding segment was not
1181 * ever retransmitted -> reordering. Alas, we cannot use it
1182 * when segment was retransmitted.
1183 * 2. The last flaw is solved with D-SACK. D-SACK arrives
1184 * for retransmitted and already SACKed segment -> reordering..
1185 * Both of these heuristics are not used in Loss state, when we cannot
1186 * account for retransmits accurately.
1187 *
1188 * SACK block validation.
1189 * ----------------------
1190 *
1191 * SACK block range validation checks that the received SACK block fits to
1192 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
1193 * Note that SND.UNA is not included in the range, though being valid, because
1194 * it means that the receiver is rather inconsistent with itself, reporting
1195 * SACK reneging when it should advance SND.UNA. Such a SACK block is
1196 * perfectly valid, however, in light of RFC 2018, which explicitly states
1197 * that "SACK block MUST reflect the newest segment. Even if the newest
1198 * segment is going to be discarded ...", not that it looks very clever
1199 * in case of the head skb. Due to potential receiver-driven attacks, we
1200 * choose to avoid immediate execution of a walk in write queue due to
1201 * reneging and defer head skb's loss recovery to standard loss recovery
1202 * procedure that will eventually trigger (nothing forbids us doing this).
1203 *
1204 * Implements also blockage to start_seq wrap-around. Problem lies in the
1205 * fact that though start_seq (s) is before end_seq (i.e., not reversed),
1206 * there's no guarantee that it will be before snd_nxt (n). The problem
1207 * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
1208 * wrap (s_w):
1209 *
1210 * <- outs wnd -> <- wrapzone ->
1211 * u e n u_w e_w s n_w
1212 * | | | | | | |
1213 * |<------------+------+----- TCP seqno space --------------+---------->|
1214 * ...-- <2^31 ->| |<--------...
1215 * ...---- >2^31 ------>| |<--------...
1216 *
1217 * Current code wouldn't be vulnerable but it's better still to discard such
1218 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
1219 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
1220 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
1221 * equal to the ideal case (infinite seqno space without wrap caused issues).
1222 *
1223 * With D-SACK the lower bound is extended to cover sequence space below
1224 * SND.UNA down to undo_marker, which is the last point of interest. Yet
1225 * again, a D-SACK block must not go across snd_una (for the same reason as
1226 * for the normal SACK blocks, explained above). But there all simplicity
1227 * ends: TCP might receive valid D-SACKs below that. As long as they reside
1228 * fully below undo_marker they do not affect behavior in any way and can
1229 * therefore be safely ignored. In rare cases (which are more or less
1230 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
1231 * fragmentation and packet reordering past skb's retransmission. To consider
1232 * them correctly, the acceptable range must be extended even more though
1233 * the exact amount is rather hard to quantify. However, tp->max_window can
1234 * be used as an exaggerated estimate.
1235 */
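/* Illustrative example of the checks below: with snd_una = 1000 and
 * snd_nxt = 5000, a SACK block [2000, 3000) is accepted, while
 * [6000, 7000) (beyond snd_nxt) or a reversed block is discarded.
 * A D-SACK [500, 800) below snd_una is accepted (provided undo_marker is
 * set) unless it ends at or below undo_marker, in which case it is too old.
 */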
1236 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
1237 u32 start_seq, u32 end_seq)
1238 {
1239 /* Too far in future, or reversed (interpretation is ambiguous) */
1240 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
1241 return false;
1242
1243 /* Nasty start_seq wrap-around check (see comments above) */
1244 if (!before(start_seq, tp->snd_nxt))
1245 return false;
1246
1247 /* In outstanding window? ...This is valid exit for D-SACKs too.
1248 * start_seq == snd_una is non-sensical (see comments above)
1249 */
1250 if (after(start_seq, tp->snd_una))
1251 return true;
1252
1253 if (!is_dsack || !tp->undo_marker)
1254 return false;
1255
1256 /* ...Then it's D-SACK, and must reside below snd_una completely */
1257 if (after(end_seq, tp->snd_una))
1258 return false;
1259
1260 if (!before(start_seq, tp->undo_marker))
1261 return true;
1262
1263 /* Too old */
1264 if (!after(end_seq, tp->undo_marker))
1265 return false;
1266
1267 /* Undo_marker boundary crossing (overestimates a lot). Known already:
1268 * start_seq < undo_marker and end_seq >= undo_marker.
1269 */
1270 return !before(start_seq, end_seq - tp->max_window);
1271 }
1272
1273 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1274 struct tcp_sack_block_wire *sp, int num_sacks,
1275 u32 prior_snd_una, struct tcp_sacktag_state *state)
1276 {
1277 struct tcp_sock *tp = tcp_sk(sk);
1278 u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
1279 u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
1280 u32 dup_segs;
1281
1282 if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
1283 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
1284 } else if (num_sacks > 1) {
1285 u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
1286 u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
1287
1288 if (after(end_seq_0, end_seq_1) || before(start_seq_0, start_seq_1))
1289 return false;
1290 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV);
1291 } else {
1292 return false;
1293 }
1294
1295 dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state);
1296 if (!dup_segs) { /* Skip dubious DSACK */
1297 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKIGNOREDDUBIOUS);
1298 return false;
1299 }
1300
1301 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs);
1302
1303 /* D-SACK for already forgotten data... Do dumb counting. */
1304 if (tp->undo_marker && tp->undo_retrans > 0 &&
1305 !after(end_seq_0, prior_snd_una) &&
1306 after(end_seq_0, tp->undo_marker))
1307 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs);
1308
1309 return true;
1310 }
1311
1312 /* Check if skb is fully within the SACK block. In the presence of GSO skbs,
1313 * the incoming SACK may not exactly match but we can find a smaller,
1314 * MSS-aligned portion of it that matches. Therefore we might need to fragment,
1315 * which may fail and create some hassle (the caller must handle error
1316 * returns).
1317 *
1318 * FIXME: this could be merged to shift decision code
1319 */
1320 static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1321 u32 start_seq, u32 end_seq)
1322 {
1323 int err;
1324 bool in_sack;
1325 unsigned int pkt_len;
1326 unsigned int mss;
1327
1328 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1329 !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1330
1331 if (tcp_skb_pcount(skb) > 1 && !in_sack &&
1332 after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
1333 mss = tcp_skb_mss(skb);
1334 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1335
1336 if (!in_sack) {
1337 pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
1338 if (pkt_len < mss)
1339 pkt_len = mss;
1340 } else {
1341 pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
1342 if (pkt_len < mss)
1343 return -EINVAL;
1344 }
1345
1346 /* Round if necessary so that SACKs cover only full MSSes
1347 * and/or the remaining small portion (if present)
1348 */
1349 if (pkt_len > mss) {
1350 unsigned int new_len = (pkt_len / mss) * mss;
1351 if (!in_sack && new_len < pkt_len)
1352 new_len += mss;
1353 pkt_len = new_len;
1354 }
1355
1356 if (pkt_len >= skb->len && !in_sack)
1357 return 0;
1358
1359 err = tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
1360 pkt_len, mss, GFP_ATOMIC);
1361 if (err < 0)
1362 return err;
1363 }
1364
1365 return in_sack;
1366 }
1367
1368 /* Mark the given newly-SACKed range as such, adjusting counters and hints. */
1369 static u8 tcp_sacktag_one(struct sock *sk,
1370 struct tcp_sacktag_state *state, u8 sacked,
1371 u32 start_seq, u32 end_seq,
1372 int dup_sack, int pcount,
1373 u64 xmit_time)
1374 {
1375 struct tcp_sock *tp = tcp_sk(sk);
1376
1377 /* Account D-SACK for retransmitted packet. */
1378 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1379 if (tp->undo_marker && tp->undo_retrans > 0 &&
1380 after(end_seq, tp->undo_marker))
1381 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
1382 if ((sacked & TCPCB_SACKED_ACKED) &&
1383 before(start_seq, state->reord))
1384 state->reord = start_seq;
1385 }
1386
1387 /* Nothing to do; acked frame is about to be dropped (was ACKed). */
1388 if (!after(end_seq, tp->snd_una))
1389 return sacked;
1390
1391 if (!(sacked & TCPCB_SACKED_ACKED)) {
1392 tcp_rack_advance(tp, sacked, end_seq, xmit_time);
1393
1394 if (sacked & TCPCB_SACKED_RETRANS) {
1395 /* If the segment is not tagged as lost,
1396 * we do not clear RETRANS, believing
1397 * that retransmission is still in flight.
1398 */
1399 if (sacked & TCPCB_LOST) {
1400 sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
1401 tp->lost_out -= pcount;
1402 tp->retrans_out -= pcount;
1403 }
1404 } else {
1405 if (!(sacked & TCPCB_RETRANS)) {
1406 /* New SACK for a never-retransmitted frame,
1407 * which was in a hole. This is reordering.
1408 */
1409 if (before(start_seq,
1410 tcp_highest_sack_seq(tp)) &&
1411 before(start_seq, state->reord))
1412 state->reord = start_seq;
1413
1414 if (!after(end_seq, tp->high_seq))
1415 state->flag |= FLAG_ORIG_SACK_ACKED;
1416 if (state->first_sackt == 0)
1417 state->first_sackt = xmit_time;
1418 state->last_sackt = xmit_time;
1419 }
1420
1421 if (sacked & TCPCB_LOST) {
1422 sacked &= ~TCPCB_LOST;
1423 tp->lost_out -= pcount;
1424 }
1425 }
1426
1427 sacked |= TCPCB_SACKED_ACKED;
1428 state->flag |= FLAG_DATA_SACKED;
1429 tp->sacked_out += pcount;
1430 /* Out-of-order packets delivered */
1431 state->sack_delivered += pcount;
1432
1433 /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
1434 if (tp->lost_skb_hint &&
1435 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
1436 tp->lost_cnt_hint += pcount;
1437 }
1438
1439 /* D-SACK. We can detect redundant retransmission in S|R and plain R
1440 * frames and clear it. undo_retrans is decreased above, L|R frames
1441 * are accounted above as well.
1442 */
1443 if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
1444 sacked &= ~TCPCB_SACKED_RETRANS;
1445 tp->retrans_out -= pcount;
1446 }
1447
1448 return sacked;
1449 }
1450
1451 /* Shift newly-SACKed bytes from this skb to the immediately previous
1452 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
1453 */
1454 static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
1455 struct sk_buff *skb,
1456 struct tcp_sacktag_state *state,
1457 unsigned int pcount, int shifted, int mss,
1458 bool dup_sack)
1459 {
1460 struct tcp_sock *tp = tcp_sk(sk);
1461 u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */
1462 u32 end_seq = start_seq + shifted; /* end of newly-SACKed */
1463
1464 BUG_ON(!pcount);
1465
1466 /* Adjust counters and hints for the newly sacked sequence
1467 * range but discard the return value since prev is already
1468 * marked. We must tag the range first because the seq
1469 * advancement below implicitly advances
1470 * tcp_highest_sack_seq() when skb is highest_sack.
1471 */
1472 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
1473 start_seq, end_seq, dup_sack, pcount,
1474 tcp_skb_timestamp_us(skb));
1475 tcp_rate_skb_delivered(sk, skb, state->rate);
1476
1477 if (skb == tp->lost_skb_hint)
1478 tp->lost_cnt_hint += pcount;
1479
1480 TCP_SKB_CB(prev)->end_seq += shifted;
1481 TCP_SKB_CB(skb)->seq += shifted;
1482
1483 tcp_skb_pcount_add(prev, pcount);
1484 WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
1485 tcp_skb_pcount_add(skb, -pcount);
1486
1487 /* When we're adding to gso_segs == 1, gso_size will be zero,
1488 * in theory this shouldn't be necessary but as long as DSACK
1489 * code can come after this skb later on it's better to keep
1490 * setting gso_size to something.
1491 */
1492 if (!TCP_SKB_CB(prev)->tcp_gso_size)
1493 TCP_SKB_CB(prev)->tcp_gso_size = mss;
1494
1495 /* CHECKME: To clear or not to clear? Mimics normal skb currently */
1496 if (tcp_skb_pcount(skb) <= 1)
1497 TCP_SKB_CB(skb)->tcp_gso_size = 0;
1498
1499 /* Difference in this won't matter, both ACKed by the same cumul. ACK */
1500 TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
1501
1502 if (skb->len > 0) {
1503 BUG_ON(!tcp_skb_pcount(skb));
1504 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
1505 return false;
1506 }
1507
1508 /* Whole SKB was eaten :-) */
1509
1510 if (skb == tp->retransmit_skb_hint)
1511 tp->retransmit_skb_hint = prev;
1512 if (skb == tp->lost_skb_hint) {
1513 tp->lost_skb_hint = prev;
1514 tp->lost_cnt_hint -= tcp_skb_pcount(prev);
1515 }
1516
1517 TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1518 TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
1519 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1520 TCP_SKB_CB(prev)->end_seq++;
1521
1522 if (skb == tcp_highest_sack(sk))
1523 tcp_advance_highest_sack(sk, skb);
1524
1525 tcp_skb_collapse_tstamp(prev, skb);
1526 if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
1527 TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;
1528
1529 tcp_rtx_queue_unlink_and_free(skb, sk);
1530
1531 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
1532
1533 return true;
1534 }
1535
1536 /* I wish gso_size had a saner initialization than
1537 * something-or-zero, which complicates things
1538 */
1539 static int tcp_skb_seglen(const struct sk_buff *skb)
1540 {
1541 return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
1542 }
1543
1544 /* Shifting pages past head area doesn't work */
1545 static int skb_can_shift(const struct sk_buff *skb)
1546 {
1547 return !skb_headlen(skb) && skb_is_nonlinear(skb);
1548 }
1549
1550 int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
1551 int pcount, int shiftlen)
1552 {
1553 /* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
1554 * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
1555 * to make sure not storing more than 65535 * 8 bytes per skb,
1556 * even if current MSS is bigger.
1557 */
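	/* Concretely, the checks below reject the shift if the merged skb
	 * would reach 65535 * 8 = 524280 bytes, or carry more than 65535
	 * GSO segments, since tcp_gso_segs is a 16-bit field.
	 */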
1558 if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
1559 return 0;
1560 if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
1561 return 0;
1562 return skb_shift(to, from, shiftlen);
1563 }
1564
1565 /* Try collapsing SACK blocks spanning across multiple skbs to a single
1566 * skb.
1567 */
1568 static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1569 struct tcp_sacktag_state *state,
1570 u32 start_seq, u32 end_seq,
1571 bool dup_sack)
1572 {
1573 struct tcp_sock *tp = tcp_sk(sk);
1574 struct sk_buff *prev;
1575 int mss;
1576 int pcount = 0;
1577 int len;
1578 int in_sack;
1579
1580 /* Normally R but no L won't result in plain S */
1581 if (!dup_sack &&
1582 (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
1583 goto fallback;
1584 if (!skb_can_shift(skb))
1585 goto fallback;
1586 /* This frame is about to be dropped (was ACKed). */
1587 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1588 goto fallback;
1589
1590 /* Can only happen with delayed DSACK + discard craziness */
1591 prev = skb_rb_prev(skb);
1592 if (!prev)
1593 goto fallback;
1594
1595 if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
1596 goto fallback;
1597
1598 if (!tcp_skb_can_collapse(prev, skb))
1599 goto fallback;
1600
1601 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1602 !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1603
1604 if (in_sack) {
1605 len = skb->len;
1606 pcount = tcp_skb_pcount(skb);
1607 mss = tcp_skb_seglen(skb);
1608
1609 /* TODO: Fix DSACKs to not fragment already SACKed and we can
1610 * drop this restriction as unnecessary
1611 */
1612 if (mss != tcp_skb_seglen(prev))
1613 goto fallback;
1614 } else {
1615 if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
1616 goto noop;
1617 /* CHECKME: Is this the non-MSS split case only? Note that this will
1618 * cause skipped skbs due to the loop advancing; the original code
1619 * has that behaviour too
1620 */
1621 if (tcp_skb_pcount(skb) <= 1)
1622 goto noop;
1623
1624 in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1625 if (!in_sack) {
1626 /* TODO: head merge to next could be attempted here
1627 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
1628 * though it might not be worth the additional hassle
1629 *
1630 * ...we can probably just fall back to what was done
1631 * previously. We could try merging non-SACKed ones
1632 * as well, but it probably isn't going to pay off
1633 * because later SACKs might split them again, and
1634 * it would make skb timestamp tracking a considerably
1635 * harder problem.
1636 */
1637 goto fallback;
1638 }
1639
1640 len = end_seq - TCP_SKB_CB(skb)->seq;
1641 BUG_ON(len < 0);
1642 BUG_ON(len > skb->len);
1643
1644 /* MSS boundaries should be honoured or else pcount will
1645 * severely break, even though it makes things a bit trickier.
1646 * Optimize the common case to avoid most of the divides
1647 */
1648 mss = tcp_skb_mss(skb);
1649
1650 /* TODO: Fix DSACKs to not fragment already SACKed and we can
1651 * drop this restriction as unnecessary
1652 */
1653 if (mss != tcp_skb_seglen(prev))
1654 goto fallback;
1655
1656 if (len == mss) {
1657 pcount = 1;
1658 } else if (len < mss) {
1659 goto noop;
1660 } else {
1661 pcount = len / mss;
1662 len = pcount * mss;
1663 }
1664 }
1665
1666 /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
1667 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
1668 goto fallback;
1669
1670 if (!tcp_skb_shift(prev, skb, pcount, len))
1671 goto fallback;
1672 if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
1673 goto out;
1674
1675 /* A filled hole allows collapsing with the next skb as well; this is
1676 * very useful when a hole-on-every-nth-skb pattern happens
1677 */
1678 skb = skb_rb_next(prev);
1679 if (!skb)
1680 goto out;
1681
1682 if (!skb_can_shift(skb) ||
1683 ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
1684 (mss != tcp_skb_seglen(skb)))
1685 goto out;
1686
1687 if (!tcp_skb_can_collapse(prev, skb))
1688 goto out;
1689 len = skb->len;
1690 pcount = tcp_skb_pcount(skb);
1691 if (tcp_skb_shift(prev, skb, pcount, len))
1692 tcp_shifted_skb(sk, prev, skb, state, pcount,
1693 len, mss, 0);
1694
1695 out:
1696 return prev;
1697
1698 noop:
1699 return skb;
1700
1701 fallback:
1702 NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
1703 return NULL;
1704 }
1705
1706 static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1707 struct tcp_sack_block *next_dup,
1708 struct tcp_sacktag_state *state,
1709 u32 start_seq, u32 end_seq,
1710 bool dup_sack_in)
1711 {
1712 struct tcp_sock *tp = tcp_sk(sk);
1713 struct sk_buff *tmp;
1714
1715 skb_rbtree_walk_from(skb) {
1716 int in_sack = 0;
1717 bool dup_sack = dup_sack_in;
1718
1719 /* queue is in-order => we can short-circuit the walk early */
1720 if (!before(TCP_SKB_CB(skb)->seq, end_seq))
1721 break;
1722
1723 if (next_dup &&
1724 before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
1725 in_sack = tcp_match_skb_to_sack(sk, skb,
1726 next_dup->start_seq,
1727 next_dup->end_seq);
1728 if (in_sack > 0)
1729 dup_sack = true;
1730 }
1731
1732 /* skb reference here is a bit tricky to get right, since
1733 * shifting can eat and free both this skb and the next,
1734 * so not even the _safe variant of the loop is enough.
1735 */
1736 if (in_sack <= 0) {
1737 tmp = tcp_shift_skb_data(sk, skb, state,
1738 start_seq, end_seq, dup_sack);
1739 if (tmp) {
1740 if (tmp != skb) {
1741 skb = tmp;
1742 continue;
1743 }
1744
1745 in_sack = 0;
1746 } else {
1747 in_sack = tcp_match_skb_to_sack(sk, skb,
1748 start_seq,
1749 end_seq);
1750 }
1751 }
1752
1753 if (unlikely(in_sack < 0))
1754 break;
1755
1756 if (in_sack) {
1757 TCP_SKB_CB(skb)->sacked =
1758 tcp_sacktag_one(sk,
1759 state,
1760 TCP_SKB_CB(skb)->sacked,
1761 TCP_SKB_CB(skb)->seq,
1762 TCP_SKB_CB(skb)->end_seq,
1763 dup_sack,
1764 tcp_skb_pcount(skb),
1765 tcp_skb_timestamp_us(skb));
1766 tcp_rate_skb_delivered(sk, skb, state->rate);
1767 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1768 list_del_init(&skb->tcp_tsorted_anchor);
1769
1770 if (!before(TCP_SKB_CB(skb)->seq,
1771 tcp_highest_sack_seq(tp)))
1772 tcp_advance_highest_sack(sk, skb);
1773 }
1774 }
1775 return skb;
1776 }
1777
1778 static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk, u32 seq)
1779 {
1780 struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node;
1781 struct sk_buff *skb;
1782
1783 while (*p) {
1784 parent = *p;
1785 skb = rb_to_skb(parent);
1786 if (before(seq, TCP_SKB_CB(skb)->seq)) {
1787 p = &parent->rb_left;
1788 continue;
1789 }
1790 if (!before(seq, TCP_SKB_CB(skb)->end_seq)) {
1791 p = &parent->rb_right;
1792 continue;
1793 }
1794 return skb;
1795 }
1796 return NULL;
1797 }
1798
1799 static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
1800 u32 skip_to_seq)
1801 {
1802 if (skb && after(TCP_SKB_CB(skb)->seq, skip_to_seq))
1803 return skb;
1804
1805 return tcp_sacktag_bsearch(sk, skip_to_seq);
1806 }
1807
1808 static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
1809 struct sock *sk,
1810 struct tcp_sack_block *next_dup,
1811 struct tcp_sacktag_state *state,
1812 u32 skip_to_seq)
1813 {
1814 if (!next_dup)
1815 return skb;
1816
1817 if (before(next_dup->start_seq, skip_to_seq)) {
1818 skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
1819 skb = tcp_sacktag_walk(skb, sk, NULL, state,
1820 next_dup->start_seq, next_dup->end_seq,
1821 1);
1822 }
1823
1824 return skb;
1825 }
1826
1827 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
1828 {
1829 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
1830 }
1831
1832 static int
1833 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
1834 u32 prior_snd_una, struct tcp_sacktag_state *state)
1835 {
1836 struct tcp_sock *tp = tcp_sk(sk);
1837 const unsigned char *ptr = (skb_transport_header(ack_skb) +
1838 TCP_SKB_CB(ack_skb)->sacked);
1839 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
1840 struct tcp_sack_block sp[TCP_NUM_SACKS];
1841 struct tcp_sack_block *cache;
1842 struct sk_buff *skb;
1843 int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
1844 int used_sacks;
1845 bool found_dup_sack = false;
1846 int i, j;
1847 int first_sack_index;
1848
1849 state->flag = 0;
1850 state->reord = tp->snd_nxt;
1851
1852 if (!tp->sacked_out)
1853 tcp_highest_sack_reset(sk);
1854
1855 found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
1856 num_sacks, prior_snd_una, state);
1857
1858 /* Eliminate too old ACKs, but take into
1859 * account more or less fresh ones; they can
1860 * contain valid SACK info.
1861 */
1862 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
1863 return 0;
1864
1865 if (!tp->packets_out)
1866 goto out;
1867
1868 used_sacks = 0;
1869 first_sack_index = 0;
1870 for (i = 0; i < num_sacks; i++) {
1871 bool dup_sack = !i && found_dup_sack;
1872
1873 sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
1874 sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
1875
1876 if (!tcp_is_sackblock_valid(tp, dup_sack,
1877 sp[used_sacks].start_seq,
1878 sp[used_sacks].end_seq)) {
1879 int mib_idx;
1880
1881 if (dup_sack) {
1882 if (!tp->undo_marker)
1883 mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
1884 else
1885 mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
1886 } else {
1887 /* Don't count olds caused by ACK reordering */
1888 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
1889 !after(sp[used_sacks].end_seq, tp->snd_una))
1890 continue;
1891 mib_idx = LINUX_MIB_TCPSACKDISCARD;
1892 }
1893
1894 NET_INC_STATS(sock_net(sk), mib_idx);
1895 if (i == 0)
1896 first_sack_index = -1;
1897 continue;
1898 }
1899
1900 /* Ignore very old stuff early */
1901 if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
1902 if (i == 0)
1903 first_sack_index = -1;
1904 continue;
1905 }
1906
1907 used_sacks++;
1908 }
1909
1910 /* order SACK blocks to allow in order walk of the retrans queue */
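	/* (A simple bubble sort is fine here: used_sacks is bounded by
	 * TCP_NUM_SACKS, i.e. at most 4 blocks.)
	 */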
1911 for (i = used_sacks - 1; i > 0; i--) {
1912 for (j = 0; j < i; j++) {
1913 if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
1914 swap(sp[j], sp[j + 1]);
1915
1916 /* Track where the first SACK block goes to */
1917 if (j == first_sack_index)
1918 first_sack_index = j + 1;
1919 }
1920 }
1921 }
1922
1923 state->mss_now = tcp_current_mss(sk);
1924 skb = NULL;
1925 i = 0;
1926
1927 if (!tp->sacked_out) {
1928 /* It's already past, so skip checking against it */
1929 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
1930 } else {
1931 cache = tp->recv_sack_cache;
1932 /* Skip empty blocks at the head of the cache */
1933 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
1934 !cache->end_seq)
1935 cache++;
1936 }
1937
1938 while (i < used_sacks) {
1939 u32 start_seq = sp[i].start_seq;
1940 u32 end_seq = sp[i].end_seq;
1941 bool dup_sack = (found_dup_sack && (i == first_sack_index));
1942 struct tcp_sack_block *next_dup = NULL;
1943
1944 if (found_dup_sack && ((i + 1) == first_sack_index))
1945 next_dup = &sp[i + 1];
1946
1947 /* Skip too early cached blocks */
1948 while (tcp_sack_cache_ok(tp, cache) &&
1949 !before(start_seq, cache->end_seq))
1950 cache++;
1951
1952 /* Can we skip some work by looking at recv_sack_cache? */
1953 if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
1954 after(end_seq, cache->start_seq)) {
1955
1956 /* Head todo? */
1957 if (before(start_seq, cache->start_seq)) {
1958 skb = tcp_sacktag_skip(skb, sk, start_seq);
1959 skb = tcp_sacktag_walk(skb, sk, next_dup,
1960 state,
1961 start_seq,
1962 cache->start_seq,
1963 dup_sack);
1964 }
1965
1966 /* Rest of the block already fully processed? */
1967 if (!after(end_seq, cache->end_seq))
1968 goto advance_sp;
1969
1970 skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
1971 state,
1972 cache->end_seq);
1973
1974 /* ...tail remains todo... */
1975 if (tcp_highest_sack_seq(tp) == cache->end_seq) {
1976 /* ...but better entrypoint exists! */
1977 skb = tcp_highest_sack(sk);
1978 if (!skb)
1979 break;
1980 cache++;
1981 goto walk;
1982 }
1983
1984 skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
1985 /* Check overlap against next cached too (past this one already) */
1986 cache++;
1987 continue;
1988 }
1989
1990 if (!before(start_seq, tcp_highest_sack_seq(tp))) {
1991 skb = tcp_highest_sack(sk);
1992 if (!skb)
1993 break;
1994 }
1995 skb = tcp_sacktag_skip(skb, sk, start_seq);
1996
1997 walk:
1998 skb = tcp_sacktag_walk(skb, sk, next_dup, state,
1999 start_seq, end_seq, dup_sack);
2000
2001 advance_sp:
2002 i++;
2003 }
2004
2005 /* Clear the head of the cache sack blocks so we can skip it next time */
2006 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
2007 tp->recv_sack_cache[i].start_seq = 0;
2008 tp->recv_sack_cache[i].end_seq = 0;
2009 }
2010 for (j = 0; j < used_sacks; j++)
2011 tp->recv_sack_cache[i++] = sp[j];
2012
2013 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker)
2014 tcp_check_sack_reordering(sk, state->reord, 0);
2015
2016 tcp_verify_left_out(tp);
2017 out:
2018
2019 #if FASTRETRANS_DEBUG > 0
2020 WARN_ON((int)tp->sacked_out < 0);
2021 WARN_ON((int)tp->lost_out < 0);
2022 WARN_ON((int)tp->retrans_out < 0);
2023 WARN_ON((int)tcp_packets_in_flight(tp) < 0);
2024 #endif
2025 return state->flag;
2026 }
2027
2028 /* Limits sacked_out so that its sum with lost_out is never larger than
2029 * packets_out. Returns false if sacked_out adjustment wasn't necessary.
2030 */
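/* For example: packets_out = 10, lost_out = 3, sacked_out = 9 gives
 * holes = 3; since 9 + 3 > 10, sacked_out is clamped to 10 - 3 = 7.
 */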
2031 static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
2032 {
2033 u32 holes;
2034
2035 holes = max(tp->lost_out, 1U);
2036 holes = min(holes, tp->packets_out);
2037
2038 if ((tp->sacked_out + holes) > tp->packets_out) {
2039 tp->sacked_out = tp->packets_out - holes;
2040 return true;
2041 }
2042 return false;
2043 }
2044
2045 /* If we receive more dupacks than expected while counting segments
2046 * under the assumption of no reordering, interpret this as reordering.
2047 * The only other possible reason is a bug in the receiver's TCP.
2048 */
2049 static void tcp_check_reno_reordering(struct sock *sk, const int addend)
2050 {
2051 struct tcp_sock *tp = tcp_sk(sk);
2052
2053 if (!tcp_limit_reno_sacked(tp))
2054 return;
2055
2056 tp->reordering = min_t(u32, tp->packets_out + addend,
2057 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
2058 tp->reord_seen++;
2059 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
2060 }
2061
2062 /* Emulate SACKs for SACKless connection: account for a new dupack. */
2063
2064 static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack)
2065 {
2066 if (num_dupack) {
2067 struct tcp_sock *tp = tcp_sk(sk);
2068 u32 prior_sacked = tp->sacked_out;
2069 s32 delivered;
2070
2071 tp->sacked_out += num_dupack;
2072 tcp_check_reno_reordering(sk, 0);
2073 delivered = tp->sacked_out - prior_sacked;
2074 if (delivered > 0)
2075 tcp_count_delivered(tp, delivered, ece_ack);
2076 tcp_verify_left_out(tp);
2077 }
2078 }
2079
2080 /* Account for ACK, ACKing some data in Reno Recovery phase. */
2081
2082 static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack)
2083 {
2084 struct tcp_sock *tp = tcp_sk(sk);
2085
2086 if (acked > 0) {
2087 /* One ACK acked hole. The rest eat duplicate ACKs. */
2088 tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1),
2089 ece_ack);
2090 if (acked - 1 >= tp->sacked_out)
2091 tp->sacked_out = 0;
2092 else
2093 tp->sacked_out -= acked - 1;
2094 }
2095 tcp_check_reno_reordering(sk, acked);
2096 tcp_verify_left_out(tp);
2097 }
2098
2099 static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
2100 {
2101 tp->sacked_out = 0;
2102 }
2103
2104 void tcp_clear_retrans(struct tcp_sock *tp)
2105 {
2106 tp->retrans_out = 0;
2107 tp->lost_out = 0;
2108 tp->undo_marker = 0;
2109 tp->undo_retrans = -1;
2110 tp->sacked_out = 0;
2111 }
2112
2113 static inline void tcp_init_undo(struct tcp_sock *tp)
2114 {
2115 tp->undo_marker = tp->snd_una;
2116
2117 /* Retransmission still in flight may cause DSACKs later. */
2118 /* First, account for regular retransmits in flight: */
2119 tp->undo_retrans = tp->retrans_out;
2120 /* Next, account for TLP retransmits in flight: */
2121 if (tp->tlp_high_seq && tp->tlp_retrans)
2122 tp->undo_retrans++;
2123 /* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
2124 if (!tp->undo_retrans)
2125 tp->undo_retrans = -1;
2126 }
2127
2128 static bool tcp_is_rack(const struct sock *sk)
2129 {
2130 return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
2131 TCP_RACK_LOSS_DETECTION;
2132 }
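/* i.e. RACK loss detection is in use when the TCP_RACK_LOSS_DETECTION bit
 * is set in the net.ipv4.tcp_recovery sysctl.
 */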
2133
2134 /* If we detect SACK reneging, forget all SACK information
2135 * and reset tags completely, otherwise preserve SACKs. If receiver
2136 * dropped its ofo queue, we will know this due to reneging detection.
2137 */
2138 static void tcp_timeout_mark_lost(struct sock *sk)
2139 {
2140 struct tcp_sock *tp = tcp_sk(sk);
2141 struct sk_buff *skb, *head;
2142 bool is_reneg; /* is receiver reneging on SACKs? */
2143
2144 head = tcp_rtx_queue_head(sk);
2145 is_reneg = head && (TCP_SKB_CB(head)->sacked & TCPCB_SACKED_ACKED);
2146 if (is_reneg) {
2147 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
2148 tp->sacked_out = 0;
2149 /* Mark SACK reneging until we recover from this loss event. */
2150 tp->is_sack_reneg = 1;
2151 } else if (tcp_is_reno(tp)) {
2152 tcp_reset_reno_sack(tp);
2153 }
2154
2155 skb = head;
2156 skb_rbtree_walk_from(skb) {
2157 if (is_reneg)
2158 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
2159 else if (tcp_is_rack(sk) && skb != head &&
2160 tcp_rack_skb_timeout(tp, skb, 0) > 0)
2161 continue; /* Don't mark recently sent ones lost yet */
2162 tcp_mark_skb_lost(sk, skb);
2163 }
2164 tcp_verify_left_out(tp);
2165 tcp_clear_all_retrans_hints(tp);
2166 }
2167
2168 /* Enter Loss state. */
2169 void tcp_enter_loss(struct sock *sk)
2170 {
2171 const struct inet_connection_sock *icsk = inet_csk(sk);
2172 struct tcp_sock *tp = tcp_sk(sk);
2173 struct net *net = sock_net(sk);
2174 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
2175 u8 reordering;
2176
2177 tcp_timeout_mark_lost(sk);
2178
2179 /* Reduce ssthresh if it has not yet been made inside this window. */
2180 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
2181 !after(tp->high_seq, tp->snd_una) ||
2182 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
2183 tp->prior_ssthresh = tcp_current_ssthresh(sk);
2184 tp->prior_cwnd = tcp_snd_cwnd(tp);
2185 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2186 tcp_ca_event(sk, CA_EVENT_LOSS);
2187 tcp_init_undo(tp);
2188 }
2189 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1);
2190 tp->snd_cwnd_cnt = 0;
2191 tp->snd_cwnd_stamp = tcp_jiffies32;
2192
2193 /* Timeout in disordered state after receiving substantial DUPACKs
2194 * suggests that the degree of reordering is over-estimated.
2195 */
2196 reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
2197 if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
2198 tp->sacked_out >= reordering)
2199 tp->reordering = min_t(unsigned int, tp->reordering,
2200 reordering);
2201
2202 tcp_set_ca_state(sk, TCP_CA_Loss);
2203 tp->high_seq = tp->snd_nxt;
2204 tp->tlp_high_seq = 0;
2205 tcp_ecn_queue_cwr(tp);
2206
2207 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
2208 * loss recovery is underway except recurring timeout(s) on
2209 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
2210 */
2211 tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
2212 (new_recovery || icsk->icsk_retransmits) &&
2213 !inet_csk(sk)->icsk_mtup.probe_size;
2214 }
2215
2216 /* If ACK arrived pointing to a remembered SACK, it means that our
2217 * remembered SACKs do not reflect real state of receiver i.e.
2218 * receiver _host_ is heavily congested (or buggy).
2219 *
2220 * To avoid big spurious retransmission bursts due to transient SACK
2221 * scoreboard oddities that look like reneging, we give the receiver a
2222 * little time (max(RTT/2, 10ms)) to send us some more ACKs that will
2223 * restore sanity to the SACK scoreboard. If the apparent reneging
2224 * persists until this RTO then we'll clear the SACK scoreboard.
2225 */
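/* Note: tp->srtt_us holds the smoothed RTT scaled by 8 (in usecs), so the
 * srtt_us >> 4 below is SRTT/2, matching the max(RTT/2, 10ms) delay
 * described above.
 */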
2226 static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
2227 {
2228 if (*ack_flag & FLAG_SACK_RENEGING &&
2229 *ack_flag & FLAG_SND_UNA_ADVANCED) {
2230 struct tcp_sock *tp = tcp_sk(sk);
2231 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
2232 msecs_to_jiffies(10));
2233
2234 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2235 delay, TCP_RTO_MAX);
2236 *ack_flag &= ~FLAG_SET_XMIT_TIMER;
2237 return true;
2238 }
2239 return false;
2240 }
2241
2242 /* Heuristics to calculate the number of duplicate ACKs. There's no dupACK
2243 * counter when SACK is enabled (without SACK, sacked_out is used for
2244 * that purpose).
2245 *
2246 * With reordering, holes may still be in flight, so RFC3517 recovery
2247 * uses pure sacked_out (total number of SACKed segments) even though
2248 * it violates the RFC that uses duplicate ACKs. Often these are equal,
2249 * but when e.g. out-of-window ACKs or packet duplication occurs,
2250 * they differ. Since neither occurs due to loss, TCP should really
2251 * ignore them.
2252 */
2253 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
2254 {
2255 return tp->sacked_out + 1;
2256 }
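/* Returning sacked_out + 1 makes the classic dupthresh test in
 * tcp_time_to_recover() (heuristics > tp->reordering) equivalent to
 * sacked_out >= tp->reordering.
 */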
2257
2258 /* Linux NewReno/SACK/ECN state machine.
2259 * --------------------------------------
2260 *
2261 * "Open" Normal state, no dubious events, fast path.
2262 * "Disorder" In all respects it is "Open",
2263 * but requires a bit more attention. It is entered when
2264 * we see some SACKs or dupacks. It is split off from "Open"
2265 * mainly to move some processing from the fast path to the slow one.
2266 * "CWR" CWND was reduced due to some Congestion Notification event.
2267 * It can be ECN, ICMP source quench, local device congestion.
2268 * "Recovery" CWND was reduced, we are fast-retransmitting.
2269 * "Loss" CWND was reduced due to RTO timeout or SACK reneging.
2270 *
2271 * tcp_fastretrans_alert() is entered:
2272 * - each incoming ACK, if state is not "Open"
2273 * - when arrived ACK is unusual, namely:
2274 * * SACK
2275 * * Duplicate ACK.
2276 * * ECN ECE.
2277 *
2278 * Counting packets in flight is pretty simple.
2279 *
2280 * in_flight = packets_out - left_out + retrans_out
2281 *
2282 * packets_out is SND.NXT-SND.UNA counted in packets.
2283 *
2284 * retrans_out is number of retransmitted segments.
2285 *
2286 * left_out is the number of segments that have left the network but are not ACKed yet.
2287 *
2288 * left_out = sacked_out + lost_out
2289 *
2290 * sacked_out: Packets which arrived at the receiver out of order
2291 * and hence were not cumulatively ACKed. With SACKs this number is
2292 * simply the amount of SACKed data. Even without SACKs
2293 * it is easy to give a pretty reliable estimate of this number
2294 * by counting duplicate ACKs.
2295 *
2296 * lost_out: Packets lost by the network. TCP has no explicit
2297 * "loss notification" feedback from the network (for now).
2298 * It means that this number can only be _guessed_.
2299 * Actually, it is the heuristic used to predict losses that
2300 * distinguishes the different algorithms.
2301 *
2302 * F.e. after RTO, when all the queue is considered as lost,
2303 * lost_out = packets_out and in_flight = retrans_out.
2304 *
2305 * Essentially, we now have a few algorithms for detecting
2306 * lost packets.
2307 *
2308 * If the receiver supports SACK:
2309 *
2310 * RFC6675/3517: It is the conventional algorithm. A packet is
2311 * considered lost if the number of higher sequence packets
2312 * SACKed is greater than or equal to the DUPACK threshold
2313 * (reordering). This is implemented in tcp_mark_head_lost and
2314 * tcp_update_scoreboard.
2315 *
2316 * RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm
2317 * (2017-) that checks timing instead of counting DUPACKs.
2318 * Essentially a packet is considered lost if it's not S/ACKed
2319 * after RTT + reordering_window, where both metrics are
2320 * dynamically measured and adjusted. This is implemented in
2321 * tcp_rack_mark_lost.
2322 *
2323 * If the receiver does not support SACK:
2324 *
2325 * NewReno (RFC6582): in Recovery we assume that one segment
2326 * is lost (classic Reno). While we are in Recovery and
2327 * a partial ACK arrives, we assume that one more packet
2328 * is lost (NewReno). This heuristic is the same in NewReno
2329 * and SACK.
2330 *
2331 * The really tricky (and carefully tuned) part of the algorithm
2332 * is hidden in the functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
2333 * The first determines the moment _when_ we should reduce CWND and,
2334 * hence, slow down forward transmission. In fact, it determines the moment
2335 * when we decide that a hole is caused by loss rather than by reordering.
2336 *
2337 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
2338 * holes, caused by lost packets.
2339 *
2340 * And the most logically complicated part of the algorithm is the undo
2341 * heuristics. We detect false retransmits due to both too-early
2342 * fast retransmit (reordering) and underestimated RTO by analyzing
2343 * timestamps and D-SACKs. When we detect that some segments were
2344 * retransmitted by mistake and CWND reduction was wrong, we undo
2345 * window reduction and abort recovery phase. This logic is hidden
2346 * inside several functions named tcp_try_undo_<something>.
2347 */
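/* A small worked example of the accounting above: with packets_out = 10,
 * sacked_out = 3, lost_out = 2 and retrans_out = 1 we get
 * left_out = 3 + 2 = 5 and in_flight = 10 - 5 + 1 = 6 segments still
 * presumed to be in the network.
 */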
2348
2349 /* This function decides when we should leave the Disorder state
2350 * and enter Recovery phase, reducing congestion window.
2351 *
2352 * Main question: may we further continue forward transmission
2353 * with the same cwnd?
2354 */
2355 static bool tcp_time_to_recover(struct sock *sk, int flag)
2356 {
2357 struct tcp_sock *tp = tcp_sk(sk);
2358
2359 /* Trick#1: The loss is proven. */
2360 if (tp->lost_out)
2361 return true;
2362
2363 /* Not-A-Trick#2 : Classic rule... */
2364 if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering)
2365 return true;
2366
2367 return false;
2368 }
2369
2370 /* Detect loss in event "A" above by marking the head of the queue as lost.
2371 * For RFC3517 SACK, a segment is considered lost if it
2372 * has at least tp->reordering SACKed segments above it; "packets" refers to
2373 * the maximum SACKed segments to pass before reaching this limit.
2374 */
2375 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2376 {
2377 struct tcp_sock *tp = tcp_sk(sk);
2378 struct sk_buff *skb;
2379 int cnt;
2380 /* Use SACK to deduce losses of new sequences sent during recovery */
2381 const u32 loss_high = tp->snd_nxt;
2382
2383 WARN_ON(packets > tp->packets_out);
2384 skb = tp->lost_skb_hint;
2385 if (skb) {
2386 /* Head already handled? */
2387 if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una))
2388 return;
2389 cnt = tp->lost_cnt_hint;
2390 } else {
2391 skb = tcp_rtx_queue_head(sk);
2392 cnt = 0;
2393 }
2394
2395 skb_rbtree_walk_from(skb) {
2396 /* TODO: do this better */
2397 /* this is not the most efficient way to do this... */
2398 tp->lost_skb_hint = skb;
2399 tp->lost_cnt_hint = cnt;
2400
2401 if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
2402 break;
2403
2404 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2405 cnt += tcp_skb_pcount(skb);
2406
2407 if (cnt > packets)
2408 break;
2409
2410 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST))
2411 tcp_mark_skb_lost(sk, skb);
2412
2413 if (mark_head)
2414 break;
2415 }
2416 tcp_verify_left_out(tp);
2417 }
2418
2419 /* Account newly detected lost packet(s) */
2420
2421 static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2422 {
2423 struct tcp_sock *tp = tcp_sk(sk);
2424
2425 if (tcp_is_sack(tp)) {
2426 int sacked_upto = tp->sacked_out - tp->reordering;
2427 if (sacked_upto >= 0)
2428 tcp_mark_head_lost(sk, sacked_upto, 0);
2429 else if (fast_rexmit)
2430 tcp_mark_head_lost(sk, 1, 1);
2431 }
2432 }
2433
2434 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
2435 {
2436 return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2437 before(tp->rx_opt.rcv_tsecr, when);
2438 }
2439
2440 /* The skb was spuriously retransmitted if the returned timestamp echo
2441 * reply precedes the skb transmission time
2442 */
2443 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
2444 const struct sk_buff *skb)
2445 {
2446 return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
2447 tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb));
2448 }
2449
2450 /* Nothing was retransmitted or returned timestamp is less
2451 * than timestamp of the first retransmission.
2452 */
2453 static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
2454 {
2455 return tp->retrans_stamp &&
2456 tcp_tsopt_ecr_before(tp, tp->retrans_stamp);
2457 }
2458
2459 /* Undo procedures. */
2460
2461 /* We can clear retrans_stamp when there are no retransmissions in the
2462 * window. It would seem that it is trivially available for us in
2463 * tp->retrans_out, however, that kind of assumption doesn't consider
2464 * what will happen if errors occur when sending a retransmission for the
2465 * second time. ...It could be that such a segment has only
2466 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2467 * the head skb is enough, except for some reneging corner cases that
2468 * are not worth the effort.
2469 *
2470 * Main reason for all this complexity is the fact that connection dying
2471 * time now depends on the validity of the retrans_stamp, in particular,
2472 * that successive retransmissions of a segment must not advance
2473 * retrans_stamp under any conditions.
2474 */
2475 static bool tcp_any_retrans_done(const struct sock *sk)
2476 {
2477 const struct tcp_sock *tp = tcp_sk(sk);
2478 struct sk_buff *skb;
2479
2480 if (tp->retrans_out)
2481 return true;
2482
2483 skb = tcp_rtx_queue_head(sk);
2484 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2485 return true;
2486
2487 return false;
2488 }
2489
2490 static void DBGUNDO(struct sock *sk, const char *msg)
2491 {
2492 #if FASTRETRANS_DEBUG > 1
2493 struct tcp_sock *tp = tcp_sk(sk);
2494 struct inet_sock *inet = inet_sk(sk);
2495
2496 if (sk->sk_family == AF_INET) {
2497 pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
2498 msg,
2499 &inet->inet_daddr, ntohs(inet->inet_dport),
2500 tcp_snd_cwnd(tp), tcp_left_out(tp),
2501 tp->snd_ssthresh, tp->prior_ssthresh,
2502 tp->packets_out);
2503 }
2504 #if IS_ENABLED(CONFIG_IPV6)
2505 else if (sk->sk_family == AF_INET6) {
2506 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
2507 msg,
2508 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
2509 tcp_snd_cwnd(tp), tcp_left_out(tp),
2510 tp->snd_ssthresh, tp->prior_ssthresh,
2511 tp->packets_out);
2512 }
2513 #endif
2514 #endif
2515 }
2516
2517 static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
2518 {
2519 struct tcp_sock *tp = tcp_sk(sk);
2520
2521 if (unmark_loss) {
2522 struct sk_buff *skb;
2523
2524 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
2525 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
2526 }
2527 tp->lost_out = 0;
2528 tcp_clear_all_retrans_hints(tp);
2529 }
2530
2531 if (tp->prior_ssthresh) {
2532 const struct inet_connection_sock *icsk = inet_csk(sk);
2533
2534 tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk));
2535
2536 if (tp->prior_ssthresh > tp->snd_ssthresh) {
2537 tp->snd_ssthresh = tp->prior_ssthresh;
2538 tcp_ecn_withdraw_cwr(tp);
2539 }
2540 }
2541 tp->snd_cwnd_stamp = tcp_jiffies32;
2542 tp->undo_marker = 0;
2543 tp->rack.advanced = 1; /* Force RACK to re-exam losses */
2544 }
2545
2546 static inline bool tcp_may_undo(const struct tcp_sock *tp)
2547 {
2548 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2549 }
2550
2551 static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
2552 {
2553 struct tcp_sock *tp = tcp_sk(sk);
2554
2555 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2556 /* Hold old state until something *above* high_seq
2557 * is ACKed. For Reno it is a MUST to prevent false
2558 * fast retransmits (RFC2582). SACK TCP is safe. */
2559 if (!tcp_any_retrans_done(sk))
2560 tp->retrans_stamp = 0;
2561 return true;
2562 }
2563 return false;
2564 }
2565
2566 /* People celebrate: "We love our President!" */
2567 static bool tcp_try_undo_recovery(struct sock *sk)
2568 {
2569 struct tcp_sock *tp = tcp_sk(sk);
2570
2571 if (tcp_may_undo(tp)) {
2572 int mib_idx;
2573
2574 /* Happy end! We did not retransmit anything
2575 * or our original transmission succeeded.
2576 */
2577 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
2578 tcp_undo_cwnd_reduction(sk, false);
2579 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
2580 mib_idx = LINUX_MIB_TCPLOSSUNDO;
2581 else
2582 mib_idx = LINUX_MIB_TCPFULLUNDO;
2583
2584 NET_INC_STATS(sock_net(sk), mib_idx);
2585 } else if (tp->rack.reo_wnd_persist) {
2586 tp->rack.reo_wnd_persist--;
2587 }
2588 if (tcp_is_non_sack_preventing_reopen(sk))
2589 return true;
2590 tcp_set_ca_state(sk, TCP_CA_Open);
2591 tp->is_sack_reneg = 0;
2592 return false;
2593 }
2594
2595 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
2596 static bool tcp_try_undo_dsack(struct sock *sk)
2597 {
2598 struct tcp_sock *tp = tcp_sk(sk);
2599
2600 if (tp->undo_marker && !tp->undo_retrans) {
2601 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH,
2602 tp->rack.reo_wnd_persist + 1);
2603 DBGUNDO(sk, "D-SACK");
2604 tcp_undo_cwnd_reduction(sk, false);
2605 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
2606 return true;
2607 }
2608 return false;
2609 }
2610
2611 /* Undo during loss recovery after partial ACK or using F-RTO. */
2612 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2613 {
2614 struct tcp_sock *tp = tcp_sk(sk);
2615
2616 if (frto_undo || tcp_may_undo(tp)) {
2617 tcp_undo_cwnd_reduction(sk, true);
2618
2619 DBGUNDO(sk, "partial loss");
2620 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
2621 if (frto_undo)
2622 NET_INC_STATS(sock_net(sk),
2623 LINUX_MIB_TCPSPURIOUSRTOS);
2624 inet_csk(sk)->icsk_retransmits = 0;
2625 if (tcp_is_non_sack_preventing_reopen(sk))
2626 return true;
2627 if (frto_undo || tcp_is_sack(tp)) {
2628 tcp_set_ca_state(sk, TCP_CA_Open);
2629 tp->is_sack_reneg = 0;
2630 }
2631 return true;
2632 }
2633 return false;
2634 }
2635
2636 /* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
2637 * It computes the number of packets to send (sndcnt) based on packets newly
2638 * delivered:
2639 * 1) If the packets in flight is larger than ssthresh, PRR spreads the
2640 * cwnd reductions across a full RTT.
2641 * 2) Otherwise PRR uses packet conservation to send as much as delivered.
2642 * But when SND_UNA is acked without further losses,
2643 * slow starts cwnd up to ssthresh to speed up the recovery.
2644 */
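/* Worked example of the proportional branch (delta < 0) below: with
 * prior_cwnd = 10, ssthresh = 5, prr_delivered = 4 and prr_out = 1,
 * sndcnt = (5 * 4 + 10 - 1) / 10 - 1 = 1, i.e. roughly one segment sent
 * for every two delivered, so cwnd approaches ssthresh over one RTT.
 */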
2645 static void tcp_init_cwnd_reduction(struct sock *sk)
2646 {
2647 struct tcp_sock *tp = tcp_sk(sk);
2648
2649 tp->high_seq = tp->snd_nxt;
2650 tp->tlp_high_seq = 0;
2651 tp->snd_cwnd_cnt = 0;
2652 tp->prior_cwnd = tcp_snd_cwnd(tp);
2653 tp->prr_delivered = 0;
2654 tp->prr_out = 0;
2655 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
2656 tcp_ecn_queue_cwr(tp);
2657 }
2658
2659 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag)
2660 {
2661 struct tcp_sock *tp = tcp_sk(sk);
2662 int sndcnt = 0;
2663 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
2664
2665 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
2666 return;
2667
2668 tp->prr_delivered += newly_acked_sacked;
2669 if (delta < 0) {
2670 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
2671 tp->prior_cwnd - 1;
2672 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
2673 } else {
2674 sndcnt = max_t(int, tp->prr_delivered - tp->prr_out,
2675 newly_acked_sacked);
2676 if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost)
2677 sndcnt++;
2678 sndcnt = min(delta, sndcnt);
2679 }
2680 /* Force a fast retransmit upon entering fast recovery */
2681 sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
2682 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt);
2683 }
2684
2685 static inline void tcp_end_cwnd_reduction(struct sock *sk)
2686 {
2687 struct tcp_sock *tp = tcp_sk(sk);
2688
2689 if (inet_csk(sk)->icsk_ca_ops->cong_control)
2690 return;
2691
2692 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
2693 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
2694 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
2695 tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
2696 tp->snd_cwnd_stamp = tcp_jiffies32;
2697 }
2698 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2699 }
2700
2701 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
2702 void tcp_enter_cwr(struct sock *sk)
2703 {
2704 struct tcp_sock *tp = tcp_sk(sk);
2705
2706 tp->prior_ssthresh = 0;
2707 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2708 tp->undo_marker = 0;
2709 tcp_init_cwnd_reduction(sk);
2710 tcp_set_ca_state(sk, TCP_CA_CWR);
2711 }
2712 }
2713 EXPORT_SYMBOL(tcp_enter_cwr);
2714
2715 static void tcp_try_keep_open(struct sock *sk)
2716 {
2717 struct tcp_sock *tp = tcp_sk(sk);
2718 int state = TCP_CA_Open;
2719
2720 if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
2721 state = TCP_CA_Disorder;
2722
2723 if (inet_csk(sk)->icsk_ca_state != state) {
2724 tcp_set_ca_state(sk, state);
2725 tp->high_seq = tp->snd_nxt;
2726 }
2727 }
2728
2729 static void tcp_try_to_open(struct sock *sk, int flag)
2730 {
2731 struct tcp_sock *tp = tcp_sk(sk);
2732
2733 tcp_verify_left_out(tp);
2734
2735 if (!tcp_any_retrans_done(sk))
2736 tp->retrans_stamp = 0;
2737
2738 if (flag & FLAG_ECE)
2739 tcp_enter_cwr(sk);
2740
2741 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
2742 tcp_try_keep_open(sk);
2743 }
2744 }
2745
2746 static void tcp_mtup_probe_failed(struct sock *sk)
2747 {
2748 struct inet_connection_sock *icsk = inet_csk(sk);
2749
2750 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
2751 icsk->icsk_mtup.probe_size = 0;
2752 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
2753 }
2754
2755 static void tcp_mtup_probe_success(struct sock *sk)
2756 {
2757 struct tcp_sock *tp = tcp_sk(sk);
2758 struct inet_connection_sock *icsk = inet_csk(sk);
2759 u64 val;
2760
2761 tp->prior_ssthresh = tcp_current_ssthresh(sk);
2762
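	/* Rescale cwnd from old-MSS segments to probe-size segments so that
	 * the amount of data in flight, in bytes, stays roughly unchanged.
	 */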
2763 val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache);
2764 do_div(val, icsk->icsk_mtup.probe_size);
2765 DEBUG_NET_WARN_ON_ONCE((u32)val != val);
2766 tcp_snd_cwnd_set(tp, max_t(u32, 1U, val));
2767
2768 tp->snd_cwnd_cnt = 0;
2769 tp->snd_cwnd_stamp = tcp_jiffies32;
2770 tp->snd_ssthresh = tcp_current_ssthresh(sk);
2771
2772 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
2773 icsk->icsk_mtup.probe_size = 0;
2774 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
2775 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
2776 }
2777
2778 /* Sometimes we deduce that packets have been dropped due to reasons other than
2779 * congestion, like path MTU reductions or failed client TFO attempts. In these
2780 * cases we call this function to retransmit as many packets as cwnd allows,
2781 * without reducing cwnd. Given that retransmits will set retrans_stamp to a
2782 * non-zero value (and may do so in a later calling context due to TSQ), we
2783 * also enter CA_Loss so that we track when all retransmitted packets are ACKed
2784 * and clear retrans_stamp when that happens (to ensure later recurring RTOs
2785 * are using the correct retrans_stamp and don't declare ETIMEDOUT
2786 * prematurely).
2787 */
2788 static void tcp_non_congestion_loss_retransmit(struct sock *sk)
2789 {
2790 const struct inet_connection_sock *icsk = inet_csk(sk);
2791 struct tcp_sock *tp = tcp_sk(sk);
2792
2793 if (icsk->icsk_ca_state != TCP_CA_Loss) {
2794 tp->high_seq = tp->snd_nxt;
2795 tp->snd_ssthresh = tcp_current_ssthresh(sk);
2796 tp->prior_ssthresh = 0;
2797 tp->undo_marker = 0;
2798 tcp_set_ca_state(sk, TCP_CA_Loss);
2799 }
2800 tcp_xmit_retransmit_queue(sk);
2801 }
2802
2803 /* Do a simple retransmit without using the backoff mechanisms in
2804 * tcp_timer. This is used for path mtu discovery.
2805 * The socket is already locked here.
2806 */
2807 void tcp_simple_retransmit(struct sock *sk)
2808 {
2809 struct tcp_sock *tp = tcp_sk(sk);
2810 struct sk_buff *skb;
2811 int mss;
2812
2813 /* A fastopen SYN request is stored as two separate packets within
2814 * the retransmit queue, this is done by tcp_send_syn_data().
2815 * As a result simply checking the MSS of the frames in the queue
2816 * will not work for the SYN packet.
2817 *
2818 * Being here is an indication of a path MTU issue, so we can
2819 * assume that the fastopen SYN was lost and just mark all the
2820 * frames in the retransmit queue as lost. We will use an MSS of
2821 * -1 to mark all frames as lost, otherwise compute the current MSS.
2822 */
2823 if (tp->syn_data && sk->sk_state == TCP_SYN_SENT)
2824 mss = -1;
2825 else
2826 mss = tcp_current_mss(sk);
2827
2828 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
2829 if (tcp_skb_seglen(skb) > mss)
2830 tcp_mark_skb_lost(sk, skb);
2831 }
2832
2833 tcp_clear_retrans_hints_partial(tp);
2834
2835 if (!tp->lost_out)
2836 return;
2837
2838 if (tcp_is_reno(tp))
2839 tcp_limit_reno_sacked(tp);
2840
2841 tcp_verify_left_out(tp);
2842
2843 /* Don't muck with the congestion window here.
2844 * The reason is that we do not increase the amount of _data_
2845 * in the network; only the units changed, and the effective
2846 * cwnd/ssthresh is really reduced now.
2847 */
2848 tcp_non_congestion_loss_retransmit(sk);
2849 }
2850 EXPORT_SYMBOL(tcp_simple_retransmit);
2851
2852 void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2853 {
2854 struct tcp_sock *tp = tcp_sk(sk);
2855 int mib_idx;
2856
2857 if (tcp_is_reno(tp))
2858 mib_idx = LINUX_MIB_TCPRENORECOVERY;
2859 else
2860 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
2861
2862 NET_INC_STATS(sock_net(sk), mib_idx);
2863
2864 tp->prior_ssthresh = 0;
2865 tcp_init_undo(tp);
2866
2867 if (!tcp_in_cwnd_reduction(sk)) {
2868 if (!ece_ack)
2869 tp->prior_ssthresh = tcp_current_ssthresh(sk);
2870 tcp_init_cwnd_reduction(sk);
2871 }
2872 tcp_set_ca_state(sk, TCP_CA_Recovery);
2873 }
2874
2875 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
2876 * recovered or spurious. Otherwise retransmits more on partial ACKs.
2877 */
2878 static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
2879 int *rexmit)
2880 {
2881 struct tcp_sock *tp = tcp_sk(sk);
2882 bool recovered = !before(tp->snd_una, tp->high_seq);
2883
2884 if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) &&
2885 tcp_try_undo_loss(sk, false))
2886 return;
2887
2888 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
2889 /* Step 3.b. A timeout is spurious if not all data are
2890 * lost, i.e., never-retransmitted data are (s)acked.
2891 */
2892 if ((flag & FLAG_ORIG_SACK_ACKED) &&
2893 tcp_try_undo_loss(sk, true))
2894 return;
2895
2896 if (after(tp->snd_nxt, tp->high_seq)) {
2897 if (flag & FLAG_DATA_SACKED || num_dupack)
2898 tp->frto = 0; /* Step 3.a. loss was real */
2899 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
2900 tp->high_seq = tp->snd_nxt;
2901 /* Step 2.b. Try send new data (but deferred until cwnd
2902 * is updated in tcp_ack()). Otherwise fall back to
2903 * the conventional recovery.
2904 */
2905 if (!tcp_write_queue_empty(sk) &&
2906 after(tcp_wnd_end(tp), tp->snd_nxt)) {
2907 *rexmit = REXMIT_NEW;
2908 return;
2909 }
2910 tp->frto = 0;
2911 }
2912 }
2913
2914 if (recovered) {
2915 /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
2916 tcp_try_undo_recovery(sk);
2917 return;
2918 }
2919 if (tcp_is_reno(tp)) {
2920 /* A Reno DUPACK means new data in F-RTO step 2.b above are
2921 * delivered. Lower inflight to clock out (re)transmissions.
2922 */
2923 if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
2924 tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE);
2925 else if (flag & FLAG_SND_UNA_ADVANCED)
2926 tcp_reset_reno_sack(tp);
2927 }
2928 *rexmit = REXMIT_LOST;
2929 }
2930
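/* True when the highest SACKed sequence is more than reordering * MSS above
 * SND.UNA, i.e. roughly tp->reordering full-sized segments have been SACKed
 * beyond the missing data: the sequence-space form of the dupthresh test.
 */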
2931 static bool tcp_force_fast_retransmit(struct sock *sk)
2932 {
2933 struct tcp_sock *tp = tcp_sk(sk);
2934
2935 return after(tcp_highest_sack_seq(tp),
2936 tp->snd_una + tp->reordering * tp->mss_cache);
2937 }
2938
2939 /* Undo during fast recovery after partial ACK. */
2940 static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
2941 bool *do_lost)
2942 {
2943 struct tcp_sock *tp = tcp_sk(sk);
2944
2945 if (tp->undo_marker && tcp_packet_delayed(tp)) {
2946 /* Plain luck! The hole was filled with a delayed
2947 * packet, rather than with a retransmit. Check reordering.
2948 */
2949 tcp_check_sack_reordering(sk, prior_snd_una, 1);
2950
2951 /* We are getting evidence that the reordering degree is higher
2952 * than we realized. If there are no retransmits out then we
2953 * can undo. Otherwise we clock out new packets but do not
2954 * mark more packets lost or retransmit more.
2955 */
2956 if (tp->retrans_out)
2957 return true;
2958
2959 if (!tcp_any_retrans_done(sk))
2960 tp->retrans_stamp = 0;
2961
2962 DBGUNDO(sk, "partial recovery");
2963 tcp_undo_cwnd_reduction(sk, true);
2964 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
2965 tcp_try_keep_open(sk);
2966 } else {
2967 /* Partial ACK arrived. Force fast retransmit. */
2968 *do_lost = tcp_force_fast_retransmit(sk);
2969 }
2970 return false;
2971 }
2972
2973 static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
2974 {
2975 struct tcp_sock *tp = tcp_sk(sk);
2976
2977 if (tcp_rtx_queue_empty(sk))
2978 return;
2979
2980 if (unlikely(tcp_is_reno(tp))) {
2981 tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED);
2982 } else if (tcp_is_rack(sk)) {
2983 u32 prior_retrans = tp->retrans_out;
2984
2985 if (tcp_rack_mark_lost(sk))
2986 *ack_flag &= ~FLAG_SET_XMIT_TIMER;
2987 if (prior_retrans > tp->retrans_out)
2988 *ack_flag |= FLAG_LOST_RETRANS;
2989 }
2990 }
2991
2992 /* Process an event, which can update packets-in-flight not trivially.
2993 * Main goal of this function is to calculate new estimate for left_out,
2994 * taking into account both packets sitting in receiver's buffer and
2995 * packets lost by network.
2996 *
2997 * Besides that it updates the congestion state when packet loss or ECN
2998 * is detected. But it does not reduce the cwnd, it is done by the
2999 * congestion control later.
3000 *
3001 * It does _not_ decide what to send, it is made in function
3002 * tcp_xmit_retransmit_queue().
3003 */
3004 static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
3005 int num_dupack, int *ack_flag, int *rexmit)
3006 {
3007 struct inet_connection_sock *icsk = inet_csk(sk);
3008 struct tcp_sock *tp = tcp_sk(sk);
3009 int fast_rexmit = 0, flag = *ack_flag;
3010 bool ece_ack = flag & FLAG_ECE;
3011 bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
3012 tcp_force_fast_retransmit(sk));
3013
3014 if (!tp->packets_out && tp->sacked_out)
3015 tp->sacked_out = 0;
3016
3017 /* Now state machine starts.
3018 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
3019 if (ece_ack)
3020 tp->prior_ssthresh = 0;
3021
3022 /* B. In all the states check for reneging SACKs. */
3023 if (tcp_check_sack_reneging(sk, ack_flag))
3024 return;
3025
3026 /* C. Check consistency of the current state. */
3027 tcp_verify_left_out(tp);
3028
3029 /* D. Check state exit conditions. State can be terminated
3030 * when high_seq is ACKed. */
3031 if (icsk->icsk_ca_state == TCP_CA_Open) {
3032 WARN_ON(tp->retrans_out != 0 && !tp->syn_data);
3033 tp->retrans_stamp = 0;
3034 } else if (!before(tp->snd_una, tp->high_seq)) {
3035 switch (icsk->icsk_ca_state) {
3036 case TCP_CA_CWR:
3037 /* CWR is to be held until something *above* high_seq
3038 * is ACKed, for the CWR bit to reach the receiver. */
3039 if (tp->snd_una != tp->high_seq) {
3040 tcp_end_cwnd_reduction(sk);
3041 tcp_set_ca_state(sk, TCP_CA_Open);
3042 }
3043 break;
3044
3045 case TCP_CA_Recovery:
3046 if (tcp_is_reno(tp))
3047 tcp_reset_reno_sack(tp);
3048 if (tcp_try_undo_recovery(sk))
3049 return;
3050 tcp_end_cwnd_reduction(sk);
3051 break;
3052 }
3053 }
3054
3055 /* E. Process state. */
3056 switch (icsk->icsk_ca_state) {
3057 case TCP_CA_Recovery:
3058 if (!(flag & FLAG_SND_UNA_ADVANCED)) {
3059 if (tcp_is_reno(tp))
3060 tcp_add_reno_sack(sk, num_dupack, ece_ack);
3061 } else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost))
3062 return;
3063
3064 if (tcp_try_undo_dsack(sk))
3065 tcp_try_to_open(sk, flag);
3066
3067 tcp_identify_packet_loss(sk, ack_flag);
3068 if (icsk->icsk_ca_state != TCP_CA_Recovery) {
3069 if (!tcp_time_to_recover(sk, flag))
3070 return;
3071 /* An undo reverts the recovery state. If loss is evident,
3072 * start a new recovery (e.g. reordering followed by loss).
3073 */
3074 tcp_enter_recovery(sk, ece_ack);
3075 }
3076 break;
3077 case TCP_CA_Loss:
3078 tcp_process_loss(sk, flag, num_dupack, rexmit);
3079 tcp_identify_packet_loss(sk, ack_flag);
3080 if (!(icsk->icsk_ca_state == TCP_CA_Open ||
3081 (*ack_flag & FLAG_LOST_RETRANS)))
3082 return;
3083 /* Change state if cwnd is undone or retransmits are lost */
3084 fallthrough;
3085 default:
3086 if (tcp_is_reno(tp)) {
3087 if (flag & FLAG_SND_UNA_ADVANCED)
3088 tcp_reset_reno_sack(tp);
3089 tcp_add_reno_sack(sk, num_dupack, ece_ack);
3090 }
3091
3092 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3093 tcp_try_undo_dsack(sk);
3094
3095 tcp_identify_packet_loss(sk, ack_flag);
3096 if (!tcp_time_to_recover(sk, flag)) {
3097 tcp_try_to_open(sk, flag);
3098 return;
3099 }
3100
3101 /* MTU probe failure: don't reduce cwnd */
3102 if (icsk->icsk_ca_state < TCP_CA_CWR &&
3103 icsk->icsk_mtup.probe_size &&
3104 tp->snd_una == tp->mtu_probe.probe_seq_start) {
3105 tcp_mtup_probe_failed(sk);
3106 /* Restores the reduction we did in tcp_mtup_probe() */
3107 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
3108 tcp_simple_retransmit(sk);
3109 return;
3110 }
3111
3112 /* Otherwise enter Recovery state */
3113 tcp_enter_recovery(sk, ece_ack);
3114 fast_rexmit = 1;
3115 }
3116
3117 if (!tcp_is_rack(sk) && do_lost)
3118 tcp_update_scoreboard(sk, fast_rexmit);
3119 *rexmit = REXMIT_LOST;
3120 }
3121
3122 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
3123 {
3124 u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
3125 struct tcp_sock *tp = tcp_sk(sk);
3126
3127 if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
3128 /* If the remote keeps returning delayed ACKs, eventually
3129 * the min filter would pick it up and overestimate the
3130 * prop. delay when it expires. Skip suspected delayed ACKs.
3131 */
3132 return;
3133 }
3134 minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
3135 rtt_us ? : jiffies_to_usecs(1));
3136 }
3137
3138 static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
3139 long seq_rtt_us, long sack_rtt_us,
3140 long ca_rtt_us, struct rate_sample *rs)
3141 {
3142 const struct tcp_sock *tp = tcp_sk(sk);
3143
3144 /* Prefer RTT measured from ACK's timing to TS-ECR. This is because
3145 * broken middle-boxes or peers may corrupt TS-ECR fields. But
3146 * Karn's algorithm forbids taking RTT if some retransmitted data
3147 * is acked (RFC6298).
3148 */
3149 if (seq_rtt_us < 0)
3150 seq_rtt_us = sack_rtt_us;
3151
3152 /* RTTM Rule: A TSecr value received in a segment is used to
3153 * update the averaged RTT measurement only if the segment
3154 * acknowledges some new data, i.e., only if it advances the
3155 * left edge of the send window.
3156 * See draft-ietf-tcplw-high-performance-00, section 3.3.
3157 */
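	/* Below, the TS-ECR delta is in TCP timestamp ticks; assuming
	 * TCP_TS_HZ ticks per second (1000, i.e. millisecond resolution),
	 * it is converted to usecs via USEC_PER_SEC / TCP_TS_HZ.
	 */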
3158 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
3159 flag & FLAG_ACKED) {
3160 u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
3161
3162 if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
3163 if (!delta)
3164 delta = 1;
3165 seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
3166 ca_rtt_us = seq_rtt_us;
3167 }
3168 }
3169 rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
3170 if (seq_rtt_us < 0)
3171 return false;
3172
3173 /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
3174 * always taken together with ACK, SACK, or TS-opts. Any negative
3175 * values will be skipped with the seq_rtt_us < 0 check above.
3176 */
3177 tcp_update_rtt_min(sk, ca_rtt_us, flag);
3178 tcp_rtt_estimator(sk, seq_rtt_us);
3179 tcp_set_rto(sk);
3180
3181 /* RFC6298: only reset backoff on valid RTT measurement. */
3182 inet_csk(sk)->icsk_backoff = 0;
3183 return true;
3184 }
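/* Worked example (illustrative, assuming TCP_TS_HZ is 1000, i.e. 1 ms
 * timestamp ticks): if the ACK echoes a TSecr sent 20 ticks ago, the
 * fallback above computes delta = 20 and
 * seq_rtt_us = 20 * (USEC_PER_SEC / TCP_TS_HZ) = 20000 us.
 * A delta of 0 is bumped to 1 tick so the estimator never sees a zero RTT.
 */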
3185
3186 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
3187 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
3188 {
3189 struct rate_sample rs;
3190 long rtt_us = -1L;
3191
3192 if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
3193 rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
3194
3195 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs);
3196 }
3197
3198
3199 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
3200 {
3201 const struct inet_connection_sock *icsk = inet_csk(sk);
3202
3203 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
3204 tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32;
3205 }
3206
3207 /* Restart timer after forward progress on connection.
3208 * RFC2988 recommends to restart timer to now+rto.
3209 */
3210 void tcp_rearm_rto(struct sock *sk)
3211 {
3212 const struct inet_connection_sock *icsk = inet_csk(sk);
3213 struct tcp_sock *tp = tcp_sk(sk);
3214
3215 /* If the retrans timer is currently being used by Fast Open
3216 * for SYN-ACK retrans purpose, stay put.
3217 */
3218 if (rcu_access_pointer(tp->fastopen_rsk))
3219 return;
3220
3221 if (!tp->packets_out) {
3222 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
3223 } else {
3224 u32 rto = inet_csk(sk)->icsk_rto;
3225 /* Offset the time elapsed after installing regular RTO */
3226 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
3227 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
3228 s64 delta_us = tcp_rto_delta_us(sk);
3229 /* delta_us may not be positive if the socket is locked
3230 * when the retrans timer fires and is rescheduled.
3231 */
3232 rto = usecs_to_jiffies(max_t(int, delta_us, 1));
3233 }
3234 tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
3235 TCP_RTO_MAX);
3236 }
3237 }
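/* Illustrative example (numbers are hypothetical): if icsk_rto is 300 ms
 * but the pending timer is a reorder or loss-probe timer and 120 ms have
 * already elapsed since the head skb was (re)transmitted, tcp_rto_delta_us()
 * returns roughly 180 ms, so the retransmit timer is re-armed to expire at
 * head timestamp + RTO rather than a full fresh RTO from now.
 */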
3238
3239 /* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
3240 static void tcp_set_xmit_timer(struct sock *sk)
3241 {
3242 if (!tcp_schedule_loss_probe(sk, true))
3243 tcp_rearm_rto(sk);
3244 }
3245
3246 /* If we get here, the whole TSO packet has not been acked. */
3247 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
3248 {
3249 struct tcp_sock *tp = tcp_sk(sk);
3250 u32 packets_acked;
3251
3252 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));
3253
3254 packets_acked = tcp_skb_pcount(skb);
3255 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3256 return 0;
3257 packets_acked -= tcp_skb_pcount(skb);
3258
3259 if (packets_acked) {
3260 BUG_ON(tcp_skb_pcount(skb) == 0);
3261 BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
3262 }
3263
3264 return packets_acked;
3265 }
3266
3267 static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3268 const struct sk_buff *ack_skb, u32 prior_snd_una)
3269 {
3270 const struct skb_shared_info *shinfo;
3271
3272 /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
3273 if (likely(!TCP_SKB_CB(skb)->txstamp_ack))
3274 return;
3275
3276 shinfo = skb_shinfo(skb);
3277 if (!before(shinfo->tskey, prior_snd_una) &&
3278 before(shinfo->tskey, tcp_sk(sk)->snd_una)) {
3279 tcp_skb_tsorted_save(skb) {
3280 __skb_tstamp_tx(skb, ack_skb, NULL, sk, SCM_TSTAMP_ACK);
3281 } tcp_skb_tsorted_restore(skb);
3282 }
3283 }
3284
3285 /* Remove acknowledged frames from the retransmission queue. If our packet
3286 * is before the ack sequence we can discard it as it's confirmed to have
3287 * arrived at the other end.
3288 */
3289 static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
3290 u32 prior_fack, u32 prior_snd_una,
3291 struct tcp_sacktag_state *sack, bool ece_ack)
3292 {
3293 const struct inet_connection_sock *icsk = inet_csk(sk);
3294 u64 first_ackt, last_ackt;
3295 struct tcp_sock *tp = tcp_sk(sk);
3296 u32 prior_sacked = tp->sacked_out;
3297 u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */
3298 struct sk_buff *skb, *next;
3299 bool fully_acked = true;
3300 long sack_rtt_us = -1L;
3301 long seq_rtt_us = -1L;
3302 long ca_rtt_us = -1L;
3303 u32 pkts_acked = 0;
3304 bool rtt_update;
3305 int flag = 0;
3306
3307 first_ackt = 0;
3308
3309 for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) {
3310 struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
3311 const u32 start_seq = scb->seq;
3312 u8 sacked = scb->sacked;
3313 u32 acked_pcount;
3314
3315 /* Determine how many packets and bytes were acked, whether TSO or not */
3316 if (after(scb->end_seq, tp->snd_una)) {
3317 if (tcp_skb_pcount(skb) == 1 ||
3318 !after(tp->snd_una, scb->seq))
3319 break;
3320
3321 acked_pcount = tcp_tso_acked(sk, skb);
3322 if (!acked_pcount)
3323 break;
3324 fully_acked = false;
3325 } else {
3326 acked_pcount = tcp_skb_pcount(skb);
3327 }
3328
3329 if (unlikely(sacked & TCPCB_RETRANS)) {
3330 if (sacked & TCPCB_SACKED_RETRANS)
3331 tp->retrans_out -= acked_pcount;
3332 flag |= FLAG_RETRANS_DATA_ACKED;
3333 } else if (!(sacked & TCPCB_SACKED_ACKED)) {
3334 last_ackt = tcp_skb_timestamp_us(skb);
3335 WARN_ON_ONCE(last_ackt == 0);
3336 if (!first_ackt)
3337 first_ackt = last_ackt;
3338
3339 if (before(start_seq, reord))
3340 reord = start_seq;
3341 if (!after(scb->end_seq, tp->high_seq))
3342 flag |= FLAG_ORIG_SACK_ACKED;
3343 }
3344
3345 if (sacked & TCPCB_SACKED_ACKED) {
3346 tp->sacked_out -= acked_pcount;
3347 } else if (tcp_is_sack(tp)) {
3348 tcp_count_delivered(tp, acked_pcount, ece_ack);
3349 if (!tcp_skb_spurious_retrans(tp, skb))
3350 tcp_rack_advance(tp, sacked, scb->end_seq,
3351 tcp_skb_timestamp_us(skb));
3352 }
3353 if (sacked & TCPCB_LOST)
3354 tp->lost_out -= acked_pcount;
3355
3356 tp->packets_out -= acked_pcount;
3357 pkts_acked += acked_pcount;
3358 tcp_rate_skb_delivered(sk, skb, sack->rate);
3359
3360 /* Initial outgoing SYN's get put onto the write_queue
3361 * just like anything else we transmit. It is not
3362 * true data, and if we misinform our callers that
3363 * this ACK acks real data, we will erroneously exit
3364 * connection startup slow start one packet too
3365 * quickly. This is severely frowned upon behavior.
3366 */
3367 if (likely(!(scb->tcp_flags & TCPHDR_SYN))) {
3368 flag |= FLAG_DATA_ACKED;
3369 } else {
3370 flag |= FLAG_SYN_ACKED;
3371 tp->retrans_stamp = 0;
3372 }
3373
3374 if (!fully_acked)
3375 break;
3376
3377 tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una);
3378
3379 next = skb_rb_next(skb);
3380 if (unlikely(skb == tp->retransmit_skb_hint))
3381 tp->retransmit_skb_hint = NULL;
3382 if (unlikely(skb == tp->lost_skb_hint))
3383 tp->lost_skb_hint = NULL;
3384 tcp_highest_sack_replace(sk, skb, next);
3385 tcp_rtx_queue_unlink_and_free(skb, sk);
3386 }
3387
3388 if (!skb)
3389 tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
3390
3391 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
3392 tp->snd_up = tp->snd_una;
3393
3394 if (skb) {
3395 tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una);
3396 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3397 flag |= FLAG_SACK_RENEGING;
3398 }
3399
3400 if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
3401 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
3402 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
3403
3404 if (pkts_acked == 1 && fully_acked && !prior_sacked &&
3405 (tp->snd_una - prior_snd_una) < tp->mss_cache &&
3406 sack->rate->prior_delivered + 1 == tp->delivered &&
3407 !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
3408 /* Conservatively mark a delayed ACK. It's typically
3409 * from a lone runt packet over the round trip to
3410 * a receiver w/o out-of-order or CE events.
3411 */
3412 flag |= FLAG_ACK_MAYBE_DELAYED;
3413 }
3414 }
3415 if (sack->first_sackt) {
3416 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
3417 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
3418 }
3419 rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
3420 ca_rtt_us, sack->rate);
3421
3422 if (flag & FLAG_ACKED) {
3423 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
3424 if (unlikely(icsk->icsk_mtup.probe_size &&
3425 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
3426 tcp_mtup_probe_success(sk);
3427 }
3428
3429 if (tcp_is_reno(tp)) {
3430 tcp_remove_reno_sacks(sk, pkts_acked, ece_ack);
3431
3432 /* If any of the cumulatively ACKed segments was
3433 * retransmitted, non-SACK case cannot confirm that
3434 * progress was due to original transmission due to
3435 * lack of TCPCB_SACKED_ACKED bits even if some of
3436 * the packets may have been never retransmitted.
3437 */
3438 if (flag & FLAG_RETRANS_DATA_ACKED)
3439 flag &= ~FLAG_ORIG_SACK_ACKED;
3440 } else {
3441 int delta;
3442
3443 /* Non-retransmitted hole got filled? That's reordering */
3444 if (before(reord, prior_fack))
3445 tcp_check_sack_reordering(sk, reord, 0);
3446
3447 delta = prior_sacked - tp->sacked_out;
3448 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
3449 }
3450 } else if (skb && rtt_update && sack_rtt_us >= 0 &&
3451 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
3452 tcp_skb_timestamp_us(skb))) {
3453 /* Do not re-arm the RTO if the SACK RTT is measured from data sent
3454 * after the head was last (re)transmitted. Otherwise the
3455 * timeout may continue to extend in loss recovery.
3456 */
3457 flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */
3458 }
3459
3460 if (icsk->icsk_ca_ops->pkts_acked) {
3461 struct ack_sample sample = { .pkts_acked = pkts_acked,
3462 .rtt_us = sack->rate->rtt_us };
3463
3464 sample.in_flight = tp->mss_cache *
3465 (tp->delivered - sack->rate->prior_delivered);
3466 icsk->icsk_ca_ops->pkts_acked(sk, &sample);
3467 }
3468
3469 #if FASTRETRANS_DEBUG > 0
3470 WARN_ON((int)tp->sacked_out < 0);
3471 WARN_ON((int)tp->lost_out < 0);
3472 WARN_ON((int)tp->retrans_out < 0);
3473 if (!tp->packets_out && tcp_is_sack(tp)) {
3474 icsk = inet_csk(sk);
3475 if (tp->lost_out) {
3476 pr_debug("Leak l=%u %d\n",
3477 tp->lost_out, icsk->icsk_ca_state);
3478 tp->lost_out = 0;
3479 }
3480 if (tp->sacked_out) {
3481 pr_debug("Leak s=%u %d\n",
3482 tp->sacked_out, icsk->icsk_ca_state);
3483 tp->sacked_out = 0;
3484 }
3485 if (tp->retrans_out) {
3486 pr_debug("Leak r=%u %d\n",
3487 tp->retrans_out, icsk->icsk_ca_state);
3488 tp->retrans_out = 0;
3489 }
3490 }
3491 #endif
3492 return flag;
3493 }
3494
3495 static void tcp_ack_probe(struct sock *sk)
3496 {
3497 struct inet_connection_sock *icsk = inet_csk(sk);
3498 struct sk_buff *head = tcp_send_head(sk);
3499 const struct tcp_sock *tp = tcp_sk(sk);
3500
3501 /* Was it a usable window open? */
3502 if (!head)
3503 return;
3504 if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
3505 icsk->icsk_backoff = 0;
3506 icsk->icsk_probes_tstamp = 0;
3507 inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
3508 /* Socket must be woken up by a subsequent tcp_data_snd_check().
3509 * This function is not for random use!
3510 */
3511 } else {
3512 unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
3513
3514 when = tcp_clamp_probe0_to_user_timeout(sk, when);
3515 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
3516 }
3517 }
3518
3519 static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
3520 {
3521 return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3522 inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
3523 }
3524
3525 /* Decide whether to run the increase function of congestion control. */
3526 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
3527 {
3528 /* If reordering is high then always grow cwnd whenever data is
3529 * delivered regardless of its ordering. Otherwise stay conservative
3530 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
3531 * new SACK or ECE mark may first advance cwnd here and later reduce
3532 * cwnd in tcp_fastretrans_alert() based on more states.
3533 */
3534 if (tcp_sk(sk)->reordering >
3535 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
3536 return flag & FLAG_FORWARD_PROGRESS;
3537
3538 return flag & FLAG_DATA_ACKED;
3539 }
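/* Illustrative example: with the sysctl default of 3 for tcp_reordering,
 * a flow whose tp->reordering has grown to, say, 30 after observed
 * reordering raises cwnd on any forward progress (data cumulatively ACKed
 * or newly SACKed), whereas a flow with low reordering only raises cwnd
 * when new data is cumulatively ACKed in order.
 */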
3540
3541 /* The "ultimate" congestion control function that aims to replace the rigid
3542 * cwnd increase and decrease control (tcp_cong_avoid,tcp_*cwnd_reduction).
3543 * It's called toward the end of processing an ACK with precise rate
3544 * information. All transmission or retransmission are delayed afterwards.
3545 */
3546 static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
3547 int flag, const struct rate_sample *rs)
3548 {
3549 const struct inet_connection_sock *icsk = inet_csk(sk);
3550
3551 if (icsk->icsk_ca_ops->cong_control) {
3552 icsk->icsk_ca_ops->cong_control(sk, rs);
3553 return;
3554 }
3555
3556 if (tcp_in_cwnd_reduction(sk)) {
3557 /* Reduce cwnd if state mandates */
3558 tcp_cwnd_reduction(sk, acked_sacked, rs->losses, flag);
3559 } else if (tcp_may_raise_cwnd(sk, flag)) {
3560 /* Advance cwnd if state allows */
3561 tcp_cong_avoid(sk, ack, acked_sacked);
3562 }
3563 tcp_update_pacing_rate(sk);
3564 }
3565
3566 /* Check that window update is acceptable.
3567 * The function assumes that snd_una<=ack<=snd_next.
3568 */
3569 static inline bool tcp_may_update_window(const struct tcp_sock *tp,
3570 const u32 ack, const u32 ack_seq,
3571 const u32 nwin)
3572 {
3573 return after(ack, tp->snd_una) ||
3574 after(ack_seq, tp->snd_wl1) ||
3575 (ack_seq == tp->snd_wl1 && (nwin > tp->snd_wnd || !nwin));
3576 }
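/* Worked example (illustrative values): with snd_una = 1000, snd_wl1 = 500
 * and snd_wnd = 10000, an ACK with ack = 1500 passes the first test (it
 * advances snd_una); a pure window update with ack = 1000 and ack_seq = 800
 * passes the second test (newer ack_seq); and a segment with
 * ack_seq == snd_wl1 is only accepted if it grows the window or announces
 * a zero window.
 */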
3577
3578 /* If we update tp->snd_una, also update tp->bytes_acked */
3579 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
3580 {
3581 u32 delta = ack - tp->snd_una;
3582
3583 sock_owned_by_me((struct sock *)tp);
3584 tp->bytes_acked += delta;
3585 tp->snd_una = ack;
3586 }
3587
3588 /* If we update tp->rcv_nxt, also update tp->bytes_received */
3589 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
3590 {
3591 u32 delta = seq - tp->rcv_nxt;
3592
3593 sock_owned_by_me((struct sock *)tp);
3594 tp->bytes_received += delta;
3595 WRITE_ONCE(tp->rcv_nxt, seq);
3596 }
3597
3598 /* Update our send window.
3599 *
3600 * The window update algorithm described in RFC793/RFC1122 (used in linux-2.2
3601 * and in FreeBSD; NetBSD's is even worse) is wrong.
3602 */
3603 static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
3604 u32 ack_seq)
3605 {
3606 struct tcp_sock *tp = tcp_sk(sk);
3607 int flag = 0;
3608 u32 nwin = ntohs(tcp_hdr(skb)->window);
3609
3610 if (likely(!tcp_hdr(skb)->syn))
3611 nwin <<= tp->rx_opt.snd_wscale;
3612
3613 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
3614 flag |= FLAG_WIN_UPDATE;
3615 tcp_update_wl(tp, ack_seq);
3616
3617 if (tp->snd_wnd != nwin) {
3618 tp->snd_wnd = nwin;
3619
3620 /* Note, it is the only place, where
3621 * fast path is recovered for sending TCP.
3622 */
3623 tp->pred_flags = 0;
3624 tcp_fast_path_check(sk);
3625
3626 if (!tcp_write_queue_empty(sk))
3627 tcp_slow_start_after_idle_check(sk);
3628
3629 if (nwin > tp->max_window) {
3630 tp->max_window = nwin;
3631 tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
3632 }
3633 }
3634 }
3635
3636 tcp_snd_una_update(tp, ack);
3637
3638 return flag;
3639 }
3640
3641 static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
3642 u32 *last_oow_ack_time)
3643 {
3644 /* Paired with the WRITE_ONCE() in this function. */
3645 u32 val = READ_ONCE(*last_oow_ack_time);
3646
3647 if (val) {
3648 s32 elapsed = (s32)(tcp_jiffies32 - val);
3649
3650 if (0 <= elapsed &&
3651 elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
3652 NET_INC_STATS(net, mib_idx);
3653 return true; /* rate-limited: don't send yet! */
3654 }
3655 }
3656
3657 /* Paired with the prior READ_ONCE() and with itself,
3658 * as we might be lockless.
3659 */
3660 WRITE_ONCE(*last_oow_ack_time, tcp_jiffies32);
3661
3662 return false; /* not rate-limited: go ahead, send dupack now! */
3663 }
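/* Illustrative example (assuming the default tcp_invalid_ratelimit of
 * 500 ms): after one dupack/challenge has been sent for this
 * *last_oow_ack_time slot, further out-of-window triggers arriving within
 * the next 500 ms only increment the MIB counter and are suppressed; the
 * first trigger after that window refreshes the timestamp and is allowed.
 */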
3664
3665 /* Return true if we're currently rate-limiting out-of-window ACKs and
3666 * thus shouldn't send a dupack right now. We rate-limit dupacks in
3667 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
3668 * attacks that send repeated SYNs or ACKs for the same connection. To
3669 * do this, we do not send a duplicate SYNACK or ACK if the remote
3670 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
3671 */
3672 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
3673 int mib_idx, u32 *last_oow_ack_time)
3674 {
3675 /* Data packets without SYNs are not likely part of an ACK loop. */
3676 if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
3677 !tcp_hdr(skb)->syn)
3678 return false;
3679
3680 return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
3681 }
3682
3683 /* RFC 5961 7 [ACK Throttling] */
3684 static void tcp_send_challenge_ack(struct sock *sk)
3685 {
3686 struct tcp_sock *tp = tcp_sk(sk);
3687 struct net *net = sock_net(sk);
3688 u32 count, now, ack_limit;
3689
3690 /* First check our per-socket dupack rate limit. */
3691 if (__tcp_oow_rate_limited(net,
3692 LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
3693 &tp->last_oow_ack_time))
3694 return;
3695
3696 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
3697 if (ack_limit == INT_MAX)
3698 goto send_ack;
3699
3700 /* Then check host-wide RFC 5961 rate limit. */
3701 now = jiffies / HZ;
3702 if (now != READ_ONCE(net->ipv4.tcp_challenge_timestamp)) {
3703 u32 half = (ack_limit + 1) >> 1;
3704
3705 WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
3706 WRITE_ONCE(net->ipv4.tcp_challenge_count,
3707 get_random_u32_inclusive(half, ack_limit + half - 1));
3708 }
3709 count = READ_ONCE(net->ipv4.tcp_challenge_count);
3710 if (count > 0) {
3711 WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1);
3712 send_ack:
3713 NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK);
3714 tcp_send_ack(sk);
3715 }
3716 }
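/* Worked example (hypothetical limit): with tcp_challenge_ack_limit set
 * to 1000, half = (1000 + 1) >> 1 = 500, so at each new second the
 * host-wide budget is re-armed to a random value in [500, 1499].  Every
 * challenge ACK sent decrements the budget; once it reaches zero, further
 * challenge ACKs are suppressed until the next one-second window.  A
 * limit of INT_MAX bypasses the global budget entirely (see above).
 */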
3717
3718 static void tcp_store_ts_recent(struct tcp_sock *tp)
3719 {
3720 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
3721 tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
3722 }
3723
3724 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
3725 {
3726 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
3727 /* PAWS bug workaround wrt. ACK frames, the PAWS discard
3728 * extra check below makes sure this can only happen
3729 * for pure ACK frames. -DaveM
3730 *
3731 * Not only that; it also occurs for expired timestamps.
3732 */
3733
3734 if (tcp_paws_check(&tp->rx_opt, 0))
3735 tcp_store_ts_recent(tp);
3736 }
3737 }
3738
3739 /* This routine deals with acks during a TLP episode and ends an episode by
3740 * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
3741 */
3742 static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
3743 {
3744 struct tcp_sock *tp = tcp_sk(sk);
3745
3746 if (before(ack, tp->tlp_high_seq))
3747 return;
3748
3749 if (!tp->tlp_retrans) {
3750 /* TLP of new data has been acknowledged */
3751 tp->tlp_high_seq = 0;
3752 } else if (flag & FLAG_DSACK_TLP) {
3753 /* This DSACK means original and TLP probe arrived; no loss */
3754 tp->tlp_high_seq = 0;
3755 } else if (after(ack, tp->tlp_high_seq)) {
3756 /* ACK advances: there was a loss, so reduce cwnd. Reset
3757 * tlp_high_seq in tcp_init_cwnd_reduction()
3758 */
3759 tcp_init_cwnd_reduction(sk);
3760 tcp_set_ca_state(sk, TCP_CA_CWR);
3761 tcp_end_cwnd_reduction(sk);
3762 tcp_try_keep_open(sk);
3763 NET_INC_STATS(sock_net(sk),
3764 LINUX_MIB_TCPLOSSPROBERECOVERY);
3765 } else if (!(flag & (FLAG_SND_UNA_ADVANCED |
3766 FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
3767 /* Pure dupack: original and TLP probe arrived; no loss */
3768 tp->tlp_high_seq = 0;
3769 }
3770 }
3771
3772 static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
3773 {
3774 const struct inet_connection_sock *icsk = inet_csk(sk);
3775
3776 if (icsk->icsk_ca_ops->in_ack_event)
3777 icsk->icsk_ca_ops->in_ack_event(sk, flags);
3778 }
3779
3780 /* Congestion control has updated the cwnd already. So if we're in
3781 * loss recovery then now we do any new sends (for FRTO) or
3782 * retransmits (for CA_Loss or CA_Recovery) that make sense.
3783 */
3784 static void tcp_xmit_recovery(struct sock *sk, int rexmit)
3785 {
3786 struct tcp_sock *tp = tcp_sk(sk);
3787
3788 if (rexmit == REXMIT_NONE || sk->sk_state == TCP_SYN_SENT)
3789 return;
3790
3791 if (unlikely(rexmit == REXMIT_NEW)) {
3792 __tcp_push_pending_frames(sk, tcp_current_mss(sk),
3793 TCP_NAGLE_OFF);
3794 if (after(tp->snd_nxt, tp->high_seq))
3795 return;
3796 tp->frto = 0;
3797 }
3798 tcp_xmit_retransmit_queue(sk);
3799 }
3800
3801 /* Returns the number of packets newly acked or sacked by the current ACK */
3802 static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag)
3803 {
3804 const struct net *net = sock_net(sk);
3805 struct tcp_sock *tp = tcp_sk(sk);
3806 u32 delivered;
3807
3808 delivered = tp->delivered - prior_delivered;
3809 NET_ADD_STATS(net, LINUX_MIB_TCPDELIVERED, delivered);
3810 if (flag & FLAG_ECE)
3811 NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, delivered);
3812
3813 return delivered;
3814 }
3815
3816 /* This routine deals with incoming acks, but not outgoing ones. */
3817 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3818 {
3819 struct inet_connection_sock *icsk = inet_csk(sk);
3820 struct tcp_sock *tp = tcp_sk(sk);
3821 struct tcp_sacktag_state sack_state;
3822 struct rate_sample rs = { .prior_delivered = 0 };
3823 u32 prior_snd_una = tp->snd_una;
3824 bool is_sack_reneg = tp->is_sack_reneg;
3825 u32 ack_seq = TCP_SKB_CB(skb)->seq;
3826 u32 ack = TCP_SKB_CB(skb)->ack_seq;
3827 int num_dupack = 0;
3828 int prior_packets = tp->packets_out;
3829 u32 delivered = tp->delivered;
3830 u32 lost = tp->lost;
3831 int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
3832 u32 prior_fack;
3833
3834 sack_state.first_sackt = 0;
3835 sack_state.rate = &rs;
3836 sack_state.sack_delivered = 0;
3837
3838 /* We very likely will need to access rtx queue. */
3839 prefetch(sk->tcp_rtx_queue.rb_node);
3840
3841 /* If the ack is older than previous acks
3842 * then we can probably ignore it.
3843 */
3844 if (before(ack, prior_snd_una)) {
3845 u32 max_window;
3846
3847 /* do not accept ACK for bytes we never sent. */
3848 max_window = min_t(u64, tp->max_window, tp->bytes_acked);
3849 /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
3850 if (before(ack, prior_snd_una - max_window)) {
3851 if (!(flag & FLAG_NO_CHALLENGE_ACK))
3852 tcp_send_challenge_ack(sk);
3853 return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
3854 }
3855 goto old_ack;
3856 }
3857
3858 /* If the ack includes data we haven't sent yet, discard
3859 * this segment (RFC793 Section 3.9).
3860 */
3861 if (after(ack, tp->snd_nxt))
3862 return -SKB_DROP_REASON_TCP_ACK_UNSENT_DATA;
3863
3864 if (after(ack, prior_snd_una)) {
3865 flag |= FLAG_SND_UNA_ADVANCED;
3866 icsk->icsk_retransmits = 0;
3867
3868 #if IS_ENABLED(CONFIG_TLS_DEVICE)
3869 if (static_branch_unlikely(&clean_acked_data_enabled.key))
3870 if (icsk->icsk_clean_acked)
3871 icsk->icsk_clean_acked(sk, ack);
3872 #endif
3873 }
3874
3875 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
3876 rs.prior_in_flight = tcp_packets_in_flight(tp);
3877
3878 /* ts_recent update must be made after we are sure that the packet
3879 * is in window.
3880 */
3881 if (flag & FLAG_UPDATE_TS_RECENT)
3882 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
3883
3884 if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) ==
3885 FLAG_SND_UNA_ADVANCED) {
3886 /* Window is constant, pure forward advance.
3887 * No more checks are required.
3888 * Note, we use the fact that SND.UNA>=SND.WL2.
3889 */
3890 tcp_update_wl(tp, ack_seq);
3891 tcp_snd_una_update(tp, ack);
3892 flag |= FLAG_WIN_UPDATE;
3893
3894 tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
3895
3896 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
3897 } else {
3898 u32 ack_ev_flags = CA_ACK_SLOWPATH;
3899
3900 if (ack_seq != TCP_SKB_CB(skb)->end_seq)
3901 flag |= FLAG_DATA;
3902 else
3903 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
3904
3905 flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
3906
3907 if (TCP_SKB_CB(skb)->sacked)
3908 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
3909 &sack_state);
3910
3911 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
3912 flag |= FLAG_ECE;
3913 ack_ev_flags |= CA_ACK_ECE;
3914 }
3915
3916 if (sack_state.sack_delivered)
3917 tcp_count_delivered(tp, sack_state.sack_delivered,
3918 flag & FLAG_ECE);
3919
3920 if (flag & FLAG_WIN_UPDATE)
3921 ack_ev_flags |= CA_ACK_WIN_UPDATE;
3922
3923 tcp_in_ack_event(sk, ack_ev_flags);
3924 }
3925
3926 /* This is a deviation from RFC3168 since it states that:
3927 * "When the TCP data sender is ready to set the CWR bit after reducing
3928 * the congestion window, it SHOULD set the CWR bit only on the first
3929 * new data packet that it transmits."
3930 * We accept CWR on pure ACKs to be more robust
3931 * with widely-deployed TCP implementations that do this.
3932 */
3933 tcp_ecn_accept_cwr(sk, skb);
3934
3935 /* We passed data and got it acked, remove any soft error
3936 * log. Something worked...
3937 */
3938 WRITE_ONCE(sk->sk_err_soft, 0);
3939 icsk->icsk_probes_out = 0;
3940 tp->rcv_tstamp = tcp_jiffies32;
3941 if (!prior_packets)
3942 goto no_queue;
3943
3944 /* See if we can take anything off of the retransmit queue. */
3945 flag |= tcp_clean_rtx_queue(sk, skb, prior_fack, prior_snd_una,
3946 &sack_state, flag & FLAG_ECE);
3947
3948 tcp_rack_update_reo_wnd(sk, &rs);
3949
3950 if (tp->tlp_high_seq)
3951 tcp_process_tlp_ack(sk, ack, flag);
3952
3953 if (tcp_ack_is_dubious(sk, flag)) {
3954 if (!(flag & (FLAG_SND_UNA_ADVANCED |
3955 FLAG_NOT_DUP | FLAG_DSACKING_ACK))) {
3956 num_dupack = 1;
3957 /* Consider if pure acks were aggregated in tcp_add_backlog() */
3958 if (!(flag & FLAG_DATA))
3959 num_dupack = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
3960 }
3961 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
3962 &rexmit);
3963 }
3964
3965 /* If needed, reset the TLP/RTO timer when RACK did not set it. */
3966 if (flag & FLAG_SET_XMIT_TIMER)
3967 tcp_set_xmit_timer(sk);
3968
3969 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
3970 sk_dst_confirm(sk);
3971
3972 delivered = tcp_newly_delivered(sk, delivered, flag);
3973 lost = tp->lost - lost; /* freshly marked lost */
3974 rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
3975 tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
3976 tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
3977 tcp_xmit_recovery(sk, rexmit);
3978 return 1;
3979
3980 no_queue:
3981 /* If data was DSACKed, see if we can undo a cwnd reduction. */
3982 if (flag & FLAG_DSACKING_ACK) {
3983 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
3984 &rexmit);
3985 tcp_newly_delivered(sk, delivered, flag);
3986 }
3987 /* If this ack opens up a zero window, clear backoff. It was
3988 * being used to time the probes, and is probably far higher than
3989 * it needs to be for normal retransmission.
3990 */
3991 tcp_ack_probe(sk);
3992
3993 if (tp->tlp_high_seq)
3994 tcp_process_tlp_ack(sk, ack, flag);
3995 return 1;
3996
3997 old_ack:
3998 /* If data was SACKed, tag it and see if we should send more data.
3999 * If data was DSACKed, see if we can undo a cwnd reduction.
4000 */
4001 if (TCP_SKB_CB(skb)->sacked) {
4002 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
4003 &sack_state);
4004 tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
4005 &rexmit);
4006 tcp_newly_delivered(sk, delivered, flag);
4007 tcp_xmit_recovery(sk, rexmit);
4008 }
4009
4010 return 0;
4011 }
4012
4013 static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
4014 bool syn, struct tcp_fastopen_cookie *foc,
4015 bool exp_opt)
4016 {
4017 /* Valid only in SYN or SYN-ACK with an even length. */
4018 if (!foc || !syn || len < 0 || (len & 1))
4019 return;
4020
4021 if (len >= TCP_FASTOPEN_COOKIE_MIN &&
4022 len <= TCP_FASTOPEN_COOKIE_MAX)
4023 memcpy(foc->val, cookie, len);
4024 else if (len != 0)
4025 len = -1;
4026 foc->len = len;
4027 foc->exp = exp_opt;
4028 }
4029
4030 static bool smc_parse_options(const struct tcphdr *th,
4031 struct tcp_options_received *opt_rx,
4032 const unsigned char *ptr,
4033 int opsize)
4034 {
4035 #if IS_ENABLED(CONFIG_SMC)
4036 if (static_branch_unlikely(&tcp_have_smc)) {
4037 if (th->syn && !(opsize & 1) &&
4038 opsize >= TCPOLEN_EXP_SMC_BASE &&
4039 get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC) {
4040 opt_rx->smc_ok = 1;
4041 return true;
4042 }
4043 }
4044 #endif
4045 return false;
4046 }
4047
4048 /* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped
4049 * value on success.
4050 */
4051 u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
4052 {
4053 const unsigned char *ptr = (const unsigned char *)(th + 1);
4054 int length = (th->doff * 4) - sizeof(struct tcphdr);
4055 u16 mss = 0;
4056
4057 while (length > 0) {
4058 int opcode = *ptr++;
4059 int opsize;
4060
4061 switch (opcode) {
4062 case TCPOPT_EOL:
4063 return mss;
4064 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
4065 length--;
4066 continue;
4067 default:
4068 if (length < 2)
4069 return mss;
4070 opsize = *ptr++;
4071 if (opsize < 2) /* "silly options" */
4072 return mss;
4073 if (opsize > length)
4074 return mss; /* fail on partial options */
4075 if (opcode == TCPOPT_MSS && opsize == TCPOLEN_MSS) {
4076 u16 in_mss = get_unaligned_be16(ptr);
4077
4078 if (in_mss) {
4079 if (user_mss && user_mss < in_mss)
4080 in_mss = user_mss;
4081 mss = in_mss;
4082 }
4083 }
4084 ptr += opsize - 2;
4085 length -= opsize;
4086 }
4087 }
4088 return mss;
4089 }
4090 EXPORT_SYMBOL_GPL(tcp_parse_mss_option);
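/* Illustrative usage sketch (hypothetical caller, not from this file):
 *
 *	u16 mss = tcp_parse_mss_option(tcp_hdr(skb), user_mss);
 *
 *	if (!mss)
 *		mss = TCP_BASE_MSS;	(assumed fallback, caller's choice)
 *
 * A return of 0 means the SYN carried no usable MSS option; a non-zero
 * return is already clamped to user_mss when user_mss is non-zero.
 */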
4091
4092 /* Look for tcp options. Normally only called on SYN and SYNACK packets.
4093 * But, this can also be called on packets in the established flow when
4094 * the fast version below fails.
4095 */
4096 void tcp_parse_options(const struct net *net,
4097 const struct sk_buff *skb,
4098 struct tcp_options_received *opt_rx, int estab,
4099 struct tcp_fastopen_cookie *foc)
4100 {
4101 const unsigned char *ptr;
4102 const struct tcphdr *th = tcp_hdr(skb);
4103 int length = (th->doff * 4) - sizeof(struct tcphdr);
4104
4105 ptr = (const unsigned char *)(th + 1);
4106 opt_rx->saw_tstamp = 0;
4107 opt_rx->saw_unknown = 0;
4108
4109 while (length > 0) {
4110 int opcode = *ptr++;
4111 int opsize;
4112
4113 switch (opcode) {
4114 case TCPOPT_EOL:
4115 return;
4116 case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
4117 length--;
4118 continue;
4119 default:
4120 if (length < 2)
4121 return;
4122 opsize = *ptr++;
4123 if (opsize < 2) /* "silly options" */
4124 return;
4125 if (opsize > length)
4126 return; /* don't parse partial options */
4127 switch (opcode) {
4128 case TCPOPT_MSS:
4129 if (opsize == TCPOLEN_MSS && th->syn && !estab) {
4130 u16 in_mss = get_unaligned_be16(ptr);
4131 if (in_mss) {
4132 if (opt_rx->user_mss &&
4133 opt_rx->user_mss < in_mss)
4134 in_mss = opt_rx->user_mss;
4135 opt_rx->mss_clamp = in_mss;
4136 }
4137 }
4138 break;
4139 case TCPOPT_WINDOW:
4140 if (opsize == TCPOLEN_WINDOW && th->syn &&
4141 !estab && READ_ONCE(net->ipv4.sysctl_tcp_window_scaling)) {
4142 __u8 snd_wscale = *(__u8 *)ptr;
4143 opt_rx->wscale_ok = 1;
4144 if (snd_wscale > TCP_MAX_WSCALE) {
4145 net_info_ratelimited("%s: Illegal window scaling value %d > %u received\n",
4146 __func__,
4147 snd_wscale,
4148 TCP_MAX_WSCALE);
4149 snd_wscale = TCP_MAX_WSCALE;
4150 }
4151 opt_rx->snd_wscale = snd_wscale;
4152 }
4153 break;
4154 case TCPOPT_TIMESTAMP:
4155 if ((opsize == TCPOLEN_TIMESTAMP) &&
4156 ((estab && opt_rx->tstamp_ok) ||
4157 (!estab && READ_ONCE(net->ipv4.sysctl_tcp_timestamps)))) {
4158 opt_rx->saw_tstamp = 1;
4159 opt_rx->rcv_tsval = get_unaligned_be32(ptr);
4160 opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
4161 }
4162 break;
4163 case TCPOPT_SACK_PERM:
4164 if (opsize == TCPOLEN_SACK_PERM && th->syn &&
4165 !estab && READ_ONCE(net->ipv4.sysctl_tcp_sack)) {
4166 opt_rx->sack_ok = TCP_SACK_SEEN;
4167 tcp_sack_reset(opt_rx);
4168 }
4169 break;
4170
4171 case TCPOPT_SACK:
4172 if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
4173 !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
4174 opt_rx->sack_ok) {
4175 TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
4176 }
4177 break;
4178 #ifdef CONFIG_TCP_MD5SIG
4179 case TCPOPT_MD5SIG:
4180 /* The MD5 Hash has already been
4181 * checked (see tcp_v{4,6}_rcv()).
4182 */
4183 break;
4184 #endif
4185 case TCPOPT_FASTOPEN:
4186 tcp_parse_fastopen_option(
4187 opsize - TCPOLEN_FASTOPEN_BASE,
4188 ptr, th->syn, foc, false);
4189 break;
4190
4191 case TCPOPT_EXP:
4192 /* Fast Open option shares code 254 using a
4193 * 16-bit magic number.
4194 */
4195 if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
4196 get_unaligned_be16(ptr) ==
4197 TCPOPT_FASTOPEN_MAGIC) {
4198 tcp_parse_fastopen_option(opsize -
4199 TCPOLEN_EXP_FASTOPEN_BASE,
4200 ptr + 2, th->syn, foc, true);
4201 break;
4202 }
4203
4204 if (smc_parse_options(th, opt_rx, ptr, opsize))
4205 break;
4206
4207 opt_rx->saw_unknown = 1;
4208 break;
4209
4210 default:
4211 opt_rx->saw_unknown = 1;
4212 }
4213 ptr += opsize-2;
4214 length -= opsize;
4215 }
4216 }
4217 }
4218 EXPORT_SYMBOL(tcp_parse_options);
4219
4220 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
4221 {
4222 const __be32 *ptr = (const __be32 *)(th + 1);
4223
4224 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
4225 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
4226 tp->rx_opt.saw_tstamp = 1;
4227 ++ptr;
4228 tp->rx_opt.rcv_tsval = ntohl(*ptr);
4229 ++ptr;
4230 if (*ptr)
4231 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
4232 else
4233 tp->rx_opt.rcv_tsecr = 0;
4234 return true;
4235 }
4236 return false;
4237 }
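/* Worked example: the constant tested above packs NOP, NOP, TIMESTAMP,
 * TCPOLEN_TIMESTAMP, i.e. bytes 0x01 0x01 0x08 0x0a, so an option area
 * beginning with htonl(0x0101080a) is exactly the timestamp-only layout
 * the fast path expects; anything else falls back to tcp_parse_options().
 */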
4238
4239 /* Fast parse options. This hopes to only see timestamps.
4240 * If it is wrong it falls back on tcp_parse_options().
4241 */
4242 static bool tcp_fast_parse_options(const struct net *net,
4243 const struct sk_buff *skb,
4244 const struct tcphdr *th, struct tcp_sock *tp)
4245 {
4246 /* In the spirit of fast parsing, compare doff directly to constant
4247 * values. Because equality is used, short doff can be ignored here.
4248 */
4249 if (th->doff == (sizeof(*th) / 4)) {
4250 tp->rx_opt.saw_tstamp = 0;
4251 return false;
4252 } else if (tp->rx_opt.tstamp_ok &&
4253 th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
4254 if (tcp_parse_aligned_timestamp(tp, th))
4255 return true;
4256 }
4257
4258 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL);
4259 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
4260 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
4261
4262 return true;
4263 }
4264
4265 #ifdef CONFIG_TCP_MD5SIG
4266 /*
4267 * Parse MD5 Signature option
4268 */
4269 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
4270 {
4271 int length = (th->doff << 2) - sizeof(*th);
4272 const u8 *ptr = (const u8 *)(th + 1);
4273
4274 /* If not enough data remaining, we can short cut */
4275 while (length >= TCPOLEN_MD5SIG) {
4276 int opcode = *ptr++;
4277 int opsize;
4278
4279 switch (opcode) {
4280 case TCPOPT_EOL:
4281 return NULL;
4282 case TCPOPT_NOP:
4283 length--;
4284 continue;
4285 default:
4286 opsize = *ptr++;
4287 if (opsize < 2 || opsize > length)
4288 return NULL;
4289 if (opcode == TCPOPT_MD5SIG)
4290 return opsize == TCPOLEN_MD5SIG ? ptr : NULL;
4291 }
4292 ptr += opsize - 2;
4293 length -= opsize;
4294 }
4295 return NULL;
4296 }
4297 EXPORT_SYMBOL(tcp_parse_md5sig_option);
4298 #endif
4299
4300 /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
4301 *
4302 * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
4303 * it can pass through stack. So, the following predicate verifies that
4304 * this segment is not used for anything but congestion avoidance or
4305 * fast retransmit. Moreover, we even are able to eliminate most of such
4306 * second order effects, if we apply some small "replay" window (~RTO)
4307 * to timestamp space.
4308 *
4309 * All these measures still do not guarantee that we reject wrapped ACKs
4310 * on networks with high bandwidth, when sequence space is recycled quickly,
4311 * but it guarantees that such events will be very rare and do not affect
4312 * connection seriously. This doesn't look nice, but alas, PAWS is really
4313 * buggy extension.
4314 *
4315 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC
4316 * states that events when retransmit arrives after original data are rare.
4317 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
4318 * the biggest problem on large power networks even with minor reordering.
4319 * OK, let's give it a small replay window. If the peer clock is even 1 Hz, it is safe
4320 * up to bandwidth of 18Gigabit/sec. 8) ]
4321 */
4322
4323 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
4324 {
4325 const struct tcp_sock *tp = tcp_sk(sk);
4326 const struct tcphdr *th = tcp_hdr(skb);
4327 u32 seq = TCP_SKB_CB(skb)->seq;
4328 u32 ack = TCP_SKB_CB(skb)->ack_seq;
4329
4330 return (/* 1. Pure ACK with correct sequence number. */
4331 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
4332
4333 /* 2. ... and duplicate ACK. */
4334 ack == tp->snd_una &&
4335
4336 /* 3. ... and does not update window. */
4337 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
4338
4339 /* 4. ... and sits in replay window. */
4340 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
4341 }
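/* Worked example (illustrative values): with HZ = 1000 and
 * icsk_rto = 200 jiffies (200 ms), the replay window above is
 * 200 * 1024 / 1000 = 204 timestamp ticks, i.e. roughly one RTO worth of
 * timestamp skew for a peer whose timestamp clock runs near 1 kHz.
 */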
4342
4343 static inline bool tcp_paws_discard(const struct sock *sk,
4344 const struct sk_buff *skb)
4345 {
4346 const struct tcp_sock *tp = tcp_sk(sk);
4347
4348 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
4349 !tcp_disordered_ack(sk, skb);
4350 }
4351
4352 /* Check segment sequence number for validity.
4353 *
4354 * Segment controls are considered valid, if the segment
4355 * fits to the window after truncation to the window. Acceptability
4356 * of data (and SYN, FIN, of course) is checked separately.
4357 * See tcp_data_queue(), for example.
4358 *
4359 * Also, controls (RST is main one) are accepted using RCV.WUP instead
4360 * of RCV.NXT. Peer still did not advance his SND.UNA when we
4361 * delayed the ACK, so that his SND.UNA <= our RCV.WUP.
4362 * (borrowed from freebsd)
4363 */
4364
4365 static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp,
4366 u32 seq, u32 end_seq)
4367 {
4368 if (before(end_seq, tp->rcv_wup))
4369 return SKB_DROP_REASON_TCP_OLD_SEQUENCE;
4370
4371 if (after(seq, tp->rcv_nxt + tcp_receive_window(tp)))
4372 return SKB_DROP_REASON_TCP_INVALID_SEQUENCE;
4373
4374 return SKB_NOT_DROPPED_YET;
4375 }
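/* Worked example (illustrative values): with rcv_wup = 1000,
 * rcv_nxt = 2000 and a receive window of 10000, a segment ending at 900
 * is dropped as TCP_OLD_SEQUENCE (entirely below what was already ACKed),
 * one starting at 13000 is dropped as TCP_INVALID_SEQUENCE (beyond
 * rcv_nxt + window), and anything in between is left for the data path.
 */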
4376
4377
4378 void tcp_done_with_error(struct sock *sk, int err)
4379 {
4380 /* This barrier is coupled with smp_rmb() in tcp_poll() */
4381 WRITE_ONCE(sk->sk_err, err);
4382 smp_wmb();
4383
4384 tcp_write_queue_purge(sk);
4385 tcp_done(sk);
4386
4387 if (!sock_flag(sk, SOCK_DEAD))
4388 sk_error_report(sk);
4389 }
4390 EXPORT_SYMBOL(tcp_done_with_error);
4391
4392 /* When we get a reset we do this. */
4393 void tcp_reset(struct sock *sk, struct sk_buff *skb)
4394 {
4395 int err;
4396
4397 trace_tcp_receive_reset(sk);
4398
4399 /* mptcp can't tell us to ignore reset pkts,
4400 * so just ignore the return value of mptcp_incoming_options().
4401 */
4402 if (sk_is_mptcp(sk))
4403 mptcp_incoming_options(sk, skb);
4404
4405 /* We want the right error as BSD sees it (and indeed as we do). */
4406 switch (sk->sk_state) {
4407 case TCP_SYN_SENT:
4408 err = ECONNREFUSED;
4409 break;
4410 case TCP_CLOSE_WAIT:
4411 err = EPIPE;
4412 break;
4413 case TCP_CLOSE:
4414 return;
4415 default:
4416 err = ECONNRESET;
4417 }
4418 tcp_done_with_error(sk, err);
4419 }
4420
4421 /*
4422 * Process the FIN bit. This now behaves as it is supposed to work
4423 * and the FIN takes effect when it is validly part of sequence
4424 * space. Not before when we get holes.
4425 *
4426 * If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
4427 * (and thence onto LAST-ACK and finally, CLOSE, we never enter
4428 * TIME-WAIT)
4429 *
4430 * If we are in FINWAIT-1, a received FIN indicates simultaneous
4431 * close and we go into CLOSING (and later onto TIME-WAIT)
4432 *
4433 * If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
4434 */
4435 void tcp_fin(struct sock *sk)
4436 {
4437 struct tcp_sock *tp = tcp_sk(sk);
4438
4439 inet_csk_schedule_ack(sk);
4440
4441 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
4442 sock_set_flag(sk, SOCK_DONE);
4443
4444 switch (sk->sk_state) {
4445 case TCP_SYN_RECV:
4446 case TCP_ESTABLISHED:
4447 /* Move to CLOSE_WAIT */
4448 tcp_set_state(sk, TCP_CLOSE_WAIT);
4449 inet_csk_enter_pingpong_mode(sk);
4450 break;
4451
4452 case TCP_CLOSE_WAIT:
4453 case TCP_CLOSING:
4454 /* Received a retransmission of the FIN, do
4455 * nothing.
4456 */
4457 break;
4458 case TCP_LAST_ACK:
4459 /* RFC793: Remain in the LAST-ACK state. */
4460 break;
4461
4462 case TCP_FIN_WAIT1:
4463 /* This case occurs when a simultaneous close
4464 * happens, we must ack the received FIN and
4465 * enter the CLOSING state.
4466 */
4467 tcp_send_ack(sk);
4468 tcp_set_state(sk, TCP_CLOSING);
4469 break;
4470 case TCP_FIN_WAIT2:
4471 /* Received a FIN -- send ACK and enter TIME_WAIT. */
4472 tcp_send_ack(sk);
4473 tcp_time_wait(sk, TCP_TIME_WAIT, 0);
4474 break;
4475 default:
4476 /* Only TCP_LISTEN and TCP_CLOSE are left, in these
4477 * cases we should never reach this piece of code.
4478 */
4479 pr_err("%s: Impossible, sk->sk_state=%d\n",
4480 __func__, sk->sk_state);
4481 break;
4482 }
4483
4484 /* It _is_ possible, that we have something out-of-order _after_ FIN.
4485 * Probably, we should reset in this case. For now drop them.
4486 */
4487 skb_rbtree_purge(&tp->out_of_order_queue);
4488 if (tcp_is_sack(tp))
4489 tcp_sack_reset(&tp->rx_opt);
4490
4491 if (!sock_flag(sk, SOCK_DEAD)) {
4492 sk->sk_state_change(sk);
4493
4494 /* Do not send POLL_HUP for half duplex close. */
4495 if (sk->sk_shutdown == SHUTDOWN_MASK ||
4496 sk->sk_state == TCP_CLOSE)
4497 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
4498 else
4499 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
4500 }
4501 }
4502
4503 static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4504 u32 end_seq)
4505 {
4506 if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
4507 if (before(seq, sp->start_seq))
4508 sp->start_seq = seq;
4509 if (after(end_seq, sp->end_seq))
4510 sp->end_seq = end_seq;
4511 return true;
4512 }
4513 return false;
4514 }
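/* Worked example: extending an existing SACK block [100, 200) with an
 * incoming range [150, 300) succeeds because the ranges touch, growing
 * the block to [100, 300) and returning true; a range [400, 500) does
 * not touch [100, 200), so the function returns false and the block is
 * left unchanged.
 */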
4515
4516 static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
4517 {
4518 struct tcp_sock *tp = tcp_sk(sk);
4519
4520 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
4521 int mib_idx;
4522
4523 if (before(seq, tp->rcv_nxt))
4524 mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
4525 else
4526 mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
4527
4528 NET_INC_STATS(sock_net(sk), mib_idx);
4529
4530 tp->rx_opt.dsack = 1;
4531 tp->duplicate_sack[0].start_seq = seq;
4532 tp->duplicate_sack[0].end_seq = end_seq;
4533 }
4534 }
4535
4536 static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
4537 {
4538 struct tcp_sock *tp = tcp_sk(sk);
4539
4540 if (!tp->rx_opt.dsack)
4541 tcp_dsack_set(sk, seq, end_seq);
4542 else
4543 tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
4544 }
4545
4546 static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
4547 {
4548 /* When the ACK path fails or drops most ACKs, the sender would
4549 * timeout and spuriously retransmit the same segment repeatedly.
4550 * The receiver remembers and reflects via DSACKs. Leverage the
4551 * DSACK state and change the txhash to re-route speculatively.
4552 */
4553 if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq &&
4554 sk_rethink_txhash(sk))
4555 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
4556 }
4557
4558 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
4559 {
4560 struct tcp_sock *tp = tcp_sk(sk);
4561
4562 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
4563 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4564 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4565 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
4566
4567 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
4568 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4569
4570 tcp_rcv_spurious_retrans(sk, skb);
4571 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
4572 end_seq = tp->rcv_nxt;
4573 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
4574 }
4575 }
4576
4577 tcp_send_ack(sk);
4578 }
4579
4580 /* These routines update the SACK block as out-of-order packets arrive or
4581 * in-order packets close up the sequence space.
4582 */
4583 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
4584 {
4585 int this_sack;
4586 struct tcp_sack_block *sp = &tp->selective_acks[0];
4587 struct tcp_sack_block *swalk = sp + 1;
4588
4589 /* See if the recent change to the first SACK eats into
4590 * or hits the sequence space of other SACK blocks, if so coalesce.
4591 */
4592 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
4593 if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
4594 int i;
4595
4596 /* Zap SWALK, by moving every further SACK up by one slot.
4597 * Decrease num_sacks.
4598 */
4599 tp->rx_opt.num_sacks--;
4600 for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
4601 sp[i] = sp[i + 1];
4602 continue;
4603 }
4604 this_sack++;
4605 swalk++;
4606 }
4607 }
4608
4609 void tcp_sack_compress_send_ack(struct sock *sk)
4610 {
4611 struct tcp_sock *tp = tcp_sk(sk);
4612
4613 if (!tp->compressed_ack)
4614 return;
4615
4616 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
4617 __sock_put(sk);
4618
4619 /* Since we finally have to send one ACK,
4620 * subtract one from tp->compressed_ack to keep
4621 * LINUX_MIB_TCPACKCOMPRESSED accurate.
4622 */
4623 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
4624 tp->compressed_ack - 1);
4625
4626 tp->compressed_ack = 0;
4627 tcp_send_ack(sk);
4628 }
4629
4630 /* Reasonable number of SACK blocks to include in the TCP SACK option.
4631 * The max is 4, but this becomes 3 if TCP timestamps are in use.
4632 * Given that SACK packets might be lost, be conservative and use 2.
4633 */
4634 #define TCP_SACK_BLOCKS_EXPECTED 2
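/* Worked arithmetic (illustrative): TCP options can carry at most 40
 * bytes.  Each SACK block costs 8 bytes on top of the 2-byte SACK option
 * header, so 4 blocks fit without timestamps (2 + 4 * 8 = 34).  With the
 * timestamp option present (10 bytes plus 2 NOPs = 12), only 28 bytes
 * remain and 3 blocks fit (2 + 3 * 8 = 26), hence the figure above.
 */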
4635
4636 static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
4637 {
4638 struct tcp_sock *tp = tcp_sk(sk);
4639 struct tcp_sack_block *sp = &tp->selective_acks[0];
4640 int cur_sacks = tp->rx_opt.num_sacks;
4641 int this_sack;
4642
4643 if (!cur_sacks)
4644 goto new_sack;
4645
4646 for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
4647 if (tcp_sack_extend(sp, seq, end_seq)) {
4648 if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
4649 tcp_sack_compress_send_ack(sk);
4650 /* Rotate this_sack to the first one. */
4651 for (; this_sack > 0; this_sack--, sp--)
4652 swap(*sp, *(sp - 1));
4653 if (cur_sacks > 1)
4654 tcp_sack_maybe_coalesce(tp);
4655 return;
4656 }
4657 }
4658
4659 if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
4660 tcp_sack_compress_send_ack(sk);
4661
4662 /* Could not find an adjacent existing SACK, build a new one,
4663 * put it at the front, and shift everyone else down. We
4664 * always know there is at least one SACK present already here.
4665 *
4666 * If the sack array is full, forget about the last one.
4667 */
4668 if (this_sack >= TCP_NUM_SACKS) {
4669 this_sack--;
4670 tp->rx_opt.num_sacks--;
4671 sp--;
4672 }
4673 for (; this_sack > 0; this_sack--, sp--)
4674 *sp = *(sp - 1);
4675
4676 new_sack:
4677 /* Build the new head SACK, and we're done. */
4678 sp->start_seq = seq;
4679 sp->end_seq = end_seq;
4680 tp->rx_opt.num_sacks++;
4681 }
4682
4683 /* RCV.NXT advances, some SACKs should be eaten. */
4684
4685 static void tcp_sack_remove(struct tcp_sock *tp)
4686 {
4687 struct tcp_sack_block *sp = &tp->selective_acks[0];
4688 int num_sacks = tp->rx_opt.num_sacks;
4689 int this_sack;
4690
4691 /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
4692 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
4693 tp->rx_opt.num_sacks = 0;
4694 return;
4695 }
4696
4697 for (this_sack = 0; this_sack < num_sacks;) {
4698 /* Check if the start of the sack is covered by RCV.NXT. */
4699 if (!before(tp->rcv_nxt, sp->start_seq)) {
4700 int i;
4701
4702 /* RCV.NXT must cover all the block! */
4703 WARN_ON(before(tp->rcv_nxt, sp->end_seq));
4704
4705 /* Zap this SACK, by moving forward any other SACKS. */
4706 for (i = this_sack+1; i < num_sacks; i++)
4707 tp->selective_acks[i-1] = tp->selective_acks[i];
4708 num_sacks--;
4709 continue;
4710 }
4711 this_sack++;
4712 sp++;
4713 }
4714 tp->rx_opt.num_sacks = num_sacks;
4715 }
4716
4717 /**
4718 * tcp_try_coalesce - try to merge skb to prior one
4719 * @sk: socket
4720 * @to: prior buffer
4721 * @from: buffer to add in queue
4722 * @fragstolen: pointer to boolean
4723 *
4724 * Before queueing skb @from after @to, try to merge them
4725 * to reduce overall memory use and queue lengths, if cost is small.
4726 * Packets in ofo or receive queues can stay a long time.
4727 * Better try to coalesce them right now to avoid future collapses.
4728 * Returns true if caller should free @from instead of queueing it
4729 */
4730 static bool tcp_try_coalesce(struct sock *sk,
4731 struct sk_buff *to,
4732 struct sk_buff *from,
4733 bool *fragstolen)
4734 {
4735 int delta;
4736
4737 *fragstolen = false;
4738
4739 /* It's possible this segment overlaps with a prior segment in the queue */
4740 if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
4741 return false;
4742
4743 if (!mptcp_skb_can_collapse(to, from))
4744 return false;
4745
4746 #ifdef CONFIG_TLS_DEVICE
4747 if (from->decrypted != to->decrypted)
4748 return false;
4749 #endif
4750
4751 if (!skb_try_coalesce(to, from, fragstolen, &delta))
4752 return false;
4753
4754 atomic_add(delta, &sk->sk_rmem_alloc);
4755 sk_mem_charge(sk, delta);
4756 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
4757 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
4758 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
4759 TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
4760
4761 if (TCP_SKB_CB(from)->has_rxtstamp) {
4762 TCP_SKB_CB(to)->has_rxtstamp = true;
4763 to->tstamp = from->tstamp;
4764 skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
4765 }
4766
4767 return true;
4768 }
4769
4770 static bool tcp_ooo_try_coalesce(struct sock *sk,
4771 struct sk_buff *to,
4772 struct sk_buff *from,
4773 bool *fragstolen)
4774 {
4775 bool res = tcp_try_coalesce(sk, to, from, fragstolen);
4776
4777 /* In case tcp_drop_reason() is called later, update to->gso_segs */
4778 if (res) {
4779 u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
4780 max_t(u16, 1, skb_shinfo(from)->gso_segs);
4781
4782 skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
4783 }
4784 return res;
4785 }
4786
4787 static void tcp_drop_reason(struct sock *sk, struct sk_buff *skb,
4788 enum skb_drop_reason reason)
4789 {
4790 sk_drops_add(sk, skb);
4791 kfree_skb_reason(skb, reason);
4792 }
4793
4794 /* This one checks to see if we can put data from the
4795 * out_of_order queue into the receive_queue.
4796 */
4797 static void tcp_ofo_queue(struct sock *sk)
4798 {
4799 struct tcp_sock *tp = tcp_sk(sk);
4800 __u32 dsack_high = tp->rcv_nxt;
4801 bool fin, fragstolen, eaten;
4802 struct sk_buff *skb, *tail;
4803 struct rb_node *p;
4804
4805 p = rb_first(&tp->out_of_order_queue);
4806 while (p) {
4807 skb = rb_to_skb(p);
4808 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
4809 break;
4810
4811 if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
4812 __u32 dsack = dsack_high;
4813 if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
4814 dsack_high = TCP_SKB_CB(skb)->end_seq;
4815 tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
4816 }
4817 p = rb_next(p);
4818 rb_erase(&skb->rbnode, &tp->out_of_order_queue);
4819
4820 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
4821 tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_DROP);
4822 continue;
4823 }
4824
4825 tail = skb_peek_tail(&sk->sk_receive_queue);
4826 eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
4827 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
4828 fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
4829 if (!eaten)
4830 __skb_queue_tail(&sk->sk_receive_queue, skb);
4831 else
4832 kfree_skb_partial(skb, fragstolen);
4833
4834 if (unlikely(fin)) {
4835 tcp_fin(sk);
4836 /* tcp_fin() purges tp->out_of_order_queue,
4837 * so we must end this loop right now.
4838 */
4839 break;
4840 }
4841 }
4842 }
4843
4844 static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb);
4845 static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
4846
4847 static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
4848 unsigned int size)
4849 {
4850 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
4851 !sk_rmem_schedule(sk, skb, size)) {
4852
4853 if (tcp_prune_queue(sk, skb) < 0)
4854 return -1;
4855
4856 while (!sk_rmem_schedule(sk, skb, size)) {
4857 if (!tcp_prune_ofo_queue(sk, skb))
4858 return -1;
4859 }
4860 }
4861 return 0;
4862 }
4863
4864 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4865 {
4866 struct tcp_sock *tp = tcp_sk(sk);
4867 struct rb_node **p, *parent;
4868 struct sk_buff *skb1;
4869 u32 seq, end_seq;
4870 bool fragstolen;
4871
4872 tcp_ecn_check_ce(sk, skb);
4873
4874 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
4875 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
4876 sk->sk_data_ready(sk);
4877 tcp_drop_reason(sk, skb, SKB_DROP_REASON_PROTO_MEM);
4878 return;
4879 }
4880
4881 /* Disable header prediction. */
4882 tp->pred_flags = 0;
4883 inet_csk_schedule_ack(sk);
4884
4885 tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
4886 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
4887 seq = TCP_SKB_CB(skb)->seq;
4888 end_seq = TCP_SKB_CB(skb)->end_seq;
4889
4890 p = &tp->out_of_order_queue.rb_node;
4891 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
4892 /* Initial out of order segment, build 1 SACK. */
4893 if (tcp_is_sack(tp)) {
4894 tp->rx_opt.num_sacks = 1;
4895 tp->selective_acks[0].start_seq = seq;
4896 tp->selective_acks[0].end_seq = end_seq;
4897 }
4898 rb_link_node(&skb->rbnode, NULL, p);
4899 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
4900 tp->ooo_last_skb = skb;
4901 goto end;
4902 }
4903
4904 /* In the typical case, we are adding an skb to the end of the list.
4905 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
4906 */
4907 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
4908 skb, &fragstolen)) {
4909 coalesce_done:
4910 /* For non-SACK flows, do not grow the window, so as to force
4911 * DUPACKs and trigger fast retransmit.
4912 */
4913 if (tcp_is_sack(tp))
4914 tcp_grow_window(sk, skb, true);
4915 kfree_skb_partial(skb, fragstolen);
4916 skb = NULL;
4917 goto add_sack;
4918 }
4919 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
4920 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) {
4921 parent = &tp->ooo_last_skb->rbnode;
4922 p = &parent->rb_right;
4923 goto insert;
4924 }
4925
4926 /* Find place to insert this segment. Handle overlaps on the way. */
4927 parent = NULL;
4928 while (*p) {
4929 parent = *p;
4930 skb1 = rb_to_skb(parent);
4931 if (before(seq, TCP_SKB_CB(skb1)->seq)) {
4932 p = &parent->rb_left;
4933 continue;
4934 }
4935 if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4936 if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4937 /* All the bits are present. Drop. */
4938 NET_INC_STATS(sock_net(sk),
4939 LINUX_MIB_TCPOFOMERGE);
4940 tcp_drop_reason(sk, skb,
4941 SKB_DROP_REASON_TCP_OFOMERGE);
4942 skb = NULL;
4943 tcp_dsack_set(sk, seq, end_seq);
4944 goto add_sack;
4945 }
4946 if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4947 /* Partial overlap. */
4948 tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
4949 } else {
4950 /* skb's seq == skb1's seq and skb covers skb1.
4951 * Replace skb1 with skb.
4952 */
4953 rb_replace_node(&skb1->rbnode, &skb->rbnode,
4954 &tp->out_of_order_queue);
4955 tcp_dsack_extend(sk,
4956 TCP_SKB_CB(skb1)->seq,
4957 TCP_SKB_CB(skb1)->end_seq);
4958 NET_INC_STATS(sock_net(sk),
4959 LINUX_MIB_TCPOFOMERGE);
4960 tcp_drop_reason(sk, skb1,
4961 SKB_DROP_REASON_TCP_OFOMERGE);
4962 goto merge_right;
4963 }
4964 } else if (tcp_ooo_try_coalesce(sk, skb1,
4965 skb, &fragstolen)) {
4966 goto coalesce_done;
4967 }
4968 p = &parent->rb_right;
4969 }
4970 insert:
4971 /* Insert segment into RB tree. */
4972 rb_link_node(&skb->rbnode, parent, p);
4973 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
4974
4975 merge_right:
4976 /* Remove other segments covered by skb. */
4977 while ((skb1 = skb_rb_next(skb)) != NULL) {
4978 if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
4979 break;
4980 if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4981 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4982 end_seq);
4983 break;
4984 }
4985 rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
4986 tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4987 TCP_SKB_CB(skb1)->end_seq);
4988 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
4989 tcp_drop_reason(sk, skb1, SKB_DROP_REASON_TCP_OFOMERGE);
4990 }
4991 /* If there is no skb after us, we are the last_skb ! */
4992 if (!skb1)
4993 tp->ooo_last_skb = skb;
4994
4995 add_sack:
4996 if (tcp_is_sack(tp))
4997 tcp_sack_new_ofo_skb(sk, seq, end_seq);
4998 end:
4999 if (skb) {
5000 /* For non-SACK flows, do not grow the window, so as to force
5001 * DUPACKs and trigger fast retransmit.
5002 */
5003 if (tcp_is_sack(tp))
5004 tcp_grow_window(sk, skb, false);
5005 skb_condense(skb);
5006 skb_set_owner_r(skb, sk);
5007 }
5008 }
5009
5010 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
5011 bool *fragstolen)
5012 {
5013 int eaten;
5014 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
5015
5016 eaten = (tail &&
5017 tcp_try_coalesce(sk, tail,
5018 skb, fragstolen)) ? 1 : 0;
5019 tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
5020 if (!eaten) {
5021 __skb_queue_tail(&sk->sk_receive_queue, skb);
5022 skb_set_owner_r(skb, sk);
5023 }
5024 return eaten;
5025 }
5026
5027 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
5028 {
5029 struct sk_buff *skb;
5030 int err = -ENOMEM;
5031 int data_len = 0;
5032 bool fragstolen;
5033
5034 if (size == 0)
5035 return 0;
5036
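/* For payloads larger than a page, whole pages go into skb frags
 * (capped at MAX_SKB_FRAGS) and the sub-page remainder stays in the
 * linear area; "size" is clamped to what actually fits.
 */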
5037 if (size > PAGE_SIZE) {
5038 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
5039
5040 data_len = npages << PAGE_SHIFT;
5041 size = data_len + (size & ~PAGE_MASK);
5042 }
5043 skb = alloc_skb_with_frags(size - data_len, data_len,
5044 PAGE_ALLOC_COSTLY_ORDER,
5045 &err, sk->sk_allocation);
5046 if (!skb)
5047 goto err;
5048
5049 skb_put(skb, size - data_len);
5050 skb->data_len = data_len;
5051 skb->len = size;
5052
5053 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
5054 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
5055 goto err_free;
5056 }
5057
5058 err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
5059 if (err)
5060 goto err_free;
5061
5062 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
5063 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
5064 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
5065
5066 if (tcp_queue_rcv(sk, skb, &fragstolen)) {
5067 WARN_ON_ONCE(fragstolen); /* should not happen */
5068 __kfree_skb(skb);
5069 }
5070 return size;
5071
5072 err_free:
5073 kfree_skb(skb);
5074 err:
5075 return err;
5076
5077 }
5078
5079 void tcp_data_ready(struct sock *sk)
5080 {
5081 if (tcp_epollin_ready(sk, sk->sk_rcvlowat) || sock_flag(sk, SOCK_DONE))
5082 sk->sk_data_ready(sk);
5083 }
5084
5085 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
5086 {
5087 struct tcp_sock *tp = tcp_sk(sk);
5088 enum skb_drop_reason reason;
5089 bool fragstolen;
5090 int eaten;
5091
5092 /* If a subflow has been reset, the packet should not continue
5093 * to be processed, drop the packet.
5094 */
5095 if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) {
5096 __kfree_skb(skb);
5097 return;
5098 }
5099
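/* Segment carries no payload (and no FIN): nothing to queue. */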
5100 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
5101 __kfree_skb(skb);
5102 return;
5103 }
5104 skb_dst_drop(skb);
5105 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
5106
5107 reason = SKB_DROP_REASON_NOT_SPECIFIED;
5108 tp->rx_opt.dsack = 0;
5109
5110 /* Queue data for delivery to the user.
5111 * Packets in sequence go to the receive queue.
5112 * Out of sequence packets to the out_of_order_queue.
5113 */
5114 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
5115 if (tcp_receive_window(tp) == 0) {
5116 reason = SKB_DROP_REASON_TCP_ZEROWINDOW;
5117 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
5118 goto out_of_window;
5119 }
5120
5121 /* Ok. In sequence. In window. */
5122 queue_and_out:
5123 if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
5124 /* TODO: maybe ratelimit these WIN 0 ACK ? */
5125 inet_csk(sk)->icsk_ack.pending |=
5126 (ICSK_ACK_NOMEM | ICSK_ACK_NOW);
5127 inet_csk_schedule_ack(sk);
5128 sk->sk_data_ready(sk);
5129
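/* If the application already has queued data it could read, drop this
 * segment; otherwise force the memory charge so that at least one
 * in-sequence skb can always be accepted.
 */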
5130 if (skb_queue_len(&sk->sk_receive_queue)) {
5131 reason = SKB_DROP_REASON_PROTO_MEM;
5132 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
5133 goto drop;
5134 }
5135 sk_forced_mem_schedule(sk, skb->truesize);
5136 }
5137
5138 eaten = tcp_queue_rcv(sk, skb, &fragstolen);
5139 if (skb->len)
5140 tcp_event_data_recv(sk, skb);
5141 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
5142 tcp_fin(sk);
5143
5144 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5145 tcp_ofo_queue(sk);
5146
5147 /* Per RFC 5681 sec. 4.2, we SHOULD send an immediate ACK when
5148 * a gap in the queue is filled.
5149 */
5150 if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
5151 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
5152 }
5153
5154 if (tp->rx_opt.num_sacks)
5155 tcp_sack_remove(tp);
5156
5157 tcp_fast_path_check(sk);
5158
5159 if (eaten > 0)
5160 kfree_skb_partial(skb, fragstolen);
5161 if (!sock_flag(sk, SOCK_DEAD))
5162 tcp_data_ready(sk);
5163 return;
5164 }
5165
5166 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
5167 tcp_rcv_spurious_retrans(sk, skb);
5168 /* A retransmit, 2nd most common case. Force an immediate ack. */
5169 reason = SKB_DROP_REASON_TCP_OLD_DATA;
5170 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
5171 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
5172
5173 out_of_window:
5174 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
5175 inet_csk_schedule_ack(sk);
5176 drop:
5177 tcp_drop_reason(sk, skb, reason);
5178 return;
5179 }
5180
5181 /* Out of window. E.g. a zero window probe. */
5182 if (!before(TCP_SKB_CB(skb)->seq,
5183 tp->rcv_nxt + tcp_receive_window(tp))) {
5184 reason = SKB_DROP_REASON_TCP_OVERWINDOW;
5185 goto out_of_window;
5186 }
5187
5188 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5189 /* Partial packet: seq < rcv_nxt < end_seq */
5190 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
5191
5192 /* If the window is closed, drop the tail of the packet, but only
5193 * after recording the D-SACK for its head, as done on the previous line.
5194 */
5195 if (!tcp_receive_window(tp)) {
5196 reason = SKB_DROP_REASON_TCP_ZEROWINDOW;
5197 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
5198 goto out_of_window;
5199 }
5200 goto queue_and_out;
5201 }
5202
5203 tcp_data_queue_ofo(sk, skb);
5204 }
5205
5206 static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
5207 {
5208 if (list)
5209 return !skb_queue_is_last(list, skb) ? skb->next : NULL;
5210
5211 return skb_rb_next(skb);
5212 }
5213
5214 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
5215 struct sk_buff_head *list,
5216 struct rb_root *root)
5217 {
5218 struct sk_buff *next = tcp_skb_next(skb, list);
5219
5220 if (list)
5221 __skb_unlink(skb, list);
5222 else
5223 rb_erase(&skb->rbnode, root);
5224
5225 __kfree_skb(skb);
5226 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
5227
5228 return next;
5229 }
5230
5231 /* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */
5232 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
5233 {
5234 struct rb_node **p = &root->rb_node;
5235 struct rb_node *parent = NULL;
5236 struct sk_buff *skb1;
5237
5238 while (*p) {
5239 parent = *p;
5240 skb1 = rb_to_skb(parent);
5241 if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
5242 p = &parent->rb_left;
5243 else
5244 p = &parent->rb_right;
5245 }
5246 rb_link_node(&skb->rbnode, parent, p);
5247 rb_insert_color(&skb->rbnode, root);
5248 }
5249
5250 /* Collapse contiguous sequence of skbs head..tail with
5251 * sequence numbers start..end.
5252 *
5253 * If tail is NULL, this means until the end of the queue.
5254 *
5255 * Segments with FIN/SYN are not collapsed (only because this
5256 * simplifies code)
5257 */
5258 static void
5259 tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
5260 struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
5261 {
5262 struct sk_buff *skb = head, *n;
5263 struct sk_buff_head tmp;
5264 bool end_of_skbs;
5265
5266 /* First, check that queue is collapsible and find
5267 * the point where collapsing can be useful.
5268 */
5269 restart:
5270 for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
5271 n = tcp_skb_next(skb, list);
5272
5273 /* No new bits? It is possible on ofo queue. */
5274 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
5275 skb = tcp_collapse_one(sk, skb, list, root);
5276 if (!skb)
5277 break;
5278 goto restart;
5279 }
5280
5281 /* The first skb to collapse is:
5282 * - not SYN/FIN and
5283 * - bloated or contains data before "start" or
5284 * overlaps the next one and MPTCP allows collapsing.
5285 */
5286 if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) &&
5287 (tcp_win_from_space(sk, skb->truesize) > skb->len ||
5288 before(TCP_SKB_CB(skb)->seq, start))) {
5289 end_of_skbs = false;
5290 break;
5291 }
5292
5293 if (n && n != tail && mptcp_skb_can_collapse(skb, n) &&
5294 TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
5295 end_of_skbs = false;
5296 break;
5297 }
5298
5299 /* Decided to skip this, advance start seq. */
5300 start = TCP_SKB_CB(skb)->end_seq;
5301 }
5302 if (end_of_skbs ||
5303 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
5304 return;
5305
5306 __skb_queue_head_init(&tmp);
5307
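/* Rebuild the range into compact skbs, each carrying at most roughly
 * one page of payload (SKB_MAX_ORDER(0, 0) accounts for skb overhead).
 */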
5308 while (before(start, end)) {
5309 int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
5310 struct sk_buff *nskb;
5311
5312 nskb = alloc_skb(copy, GFP_ATOMIC);
5313 if (!nskb)
5314 break;
5315
5316 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
5317 #ifdef CONFIG_TLS_DEVICE
5318 nskb->decrypted = skb->decrypted;
5319 #endif
5320 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
5321 if (list)
5322 __skb_queue_before(list, skb, nskb);
5323 else
5324 __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
5325 skb_set_owner_r(nskb, sk);
5326 mptcp_skb_ext_move(nskb, skb);
5327
5328 /* Copy data, releasing collapsed skbs. */
5329 while (copy > 0) {
5330 int offset = start - TCP_SKB_CB(skb)->seq;
5331 int size = TCP_SKB_CB(skb)->end_seq - start;
5332
5333 BUG_ON(offset < 0);
5334 if (size > 0) {
5335 size = min(copy, size);
5336 if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
5337 BUG();
5338 TCP_SKB_CB(nskb)->end_seq += size;
5339 copy -= size;
5340 start += size;
5341 }
5342 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
5343 skb = tcp_collapse_one(sk, skb, list, root);
5344 if (!skb ||
5345 skb == tail ||
5346 !mptcp_skb_can_collapse(nskb, skb) ||
5347 (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
5348 goto end;
5349 #ifdef CONFIG_TLS_DEVICE
5350 if (skb->decrypted != nskb->decrypted)
5351 goto end;
5352 #endif
5353 }
5354 }
5355 }
5356 end:
5357 skb_queue_walk_safe(&tmp, skb, n)
5358 tcp_rbtree_insert(root, skb);
5359 }
5360
5361 /* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
5362 * and tcp_collapse() them until all the queue is collapsed.
5363 */
5364 static void tcp_collapse_ofo_queue(struct sock *sk)
5365 {
5366 struct tcp_sock *tp = tcp_sk(sk);
5367 u32 range_truesize, sum_tiny = 0;
5368 struct sk_buff *skb, *head;
5369 u32 start, end;
5370
5371 skb = skb_rb_first(&tp->out_of_order_queue);
5372 new_range:
5373 if (!skb) {
5374 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
5375 return;
5376 }
5377 start = TCP_SKB_CB(skb)->seq;
5378 end = TCP_SKB_CB(skb)->end_seq;
5379 range_truesize = skb->truesize;
5380
5381 for (head = skb;;) {
5382 skb = skb_rb_next(skb);
5383
5384 /* Range is terminated when we see a gap or when
5385 * we are at the queue end.
5386 */
5387 if (!skb ||
5388 after(TCP_SKB_CB(skb)->seq, end) ||
5389 before(TCP_SKB_CB(skb)->end_seq, start)) {
5390 /* Do not attempt collapsing tiny skbs */
5391 if (range_truesize != head->truesize ||
5392 end - start >= SKB_WITH_OVERHEAD(PAGE_SIZE)) {
5393 tcp_collapse(sk, NULL, &tp->out_of_order_queue,
5394 head, skb, start, end);
5395 } else {
5396 sum_tiny += range_truesize;
5397 if (sum_tiny > sk->sk_rcvbuf >> 3)
5398 return;
5399 }
5400 goto new_range;
5401 }
5402
5403 range_truesize += skb->truesize;
5404 if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
5405 start = TCP_SKB_CB(skb)->seq;
5406 if (after(TCP_SKB_CB(skb)->end_seq, end))
5407 end = TCP_SKB_CB(skb)->end_seq;
5408 }
5409 }
5410
5411 /*
5412 * Clean the out-of-order queue to make room.
5413 * We drop packets with high sequence numbers in order to:
5414 * 1) Give holes a chance to be filled.
5415 * This means we do not drop packets from the ooo queue if their
5416 * sequence is before the incoming packet's sequence.
5417 * 2) Avoid adding too much latency if thousands of packets sit there.
5418 * (But if the application shrinks SO_RCVBUF, we could still end up
5419 * freeing the whole queue here.)
5420 * 3) Drop at least 12.5 % of sk_rcvbuf to mitigate malicious attacks.
5421 *
5422 * Return true if queue has shrunk.
5423 */
5424 static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb)
5425 {
5426 struct tcp_sock *tp = tcp_sk(sk);
5427 struct rb_node *node, *prev;
5428 bool pruned = false;
5429 int goal;
5430
5431 if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
5432 return false;
5433
5434 goal = sk->sk_rcvbuf >> 3;
5435 node = &tp->ooo_last_skb->rbnode;
5436
5437 do {
5438 struct sk_buff *skb = rb_to_skb(node);
5439
5440 /* If incoming skb would land last in ofo queue, stop pruning. */
5441 if (after(TCP_SKB_CB(in_skb)->seq, TCP_SKB_CB(skb)->seq))
5442 break;
5443 pruned = true;
5444 prev = rb_prev(node);
5445 rb_erase(node, &tp->out_of_order_queue);
5446 goal -= skb->truesize;
5447 tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
5448 tp->ooo_last_skb = rb_to_skb(prev);
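/* Once roughly 1/8 of sk_rcvbuf has been freed, stop if memory use is
 * back under the limit and we are not under memory pressure; otherwise
 * start over with a fresh goal.
 */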
5449 if (!prev || goal <= 0) {
5450 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
5451 !tcp_under_memory_pressure(sk))
5452 break;
5453 goal = sk->sk_rcvbuf >> 3;
5454 }
5455 node = prev;
5456 } while (node);
5457
5458 if (pruned) {
5459 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
5460 /* Reset SACK state. A conforming SACK implementation will
5461 * do the same at a timeout based retransmit. When a connection
5462 * is in a sad state like this, we care only about the integrity
5463 * of the connection, not about performance.
5464 */
5465 if (tp->rx_opt.sack_ok)
5466 tcp_sack_reset(&tp->rx_opt);
5467 }
5468 return pruned;
5469 }
5470
5471 /* Reduce allocated memory if we can, trying to get
5472 * the socket within its memory limits again.
5473 *
5474 * Return less than zero if we should start dropping frames
5475 * until the socket owning process reads some of the data
5476 * to stabilize the situation.
5477 */
5478 static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
5479 {
5480 struct tcp_sock *tp = tcp_sk(sk);
5481
5482 NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
5483
5484 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
5485 tcp_clamp_window(sk);
5486 else if (tcp_under_memory_pressure(sk))
5487 tcp_adjust_rcv_ssthresh(sk);
5488
5489 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5490 return 0;
5491
5492 tcp_collapse_ofo_queue(sk);
5493 if (!skb_queue_empty(&sk->sk_receive_queue))
5494 tcp_collapse(sk, &sk->sk_receive_queue, NULL,
5495 skb_peek(&sk->sk_receive_queue),
5496 NULL,
5497 tp->copied_seq, tp->rcv_nxt);
5498
5499 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5500 return 0;
5501
5502 /* Collapsing did not help, destructive actions follow.
5503 * This must not ever occur. */
5504
5505 tcp_prune_ofo_queue(sk, in_skb);
5506
5507 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5508 return 0;
5509
5510 /* If we are really being abused, tell the caller to silently
5511 * drop receive data on the floor. It will get retransmitted
5512 * and hopefully then we'll have sufficient space.
5513 */
5514 NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
5515
5516 /* Massive buffer overcommit. */
5517 tp->pred_flags = 0;
5518 return -1;
5519 }
5520
5521 static bool tcp_should_expand_sndbuf(struct sock *sk)
5522 {
5523 const struct tcp_sock *tp = tcp_sk(sk);
5524
5525 /* If the user specified a specific send buffer setting, do
5526 * not modify it.
5527 */
5528 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
5529 return false;
5530
5531 /* If we are under global TCP memory pressure, do not expand. */
5532 if (tcp_under_memory_pressure(sk)) {
5533 int unused_mem = sk_unused_reserved_mem(sk);
5534
5535 /* Adjust sndbuf according to reserved mem. But make sure
5536 * it never goes below SOCK_MIN_SNDBUF.
5537 * See sk_stream_moderate_sndbuf() for more details.
5538 */
5539 if (unused_mem > SOCK_MIN_SNDBUF)
5540 WRITE_ONCE(sk->sk_sndbuf, unused_mem);
5541
5542 return false;
5543 }
5544
5545 /* If we are under soft global TCP memory pressure, do not expand. */
5546 if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
5547 return false;
5548
5549 /* If we filled the congestion window, do not expand. */
5550 if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp))
5551 return false;
5552
5553 return true;
5554 }
5555
5556 static void tcp_new_space(struct sock *sk)
5557 {
5558 struct tcp_sock *tp = tcp_sk(sk);
5559
5560 if (tcp_should_expand_sndbuf(sk)) {
5561 tcp_sndbuf_expand(sk);
5562 tp->snd_cwnd_stamp = tcp_jiffies32;
5563 }
5564
5565 INDIRECT_CALL_1(sk->sk_write_space, sk_stream_write_space, sk);
5566 }
5567
5568 /* Caller made space either from:
5569 * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced)
5570 * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt)
5571 *
5572 * We might be able to generate EPOLLOUT to the application if:
5573 * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2
5574 * 2) notsent amount (tp->write_seq - tp->snd_nxt) became
5575 * small enough that tcp_stream_memory_free() decides it
5576 * is time to generate EPOLLOUT.
5577 */
5578 void tcp_check_space(struct sock *sk)
5579 {
5580 /* pairs with tcp_poll() */
5581 smp_mb();
5582 if (sk->sk_socket &&
5583 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
5584 tcp_new_space(sk);
5585 if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
5586 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
5587 }
5588 }
5589
5590 static inline void tcp_data_snd_check(struct sock *sk)
5591 {
5592 tcp_push_pending_frames(sk);
5593 tcp_check_space(sk);
5594 }
5595
5596 /*
5597 * Check if sending an ack is needed.
5598 */
5599 static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
5600 {
5601 struct tcp_sock *tp = tcp_sk(sk);
5602 unsigned long rtt, delay;
5603
5604 /* More than one full frame received... */
5605 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
5606 /* ... and right edge of window advances far enough.
5607 * (tcp_recvmsg() will send ACK otherwise).
5608 * If the application uses SO_RCVLOWAT, we want to send an ACK now
5609 * if we have not received enough bytes to satisfy the condition.
5610 */
5611 (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
5612 __tcp_select_window(sk) >= tp->rcv_wnd)) ||
5613 /* We ACK each frame or... */
5614 tcp_in_quickack_mode(sk) ||
5615 /* Protocol state mandates a one-time immediate ACK */
5616 inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) {
5617 send_now:
5618 tcp_send_ack(sk);
5619 return;
5620 }
5621
5622 if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5623 tcp_send_delayed_ack(sk);
5624 return;
5625 }
5626
5627 if (!tcp_is_sack(tp) ||
5628 tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
5629 goto send_now;
5630
5631 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
5632 tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
5633 tp->dup_ack_counter = 0;
5634 }
5635 if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) {
5636 tp->dup_ack_counter++;
5637 goto send_now;
5638 }
5639 tp->compressed_ack++;
5640 if (hrtimer_is_queued(&tp->compressed_ack_timer))
5641 return;
5642
5643 /* compress ack timer : 5 % of rtt, but no more than tcp_comp_sack_delay_ns */
5644
5645 rtt = tp->rcv_rtt_est.rtt_us;
5646 if (tp->srtt_us && tp->srtt_us < rtt)
5647 rtt = tp->srtt_us;
5648
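/* Both srtt_us and rcv_rtt_est.rtt_us are stored left-shifted by 3,
 * so rtt * (NSEC_PER_USEC >> 3) converts to nanoseconds and the /20
 * yields 5% of the RTT.
 */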
5649 delay = min_t(unsigned long,
5650 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
5651 rtt * (NSEC_PER_USEC >> 3)/20);
5652 sock_hold(sk);
5653 hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
5654 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns),
5655 HRTIMER_MODE_REL_PINNED_SOFT);
5656 }
5657
5658 static inline void tcp_ack_snd_check(struct sock *sk)
5659 {
5660 if (!inet_csk_ack_scheduled(sk)) {
5661 /* We sent a data segment already. */
5662 return;
5663 }
5664 __tcp_ack_snd_check(sk, 1);
5665 }
5666
5667 /*
5668 * This routine is only called when we have urgent data
5669 * signaled. It's the 'slow' part of tcp_urg. It could be
5670 * moved inline now as tcp_urg is only called from one
5671 * place. We handle URGent data incorrectly; we have to, because
5672 * BSD still doesn't use the correction from RFC 961.
5673 * For 1003.1g we should support a new option TCP_STDURG to permit
5674 * either form (or just set the sysctl tcp_stdurg).
5675 */
5676
5677 static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
5678 {
5679 struct tcp_sock *tp = tcp_sk(sk);
5680 u32 ptr = ntohs(th->urg_ptr);
5681
5682 if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg))
5683 ptr--;
5684 ptr += ntohl(th->seq);
5685
5686 /* Ignore urgent data that we've already seen and read. */
5687 if (after(tp->copied_seq, ptr))
5688 return;
5689
5690 /* Do not replay urg ptr.
5691 *
5692 * NOTE: interesting situation not covered by the specs.
5693 * A misbehaving sender may send an urg ptr pointing into a segment
5694 * we already have in the ofo queue. We are not able to fetch such
5695 * data and will stay in TCP_URG_NOTYET until it is eaten by
5696 * recvmsg(). It seems we are not obliged to handle such wicked
5697 * situations, but it is worth thinking about the possibility of a
5698 * DoS via some hypothetical application-level deadlock.
5699 */
5700 if (before(ptr, tp->rcv_nxt))
5701 return;
5702
5703 /* Do we already have a newer (or duplicate) urgent pointer? */
5704 if (tp->urg_data && !after(ptr, tp->urg_seq))
5705 return;
5706
5707 /* Tell the world about our new urgent pointer. */
5708 sk_send_sigurg(sk);
5709
5710 /* We may be adding urgent data when the last byte read was
5711 * urgent. To do this requires some care. We cannot just ignore
5712 * tp->copied_seq since we would read the last urgent byte again
5713 * as data, nor can we alter copied_seq until this data arrives
5714 * or we break the semantics of SIOCATMARK (and thus sockatmark())
5715 *
5716 * NOTE. Double Dutch. Rendered into plain English: the author of the
5717 * comment above did something like send("A", MSG_OOB); send("B", MSG_OOB);
5718 * and expected both A and B to disappear from the stream. This is _wrong_.
5719 * Though this happens in BSD with high probability, it is only coincidental.
5720 * Any application relying on it is buggy. Note also that the fix "works"
5721 * only in this artificial test; insert some normal data between A and B
5722 * and we diverge from BSD again. Verdict: it is better to remove it and
5723 * trap buggy users.
5724 */
5725 if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
5726 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
5727 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
5728 tp->copied_seq++;
5729 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
5730 __skb_unlink(skb, &sk->sk_receive_queue);
5731 __kfree_skb(skb);
5732 }
5733 }
5734
5735 WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET);
5736 WRITE_ONCE(tp->urg_seq, ptr);
5737
5738 /* Disable header prediction. */
5739 tp->pred_flags = 0;
5740 }
5741
5742 /* This is the 'fast' part of urgent handling. */
5743 static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
5744 {
5745 struct tcp_sock *tp = tcp_sk(sk);
5746
5747 /* Check if we get a new urgent pointer - normally not. */
5748 if (unlikely(th->urg))
5749 tcp_check_urg(sk, th);
5750
5751 /* Do we wait for any urgent data? - normally not... */
5752 if (unlikely(tp->urg_data == TCP_URG_NOTYET)) {
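/* Offset of the urgent byte within this skb: sequence delta plus the
 * TCP header length (on this path skb->data still points at the TCP
 * header), minus one if a SYN consumed a sequence number.
 */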
5753 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
5754 th->syn;
5755
5756 /* Is the urgent pointer pointing into this packet? */
5757 if (ptr < skb->len) {
5758 u8 tmp;
5759 if (skb_copy_bits(skb, ptr, &tmp, 1))
5760 BUG();
5761 WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp);
5762 if (!sock_flag(sk, SOCK_DEAD))
5763 sk->sk_data_ready(sk);
5764 }
5765 }
5766 }
5767
5768 /* Accept RST for rcv_nxt - 1 after a FIN.
5769 * When tcp connections are abruptly terminated from Mac OSX (via ^C), a
5770 * FIN is sent followed by an RST packet. The RST is sent with the same
5771 * sequence number as the FIN, and thus according to RFC 5961 a challenge
5772 * ACK should be sent. However, Mac OSX rate limits replies to challenge
5773 * ACKs on the closed socket. In addition middleboxes can drop either the
5774 * challenge ACK or a subsequent RST.
5775 */
5776 static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
5777 {
5778 const struct tcp_sock *tp = tcp_sk(sk);
5779
5780 return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
5781 (1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
5782 TCPF_CLOSING));
5783 }
5784
5785 /* Does PAWS and seqno based validation of an incoming segment, flags will
5786 * play significant role here.
5787 */
5788 static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5789 const struct tcphdr *th, int syn_inerr)
5790 {
5791 struct tcp_sock *tp = tcp_sk(sk);
5792 SKB_DR(reason);
5793
5794 /* RFC1323: H1. Apply PAWS check first. */
5795 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) &&
5796 tp->rx_opt.saw_tstamp &&
5797 tcp_paws_discard(sk, skb)) {
5798 if (!th->rst) {
5799 if (unlikely(th->syn))
5800 goto syn_challenge;
5801 NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
5802 if (!tcp_oow_rate_limited(sock_net(sk), skb,
5803 LINUX_MIB_TCPACKSKIPPEDPAWS,
5804 &tp->last_oow_ack_time))
5805 tcp_send_dupack(sk, skb);
5806 SKB_DR_SET(reason, TCP_RFC7323_PAWS);
5807 goto discard;
5808 }
5809 /* Reset is accepted even if it did not pass PAWS. */
5810 }
5811
5812 /* Step 1: check sequence number */
5813 reason = tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
5814 if (reason) {
5815 /* RFC793, page 37: "In all states except SYN-SENT, all reset
5816 * (RST) segments are validated by checking their SEQ-fields."
5817 * And page 69: "If an incoming segment is not acceptable,
5818 * an acknowledgment should be sent in reply (unless the RST
5819 * bit is set, if so drop the segment and return)".
5820 */
5821 if (!th->rst) {
5822 if (th->syn)
5823 goto syn_challenge;
5824 if (!tcp_oow_rate_limited(sock_net(sk), skb,
5825 LINUX_MIB_TCPACKSKIPPEDSEQ,
5826 &tp->last_oow_ack_time))
5827 tcp_send_dupack(sk, skb);
5828 } else if (tcp_reset_check(sk, skb)) {
5829 goto reset;
5830 }
5831 goto discard;
5832 }
5833
5834 /* Step 2: check RST bit */
5835 if (th->rst) {
5836 /* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a
5837 * FIN and SACK too if available):
5838 * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or
5839 * the right-most SACK block,
5840 * then
5841 * RESET the connection
5842 * else
5843 * Send a challenge ACK
5844 */
5845 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
5846 tcp_reset_check(sk, skb))
5847 goto reset;
5848
5849 if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
5850 struct tcp_sack_block *sp = &tp->selective_acks[0];
5851 int max_sack = sp[0].end_seq;
5852 int this_sack;
5853
5854 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;
5855 ++this_sack) {
5856 max_sack = after(sp[this_sack].end_seq,
5857 max_sack) ?
5858 sp[this_sack].end_seq : max_sack;
5859 }
5860
5861 if (TCP_SKB_CB(skb)->seq == max_sack)
5862 goto reset;
5863 }
5864
5865 /* Disable TFO if RST is out-of-order
5866 * and no data has been received
5867 * for current active TFO socket
5868 */
5869 if (tp->syn_fastopen && !tp->data_segs_in &&
5870 sk->sk_state == TCP_ESTABLISHED)
5871 tcp_fastopen_active_disable(sk);
5872 tcp_send_challenge_ack(sk);
5873 SKB_DR_SET(reason, TCP_RESET);
5874 goto discard;
5875 }
5876
5877 /* step 3: check security and precedence [ignored] */
5878
5879 /* step 4: Check for a SYN
5880 * RFC 5961 4.2 : Send a challenge ack
5881 */
5882 if (th->syn) {
5883 if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
5884 TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
5885 TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
5886 TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt)
5887 goto pass;
5888 syn_challenge:
5889 if (syn_inerr)
5890 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
5891 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
5892 tcp_send_challenge_ack(sk);
5893 SKB_DR_SET(reason, TCP_INVALID_SYN);
5894 goto discard;
5895 }
5896
5897 pass:
5898 bpf_skops_parse_hdr(sk, skb);
5899
5900 return true;
5901
5902 discard:
5903 tcp_drop_reason(sk, skb, reason);
5904 return false;
5905
5906 reset:
5907 tcp_reset(sk, skb);
5908 __kfree_skb(skb);
5909 return false;
5910 }
5911
5912 /*
5913 * TCP receive function for the ESTABLISHED state.
5914 *
5915 * It is split into a fast path and a slow path. The fast path is
5916 * disabled when:
5917 * - A zero window was announced from us - zero window probing
5918 * is only handled properly in the slow path.
5919 * - Out of order segments arrived.
5920 * - Urgent data is expected.
5921 * - There is no buffer space left
5922 * - Unexpected TCP flags/window values/header lengths are received
5923 * (detected by checking the TCP header against pred_flags)
5924 * - Data is sent in both directions. Fast path only supports pure senders
5925 * or pure receivers (this means either the sequence number or the ack
5926 * value must stay constant)
5927 * - Unexpected TCP option.
5928 *
5929 * When these conditions are not satisfied it drops into a standard
5930 * receive procedure patterned after RFC793 to handle all cases.
5931 * The first three cases are guaranteed by proper pred_flags setting,
5932 * the rest is checked inline. Fast processing is turned on in
5933 * tcp_data_queue when everything is OK.
5934 */
5935 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
5936 {
5937 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
5938 const struct tcphdr *th = (const struct tcphdr *)skb->data;
5939 struct tcp_sock *tp = tcp_sk(sk);
5940 unsigned int len = skb->len;
5941
5942 /* TCP congestion window tracking */
5943 trace_tcp_probe(sk, skb);
5944
5945 tcp_mstamp_refresh(tp);
5946 if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
5947 inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
5948 /*
5949 * Header prediction.
5950 * The code loosely follows the one in the famous
5951 * "30 instruction TCP receive" Van Jacobson mail.
5952 *
5953 * Van's trick is to deposit buffers into socket queue
5954 * on a device interrupt, to call tcp_recv function
5955 * on the receive process context and checksum and copy
5956 * the buffer to user space. smart...
5957 *
5958 * Our current scheme is not silly either but we take the
5959 * extra cost of the net_bh soft interrupt processing...
5960 * We do checksum and copy also but from device to kernel.
5961 */
5962
5963 tp->rx_opt.saw_tstamp = 0;
5964
5965 /* pred_flags is 0xS?10 << 16 + snd_wnd
5966 * if header_prediction is to be made
5967 * 'S' will always be tp->tcp_header_len >> 2
5968 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to
5969 * turn it off (when there are holes in the receive
5970 * space for instance)
5971 * PSH flag is ignored.
5972 */
5973
5974 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
5975 TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
5976 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
5977 int tcp_header_len = tp->tcp_header_len;
5978
5979 /* Timestamp header prediction: tcp_header_len
5980 * is automatically equal to th->doff*4 due to pred_flags
5981 * match.
5982 */
5983
5984 /* Check timestamp */
5985 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
5986 /* No? Slow path! */
5987 if (!tcp_parse_aligned_timestamp(tp, th))
5988 goto slow_path;
5989
5990 /* If PAWS failed, check it more carefully in slow path */
5991 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
5992 goto slow_path;
5993
5994 /* DO NOT update ts_recent here: if the checksum fails
5995 * and the timestamp was the corrupted part, it would result
5996 * in a hung connection since we would drop all
5997 * future packets due to the PAWS test.
5998 */
5999 }
6000
6001 if (len <= tcp_header_len) {
6002 /* Bulk data transfer: sender */
6003 if (len == tcp_header_len) {
6004 /* Predicted packet is in window by definition.
6005 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
6006 * Hence, check seq<=rcv_wup reduces to:
6007 */
6008 if (tcp_header_len ==
6009 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
6010 tp->rcv_nxt == tp->rcv_wup)
6011 tcp_store_ts_recent(tp);
6012
6013 /* We know that such packets are checksummed
6014 * on entry.
6015 */
6016 tcp_ack(sk, skb, 0);
6017 __kfree_skb(skb);
6018 tcp_data_snd_check(sk);
6019 /* When receiving pure ack in fast path, update
6020 * last ts ecr directly instead of calling
6021 * tcp_rcv_rtt_measure_ts()
6022 */
6023 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
6024 return;
6025 } else { /* Header too small */
6026 reason = SKB_DROP_REASON_PKT_TOO_SMALL;
6027 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6028 goto discard;
6029 }
6030 } else {
6031 int eaten = 0;
6032 bool fragstolen = false;
6033
6034 if (tcp_checksum_complete(skb))
6035 goto csum_error;
6036
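/* Not enough pre-charged receive memory for this skb: take the slow
 * path (step5), which can schedule memory and prune queues.
 */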
6037 if ((int)skb->truesize > sk->sk_forward_alloc)
6038 goto step5;
6039
6040 /* Predicted packet is in window by definition.
6041 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
6042 * Hence, check seq<=rcv_wup reduces to:
6043 */
6044 if (tcp_header_len ==
6045 (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
6046 tp->rcv_nxt == tp->rcv_wup)
6047 tcp_store_ts_recent(tp);
6048
6049 tcp_rcv_rtt_measure_ts(sk, skb);
6050
6051 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
6052
6053 /* Bulk data transfer: receiver */
6054 skb_dst_drop(skb);
6055 __skb_pull(skb, tcp_header_len);
6056 eaten = tcp_queue_rcv(sk, skb, &fragstolen);
6057
6058 tcp_event_data_recv(sk, skb);
6059
6060 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
6061 /* Well, only one small jumplet in fast path... */
6062 tcp_ack(sk, skb, FLAG_DATA);
6063 tcp_data_snd_check(sk);
6064 if (!inet_csk_ack_scheduled(sk))
6065 goto no_ack;
6066 } else {
6067 tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
6068 }
6069
6070 __tcp_ack_snd_check(sk, 0);
6071 no_ack:
6072 if (eaten)
6073 kfree_skb_partial(skb, fragstolen);
6074 tcp_data_ready(sk);
6075 return;
6076 }
6077 }
6078
6079 slow_path:
6080 if (len < (th->doff << 2) || tcp_checksum_complete(skb))
6081 goto csum_error;
6082
6083 if (!th->ack && !th->rst && !th->syn) {
6084 reason = SKB_DROP_REASON_TCP_FLAGS;
6085 goto discard;
6086 }
6087
6088 /*
6089 * Standard slow path.
6090 */
6091
6092 if (!tcp_validate_incoming(sk, skb, th, 1))
6093 return;
6094
6095 step5:
6096 reason = tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT);
6097 if ((int)reason < 0) {
6098 reason = -reason;
6099 goto discard;
6100 }
6101 tcp_rcv_rtt_measure_ts(sk, skb);
6102
6103 /* Process urgent data. */
6104 tcp_urg(sk, skb, th);
6105
6106 /* step 7: process the segment text */
6107 tcp_data_queue(sk, skb);
6108
6109 tcp_data_snd_check(sk);
6110 tcp_ack_snd_check(sk);
6111 return;
6112
6113 csum_error:
6114 reason = SKB_DROP_REASON_TCP_CSUM;
6115 trace_tcp_bad_csum(skb);
6116 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
6117 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6118
6119 discard:
6120 tcp_drop_reason(sk, skb, reason);
6121 }
6122 EXPORT_SYMBOL(tcp_rcv_established);
6123
6124 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
6125 {
6126 struct inet_connection_sock *icsk = inet_csk(sk);
6127 struct tcp_sock *tp = tcp_sk(sk);
6128
6129 tcp_mtup_init(sk);
6130 icsk->icsk_af_ops->rebuild_header(sk);
6131 tcp_init_metrics(sk);
6132
6133 /* Initialize the congestion window to start the transfer.
6134 * Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
6135 * retransmitted. In light of RFC6298 more aggressive 1sec
6136 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
6137 * retransmission has occurred.
6138 */
6139 if (tp->total_retrans > 1 && tp->undo_marker)
6140 tcp_snd_cwnd_set(tp, 1);
6141 else
6142 tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk)));
6143 tp->snd_cwnd_stamp = tcp_jiffies32;
6144
6145 bpf_skops_established(sk, bpf_op, skb);
6146 /* Initialize congestion control unless BPF initialized it already: */
6147 if (!icsk->icsk_ca_initialized)
6148 tcp_init_congestion_control(sk);
6149 tcp_init_buffer_space(sk);
6150 }
6151
6152 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
6153 {
6154 struct tcp_sock *tp = tcp_sk(sk);
6155 struct inet_connection_sock *icsk = inet_csk(sk);
6156
6157 tcp_set_state(sk, TCP_ESTABLISHED);
6158 icsk->icsk_ack.lrcvtime = tcp_jiffies32;
6159
6160 if (skb) {
6161 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
6162 security_inet_conn_established(sk, skb);
6163 sk_mark_napi_id(sk, skb);
6164 }
6165
6166 tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, skb);
6167
6168 /* Prevent spurious tcp_cwnd_restart() on first data
6169 * packet.
6170 */
6171 tp->lsndtime = tcp_jiffies32;
6172
6173 if (sock_flag(sk, SOCK_KEEPOPEN))
6174 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
6175
6176 if (!tp->rx_opt.snd_wscale)
6177 __tcp_fast_path_on(tp, tp->snd_wnd);
6178 else
6179 tp->pred_flags = 0;
6180 }
6181
6182 static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
6183 struct tcp_fastopen_cookie *cookie)
6184 {
6185 struct tcp_sock *tp = tcp_sk(sk);
6186 struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL;
6187 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
6188 bool syn_drop = false;
6189
6190 if (mss == tp->rx_opt.user_mss) {
6191 struct tcp_options_received opt;
6192
6193 /* Get original SYNACK MSS value if user MSS sets mss_clamp */
6194 tcp_clear_options(&opt);
6195 opt.user_mss = opt.mss_clamp = 0;
6196 tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL);
6197 mss = opt.mss_clamp;
6198 }
6199
6200 if (!tp->syn_fastopen) {
6201 /* Ignore an unsolicited cookie */
6202 cookie->len = -1;
6203 } else if (tp->total_retrans) {
6204 /* SYN timed out and the SYN-ACK neither has a cookie nor
6205 * acknowledges data. Presumably the remote received only
6206 * the retransmitted (regular) SYNs: either the original
6207 * SYN-data or the corresponding SYN-ACK was dropped.
6208 */
6209 syn_drop = (cookie->len < 0 && data);
6210 } else if (cookie->len < 0 && !tp->syn_data) {
6211 /* We requested a cookie but didn't get it. If we did not use
6212 * the (old) exp opt format then try so next time (try_exp=1).
6213 * Otherwise we go back to use the RFC7413 opt (try_exp=2).
6214 */
6215 try_exp = tp->syn_fastopen_exp ? 2 : 1;
6216 }
6217
6218 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
6219
6220 if (data) { /* Retransmit unacked data in SYN */
6221 if (tp->total_retrans)
6222 tp->fastopen_client_fail = TFO_SYN_RETRANSMITTED;
6223 else
6224 tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
6225 skb_rbtree_walk_from(data)
6226 tcp_mark_skb_lost(sk, data);
6227 tcp_non_congestion_loss_retransmit(sk);
6228 NET_INC_STATS(sock_net(sk),
6229 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
6230 return true;
6231 }
6232 tp->syn_data_acked = tp->syn_data;
6233 if (tp->syn_data_acked) {
6234 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
6235 /* SYN-data is counted as two separate packets in tcp_ack() */
6236 if (tp->delivered > 1)
6237 --tp->delivered;
6238 }
6239
6240 tcp_fastopen_add_skb(sk, synack);
6241
6242 return false;
6243 }
6244
6245 static void smc_check_reset_syn(struct tcp_sock *tp)
6246 {
6247 #if IS_ENABLED(CONFIG_SMC)
6248 if (static_branch_unlikely(&tcp_have_smc)) {
6249 if (tp->syn_smc && !tp->rx_opt.smc_ok)
6250 tp->syn_smc = 0;
6251 }
6252 #endif
6253 }
6254
6255 static void tcp_try_undo_spurious_syn(struct sock *sk)
6256 {
6257 struct tcp_sock *tp = tcp_sk(sk);
6258 u32 syn_stamp;
6259
6260 /* undo_marker is set when SYN or SYNACK times out. The timeout is
6261 * spurious if the ACK's timestamp option echo value matches the
6262 * original SYN timestamp.
6263 */
6264 syn_stamp = tp->retrans_stamp;
6265 if (tp->undo_marker && syn_stamp && tp->rx_opt.saw_tstamp &&
6266 syn_stamp == tp->rx_opt.rcv_tsecr)
6267 tp->undo_marker = 0;
6268 }
6269
6270 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
6271 const struct tcphdr *th)
6272 {
6273 struct inet_connection_sock *icsk = inet_csk(sk);
6274 struct tcp_sock *tp = tcp_sk(sk);
6275 struct tcp_fastopen_cookie foc = { .len = -1 };
6276 int saved_clamp = tp->rx_opt.mss_clamp;
6277 bool fastopen_fail;
6278 SKB_DR(reason);
6279
6280 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
6281 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
6282 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
6283
6284 if (th->ack) {
6285 /* rfc793:
6286 * "If the state is SYN-SENT then
6287 * first check the ACK bit
6288 * If the ACK bit is set
6289 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
6290 * a reset (unless the RST bit is set, if so drop
6291 * the segment and return)"
6292 */
6293 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
6294 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
6295 /* Previous FIN/ACK or RST/ACK might be ignored. */
6296 if (icsk->icsk_retransmits == 0)
6297 inet_csk_reset_xmit_timer(sk,
6298 ICSK_TIME_RETRANS,
6299 TCP_TIMEOUT_MIN, TCP_RTO_MAX);
6300 goto reset_and_undo;
6301 }
6302
6303 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
6304 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
6305 tcp_time_stamp(tp))) {
6306 NET_INC_STATS(sock_net(sk),
6307 LINUX_MIB_PAWSACTIVEREJECTED);
6308 goto reset_and_undo;
6309 }
6310
6311 /* Now ACK is acceptable.
6312 *
6313 * "If the RST bit is set
6314 * If the ACK was acceptable then signal the user "error:
6315 * connection reset", drop the segment, enter CLOSED state,
6316 * delete TCB, and return."
6317 */
6318
6319 if (th->rst) {
6320 tcp_reset(sk, skb);
6321 consume:
6322 __kfree_skb(skb);
6323 return 0;
6324 }
6325
6326 /* rfc793:
6327 * "fifth, if neither of the SYN or RST bits is set then
6328 * drop the segment and return."
6329 *
6330 * See note below!
6331 * --ANK(990513)
6332 */
6333 if (!th->syn) {
6334 SKB_DR_SET(reason, TCP_FLAGS);
6335 goto discard_and_undo;
6336 }
6337 /* rfc793:
6338 * "If the SYN bit is on ...
6339 * are acceptable then ...
6340 * (our SYN has been ACKed), change the connection
6341 * state to ESTABLISHED..."
6342 */
6343
6344 tcp_ecn_rcv_synack(tp, th);
6345
6346 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
6347 tcp_try_undo_spurious_syn(sk);
6348 tcp_ack(sk, skb, FLAG_SLOWPATH);
6349
6350 /* Ok.. it's good. Set up sequence numbers and
6351 * move to established.
6352 */
6353 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
6354 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
6355
6356 /* RFC1323: The window in SYN & SYN/ACK segments is
6357 * never scaled.
6358 */
6359 tp->snd_wnd = ntohs(th->window);
6360
6361 if (!tp->rx_opt.wscale_ok) {
6362 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
6363 WRITE_ONCE(tp->window_clamp,
6364 min(tp->window_clamp, 65535U));
6365 }
6366
6367 if (tp->rx_opt.saw_tstamp) {
6368 tp->rx_opt.tstamp_ok = 1;
6369 tp->tcp_header_len =
6370 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
6371 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
6372 tcp_store_ts_recent(tp);
6373 } else {
6374 tp->tcp_header_len = sizeof(struct tcphdr);
6375 }
6376
6377 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
6378 tcp_initialize_rcv_mss(sk);
6379
6380 /* Remember, tcp_poll() does not lock socket!
6381 * Change state from SYN-SENT only after copied_seq
6382 * is initialized. */
6383 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
6384
6385 smc_check_reset_syn(tp);
6386
6387 smp_mb();
6388
6389 tcp_finish_connect(sk, skb);
6390
6391 fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
6392 tcp_rcv_fastopen_synack(sk, skb, &foc);
6393
6394 if (!sock_flag(sk, SOCK_DEAD)) {
6395 sk->sk_state_change(sk);
6396 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
6397 }
6398 if (fastopen_fail)
6399 return -1;
6400 if (sk->sk_write_pending ||
6401 READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept) ||
6402 inet_csk_in_pingpong_mode(sk)) {
6403 /* Save one ACK. Data will be ready after
6404 * several ticks, if write_pending is set.
6405 *
6406 * It may be deleted, but with this feature tcpdumps
6407 * look so _wonderfully_ clever, that I was not able
6408 * to stand against the temptation 8) --ANK
6409 */
6410 inet_csk_schedule_ack(sk);
6411 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
6412 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
6413 TCP_DELACK_MAX, TCP_RTO_MAX);
6414 goto consume;
6415 }
6416 tcp_send_ack(sk);
6417 return -1;
6418 }
6419
6420 /* No ACK in the segment */
6421
6422 if (th->rst) {
6423 /* rfc793:
6424 * "If the RST bit is set
6425 *
6426 * Otherwise (no ACK) drop the segment and return."
6427 */
6428 SKB_DR_SET(reason, TCP_RESET);
6429 goto discard_and_undo;
6430 }
6431
6432 /* PAWS check. */
6433 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
6434 tcp_paws_reject(&tp->rx_opt, 0)) {
6435 SKB_DR_SET(reason, TCP_RFC7323_PAWS);
6436 goto discard_and_undo;
6437 }
6438 if (th->syn) {
6439 /* We see a SYN without an ACK. It is an attempt at a
6440 * simultaneous connect with crossed SYNs.
6441 * In particular, it can be a connect to self.
6442 */
6443 tcp_set_state(sk, TCP_SYN_RECV);
6444
6445 if (tp->rx_opt.saw_tstamp) {
6446 tp->rx_opt.tstamp_ok = 1;
6447 tcp_store_ts_recent(tp);
6448 tp->tcp_header_len =
6449 sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
6450 } else {
6451 tp->tcp_header_len = sizeof(struct tcphdr);
6452 }
6453
6454 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
6455 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
6456 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
6457
6458 /* RFC1323: The window in SYN & SYN/ACK segments is
6459 * never scaled.
6460 */
6461 tp->snd_wnd = ntohs(th->window);
6462 tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
6463 tp->max_window = tp->snd_wnd;
6464
6465 tcp_ecn_rcv_syn(tp, th);
6466
6467 tcp_mtup_init(sk);
6468 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
6469 tcp_initialize_rcv_mss(sk);
6470
6471 tcp_send_synack(sk);
6472 #if 0
6473 /* Note, we could accept data and URG from this segment.
6474 * There are no obstacles to doing this (except that we must
6475 * either change tcp_recvmsg() to prevent it from returning data
6476 * before 3WHS completes per RFC793, or employ TCP Fast Open).
6477 *
6478 * However, if we ignore data in ACKless segments sometimes,
6479 * we have no reason to accept it at other times.
6480 * Also, the code doing this in step6 of tcp_rcv_state_process
6481 * does not seem flawless. So, discard the packet for sanity.
6482 * Uncomment this return to process the data.
6483 */
6484 return -1;
6485 #else
6486 goto consume;
6487 #endif
6488 }
6489 /* "fifth, if neither of the SYN or RST bits is set then
6490 * drop the segment and return."
6491 */
6492
6493 discard_and_undo:
6494 tcp_clear_options(&tp->rx_opt);
6495 tp->rx_opt.mss_clamp = saved_clamp;
6496 tcp_drop_reason(sk, skb, reason);
6497 return 0;
6498
6499 reset_and_undo:
6500 tcp_clear_options(&tp->rx_opt);
6501 tp->rx_opt.mss_clamp = saved_clamp;
6502 return 1;
6503 }
6504
6505 static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
6506 {
6507 struct tcp_sock *tp = tcp_sk(sk);
6508 struct request_sock *req;
6509
6510 /* If we are still handling the SYNACK RTO, see if timestamp ECR allows
6511 * undo. If peer SACKs triggered fast recovery, we can't undo here.
6512 */
6513 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
6514 tcp_try_undo_recovery(sk);
6515
6516 /* Reset rtx states to prevent spurious retransmits_timed_out() */
6517 tp->retrans_stamp = 0;
6518 inet_csk(sk)->icsk_retransmits = 0;
6519
6520 /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
6521 * we no longer need req so release it.
6522 */
6523 req = rcu_dereference_protected(tp->fastopen_rsk,
6524 lockdep_sock_is_held(sk));
6525 reqsk_fastopen_remove(sk, req, false);
6526
6527 /* Re-arm the timer because data may have been sent out.
6528 * This is similar to the regular data transmission case
6529 * when new data has just been ack'ed.
6530 *
6531 * (TFO) - we could try to be more aggressive and
6532 * retransmit any data sooner based on when it
6533 * was sent out.
6534 */
6535 tcp_rearm_rto(sk);
6536 }
6537
6538 /*
6539 * This function implements the receiving procedure of RFC 793 for
6540 * all states except ESTABLISHED and TIME_WAIT.
6541 * It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
6542 * address independent.
6543 */
6544
6545 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
6546 {
6547 struct tcp_sock *tp = tcp_sk(sk);
6548 struct inet_connection_sock *icsk = inet_csk(sk);
6549 const struct tcphdr *th = tcp_hdr(skb);
6550 struct request_sock *req;
6551 int queued = 0;
6552 bool acceptable;
6553 SKB_DR(reason);
6554
6555 switch (sk->sk_state) {
6556 case TCP_CLOSE:
6557 SKB_DR_SET(reason, TCP_CLOSE);
6558 goto discard;
6559
6560 case TCP_LISTEN:
6561 if (th->ack)
6562 return 1;
6563
6564 if (th->rst) {
6565 SKB_DR_SET(reason, TCP_RESET);
6566 goto discard;
6567 }
6568 if (th->syn) {
6569 if (th->fin) {
6570 SKB_DR_SET(reason, TCP_FLAGS);
6571 goto discard;
6572 }
6573 /* It is possible that we process SYN packets from backlog,
6574 * so we need to make sure to disable BH and RCU right there.
6575 */
6576 rcu_read_lock();
6577 local_bh_disable();
6578 acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
6579 local_bh_enable();
6580 rcu_read_unlock();
6581
6582 if (!acceptable)
6583 return 1;
6584 consume_skb(skb);
6585 return 0;
6586 }
6587 SKB_DR_SET(reason, TCP_FLAGS);
6588 goto discard;
6589
6590 case TCP_SYN_SENT:
6591 tp->rx_opt.saw_tstamp = 0;
6592 tcp_mstamp_refresh(tp);
6593 queued = tcp_rcv_synsent_state_process(sk, skb, th);
6594 if (queued >= 0)
6595 return queued;
6596
6597 /* Do step6 onward by hand. */
6598 tcp_urg(sk, skb, th);
6599 __kfree_skb(skb);
6600 tcp_data_snd_check(sk);
6601 return 0;
6602 }
6603
6604 tcp_mstamp_refresh(tp);
6605 tp->rx_opt.saw_tstamp = 0;
6606 req = rcu_dereference_protected(tp->fastopen_rsk,
6607 lockdep_sock_is_held(sk));
6608 if (req) {
6609 bool req_stolen;
6610
6611 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
6612 sk->sk_state != TCP_FIN_WAIT1);
6613
6614 if (!tcp_check_req(sk, skb, req, true, &req_stolen)) {
6615 SKB_DR_SET(reason, TCP_FASTOPEN);
6616 goto discard;
6617 }
6618 }
6619
6620 if (!th->ack && !th->rst && !th->syn) {
6621 SKB_DR_SET(reason, TCP_FLAGS);
6622 goto discard;
6623 }
6624 if (!tcp_validate_incoming(sk, skb, th, 0))
6625 return 0;
6626
6627 /* step 5: check the ACK field */
6628 acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
6629 FLAG_UPDATE_TS_RECENT |
6630 FLAG_NO_CHALLENGE_ACK) > 0;
6631
6632 if (!acceptable) {
6633 if (sk->sk_state == TCP_SYN_RECV)
6634 return 1; /* send one RST */
6635 tcp_send_challenge_ack(sk);
6636 SKB_DR_SET(reason, TCP_OLD_ACK);
6637 goto discard;
6638 }
6639 switch (sk->sk_state) {
6640 case TCP_SYN_RECV:
6641 tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */
6642 if (!tp->srtt_us)
6643 tcp_synack_rtt_meas(sk, req);
6644
6645 if (req) {
6646 tcp_rcv_synrecv_state_fastopen(sk);
6647 } else {
6648 tcp_try_undo_spurious_syn(sk);
6649 tp->retrans_stamp = 0;
6650 tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,
6651 skb);
6652 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
6653 }
6654 smp_mb();
6655 tcp_set_state(sk, TCP_ESTABLISHED);
6656 sk->sk_state_change(sk);
6657
6658 		/* Note that this wakeup is only for the marginal crossed-SYN case.
6659 		 * Passively opened sockets are not woken up, because
6660 		 * sk->sk_sleep == NULL and sk->sk_socket == NULL.
6661 		 */
6662 if (sk->sk_socket)
6663 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
6664
6665 tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
6666 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
6667 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
6668
6669 if (tp->rx_opt.tstamp_ok)
6670 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
6671
6672 if (!inet_csk(sk)->icsk_ca_ops->cong_control)
6673 tcp_update_pacing_rate(sk);
6674
6675 /* Prevent spurious tcp_cwnd_restart() on first data packet */
6676 tp->lsndtime = tcp_jiffies32;
6677
6678 tcp_initialize_rcv_mss(sk);
6679 tcp_fast_path_on(tp);
6680 if (sk->sk_shutdown & SEND_SHUTDOWN)
6681 tcp_shutdown(sk, SEND_SHUTDOWN);
6682 break;
6683
6684 case TCP_FIN_WAIT1: {
6685 int tmo;
6686
6687 if (req)
6688 tcp_rcv_synrecv_state_fastopen(sk);
6689
6690 if (tp->snd_una != tp->write_seq)
6691 break;
6692
6693 tcp_set_state(sk, TCP_FIN_WAIT2);
6694 WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN);
6695
6696 sk_dst_confirm(sk);
6697
6698 if (!sock_flag(sk, SOCK_DEAD)) {
6699 /* Wake up lingering close() */
6700 sk->sk_state_change(sk);
6701 break;
6702 }
6703
6704 if (READ_ONCE(tp->linger2) < 0) {
6705 tcp_done(sk);
6706 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
6707 return 1;
6708 }
6709 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
6710 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
6711 /* Receive out of order FIN after close() */
6712 if (tp->syn_fastopen && th->fin)
6713 tcp_fastopen_active_disable(sk);
6714 tcp_done(sk);
6715 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
6716 return 1;
6717 }
6718
6719 tmo = tcp_fin_time(sk);
6720 if (tmo > TCP_TIMEWAIT_LEN) {
6721 inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
6722 } else if (th->fin || sock_owned_by_user(sk)) {
6723 /* Bad case. We could lose such FIN otherwise.
6724 * It is not a big problem, but it looks confusing
6725 * and not so rare event. We still can lose it now,
6726 * if it spins in bh_lock_sock(), but it is really
6727 * marginal case.
6728 */
6729 inet_csk_reset_keepalive_timer(sk, tmo);
6730 } else {
6731 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
6732 goto consume;
6733 }
6734 break;
6735 }
6736
6737 case TCP_CLOSING:
6738 if (tp->snd_una == tp->write_seq) {
6739 tcp_time_wait(sk, TCP_TIME_WAIT, 0);
6740 goto consume;
6741 }
6742 break;
6743
6744 case TCP_LAST_ACK:
6745 if (tp->snd_una == tp->write_seq) {
6746 tcp_update_metrics(sk);
6747 tcp_done(sk);
6748 goto consume;
6749 }
6750 break;
6751 }
6752
6753 /* step 6: check the URG bit */
6754 tcp_urg(sk, skb, th);
6755
6756 /* step 7: process the segment text */
6757 switch (sk->sk_state) {
6758 case TCP_CLOSE_WAIT:
6759 case TCP_CLOSING:
6760 case TCP_LAST_ACK:
6761 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
6762 /* If a subflow has been reset, the packet should not
6763 * continue to be processed, drop the packet.
6764 */
6765 if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb))
6766 goto discard;
6767 break;
6768 }
6769 fallthrough;
6770 case TCP_FIN_WAIT1:
6771 case TCP_FIN_WAIT2:
6772 /* RFC 793 says to queue data in these states,
6773 * RFC 1122 says we MUST send a reset.
6774 * BSD 4.4 also does reset.
6775 */
6776 if (sk->sk_shutdown & RCV_SHUTDOWN) {
6777 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
6778 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
6779 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
6780 tcp_reset(sk, skb);
6781 return 1;
6782 }
6783 }
6784 fallthrough;
6785 case TCP_ESTABLISHED:
6786 tcp_data_queue(sk, skb);
6787 queued = 1;
6788 break;
6789 }
6790
6791 /* tcp_data could move socket to TIME-WAIT */
6792 if (sk->sk_state != TCP_CLOSE) {
6793 tcp_data_snd_check(sk);
6794 tcp_ack_snd_check(sk);
6795 }
6796
6797 if (!queued) {
6798 discard:
6799 tcp_drop_reason(sk, skb, reason);
6800 }
6801 return 0;
6802
6803 consume:
6804 __kfree_skb(skb);
6805 return 0;
6806 }
6807 EXPORT_SYMBOL(tcp_rcv_state_process);
6808
6809 static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
6810 {
6811 struct inet_request_sock *ireq = inet_rsk(req);
6812
6813 if (family == AF_INET)
6814 net_dbg_ratelimited("drop open request from %pI4/%u\n",
6815 &ireq->ir_rmt_addr, port);
6816 #if IS_ENABLED(CONFIG_IPV6)
6817 else if (family == AF_INET6)
6818 net_dbg_ratelimited("drop open request from %pI6/%u\n",
6819 &ireq->ir_v6_rmt_addr, port);
6820 #endif
6821 }
6822
6823 /* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
6824 *
6825 * If we receive a SYN packet with these bits set, it means a
6826 * network is playing bad games with TOS bits. In order to
6827 * avoid possible false congestion notifications, we disable
6828 * TCP ECN negotiation.
6829 *
6830  * Exception: tcp_ca wants ECN. This is required for DCTCP
6831  * congestion control: Linux DCTCP asserts ECT on all packets,
6832  * including SYN, which is the most effective approach; however,
6833  * others, such as FreeBSD, do not.
6834 *
6835 * Exception: At least one of the reserved bits of the TCP header (th->res1) is
6836 * set, indicating the use of a future TCP extension (such as AccECN). See
6837 * RFC8311 §4.3 which updates RFC3168 to allow the development of such
6838 * extensions.
6839 */
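/* Note on the knobs involved (semantics as commonly documented for the
 * sysctl, summarized here for convenience): ecn_ok below is derived from
 * the per-netns net.ipv4.tcp_ecn sysctl - 0 disables ECN, 1 requests and
 * accepts ECN, 2 (the usual default) only accepts ECN when the peer
 * requests it - and can also be enabled per route via the ECN dst
 * feature.  Any non-zero setting lets a listener grant the peer's
 * request, e.g.:
 *
 *	sysctl -w net.ipv4.tcp_ecn=1
 */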
6840 static void tcp_ecn_create_request(struct request_sock *req,
6841 const struct sk_buff *skb,
6842 const struct sock *listen_sk,
6843 const struct dst_entry *dst)
6844 {
6845 const struct tcphdr *th = tcp_hdr(skb);
6846 const struct net *net = sock_net(listen_sk);
6847 bool th_ecn = th->ece && th->cwr;
6848 bool ect, ecn_ok;
6849 u32 ecn_ok_dst;
6850
6851 if (!th_ecn)
6852 return;
6853
6854 ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
6855 ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
6856 ecn_ok = READ_ONCE(net->ipv4.sysctl_tcp_ecn) || ecn_ok_dst;
6857
6858 if (((!ect || th->res1) && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
6859 (ecn_ok_dst & DST_FEATURE_ECN_CA) ||
6860 tcp_bpf_ca_needs_ecn((struct sock *)req))
6861 inet_rsk(req)->ecn_ok = 1;
6862 }
6863
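/* Initialize the new request sock from the options parsed out of the
 * incoming SYN (MSS, timestamps, SACK, window scaling) and from the
 * SYN's source/destination ports and mark.
 */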
6864 static void tcp_openreq_init(struct request_sock *req,
6865 const struct tcp_options_received *rx_opt,
6866 struct sk_buff *skb, const struct sock *sk)
6867 {
6868 struct inet_request_sock *ireq = inet_rsk(req);
6869
6870 req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */
6871 tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
6872 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
6873 tcp_rsk(req)->snt_synack = 0;
6874 tcp_rsk(req)->last_oow_ack_time = 0;
6875 req->mss = rx_opt->mss_clamp;
6876 req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
6877 ireq->tstamp_ok = rx_opt->tstamp_ok;
6878 ireq->sack_ok = rx_opt->sack_ok;
6879 ireq->snd_wscale = rx_opt->snd_wscale;
6880 ireq->wscale_ok = rx_opt->wscale_ok;
6881 ireq->acked = 0;
6882 ireq->ecn_ok = 0;
6883 ireq->ir_rmt_port = tcp_hdr(skb)->source;
6884 ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
6885 ireq->ir_mark = inet_request_mark(sk, skb);
6886 #if IS_ENABLED(CONFIG_SMC)
6887 ireq->smc_ok = rx_opt->smc_ok && !(tcp_sk(sk)->smc_hs_congested &&
6888 tcp_sk(sk)->smc_hs_congested(sk));
6889 #endif
6890 }
6891
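/* Allocate a request sock for an incoming connection request and set up
 * its protocol-independent fields (netns, family, state, initial timeout).
 */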
6892 struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
6893 struct sock *sk_listener,
6894 bool attach_listener)
6895 {
6896 struct request_sock *req = reqsk_alloc(ops, sk_listener,
6897 attach_listener);
6898
6899 if (req) {
6900 struct inet_request_sock *ireq = inet_rsk(req);
6901
6902 ireq->ireq_opt = NULL;
6903 #if IS_ENABLED(CONFIG_IPV6)
6904 ireq->pktopts = NULL;
6905 #endif
6906 atomic64_set(&ireq->ir_cookie, 0);
6907 ireq->ireq_state = TCP_NEW_SYN_RECV;
6908 write_pnet(&ireq->ireq_net, sock_net(sk_listener));
6909 ireq->ireq_family = sk_listener->sk_family;
6910 req->timeout = TCP_TIMEOUT_INIT;
6911 }
6912
6913 return req;
6914 }
6915 EXPORT_SYMBOL(inet_reqsk_alloc);
6916
6917 /*
6918 * Return true if a syncookie should be sent
6919 */
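/* The tcp_syncookies sysctl read below is assumed to follow its usual
 * semantics: 0 never sends cookies, 1 (the common default) sends them
 * only once the SYN backlog overflows, and 2 sends them unconditionally
 * (in which case the "possible SYN flooding" warning is suppressed).
 */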
6920 static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
6921 {
6922 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
6923 const char *msg = "Dropping request";
6924 struct net *net = sock_net(sk);
6925 bool want_cookie = false;
6926 u8 syncookies;
6927
6928 syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
6929
6930 #ifdef CONFIG_SYN_COOKIES
6931 if (syncookies) {
6932 msg = "Sending cookies";
6933 want_cookie = true;
6934 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
6935 } else
6936 #endif
6937 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
6938
6939 if (!READ_ONCE(queue->synflood_warned) && syncookies != 2 &&
6940 xchg(&queue->synflood_warned, 1) == 0) {
6941 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_family == AF_INET6) {
6942 net_info_ratelimited("%s: Possible SYN flooding on port [%pI6c]:%u. %s.\n",
6943 proto, inet6_rcv_saddr(sk),
6944 sk->sk_num, msg);
6945 } else {
6946 net_info_ratelimited("%s: Possible SYN flooding on port %pI4:%u. %s.\n",
6947 proto, &sk->sk_rcv_saddr,
6948 sk->sk_num, msg);
6949 }
6950 }
6951
6952 return want_cookie;
6953 }
6954
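/* If the listener enabled TCP_SAVE_SYN, stash a copy of the SYN's headers
 * on the request sock (network and TCP headers, or the full frame
 * including the MAC header when save_syn == 2) so they can later be read
 * back with the TCP_SAVED_SYN socket option.
 */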
6955 static void tcp_reqsk_record_syn(const struct sock *sk,
6956 struct request_sock *req,
6957 const struct sk_buff *skb)
6958 {
6959 if (tcp_sk(sk)->save_syn) {
6960 u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
6961 struct saved_syn *saved_syn;
6962 u32 mac_hdrlen;
6963 void *base;
6964
6965 if (tcp_sk(sk)->save_syn == 2) { /* Save full header. */
6966 base = skb_mac_header(skb);
6967 mac_hdrlen = skb_mac_header_len(skb);
6968 len += mac_hdrlen;
6969 } else {
6970 base = skb_network_header(skb);
6971 mac_hdrlen = 0;
6972 }
6973
6974 saved_syn = kmalloc(struct_size(saved_syn, data, len),
6975 GFP_ATOMIC);
6976 if (saved_syn) {
6977 saved_syn->mac_hdrlen = mac_hdrlen;
6978 saved_syn->network_hdrlen = skb_network_header_len(skb);
6979 saved_syn->tcp_hdrlen = tcp_hdrlen(skb);
6980 memcpy(saved_syn->data, base, len);
6981 req->saved_syn = saved_syn;
6982 }
6983 }
6984 }
6985
6986 /* If a SYN cookie is required and supported, returns a clamped MSS value to be
6987 * used for SYN cookie generation.
6988 */
6989 u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
6990 const struct tcp_request_sock_ops *af_ops,
6991 struct sock *sk, struct tcphdr *th)
6992 {
6993 struct tcp_sock *tp = tcp_sk(sk);
6994 u16 mss;
6995
6996 if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 &&
6997 !inet_csk_reqsk_queue_is_full(sk))
6998 return 0;
6999
7000 if (!tcp_syn_flood_action(sk, rsk_ops->slab_name))
7001 return 0;
7002
7003 if (sk_acceptq_is_full(sk)) {
7004 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
7005 return 0;
7006 }
7007
7008 mss = tcp_parse_mss_option(th, tp->rx_opt.user_mss);
7009 if (!mss)
7010 mss = af_ops->mss_clamp;
7011
7012 return mss;
7013 }
7014 EXPORT_SYMBOL_GPL(tcp_get_syncookie_mss);
7015
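/* Listener-side handling of an incoming SYN: allocate and fill a request
 * sock, then answer with a regular SYN-ACK, a SYN cookie, or a TCP Fast
 * Open child socket, queueing or dropping the request as appropriate.
 */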
7016 int tcp_conn_request(struct request_sock_ops *rsk_ops,
7017 const struct tcp_request_sock_ops *af_ops,
7018 struct sock *sk, struct sk_buff *skb)
7019 {
7020 struct tcp_fastopen_cookie foc = { .len = -1 };
7021 __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
7022 struct tcp_options_received tmp_opt;
7023 struct tcp_sock *tp = tcp_sk(sk);
7024 struct net *net = sock_net(sk);
7025 struct sock *fastopen_sk = NULL;
7026 struct request_sock *req;
7027 bool want_cookie = false;
7028 struct dst_entry *dst;
7029 struct flowi fl;
7030 u8 syncookies;
7031
7032 syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
7033
7034 	/* TW buckets are converted to open requests without
7035 	 * limitation: they conserve resources and the peer is
7036 	 * evidently a real one.
7037 	 */
7038 if ((syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) {
7039 want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name);
7040 if (!want_cookie)
7041 goto drop;
7042 }
7043
7044 if (sk_acceptq_is_full(sk)) {
7045 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
7046 goto drop;
7047 }
7048
7049 req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
7050 if (!req)
7051 goto drop;
7052
7053 req->syncookie = want_cookie;
7054 tcp_rsk(req)->af_specific = af_ops;
7055 tcp_rsk(req)->ts_off = 0;
7056 #if IS_ENABLED(CONFIG_MPTCP)
7057 tcp_rsk(req)->is_mptcp = 0;
7058 #endif
7059
7060 tcp_clear_options(&tmp_opt);
7061 tmp_opt.mss_clamp = af_ops->mss_clamp;
7062 tmp_opt.user_mss = tp->rx_opt.user_mss;
7063 tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
7064 want_cookie ? NULL : &foc);
7065
7066 if (want_cookie && !tmp_opt.saw_tstamp)
7067 tcp_clear_options(&tmp_opt);
7068
7069 if (IS_ENABLED(CONFIG_SMC) && want_cookie)
7070 tmp_opt.smc_ok = 0;
7071
7072 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
7073 tcp_openreq_init(req, &tmp_opt, skb, sk);
7074 inet_rsk(req)->no_srccheck = inet_test_bit(TRANSPARENT, sk);
7075
7076 /* Note: tcp_v6_init_req() might override ir_iif for link locals */
7077 inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
7078
7079 dst = af_ops->route_req(sk, skb, &fl, req);
7080 if (!dst)
7081 goto drop_and_free;
7082
7083 if (tmp_opt.tstamp_ok)
7084 tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
7085
7086 if (!want_cookie && !isn) {
7087 int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
7088
7089 		/* Kill the following clause if you dislike this heuristic. */
7090 if (!syncookies &&
7091 (max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
7092 (max_syn_backlog >> 2)) &&
7093 !tcp_peer_is_proven(req, dst)) {
7094 			/* Without syncookies, the last quarter of the
7095 			 * backlog is reserved for destinations
7096 			 * proven to be alive.
7097 			 * This means we keep communicating with
7098 			 * destinations already remembered from
7099 			 * before the SYN flood started.
7100 			 */
7101 pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
7102 rsk_ops->family);
7103 goto drop_and_release;
7104 }
7105
7106 isn = af_ops->init_seq(skb);
7107 }
7108
7109 tcp_ecn_create_request(req, skb, sk, dst);
7110
7111 if (want_cookie) {
7112 isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
7113 if (!tmp_opt.tstamp_ok)
7114 inet_rsk(req)->ecn_ok = 0;
7115 }
7116
7117 tcp_rsk(req)->snt_isn = isn;
7118 tcp_rsk(req)->txhash = net_tx_rndhash();
7119 tcp_rsk(req)->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
7120 tcp_openreq_init_rwin(req, sk, dst);
7121 sk_rx_queue_set(req_to_sk(req), skb);
7122 if (!want_cookie) {
7123 tcp_reqsk_record_syn(sk, req, skb);
7124 fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
7125 }
7126 if (fastopen_sk) {
7127 af_ops->send_synack(fastopen_sk, dst, &fl, req,
7128 &foc, TCP_SYNACK_FASTOPEN, skb);
7129 /* Add the child socket directly into the accept queue */
7130 if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
7131 reqsk_fastopen_remove(fastopen_sk, req, false);
7132 bh_unlock_sock(fastopen_sk);
7133 sock_put(fastopen_sk);
7134 goto drop_and_free;
7135 }
7136 sk->sk_data_ready(sk);
7137 bh_unlock_sock(fastopen_sk);
7138 sock_put(fastopen_sk);
7139 } else {
7140 tcp_rsk(req)->tfo_listener = false;
7141 if (!want_cookie) {
7142 req->timeout = tcp_timeout_init((struct sock *)req);
7143 if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
7144 req->timeout))) {
7145 reqsk_free(req);
7146 return 0;
7147 }
7148
7149 }
7150 af_ops->send_synack(sk, dst, &fl, req, &foc,
7151 !want_cookie ? TCP_SYNACK_NORMAL :
7152 TCP_SYNACK_COOKIE,
7153 skb);
7154 if (want_cookie) {
7155 reqsk_free(req);
7156 return 0;
7157 }
7158 }
7159 reqsk_put(req);
7160 return 0;
7161
7162 drop_and_release:
7163 dst_release(dst);
7164 drop_and_free:
7165 __reqsk_free(req);
7166 drop:
7167 tcp_listendrop(sk);
7168 return 0;
7169 }
7170 EXPORT_SYMBOL(tcp_conn_request);
7171