// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
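
/*
 * Illustrative sketch of the check above (comment only, all values
 * hypothetical): for a receive window covering [s_win, e_win) =
 * [1000, 2000), tcp_in_window() answers:
 *
 *	tcp_in_window(1000, 1000, 1000, 2000)	-> true  (bare ACK at the left edge)
 *	tcp_in_window( 900, 1100, 1000, 2000)	-> true  (overlaps the window)
 *	tcp_in_window( 900, 1000, 1000, 2000)	-> false (ends before the window opens)
 *	tcp_in_window(2000, 2000, 1000, 2000)	-> true  (zero-length probe at e_win)
 */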

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note: we do not put the bucket here;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop the skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. That is not quite correct. This timeout is calculated so
 *   that it exceeds the maximal retransmission timeout by enough to
 *   tolerate the loss of one (or more) segments sent by the peer and of
 *   our ACKs. This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment; it may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable, and moreover
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive a RST in reply to
	   the SYN-ACK), we must return the socket to time-wait state.
	   It is not good, but not fatal yet.
	 */

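	/* A quick sketch of the ISN bump below (hypothetical numbers): if the
	 * old incarnation finished with tw_snd_nxt == 65536, the new
	 * connection is offered isn = 65536 + 65535 + 2 = 131073, past
	 * anything the peer could have seen from the old connection even
	 * with a maximal unscaled window.  The isn == 0 case is skipped
	 * because a zero tcp_tw_isn is treated as "no TIME-WAIT ISN" by the
	 * SYN processing code.
	 */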
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
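
/*
 * Rough caller contract, as implemented above (the actual dispatch lives in
 * the IPv4/IPv6 receive paths, not in this file): TCP_TW_SYN hands the SYN
 * to a listener with tcp_tw_isn set; TCP_TW_ACK and TCP_TW_RST ask the
 * caller to emit an ACK or RST while the tw reference is still held;
 * TCP_TW_SUCCESS means the segment was consumed and the reference has
 * already been dropped.
 */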

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!tcp_alloc_md5sig_pool())
			goto out_free;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
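		/* The shift arithmetic below computes 3.5 * RTO without a
		 * multiply: (rto << 2) - (rto >> 1) == 4*RTO - RTO/2.  E.g.
		 * (hypothetical value) icsk_rto == 200ms yields a 700ms
		 * floor for the timeout.
		 */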
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= sk->sk_priority;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
		tw->tw_txhash		= sk->sk_txhash;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BHs are
		 * disabled in the following section; otherwise the timer
		 * handler could run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key) {
			kfree_rcu(twsk->tw_md5_key, rcu);
			static_branch_slow_dec_deferred(&tcp_md5_needed);
		}
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo);
			purged_once = true;
		}
	}
}

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their values could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* Limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
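
/*
 * Worked example for the clamping above (all numbers hypothetical): with a
 * listener whose tcp_full_space() is 65536 bytes and SOCK_RCVBUF_LOCK set,
 * a route metric RTAX_WINDOW of 1 MB is cut back to 65536, so the
 * advertised window can never outgrow the locked receive buffer.  And if a
 * BPF program asks for rcv_wnd == 128 segments at mss == 1460, full_space
 * is raised to 128 * 1460 = 186880 so the requested initial window fits.
 */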

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice has been made yet, assign the current system
	 * default CA.
	 */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
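
/*
 * For reference, the per-route RTAX_CC_ALGO metric consulted above is the
 * one set from userspace with iproute2, e.g. (hypothetical destination):
 *
 *	ip route add 192.0.2.0/24 via 198.51.100.1 congctl dctcp
 *
 * so children accepted for that destination start out with dctcp rather
 * than the system default congestion control.
 */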

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

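	/* E.g. (hypothetical values): if the peer's segment carried a raw
	 * window of 0xffff and snd_wscale == 7 was negotiated, snd_wnd
	 * above becomes 65535 << 7 = 8388480 bytes.
	 */
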
	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->undo_marker = treq->snt_isn;
		newtp->retrans_stamp = div_u64(treq->snt_synack,
					       USEC_PER_SEC / TCP_TS_HZ);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true timestamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet,
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* The following reproduces the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC793.
	   It is broken only in that it does not work when SYNs are
	   crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   A malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes.
	   So does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: this case is both harmless and rare.  The possibility is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   of the SYNACK both here and in tcp_rcv_state_process();
	   tcp_rcv_state_process() does not, hence we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	/* TODO: We probably should defer ts_recent change once
	 * we take ownership of @req.
	 */
	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* The ACK sequence was verified above; just make sure the ACK flag
	 * is set.  If it is not set, silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACKs. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
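
	/* For reference (userspace side, hypothetical values), the bare-ACK
	 * drop above is what a listener opts into with:
	 *
	 *	int secs = 5;
	 *	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
	 *		   &secs, sizeof(secs));
	 *
	 * The connection is then only surfaced to accept() once data
	 * arrives or the deferral period is exhausted.
	 */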

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* Record sk_napi_id and sk_rx_queue_mapping of the child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wake up the parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table, and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);

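/*
 * Sketch of the expected calling convention (simplified; the real call
 * sites are in the IPv4/IPv6 receive paths): the child arrives locked,
 * because it was created via inet_csk_clone_lock(), and holds a reference;
 * tcp_child_process() consumes both.
 *
 *	if (nsk != sk) {
 *		if (tcp_child_process(sk, nsk, skb))
 *			send_reset(nsk, skb);	// non-zero: reset requested
 *	}
 *
 * where nsk came from tcp_check_req() and send_reset() stands in for the
 * family-specific reset helper.
 */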