xref: /openbmc/linux/net/ipv4/tcp_minisocks.c (revision 9d64fc08)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

int sysctl_tcp_abort_on_overflow __read_mostly;

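/*
 * A segment [seq, end_seq) is acceptable if it overlaps the receive
 * window [s_win, e_win).  For example, with s_win = 100 and e_win = 200,
 * a segment with seq = 150, end_seq = 250 is accepted (it starts inside
 * the window), as is a zero-length segment with seq == end_seq == s_win
 * (a bare ACK at the left edge of the window).
 */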
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting its FIN (and, probably, a tail of data) and one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering off their
 *   path. That is not quite correct. This timeout is calculated so that
 *   it exceeds the maximal retransmission timeout by enough to allow for
 *   the loss of one (or more) segments sent by the peer and of our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
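/*
 * The return value tells the caller what to do with the segment:
 * TCP_TW_SUCCESS - segment was consumed or dropped, nothing to send;
 * TCP_TW_ACK     - send an ACK (the tw reference is kept for the caller);
 * TCP_TW_RST     - send a RST;
 * TCP_TW_SYN     - acceptable new SYN, hand it to a listener to reopen
 *                  the connection.
 */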
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after the half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now the real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it if it is not an
	   old duplicate and we are not in danger of being killed by
	   delayed old duplicates. The RFC check - that it carries a newer
	   sequence number - works at rates < 40 Mbit/sec. However, if
	   PAWS works, it is reliable, and we may even relax the silly
	   sequence space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive a RST in reply to
	   our SYN-ACK), we must return the socket to the time-wait state.
	   That is not good, but not fatal yet.
	 */

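	/* A new SYN is acceptable if its sequence number or its timestamp
	 * is strictly newer than what we last saw.  The new ISN is picked
	 * just above the old send window (tw_snd_nxt + 65535 + 2), so the
	 * reopened connection satisfies requirement (1) of the RFC 1122
	 * text quoted above.
	 */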
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
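		/* rto = 4*RTO - RTO/2 = 3.5 * RTO, i.e. enough time for a
		 * retransmitted FIN from the peer plus our lost ACK to be
		 * replayed (see the TIME-WAIT timeout discussion above).
		 */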
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

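		/* For a true TIME_WAIT the timer always runs for the full
		 * TCP_TIMEWAIT_LEN (60 seconds); only the dead FIN_WAIT2
		 * substate uses the caller-supplied (>= 3.5*RTO) timeout.
		 */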
		tw->tw_timeout = TCP_TIMEWAIT_LEN;
		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
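	/* Note: "x ? : y" is the GNU C shorthand for "x ? x : y", so a
	 * zero window_clamp falls back to the route's cached RTAX_WINDOW
	 * metric.
	 */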

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = tcp_jiffies32;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp = 0;
		newtp->rack.advanced = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
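	/* flg keeps only the RST, SYN and ACK bits, so exact comparisons
	 * such as "flg == TCP_FLAG_SYN" below match a pure SYN with
	 * neither ACK nor RST set.
	 */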
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot trust it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* The following reproduces the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC793.
	   It is broken, however; it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: this case is both harmless and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept the ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate the SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, so create the full socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO, we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to outside attacks aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
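	/* Snapshot sk_state before tcp_rcv_state_process() runs, so the
	 * SYN_RECV -> ESTABLISHED transition can be detected below to
	 * wake up the listening parent.
	 */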
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
832