/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_abort_on_overflow __read_mostly;

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
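
/* A hedged illustration (not part of the original file): tcp_in_window()
 * works modulo 2^32 because before()/after() from <net/tcp.h> compare
 * signed 32-bit differences, so a receive window that spans the sequence
 * wrap still accepts an overlapping segment.  With hypothetical values:
 *
 *	tcp_in_window(0xfffffff8, 0x00000008,	// seq, end_seq
 *		      0xfffffff0, 0x00000010)	// s_win, e_win
 *
 *	after(0x00000008, 0xfffffff0)  -> true	(segment end past window start)
 *	before(0xfffffff8, 0x00000010) -> true	(segment start before window end)
 *	=> true: the segment overlaps the receive window.
 */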

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK.  Note: we do not put the bucket; it will be
		 * released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop the skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
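
/* A minimal sketch (an assumed caller, not a real call site in this file)
 * of the reference-counting contract above: on TCP_TW_ACK the timewait
 * bucket is still held and the caller must send the ACK and release it;
 * on TCP_TW_SUCCESS the helper has already dropped the reference.
 *
 *	switch (tcp_timewait_check_oow_rate_limit(tw, skb, mib_idx)) {
 *	case TCP_TW_ACK:
 *		send_timewait_ack(skb);	// hypothetical helper
 *		inet_twsk_put(tw);	// caller releases the bucket
 *		break;
 *	case TCP_TW_SUCCESS:
 *		break;			// bucket already released, drop skb
 *	default:
 *		break;
 *	}
 */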

/*
 * * The main purpose of TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path.  That is not quite correct.  This timeout is calculated
 *   so that it exceeds the maximal retransmission timeout by enough to
 *   allow the loss of one (or more) segments sent by the peer and of our
 *   ACKs.  This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT with
 *   these semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates (e.g. based
 *   on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless.  It means that, strictly speaking, we must
 * spinlock it.  I do not want to!  Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;
	struct inet_timewait_death_row *tcp_death_row = &sock_net((struct sock *)tw)->ipv4.tcp_death_row;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.rcv_tsecr	-= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after the half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row->sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_reschedule(tw, tw->tw_timeout);
		else
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment; it may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that it carries a
	   newer sequence number, works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable and, furthermore,
	   we can even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   the SYN-ACK), we must return the socket to time-wait state. It
	   is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
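
/* A worked example (hypothetical values, not from the original file) of
 * the ISN chosen above when a new SYN reopens a TIME-WAIT connection: the
 * new initial sequence number is pushed 65535 + 2 past the old snd_nxt,
 * so it is strictly larger than anything the previous incarnation used,
 * satisfying condition (1) of the RFC 1122 rule quoted earlier.  Zero is
 * skipped because a zero tcp_tw_isn means "no TIME-WAIT reuse" to the SYN
 * processing code:
 *
 *	tcptw->tw_snd_nxt = 0x00010000;
 *	isn = 0x00010000 + 65535 + 2;	// = 0x00020001
 *	if (isn == 0)			// only possible after u32 wraparound
 *		isn++;
 */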

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	bool recycle_ok = false;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (tcp_death_row->sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;

			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
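
/* A hedged note on the arithmetic above (hypothetical numbers): the shift
 * expression computes 3.5 * RTO without a multiplication, e.g. with an
 * RTO of 200 jiffies:
 *
 *	rto = (200 << 2) - (200 >> 1) = 800 - 100 = 700	// 3.5 * RTO
 *
 * This lower-bounds the FIN-WAIT-2 timeout, and with tw_recycle enabled
 * it replaces TCP_TIMEWAIT_LEN as the whole TIME-WAIT timeout.
 */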

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
	int full_space = tcp_full_space(sk_listener);
	int mss = dst_metric_advmss(dst);
	u32 window_clamp;
	__u8 rcv_wscale;

	if (user_mss && user_mss < mss)
		mss = user_mss;

	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		dst_metric(dst, RTAX_INITRWND));
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
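
/* A small sketch (hypothetical values, not from the original file) of the
 * clamping above: with a user-locked receive buffer yielding
 * full_space = 65535 and a cached route metric of RTAX_WINDOW = 262144,
 * the clamp is pulled down to the buffer-derived space before
 * tcp_select_initial_window() runs:
 *
 *	req->rsk_window_clamp = 262144;		// from dst_metric()
 *	if (sk_userlocks & SOCK_RCVBUF_LOCK &&
 *	    (262144 > 65535 || 262144 == 0))
 *		req->rsk_window_clamp = 65535;	// full_space wins
 */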

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
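
/* A hedged usage note (an assumption about userspace tooling, not from
 * this file): the RTAX_CC_ALGO metric consumed above is typically set
 * per route with iproute2, e.g.
 *
 *	ip route add 10.0.0.0/8 via 192.168.1.1 congctl reno
 *	ip route add 10.1.0.0/16 via 192.168.1.1 congctl lock reno
 *
 * where "lock" makes tcp_ca_dst_locked() true, pinning the child socket
 * to the route-selected congestion control.
 */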

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here: the tp of the
 * listening socket already contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = treq->snt_synack.stamp_jiffies;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp.v64 = 0;
		newtp->rack.advanced = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
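
/* A worked example (hypothetical values) of the snd_wnd initialisation
 * above: with window scaling negotiated (wscale_ok) and the peer's
 * snd_wscale = 7, a raw header window of 512 gives
 *
 *	newtp->snd_wnd = ntohs(th->window) << 7 = 512 << 7 = 65536
 *
 * whereas without window scaling the shift is 0 and window_clamp is also
 * capped at 65535, the largest window expressible in the 16-bit field.
 */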

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it misbehaves only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: this case is both harmless and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not; hence, neither do we.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
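
/* A worked example (hypothetical values) of the ts_recent_stamp estimate
 * used above: a request socket that has already retransmitted its SYNACK
 * twice (num_timeout = 2) saw the peer's last timestamp roughly
 * (TCP_TIMEOUT_INIT/HZ) << 2 seconds ago.  With a TCP_TIMEOUT_INIT of one
 * second:
 *
 *	tmp_opt.ts_recent_stamp = get_seconds() - (1 << 2);	// ~4s ago
 *
 * which is plenty accurate for the PAWS idle-time check (on the order of
 * 24 days) done in tcp_paws_reject().
 */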

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us anymore.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
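
/* A hedged sketch (not a real call site) of the locking contract implied
 * by the unlock/put at the end of tcp_child_process(): the caller enters
 * holding the child's bh lock and a reference, and both are released
 * inside, so the caller must not touch the child afterwards.
 *
 *	bh_lock_sock(child);			// taken by the caller
 *	ret = tcp_child_process(parent, child, skb);
 *	// no bh_unlock_sock(child) or sock_put(child) here:
 *	// tcp_child_process() has already released both
 */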