/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
	/* Short-time timewait calendar */
	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);

/* VJ's idea. Save the last timestamp seen from this destination and hold
 * it for at least the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections before they enter the
 * synchronized state.
 */

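/*
 * Helper for tcp_tw_recycle: propagate the freshest timestamp seen from
 * this peer into its inet_peer entry.  The cached stamp is overwritten
 * only when our ts_recent is at least as new (the signed 32-bit compare
 * copes with timestamp wraparound), or when the cached stamp is more
 * than TCP_PAWS_MSL seconds old and no newer than our own
 * ts_recent_stamp, i.e. stale enough to be replaced safely.
 */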
static bool tcp_remember_stamp(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_peer *peer;

	peer = icsk->icsk_af_ops->get_peer(sk);
	if (peer) {
		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
			peer->tcp_ts = tp->rx_opt.ts_recent;
		}
		return true;
	}

	return false;
}

static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	const struct tcp_timewait_sock *tcptw;
	struct sock *sk = (struct sock *) tw;
	struct inet_peer *peer;

	tcptw = tcp_twsk(sk);
	peer = tcptw->tw_peer;
	if (peer) {
		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			peer->tcp_ts	   = tcptw->tw_ts_recent;
		}
		return true;
	}
	return false;
}

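/*
 * Is the segment [seq, end_seq) at least partly inside the receive
 * window [s_win, e_win)?  Zero-length segments sitting exactly on a
 * window edge are also acceptable: e.g. a bare ACK with
 * seq == end_seq == s_win passes even when the window is zero.
 */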
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING
 *   retransmitting FIN (and, probably, a tail of data) and one or more
 *   of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path.  That is not quite correct.  This timeout is calculated
 *   so that it exceeds the maximal retransmission timeout by enough to
 *   allow losing one (or more) segments sent by the peer and our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless.  Strictly speaking, this means we must
 * spinlock it.  I do not want to!  Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
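/*
 * The return value tells the caller (see tcp_v4_rcv()) how to dispose of
 * the segment: TCP_TW_SUCCESS - drop it silently, TCP_TW_ACK - reply
 * with an ACK, TCP_TW_RST - reply with a reset, TCP_TW_SYN - treat it
 * as a SYN for a new incarnation of this connection.
 */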
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment here can only be a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are in no danger of being killed
	   by delayed old duplicates. The RFC check, that the SYN must
	   carry a newer sequence number, works only at rates below
	   ~40 Mbit/sec. However, if PAWS works, it is reliable, and
	   moreover we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN
	   turns out to be an old duplicate (i.e. we receive an RST in
	   reply to our SYN-ACK), we must return the socket to the
	   time-wait state. That is not good, but not fatal yet.
	 */

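	/* A new SYN restarting this connection tuple is acceptable.  Its
	 * initial sequence number is placed beyond anything the old
	 * incarnation may have used: tw_snd_nxt plus the maximal unscaled
	 * window (65535) plus 2.  It is passed to the listener via
	 * TCP_SKB_CB(skb)->when.
	 */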
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	bool recycle_ok = false;
	bool recycle_on = false;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp) {
		recycle_ok = tcp_remember_stamp(sk);
		recycle_on = true;
	}

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
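		/* (rto << 2) - (rto >> 1) is 4*RTO - RTO/2 = 3.5*RTO,
		 * long enough to ride out a retransmitted FIN from the
		 * peer together with our lost ACK.
		 */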
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);
		struct inet_peer *peer = NULL;

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			tw6->tw_v6_daddr = np->daddr;
			tw6->tw_v6_rcv_saddr = np->rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_ipv6only = np->ipv6only;
		}
#endif

		if (recycle_on)
			peer = icsk->icsk_af_ops->get_peer(sk);
		tcptw->tw_peer = peer;
		if (peer)
			atomic_inc(&peer->refcnt);

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

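		/* With recycling, PAWS alone protects against old
		 * duplicates, so 3.5*RTO is sufficient; otherwise the
		 * full TCP_TIMEWAIT_LEN (60 seconds) must elapse.
		 */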
		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_peer)
		inet_putpeer(twsk->tw_peer);
#ifdef CONFIG_TCP_MD5SIG
	if (twsk->tw_md5_key) {
		tcp_free_md5sig_pool();
		kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here: the tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);
		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;

		/* TCP Cookie Transactions require space for the cookie pair,
		 * as it differs for each connection.  There is no need to
		 * copy any s_data_payload stored at the original socket.
		 * Failure will prevent resuming the connection.
		 *
		 * Presumed copied, in order of appearance:
		 *	cookie_in_always, cookie_out_never
		 */
		if (oldcvp != NULL) {
			struct tcp_cookie_values *newcvp =
				kzalloc(sizeof(*newtp->cookie_values),
					GFP_ATOMIC);

			if (newcvp != NULL) {
				kref_init(&newcvp->kref);
				newcvp->cookie_desired =
						oldcvp->cookie_desired;
				newtp->cookie_values = newcvp;
			} else {
				/* Not Yet Implemented */
				newtp->cookie_values = NULL;
			}
		}

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;

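		/* Send-side sequence numbers start one past the ISN of our
		 * SYN-ACK; tcp_s_data_size() also accounts for any TCP
		 * Cookie Transactions s_data payload carried on the
		 * SYN-ACK, which consumes sequence space too.
		 */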
		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
		    !try_module_get(newicsk->icsk_ca_ops->owner))
			newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
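		/* The skb here is the third-step ACK of the handshake,
		 * not the SYN, so its advertised window is already
		 * scaled when window scaling was negotiated.
		 */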
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 *	Process an incoming packet for SYN_RECV sockets represented
 *	as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
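			/* Each SYN-ACK retransmission roughly doubles the
			 * timeout, so (TCP_TIMEOUT_INIT / HZ) << retrans
			 * gives a coarse bound on how long ago the peer's
			 * stamp can have been sent.
			 */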
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws this case (incorrectly! it was fixed in
		 * RFC1122) in figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot trust it and should rely only
		 *  on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
	   drop this bare ACK.  Otherwise, we create an established
	   connection.  Both ends (listening sockets) accept the new
	   incoming connection and try to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The possibility is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK from a
	   SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
	else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
		tcp_rsk(req)->snt_synack = 0;

	/* OK, the ACK is valid, create the big socket and feed this
	 * segment to it. It will repeat all the tests. THIS SEGMENT
	 * MUST MOVE THE SOCKET TO ESTABLISHED STATE. If it is dropped
	 * after the socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
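	/* Capture the state before processing: if tcp_rcv_state_process()
	 * moves the child out of TCP_SYN_RECV, the handshake has just
	 * completed and the listening parent must be woken up.
	 */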
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket no longer protects us.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);