xref: /openbmc/linux/net/ipv4/tcp_minisocks.c (revision 580f98cc33a260bb8c6a39ae2921b29586b84fdf)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
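/*
 * Illustrative example (added by the editor, not in the original source):
 * with s_win = 1000 and e_win = 2000, a segment with seq = 1500 and
 * end_seq = 1700 is accepted by the overlap test, a zero-length segment
 * with seq = end_seq = 1000 is accepted by the first test, and a
 * zero-length segment with seq = end_seq = 2000 is accepted by the final
 * test.  A segment with seq = end_seq = 2100 fails all three checks and is
 * treated as out of window.
 */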

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) while one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. That is not quite correct. The timeout is calculated so
 *   that it exceeds the maximal retransmission timeout by enough to allow
 *   the loss of one (or more) segments sent by the peer and of our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we would have to
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after the half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it if it is not an
	   old duplicate and we are not in danger of being killed by
	   delayed old duplicates. The RFC check, that the SYN carries a
	   newer sequence number, works at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable, and moreover we may
	   even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   our SYN-ACK), we must return the socket to time-wait state.
	   That is not good, but it is not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
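		/* Editor's note (added for clarity, not in the original
		 * source): the new ISN is placed 65535 + 2 above the old
		 * connection's snd_nxt, which appears intended to keep it
		 * comfortably above any sequence number the previous
		 * incarnation could still have in flight, in the spirit of
		 * the RFC 1122 reopen rule quoted above.
		 */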
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be either an old duplicate
		 * or a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
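/*
 * Hedged sketch (added by the editor, not part of this file): a receive
 * path is expected to dispatch on the return value roughly the way the
 * IPv4 input path does.  The helpers named below (tcp_v4_timewait_ack(),
 * tcp_v4_send_reset()) are only illustrative of what such a caller might
 * use; reference counting details are omitted.
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:
 *		// hand the SYN to a listener as a fresh connection request;
 *		// TCP_SKB_CB(skb)->tcp_tw_isn carries the suggested ISN
 *		break;
 *	case TCP_TW_ACK:
 *		tcp_v4_timewait_ack(sk, skb);
 *		break;
 *	case TCP_TW_RST:
 *		tcp_v4_send_reset(sk, skb);
 *		inet_twsk_deschedule_put(inet_twsk(sk));
 *		break;
 *	case TCP_TW_SUCCESS:
 *		break;	// the tw reference was already dropped above
 *	}
 */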

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!tcp_alloc_md5sig_pool())
			goto out_free;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
	}
	return;
out_free:
	WARN_ON_ONCE(1);
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
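		/* Editor's note (added for clarity): the expression above is
		 * 4*RTO - RTO/2 = 3.5*RTO; it is used below as the minimum
		 * value for the FIN-WAIT-2 / TIME-WAIT timer.
		 */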
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= sk->sk_priority;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_txhash = sk->sk_txhash;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);

		/* Arrange for the TIME_WAIT timer to fire. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BHs are
		 * disabled in the following section, otherwise the timer
		 * handler could run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
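/*
 * Hedged usage note (added by the editor, not in the original source):
 * callers hand over a socket that is finishing its close here, e.g.
 *
 *	tcp_time_wait(sk, TCP_TIME_WAIT, 0);
 *
 * or, for a dying FIN-WAIT-2 socket with some linger time left,
 *
 *	tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
 *
 * After the call, sk has gone through tcp_done() and must no longer be
 * treated as a connected socket by the caller.
 */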

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key) {
			kfree_rcu(twsk->tw_md5_key, rcu);
			static_branch_slow_dec_deferred(&tcp_md5_needed);
		}
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
		} else if (!purged_once) {
			/* The last refcount is decremented in tcp_sk_exit_batch() */
			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
				continue;

			inet_twsk_purge(&tcp_hashinfo, family);
			purged_once = true;
		}
	}
}
EXPORT_SYMBOL_GPL(tcp_twsk_purge);

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
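/*
 * Editor's note (illustrative, not in the original source): when a BPF
 * sock_ops program supplies an initial window via tcp_rwnd_init_bpf(),
 * the code above makes sure the advertised space can hold it.  For
 * example, with mss = 1460 and rcv_wnd = 20 segments, full_space is
 * raised to at least 20 * 1460 = 29200 bytes before
 * tcp_select_initial_window() runs.
 */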

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
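/*
 * Hedged note (added by the editor, not part of this file): RTAX_CC_ALGO
 * is typically attached to a route from user space, e.g. with iproute2
 * something along the lines of
 *
 *	ip route replace 10.0.0.0/8 via 192.168.1.1 congctl dctcp
 *
 * (the exact option syntax is an assumption about the iproute2 version in
 * use).  A child accepted over such a route then starts with the
 * route-supplied congestion control instead of the system default, and
 * tcp_ca_dst_locked() reflects whether that choice was marked as locked
 * on the route.
 */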

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save a lot of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->undo_marker = treq->snt_isn;
		newtp->retrans_stamp = div_u64(treq->snt_synack,
					       USEC_PER_SEC / TCP_TS_HZ);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
	if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
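/*
 * Hedged note (added by the editor, not in the original source): this
 * helper is normally reached from the address-family syn_recv_sock()
 * implementations, which clone the listener through the call above and
 * then finish the protocol-specific setup on the returned child before
 * it is hashed.
 */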

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws this case (incorrectly! It was fixed in
		 * RFC1122) on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot trust it and should rely only
		 *  on common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8 and figure 6
		 * of RFC793, as fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however; it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   The malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes.
	   So does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
	   drop this bare ACK.  Otherwise, we create an established
	   connection.  Both ends (listening sockets) accept the new
	   incoming connection and try to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The possibility is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before the attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* The ACK sequence was verified above, just make sure the ACK bit
	 * is set.  If ACK is not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to an outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
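/*
 * Hedged summary (added by the editor, not in the original source) of the
 * return contract as it reads from the code above: NULL means the segment
 * was fully handled here (dropped, rate-limited or answered); returning
 * @sk hands the segment back to the caller (listener processing, or the
 * TFO child); any other pointer is the freshly created child socket
 * produced by syn_recv_sock().
 */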

/*
 * Queue the segment on the new socket's backlog if the new socket is
 * currently owned by the user; otherwise we just short-circuit this and
 * process the segment on the new socket right away.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us anymore.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
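/*
 * Hedged usage sketch (added by the editor, not part of this file): an
 * input path that just got a child socket back from tcp_check_req() is
 * expected to do roughly
 *
 *	if (nsk != sk) {
 *		if (tcp_child_process(sk, nsk, skb))
 *			goto reset;	// the child asked for a reset
 *		return 0;		// skb was consumed by the child
 *	}
 *
 * where nsk is the pointer returned by tcp_check_req().  The child
 * arrives locked (see the __releases() annotation above), and
 * tcp_child_process() unlocks and releases it before returning.
 */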