xref: /openbmc/linux/net/ipv4/tcp_minisocks.c (revision 7b7fd0ac7dc1ffcaf24d9bca0f051b0168e43cd4)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * INET		An implementation of the TCP/IP protocol suite for the LINUX
4   *		operating system.  INET is implemented using the  BSD Socket
5   *		interface as the means of communication with the user level.
6   *
7   *		Implementation of the Transmission Control Protocol(TCP).
8   *
9   * Authors:	Ross Biro
10   *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11   *		Mark Evans, <evansmp@uhura.aston.ac.uk>
12   *		Corey Minyard <wf-rch!minyard@relay.EU.net>
13   *		Florian La Roche, <flla@stud.uni-sb.de>
14   *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
15   *		Linus Torvalds, <torvalds@cs.helsinki.fi>
16   *		Alan Cox, <gw4pts@gw4pts.ampr.org>
17   *		Matthew Dillon, <dillon@apollo.west.oic.com>
18   *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
19   *		Jorge Cwik, <jorge@laser.satlink.net>
20   */
21  
22  #include <net/tcp.h>
23  #include <net/xfrm.h>
24  #include <net/busy_poll.h>
25  
26  static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
27  {
28  	if (seq == s_win)
29  		return true;
30  	if (after(end_seq, s_win) && before(seq, e_win))
31  		return true;
32  	return seq == e_win && seq == end_seq;
33  }
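
/* A minimal illustrative sketch (not used anywhere in this file; the
 * sequence numbers are made up) of the acceptance test above.  Note the
 * u32 arithmetic: before()/after() compare via (s32)(a - b), so the test
 * stays correct when sequence numbers wrap past 2^32.
 */
static inline bool tcp_in_window_example(void)
{
	u32 rcv_nxt = 1000, rcv_wnd = 500;	/* window is [1000, 1500) */

	/* Segment [1100, 1300) overlaps the window: accepted.
	 * A segment [700, 900) would be rejected as old data.
	 */
	return tcp_in_window(1100, 1300, rcv_nxt, rcv_nxt + rcv_wnd);
}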
34  
35  static enum tcp_tw_status
36  tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
37  				  const struct sk_buff *skb, int mib_idx)
38  {
39  	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
40  
41  	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
42  				  &tcptw->tw_last_oow_ack_time)) {
43  		/* Send ACK. Note: we do not put the bucket;
44  		 * it will be released by the caller.
45  		 */
46  		return TCP_TW_ACK;
47  	}
48  
49  	/* We are rate-limiting, so just release the tw sock and drop skb. */
50  	inet_twsk_put(tw);
51  	return TCP_TW_SUCCESS;
52  }
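
/* The rate limit applied above is the net.ipv4.tcp_invalid_ratelimit
 * sysctl (500 ms by default): at most one ACK per interval in reply to
 * out-of-window segments, bounding the traffic an attacker can solicit
 * with forged segments.
 */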
53  
54  /*
55   * * The main purpose of the TIME-WAIT state is to close a connection gracefully,
56   *   when one of the ends sits in LAST-ACK or CLOSING retransmitting its FIN
57   *   (and, probably, a tail of data) and one or more of our ACKs are lost.
58   * * What is the TIME-WAIT timeout? It is associated with the maximal packet
59   *   lifetime in the internet, which leads to the wrong conclusion that
60   *   it is set to catch "old duplicate segments" wandering out of their path.
61   *   That is not quite correct. This timeout is calculated so that it exceeds the
62   *   maximal retransmission timeout by enough to allow the loss of one (or more)
63   *   segments sent by the peer and of our ACKs. This time may be calculated from the RTO.
64   * * When a TIME-WAIT socket receives a RST, it means that the other end
65   *   has finally closed, and we are allowed to kill TIME-WAIT too.
66   * * The second purpose of TIME-WAIT is catching old duplicate segments.
67   *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
68   *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
69   * * If we invented some more clever way to catch duplicates
70   *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
71   *
72   * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
73   * When you compare it to RFCs, please, read section SEGMENT ARRIVES
74   * from the very beginning.
75   *
76   * NOTE. With recycling (and later with fin-wait-2) the TW bucket
77   * is _not_ stateless. Strictly speaking, that means we must
78   * spinlock it, which I do not want to do. The probability of misbehaviour
79   * is ridiculously low and, it seems, we could use some mb() tricks
80   * to avoid misreading sequence numbers, states etc.  --ANK
81   *
82   * We don't need to initialize tmp_opt.sack_ok as we don't use the results
83   */
84  enum tcp_tw_status
85  tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
86  			   const struct tcphdr *th)
87  {
88  	struct tcp_options_received tmp_opt;
89  	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
90  	bool paws_reject = false;
91  
92  	tmp_opt.saw_tstamp = 0;
93  	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
94  		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
95  
96  		if (tmp_opt.saw_tstamp) {
97  			if (tmp_opt.rcv_tsecr)
98  				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
99  			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
100  			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
101  			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
102  		}
103  	}
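
	/* A rough sketch of the PAWS test performed by tcp_paws_reject()
	 * above (the real helper also considers the age of ts_recent and
	 * handles RST segments specially):
	 *
	 *	if ((s32)(tmp_opt.ts_recent - tmp_opt.rcv_tsval) > 1)
	 *		paws_reject = true;	// timestamp moved backwards
	 *
	 * i.e. a segment whose timestamp is older than the last one seen
	 * from this peer is presumed to be an old duplicate.
	 */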
104  
105  	if (tw->tw_substate == TCP_FIN_WAIT2) {
106  		/* Just repeat all the checks of tcp_rcv_state_process() */
107  
108  		/* Out of window, send ACK */
109  		if (paws_reject ||
110  		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
111  				   tcptw->tw_rcv_nxt,
112  				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
113  			return tcp_timewait_check_oow_rate_limit(
114  				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
115  
116  		if (th->rst)
117  			goto kill;
118  
119  		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
120  			return TCP_TW_RST;
121  
122  		/* Dup ACK? */
123  		if (!th->ack ||
124  		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
125  		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
126  			inet_twsk_put(tw);
127  			return TCP_TW_SUCCESS;
128  		}
129  
130  		/* New data or FIN. If new data arrive after half-duplex close,
131  		 * reset.
132  		 */
133  		if (!th->fin ||
134  		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
135  			return TCP_TW_RST;
136  
137  		/* FIN arrived, enter true time-wait state. */
138  		tw->tw_substate	  = TCP_TIME_WAIT;
139  		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
140  		if (tmp_opt.saw_tstamp) {
141  			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
142  			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
143  		}
144  
145  		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
146  		return TCP_TW_ACK;
147  	}
148  
149  	/*
150  	 *	Now real TIME-WAIT state.
151  	 *
152  	 *	RFC 1122:
153  	 *	"When a connection is [...] on TIME-WAIT state [...]
154  	 *	[a TCP] MAY accept a new SYN from the remote TCP to
155  	 *	reopen the connection directly, if it:
156  	 *
157  	 *	(1)  assigns its initial sequence number for the new
158  	 *	connection to be larger than the largest sequence
159  	 *	number it used on the previous connection incarnation,
160  	 *	and
161  	 *
162  	 *	(2)  returns to TIME-WAIT state if the SYN turns out
163  	 *	to be an old duplicate".
164  	 */
165  
166  	if (!paws_reject &&
167  	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
168  	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
169  		/* An in-window segment may only be a reset or a bare ACK. */
170  
171  		if (th->rst) {
172  			/* This is TIME_WAIT assassination, in two flavors.
173  			 * Oh well... nobody has a sufficient solution to this
174  			 * protocol bug yet.
175  			 */
176  			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
177  kill:
178  				inet_twsk_deschedule_put(tw);
179  				return TCP_TW_SUCCESS;
180  			}
181  		} else {
182  			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
183  		}
184  
185  		if (tmp_opt.saw_tstamp) {
186  			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
187  			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
188  		}
189  
190  		inet_twsk_put(tw);
191  		return TCP_TW_SUCCESS;
192  	}
193  
194  	/* Out-of-window segment.
195  
196  	   All such segments are ACKed immediately.
197  
198  	   The only exception is a new SYN. We accept it if it is
199  	   not an old duplicate and we are not in danger of being killed
200  	   by delayed old duplicates. The RFC check (that it carries a
201  	   newer sequence number) works at rates < 40 Mbit/sec.
202  	   However, if PAWS works, it is reliable, and moreover
203  	   we may even relax the silly sequence-space cutoff.
204  
205  	   RED-PEN: we violate the main RFC requirement: if this SYN turns
206  	   out to be an old duplicate (i.e. we receive a RST in reply to our
207  	   SYN-ACK), we must return the socket to TIME-WAIT state. That is
208  	   not good, but not fatal yet.
209  	 */
210  
211  	if (th->syn && !th->rst && !th->ack && !paws_reject &&
212  	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
213  	     (tmp_opt.saw_tstamp &&
214  	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
215  		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
216  		if (isn == 0)
217  			isn++;
218  		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
219  		return TCP_TW_SYN;
220  	}
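
	/* Unrolling the ISN choice above: the reopened connection's ISN is
	 * placed beyond anything the old incarnation could have sent
	 * (snd_nxt plus a maximal 65535-byte window, plus 2 for the FIN and
	 * SYN sequence-number slots), satisfying requirement (1) of the
	 * RFC 1122 excerpt quoted earlier.  Zero is skipped because a zero
	 * tcp_tw_isn is treated as "no TIME-WAIT ISN" downstream.
	 */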
221  
222  	if (paws_reject)
223  		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
224  
225  	if (!th->rst) {
226  		/* In this case we must reset the TIMEWAIT timer.
227  		 *
228  		 * If it is an ACKless SYN, it may be either an old duplicate
229  		 * or a new good SYN with a random sequence number < rcv_nxt.
230  		 * Do not reschedule in the latter case.
231  		 */
232  		if (paws_reject || th->ack)
233  			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
234  
235  		return tcp_timewait_check_oow_rate_limit(
236  			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
237  	}
238  	inet_twsk_put(tw);
239  	return TCP_TW_SUCCESS;
240  }
241  EXPORT_SYMBOL(tcp_timewait_state_process);
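
/* A condensed sketch of how the IPv4 receive path consumes this status
 * (paraphrasing the do_time_wait handling in tcp_v4_rcv(); not the exact
 * code):
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:	// acceptable reopen: hand the SYN to a listener
 *		...
 *	case TCP_TW_ACK:	// answer with the timewait ACK
 *		tcp_v4_timewait_ack(sk, skb);
 *		break;
 *	case TCP_TW_RST:	// answer with a reset and kill the bucket
 *		tcp_v4_send_reset(sk, skb);
 *		inet_twsk_deschedule_put(inet_twsk(sk));
 *		break;
 *	case TCP_TW_SUCCESS:	// nothing to transmit; drop the skb
 *		break;
 *	}
 */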
242  
243  static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
244  {
245  #ifdef CONFIG_TCP_MD5SIG
246  	const struct tcp_sock *tp = tcp_sk(sk);
247  	struct tcp_md5sig_key *key;
248  
249  	/*
250  	 * The timewait bucket does not have the key DB from the
251  	 * sock structure. We just make a quick copy of the
252  	 * md5 key being used (if indeed we are using one)
253  	 * so the timewait ack generating code has the key.
254  	 */
255  	tcptw->tw_md5_key = NULL;
256  	if (!static_branch_unlikely(&tcp_md5_needed.key))
257  		return;
258  
259  	key = tp->af_specific->md5_lookup(sk, sk);
260  	if (key) {
261  		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
262  		if (!tcptw->tw_md5_key)
263  			return;
264  		if (!tcp_alloc_md5sig_pool())
265  			goto out_free;
266  		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
267  			goto out_free;
268  	}
269  	return;
270  out_free:
271  	WARN_ON_ONCE(1);
272  	kfree(tcptw->tw_md5_key);
273  	tcptw->tw_md5_key = NULL;
274  #endif
275  }
276  
277  /*
278   * Move a socket to time-wait or dead fin-wait-2 state.
279   */
280  void tcp_time_wait(struct sock *sk, int state, int timeo)
281  {
282  	const struct inet_connection_sock *icsk = inet_csk(sk);
283  	const struct tcp_sock *tp = tcp_sk(sk);
284  	struct net *net = sock_net(sk);
285  	struct inet_timewait_sock *tw;
286  
287  	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);
288  
289  	if (tw) {
290  		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
291  		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
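		/* (rto << 2) - (rto >> 1) == 4*RTO - RTO/2 == 3.5 * RTO: the
		 * "exceeds maximal retransmission timeout" margin described
		 * in the comment at the top of this file.
		 */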
292  
293  		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
294  		tw->tw_mark		= sk->sk_mark;
295  		tw->tw_priority		= sk->sk_priority;
296  		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
297  		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
298  		tcptw->tw_snd_nxt	= tp->snd_nxt;
299  		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
300  		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
301  		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
302  		tcptw->tw_ts_offset	= tp->tsoffset;
303  		tcptw->tw_last_oow_ack_time = 0;
304  		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
305  		tw->tw_txhash		= sk->sk_txhash;
306  #if IS_ENABLED(CONFIG_IPV6)
307  		if (tw->tw_family == PF_INET6) {
308  			struct ipv6_pinfo *np = inet6_sk(sk);
309  
310  			tw->tw_v6_daddr = sk->sk_v6_daddr;
311  			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
312  			tw->tw_tclass = np->tclass;
313  			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
314  			tw->tw_ipv6only = sk->sk_ipv6only;
315  		}
316  #endif
317  
318  		tcp_time_wait_init(sk, tcptw);
319  
320  		/* Get the TIME_WAIT timeout firing. */
321  		if (timeo < rto)
322  			timeo = rto;
323  
324  		if (state == TCP_TIME_WAIT)
325  			timeo = TCP_TIMEWAIT_LEN;
326  
327  		/* tw_timer is pinned, so we need to make sure BHs are disabled
328  		 * in the following section, otherwise the timer handler could run
329  		 * before we complete the initialization.
330  		 */
331  		local_bh_disable();
332  		inet_twsk_schedule(tw, timeo);
333  		/* Linkage updates.
334  		 * Note that access to tw after this point is illegal.
335  		 */
336  		inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
337  		local_bh_enable();
338  	} else {
339  		/* Sorry, if we're out of memory, just CLOSE this
340  		 * socket up.  We've got bigger problems than
341  		 * non-graceful socket closings.
342  		 */
343  		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
344  	}
345  
346  	tcp_update_metrics(sk);
347  	tcp_done(sk);
348  }
349  EXPORT_SYMBOL(tcp_time_wait);
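
/* Sketch of typical call sites (paraphrased): tcp_fin() moves an
 * established socket straight to TIME-WAIT, while the keepalive timer
 * parks a dead FIN-WAIT-2 socket here with a caller-chosen timeout:
 *
 *	tcp_time_wait(sk, TCP_TIME_WAIT, 0);	// timeo forced to
 *						// TCP_TIMEWAIT_LEN above
 *	tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);	// expire dead FIN-WAIT-2
 *						// after tmo (>= 3.5 * RTO)
 */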
350  
351  void tcp_twsk_destructor(struct sock *sk)
352  {
353  #ifdef CONFIG_TCP_MD5SIG
354  	if (static_branch_unlikely(&tcp_md5_needed.key)) {
355  		struct tcp_timewait_sock *twsk = tcp_twsk(sk);
356  
357  		if (twsk->tw_md5_key) {
358  			kfree_rcu(twsk->tw_md5_key, rcu);
359  			static_branch_slow_dec_deferred(&tcp_md5_needed);
360  		}
361  	}
362  #endif
363  }
364  EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
365  
366  void tcp_twsk_purge(struct list_head *net_exit_list)
367  {
368  	bool purged_once = false;
369  	struct net *net;
370  
371  	list_for_each_entry(net, net_exit_list, exit_list) {
372  		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
373  			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
374  			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
375  		} else if (!purged_once) {
376  			inet_twsk_purge(&tcp_hashinfo);
377  			purged_once = true;
378  		}
379  	}
380  }
381  
382  /* Warning: this function is called without sk_listener being locked.
383   * Be sure to read socket fields once, as their value could change under us.
384   */
385  void tcp_openreq_init_rwin(struct request_sock *req,
386  			   const struct sock *sk_listener,
387  			   const struct dst_entry *dst)
388  {
389  	struct inet_request_sock *ireq = inet_rsk(req);
390  	const struct tcp_sock *tp = tcp_sk(sk_listener);
391  	int full_space = tcp_full_space(sk_listener);
392  	u32 window_clamp;
393  	__u8 rcv_wscale;
394  	u32 rcv_wnd;
395  	int mss;
396  
397  	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
398  	window_clamp = READ_ONCE(tp->window_clamp);
399  	/* Set this up on the first call only */
400  	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
401  
402  	/* Limit the window selection if the user enforces a smaller rx buffer. */
403  	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
404  	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
405  		req->rsk_window_clamp = full_space;
406  
407  	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
408  	if (rcv_wnd == 0)
409  		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
410  	else if (full_space < rcv_wnd * mss)
411  		full_space = rcv_wnd * mss;
412  
413  	/* tcp_full_space because it is guaranteed to be the first packet */
414  	tcp_select_initial_window(sk_listener, full_space,
415  		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
416  		&req->rsk_rcv_wnd,
417  		&req->rsk_window_clamp,
418  		ireq->wscale_ok,
419  		&rcv_wscale,
420  		rcv_wnd);
421  	ireq->rcv_wscale = rcv_wscale;
422  }
423  EXPORT_SYMBOL(tcp_openreq_init_rwin);
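
/* Summarizing the call above: tcp_select_initial_window() picks the window
 * the SYN-ACK will advertise (rsk_rcv_wnd, kept small enough for the
 * unscaled 16-bit window field, since our scale factor takes effect only
 * after the handshake), may shrink rsk_window_clamp accordingly, and
 * chooses the rcv_wscale to advertise for later segments.
 */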
424  
425  static void tcp_ecn_openreq_child(struct tcp_sock *tp,
426  				  const struct request_sock *req)
427  {
428  	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
429  }
430  
431  void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
432  {
433  	struct inet_connection_sock *icsk = inet_csk(sk);
434  	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
435  	bool ca_got_dst = false;
436  
437  	if (ca_key != TCP_CA_UNSPEC) {
438  		const struct tcp_congestion_ops *ca;
439  
440  		rcu_read_lock();
441  		ca = tcp_ca_find_key(ca_key);
442  		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
443  			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
444  			icsk->icsk_ca_ops = ca;
445  			ca_got_dst = true;
446  		}
447  		rcu_read_unlock();
448  	}
449  
450  	/* If no valid choice has been made yet, assign the current system default ca. */
451  	if (!ca_got_dst &&
452  	    (!icsk->icsk_ca_setsockopt ||
453  	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
454  		tcp_assign_congestion_control(sk);
455  
456  	tcp_set_ca_state(sk, TCP_CA_Open);
457  }
458  EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
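
/* For reference, the RTAX_CC_ALGO consulted above is the per-route metric
 * set by "ip route ... congctl [lock] <name>"; tcp_ca_dst_locked() reports
 * whether the "lock" variant was used, i.e. whether the route has pinned
 * its congestion control choice.
 */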
459  
460  static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
461  				    struct request_sock *req,
462  				    struct tcp_sock *newtp)
463  {
464  #if IS_ENABLED(CONFIG_SMC)
465  	struct inet_request_sock *ireq;
466  
467  	if (static_branch_unlikely(&tcp_have_smc)) {
468  		ireq = inet_rsk(req);
469  		if (oldtp->syn_smc && !ireq->smc_ok)
470  			newtp->syn_smc = 0;
471  	}
472  #endif
473  }
474  
475  /* This is not only more efficient than what we used to do, it eliminates
476   * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
477   *
478   * Actually, we could save a lot of memory writes here: the tp of the
479   * listening socket already contains all the necessary default parameters.
480   */
481  struct sock *tcp_create_openreq_child(const struct sock *sk,
482  				      struct request_sock *req,
483  				      struct sk_buff *skb)
484  {
485  	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
486  	const struct inet_request_sock *ireq = inet_rsk(req);
487  	struct tcp_request_sock *treq = tcp_rsk(req);
488  	struct inet_connection_sock *newicsk;
489  	const struct tcp_sock *oldtp;
490  	struct tcp_sock *newtp;
491  	u32 seq;
492  
493  	if (!newsk)
494  		return NULL;
495  
496  	newicsk = inet_csk(newsk);
497  	newtp = tcp_sk(newsk);
498  	oldtp = tcp_sk(sk);
499  
500  	smc_check_reset_syn_req(oldtp, req, newtp);
501  
502  	/* Now setup tcp_sock */
503  	newtp->pred_flags = 0;
504  
505  	seq = treq->rcv_isn + 1;
506  	newtp->rcv_wup = seq;
507  	WRITE_ONCE(newtp->copied_seq, seq);
508  	WRITE_ONCE(newtp->rcv_nxt, seq);
509  	newtp->segs_in = 1;
510  
511  	seq = treq->snt_isn + 1;
512  	newtp->snd_sml = newtp->snd_una = seq;
513  	WRITE_ONCE(newtp->snd_nxt, seq);
514  	newtp->snd_up = seq;
515  
516  	INIT_LIST_HEAD(&newtp->tsq_node);
517  	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
518  
519  	tcp_init_wl(newtp, treq->rcv_isn);
520  
521  	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
522  	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
523  
524  	newtp->lsndtime = tcp_jiffies32;
525  	newsk->sk_txhash = READ_ONCE(treq->txhash);
526  	newtp->total_retrans = req->num_retrans;
527  
528  	tcp_init_xmit_timers(newsk);
529  	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
530  
531  	if (sock_flag(newsk, SOCK_KEEPOPEN))
532  		inet_csk_reset_keepalive_timer(newsk,
533  					       keepalive_time_when(newtp));
534  
535  	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
536  	newtp->rx_opt.sack_ok = ireq->sack_ok;
537  	newtp->window_clamp = req->rsk_window_clamp;
538  	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
539  	newtp->rcv_wnd = req->rsk_rcv_wnd;
540  	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
541  	if (newtp->rx_opt.wscale_ok) {
542  		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
543  		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
544  	} else {
545  		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
546  		newtp->window_clamp = min(newtp->window_clamp, 65535U);
547  	}
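
	/* skb here is the handshake-completing ACK, not the SYN, so its raw
	 * window field is subject to the scale factor negotiated above
	 * (snd_wscale is 0 when scaling was not agreed upon).
	 */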
548  	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
549  	newtp->max_window = newtp->snd_wnd;
550  
551  	if (newtp->rx_opt.tstamp_ok) {
552  		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
553  		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
554  		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
555  	} else {
556  		newtp->rx_opt.ts_recent_stamp = 0;
557  		newtp->tcp_header_len = sizeof(struct tcphdr);
558  	}
559  	if (req->num_timeout) {
560  		newtp->undo_marker = treq->snt_isn;
561  		newtp->retrans_stamp = div_u64(treq->snt_synack,
562  					       USEC_PER_SEC / TCP_TS_HZ);
563  		newtp->total_rto = req->num_timeout;
564  		newtp->total_rto_recoveries = 1;
565  		newtp->total_rto_time = tcp_time_stamp_raw() -
566  						newtp->retrans_stamp;
567  	}
568  	newtp->tsoffset = treq->ts_off;
569  #ifdef CONFIG_TCP_MD5SIG
570  	newtp->md5sig_info = NULL;	/*XXX*/
571  #endif
572  	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
573  		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
574  	newtp->rx_opt.mss_clamp = req->mss;
575  	tcp_ecn_openreq_child(newtp, req);
576  	newtp->fastopen_req = NULL;
577  	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
578  
579  	newtp->bpf_chg_cc_inprogress = 0;
580  	tcp_bpf_clone(sk, newsk);
581  
582  	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
583  
584  	return newsk;
585  }
586  EXPORT_SYMBOL(tcp_create_openreq_child);
587  
588  /*
589   * Process an incoming packet for SYN_RECV sockets represented as a
590   * request_sock. Normally sk is the listener socket but for TFO it
591   * points to the child socket.
592   *
593   * XXX (TFO) - The current impl contains a special check for ACK
594   * validation, and another inside tcp_v4_reqsk_send_ack(). Can we do better?
595   *
596   * We don't need to initialize tmp_opt.sack_ok as we don't use the results
597   *
598   * Note: If @fastopen is true, this can be called from process context.
599   *       Otherwise, this is from BH context.
600   */
601  
602  struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
603  			   struct request_sock *req,
604  			   bool fastopen, bool *req_stolen)
605  {
606  	struct tcp_options_received tmp_opt;
607  	struct sock *child;
608  	const struct tcphdr *th = tcp_hdr(skb);
609  	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
610  	bool paws_reject = false;
611  	bool own_req;
612  
613  	tmp_opt.saw_tstamp = 0;
614  	if (th->doff > (sizeof(struct tcphdr)>>2)) {
615  		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
616  
617  		if (tmp_opt.saw_tstamp) {
618  			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
619  			if (tmp_opt.rcv_tsecr)
620  				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
621  			/* We do not store the true stamp, but it is not
622  			 * required; it can be estimated (approximately)
623  			 * from other data.
624  			 */
625  			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
626  			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
627  		}
628  	}
629  
630  	/* Check for pure retransmitted SYN. */
631  	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
632  	    flg == TCP_FLAG_SYN &&
633  	    !paws_reject) {
634  		/*
635  		 * RFC793 draws this case (incorrectly! it was fixed in RFC1122)
636  		 * on figure 6 and figure 8, but the formal
637  		 * protocol description says NOTHING.
638  		 * To be more exact, it says that we should send an ACK,
639  		 * because this segment (at least, if it has no data)
640  		 * is out of window.
641  		 *
642  		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
643  		 *  describe the SYN-RECV state. All of that description
644  		 *  is wrong; we cannot believe it and should
645  		 *  rely only on common sense and implementation
646  		 *  experience.
647  		 *
648  		 * Enforce "SYN-ACK" according to figure 8 and figure 6
649  		 * of RFC793, as fixed by RFC1122.
650  		 *
651  		 * Note that even if there is new data in the SYN packet,
652  		 * it will be thrown away too.
653  		 *
654  		 * Reset the timer after retransmitting the SYNACK, similar to
655  		 * the idea of fast retransmit in recovery.
656  		 */
657  		if (!tcp_oow_rate_limited(sock_net(sk), skb,
658  					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
659  					  &tcp_rsk(req)->last_oow_ack_time) &&
660  
661  		    !inet_rtx_syn_ack(sk, req)) {
662  			unsigned long expires = jiffies;
663  
664  			expires += reqsk_timeout(req, TCP_RTO_MAX);
665  			if (!fastopen)
666  				mod_timer_pending(&req->rsk_timer, expires);
667  			else
668  				req->rsk_timer.expires = expires;
669  		}
670  		return NULL;
671  	}
672  
673  	/* Further reproduces section "SEGMENT ARRIVES"
674  	   for state SYN-RECEIVED of RFC793.
675  	   It is broken, however: it fails only
676  	   when SYNs are crossed.
677  
678  	   You would think that SYN crossing is impossible here, since
679  	   we should have a SYN_SENT socket (from connect()) on our end,
680  	   but this is not true if the crossed SYNs were sent to both
681  	   ends by a malicious third party.  We must defend against this,
682  	   and to do that we first verify the ACK (as per RFC793, page
683  	   36) and reset if it is invalid.  Is this a true full defense?
684  	   To convince ourselves, let us consider a way in which the ACK
685  	   test can still pass in this 'malicious crossed SYNs' case.
686  	   Malicious sender sends identical SYNs (and thus identical sequence
687  	   numbers) to both A and B:
688  
689  		A: gets SYN, seq=7
690  		B: gets SYN, seq=7
691  
692  	   By our good fortune, both A and B select the same initial
693  	   send sequence number of seven :-)
694  
695  		A: sends SYN|ACK, seq=7, ack_seq=8
696  		B: sends SYN|ACK, seq=7, ack_seq=8
697  
698  	   So now we are A, eating this SYN|ACK: the ACK test passes.  So
699  	   does the sequence test; the SYN is truncated, and thus we consider
700  	   it a bare ACK.
701  
702  	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
703  	   bare ACK.  Otherwise, we create an established connection.  Both
704  	   ends (listening sockets) accept the new incoming connection and try
705  	   to talk to each other. 8-)
706  
707  	   Note: This case is both harmless and rare.  The probability is about
708  	   the same as us discovering intelligent life on another planet tomorrow.
709  
710  	   But generally, we should (the RFC notwithstanding) accept the ACK
711  	   of a SYNACK both here and in tcp_rcv_state_process().
712  	   tcp_rcv_state_process() does not, hence we do not either.
713  
714  	   Note that the case is absolutely generic:
715  	   we cannot optimize anything here without
716  	   violating the protocol. All the checks must be made
717  	   before attempting to create the socket.
718  	 */
719  
720  	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
721  	 *                  and the incoming segment acknowledges something not yet
722  	 *                  sent (the segment carries an unacceptable ACK) ...
723  	 *                  a reset is sent."
724  	 *
725  	 * Invalid ACK: reset will be sent by listening socket.
726  	 * Note that the ACK validity check for a Fast Open socket is done
727  	 * elsewhere and is checked directly against the child socket rather
728  	 * than req because user data may have been sent out.
729  	 */
730  	if ((flg & TCP_FLAG_ACK) && !fastopen &&
731  	    (TCP_SKB_CB(skb)->ack_seq !=
732  	     tcp_rsk(req)->snt_isn + 1))
733  		return sk;
734  
735  	/* Also, it would not be a bad idea to check rcv_tsecr, which
736  	 * is essentially an ACK extension; too-early or too-late values
737  	 * should cause a reset in unsynchronized states.
738  	 */
739  
740  	/* RFC793: "first check sequence number". */
741  
742  	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
743  					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
744  		/* Out of window: send ACK and drop. */
745  		if (!(flg & TCP_FLAG_RST) &&
746  		    !tcp_oow_rate_limited(sock_net(sk), skb,
747  					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
748  					  &tcp_rsk(req)->last_oow_ack_time))
749  			req->rsk_ops->send_ack(sk, skb, req);
750  		if (paws_reject)
751  			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
752  		return NULL;
753  	}
754  
755  	/* In sequence, PAWS is OK. */
756  
757  	/* TODO: We probably should defer ts_recent change once
758  	 * we take ownership of @req.
759  	 */
760  	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
761  		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
762  
763  	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
764  		/* Truncate SYN, it is out of window starting
765  		   at tcp_rsk(req)->rcv_isn + 1. */
766  		flg &= ~TCP_FLAG_SYN;
767  	}
768  
769  	/* RFC793: "second check the RST bit" and
770  	 *	   "fourth, check the SYN bit"
771  	 */
772  	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
773  		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
774  		goto embryonic_reset;
775  	}
776  
777  	/* ACK sequence verified above, just make sure ACK is
778  	 * set.  If ACK not set, just silently drop the packet.
779  	 *
780  	 * XXX (TFO) - if we ever allow "data after SYN", the
781  	 * following check needs to be removed.
782  	 */
783  	if (!(flg & TCP_FLAG_ACK))
784  		return NULL;
785  
786  	/* For Fast Open no more processing is needed (sk is the
787  	 * child socket).
788  	 */
789  	if (fastopen)
790  		return sk;
791  
792  	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
793  	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
794  	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
795  		inet_rsk(req)->acked = 1;
796  		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
797  		return NULL;
798  	}
799  
800  	/* OK, the ACK is valid: create the full socket and
801  	 * feed this segment to it. It will repeat all
802  	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
803  	 * ESTABLISHED STATE. If it gets dropped after the
804  	 * socket is created, expect trouble.
805  	 */
806  	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
807  							 req, &own_req);
808  	if (!child)
809  		goto listen_overflow;
810  
811  	if (own_req && rsk_drop_req(req)) {
812  		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
813  		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
814  		return child;
815  	}
816  
817  	sock_rps_save_rxhash(child, skb);
818  	tcp_synack_rtt_meas(child, req);
819  	*req_stolen = !own_req;
820  	return inet_csk_complete_hashdance(sk, child, req, own_req);
821  
822  listen_overflow:
823  	if (sk != req->rsk_listener)
824  		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
825  
826  	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
827  		inet_rsk(req)->acked = 1;
828  		return NULL;
829  	}
830  
831  embryonic_reset:
832  	if (!(flg & TCP_FLAG_RST)) {
833  		/* Received a bad SYN pkt - for TFO we try not to reset
834  		 * the local connection unless it's really necessary,
835  		 * to avoid becoming vulnerable to outside attacks aiming
836  		 * at resetting legit local connections.
837  		 */
838  		req->rsk_ops->send_reset(sk, skb);
839  	} else if (fastopen) { /* received a valid RST pkt */
840  		reqsk_fastopen_remove(sk, req, true);
841  		tcp_reset(sk, skb);
842  	}
843  	if (!fastopen) {
844  		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
845  
846  		if (unlinked)
847  			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
848  		*req_stolen = !unlinked;
849  	}
850  	return NULL;
851  }
852  EXPORT_SYMBOL(tcp_check_req);
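
/* The return contract, summarized: a new child socket on success; sk itself
 * when the segment must be processed against the listener (e.g. the
 * unacceptable-ACK case above, which ends in a reset); NULL when the segment
 * has already been answered or should be silently dropped.
 */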
853  
854  /*
855   * Queue the segment on the new socket if that socket is busy (owned by
856   * a user context), otherwise we just shortcircuit this and process it
857   * with the new socket directly.
858   *
859   * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
860   * when entering. But other states are possible due to a race condition
861   * where, after __inet_lookup_established() fails but before the listener
862   * lock is obtained, other packets cause the same connection to
863   * be created.
864   */
865  
866  int tcp_child_process(struct sock *parent, struct sock *child,
867  		      struct sk_buff *skb)
868  	__releases(&((child)->sk_lock.slock))
869  {
870  	int ret = 0;
871  	int state = child->sk_state;
872  
873  	/* record sk_napi_id and sk_rx_queue_mapping of child. */
874  	sk_mark_napi_id_set(child, skb);
875  
876  	tcp_segs_in(tcp_sk(child), skb);
877  	if (!sock_owned_by_user(child)) {
878  		ret = tcp_rcv_state_process(child, skb);
879  		/* Wakeup parent, send SIGIO */
880  		if (state == TCP_SYN_RECV && child->sk_state != state)
881  			parent->sk_data_ready(parent);
882  	} else {
883  		/* Alas, it is possible again, because we do the lookup
884  		 * in the main socket hash table and the lock on the listening
885  		 * socket does not protect us any more.
886  		 */
887  		__sk_add_backlog(child, skb);
888  	}
889  
890  	bh_unlock_sock(child);
891  	sock_put(child);
892  	return ret;
893  }
894  EXPORT_SYMBOL(tcp_child_process);
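
/* Sketch of the caller's side (paraphrasing tcp_v4_rcv()): the child
 * arrives here locked and with a reference held, both released above.
 *
 *	if (nsk != sk) {
 *		if (tcp_child_process(sk, nsk, skb))
 *			goto reset;	// non-zero: send a RST
 *		return 0;
 *	}
 */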
895