xref: /openbmc/linux/net/ipv4/inet_connection_sock.c (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * INET		An implementation of the TCP/IP protocol suite for the LINUX
4   *		operating system.  INET is implemented using the  BSD Socket
5   *		interface as the means of communication with the user level.
6   *
7   *		Support for INET connection oriented protocols.
8   *
9   * Authors:	See the TCP sources
10   */
11  
12  #include <linux/module.h>
13  #include <linux/jhash.h>
14  
15  #include <net/inet_connection_sock.h>
16  #include <net/inet_hashtables.h>
17  #include <net/inet_timewait_sock.h>
18  #include <net/ip.h>
19  #include <net/route.h>
20  #include <net/tcp_states.h>
21  #include <net/xfrm.h>
22  #include <net/tcp.h>
23  #include <net/sock_reuseport.h>
24  #include <net/addrconf.h>
25  
26  #if IS_ENABLED(CONFIG_IPV6)
27  /* match_sk*_wildcard == true:  IPV6_ADDR_ANY equals any IPv6 address
28   *				if the socket is IPv6-only, and also any
29   *				IPv4 address if it is not IPv6-only
30   * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
31   *				IPV6_ADDR_ANY only equals IPV6_ADDR_ANY,
32   *				and 0.0.0.0 only equals 0.0.0.0
33   */
34  static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
35  				 const struct in6_addr *sk2_rcv_saddr6,
36  				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
37  				 bool sk1_ipv6only, bool sk2_ipv6only,
38  				 bool match_sk1_wildcard,
39  				 bool match_sk2_wildcard)
40  {
41  	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
42  	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
43  
44  	/* if both are mapped, treat as IPv4 */
45  	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
46  		if (!sk2_ipv6only) {
47  			if (sk1_rcv_saddr == sk2_rcv_saddr)
48  				return true;
49  			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
50  				(match_sk2_wildcard && !sk2_rcv_saddr);
51  		}
52  		return false;
53  	}
54  
55  	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
56  		return true;
57  
58  	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
59  	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
60  		return true;
61  
62  	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
63  	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
64  		return true;
65  
66  	if (sk2_rcv_saddr6 &&
67  	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
68  		return true;
69  
70  	return false;
71  }
72  #endif
73  
74  /* match_sk*_wildcard == true:  0.0.0.0 equals any IPv4 address
75   * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
76   *				0.0.0.0 only equals 0.0.0.0
77   */
78  static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
79  				 bool sk2_ipv6only, bool match_sk1_wildcard,
80  				 bool match_sk2_wildcard)
81  {
82  	if (!sk2_ipv6only) {
83  		if (sk1_rcv_saddr == sk2_rcv_saddr)
84  			return true;
85  		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
86  			(match_sk2_wildcard && !sk2_rcv_saddr);
87  	}
88  	return false;
89  }
90  
91  bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
92  			  bool match_wildcard)
93  {
94  #if IS_ENABLED(CONFIG_IPV6)
95  	if (sk->sk_family == AF_INET6)
96  		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
97  					    inet6_rcv_saddr(sk2),
98  					    sk->sk_rcv_saddr,
99  					    sk2->sk_rcv_saddr,
100  					    ipv6_only_sock(sk),
101  					    ipv6_only_sock(sk2),
102  					    match_wildcard,
103  					    match_wildcard);
104  #endif
105  	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
106  				    ipv6_only_sock(sk2), match_wildcard,
107  				    match_wildcard);
108  }
109  EXPORT_SYMBOL(inet_rcv_saddr_equal);
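
/* Editorial illustration, not part of this file: a minimal userspace sketch of
 * the wildcard matching that inet_rcv_saddr_equal() implements for bind
 * conflict checks.  The port number below is arbitrary; the second bind() is
 * expected to fail with EADDRINUSE because 0.0.0.0 matches the already-bound
 * 127.0.0.1 when wildcard matching is in effect.
 */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = htons(50000) };
	int s1 = socket(AF_INET, SOCK_STREAM, 0);
	int s2 = socket(AF_INET, SOCK_STREAM, 0);

	a.sin_addr.s_addr = htonl(INADDR_LOOPBACK);	/* 127.0.0.1:50000 */
	if (bind(s1, (struct sockaddr *)&a, sizeof(a)))
		perror("bind s1");

	a.sin_addr.s_addr = htonl(INADDR_ANY);		/* 0.0.0.0:50000 */
	if (bind(s2, (struct sockaddr *)&a, sizeof(a)))
		perror("bind s2");			/* expected: EADDRINUSE */

	close(s1);
	close(s2);
	return 0;
}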
110  
111  bool inet_rcv_saddr_any(const struct sock *sk)
112  {
113  #if IS_ENABLED(CONFIG_IPV6)
114  	if (sk->sk_family == AF_INET6)
115  		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
116  #endif
117  	return !sk->sk_rcv_saddr;
118  }
119  
120  void inet_get_local_port_range(const struct net *net, int *low, int *high)
121  {
122  	unsigned int seq;
123  
124  	do {
125  		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
126  
127  		*low = net->ipv4.ip_local_ports.range[0];
128  		*high = net->ipv4.ip_local_ports.range[1];
129  	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
130  }
131  EXPORT_SYMBOL(inet_get_local_port_range);
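
/* Editorial illustration, not part of this file: the range read under the
 * seqlock above is the per-netns sysctl exposed to userspace as
 * net.ipv4.ip_local_port_range.  A minimal sketch of reading it:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/ip_local_port_range", "r");
	int low, high;

	if (f && fscanf(f, "%d %d", &low, &high) == 2)
		printf("ephemeral port range: %d-%d\n", low, high);
	if (f)
		fclose(f);
	return 0;
}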
132  
133  void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
134  {
135  	const struct inet_sock *inet = inet_sk(sk);
136  	const struct net *net = sock_net(sk);
137  	int lo, hi, sk_lo, sk_hi;
138  
139  	inet_get_local_port_range(net, &lo, &hi);
140  
141  	sk_lo = inet->local_port_range.lo;
142  	sk_hi = inet->local_port_range.hi;
143  
144  	if (unlikely(lo <= sk_lo && sk_lo <= hi))
145  		lo = sk_lo;
146  	if (unlikely(lo <= sk_hi && sk_hi <= hi))
147  		hi = sk_hi;
148  
149  	*low = lo;
150  	*high = hi;
151  }
152  EXPORT_SYMBOL(inet_sk_get_local_port_range);
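
/* Editorial illustration, not part of this file: inet->local_port_range above
 * is the per-socket override set from userspace with the IP_LOCAL_PORT_RANGE
 * socket option on recent kernels (the constant below follows
 * include/uapi/linux/in.h).  The u32 value packs the upper bound in the high
 * 16 bits and the lower bound in the low 16 bits.  Hedged sketch with example
 * port values:
 */
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	uint32_t range = (40100u << 16) | 40000u;	/* use ports 40000..40100 */

	if (setsockopt(fd, IPPROTO_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range)))
		perror("setsockopt(IP_LOCAL_PORT_RANGE)");
	return 0;
}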
153  
154  static bool inet_use_bhash2_on_bind(const struct sock *sk)
155  {
156  #if IS_ENABLED(CONFIG_IPV6)
157  	if (sk->sk_family == AF_INET6) {
158  		int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
159  
160  		return addr_type != IPV6_ADDR_ANY &&
161  			addr_type != IPV6_ADDR_MAPPED;
162  	}
163  #endif
164  	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
165  }
166  
167  static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
168  			       kuid_t sk_uid, bool relax,
169  			       bool reuseport_cb_ok, bool reuseport_ok)
170  {
171  	int bound_dev_if2;
172  
173  	if (sk == sk2)
174  		return false;
175  
176  	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
177  
178  	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
179  	    sk->sk_bound_dev_if == bound_dev_if2) {
180  		if (sk->sk_reuse && sk2->sk_reuse &&
181  		    sk2->sk_state != TCP_LISTEN) {
182  			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
183  				       sk2->sk_reuseport && reuseport_cb_ok &&
184  				       (sk2->sk_state == TCP_TIME_WAIT ||
185  					uid_eq(sk_uid, sock_i_uid(sk2)))))
186  				return true;
187  		} else if (!reuseport_ok || !sk->sk_reuseport ||
188  			   !sk2->sk_reuseport || !reuseport_cb_ok ||
189  			   (sk2->sk_state != TCP_TIME_WAIT &&
190  			    !uid_eq(sk_uid, sock_i_uid(sk2)))) {
191  			return true;
192  		}
193  	}
194  	return false;
195  }
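
/* Editorial illustration, not part of this file: the reuse rules above are
 * what make SO_REUSEPORT load balancing work - sockets owned by the same UID,
 * all with SO_REUSEPORT set, may bind and listen on the same address:port
 * without a conflict.  Minimal sketch (the port number is arbitrary):
 */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static int reuseport_listener(unsigned short port)
{
	struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = htons(port) };
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) || listen(fd, 128))
		perror("bind/listen");
	return fd;
}

int main(void)
{
	int a = reuseport_listener(50001);
	int b = reuseport_listener(50001);	/* no EADDRINUSE expected */

	printf("listeners %d and %d share the same port\n", a, b);
	return 0;
}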
196  
197  static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
198  				   kuid_t sk_uid, bool relax,
199  				   bool reuseport_cb_ok, bool reuseport_ok)
200  {
201  	if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
202  		return false;
203  
204  	return inet_bind_conflict(sk, sk2, sk_uid, relax,
205  				  reuseport_cb_ok, reuseport_ok);
206  }
207  
208  static bool inet_bhash2_conflict(const struct sock *sk,
209  				 const struct inet_bind2_bucket *tb2,
210  				 kuid_t sk_uid,
211  				 bool relax, bool reuseport_cb_ok,
212  				 bool reuseport_ok)
213  {
214  	struct inet_timewait_sock *tw2;
215  	struct sock *sk2;
216  
217  	sk_for_each_bound_bhash2(sk2, &tb2->owners) {
218  		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
219  					   reuseport_cb_ok, reuseport_ok))
220  			return true;
221  	}
222  
223  	twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
224  		sk2 = (struct sock *)tw2;
225  
226  		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
227  					   reuseport_cb_ok, reuseport_ok))
228  			return true;
229  	}
230  
231  	return false;
232  }
233  
234  /* This should be called only when the tb and tb2 hashbuckets' locks are held */
235  static int inet_csk_bind_conflict(const struct sock *sk,
236  				  const struct inet_bind_bucket *tb,
237  				  const struct inet_bind2_bucket *tb2, /* may be null */
238  				  bool relax, bool reuseport_ok)
239  {
240  	bool reuseport_cb_ok;
241  	struct sock_reuseport *reuseport_cb;
242  	kuid_t uid = sock_i_uid((struct sock *)sk);
243  
244  	rcu_read_lock();
245  	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
246  	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
247  	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
248  	rcu_read_unlock();
249  
250  	/*
251  	 * Unlike other sk lookup places we do not check
252  	 * for sk_net here, since _all_ the socks listed
253  	 * in tb->owners and tb2->owners list belong
254  	 * to the same net - the one this bucket belongs to.
255  	 */
256  
257  	if (!inet_use_bhash2_on_bind(sk)) {
258  		struct sock *sk2;
259  
260  		sk_for_each_bound(sk2, &tb->owners)
261  			if (inet_bind_conflict(sk, sk2, uid, relax,
262  					       reuseport_cb_ok, reuseport_ok) &&
263  			    inet_rcv_saddr_equal(sk, sk2, true))
264  				return true;
265  
266  		return false;
267  	}
268  
269  	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
270  	 * ipv4) should have been checked already. We need to do these two
271  	 * checks separately because their spinlocks have to be acquired/released
272  	 * independently of each other, to prevent possible deadlocks
273  	 */
274  	return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
275  					   reuseport_ok);
276  }
277  
278  /* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
279   * INADDR_ANY (if ipv4) socket.
280   *
281   * Caller must hold bhash hashbucket lock with local bh disabled, to protect
282   * against concurrent binds on the port for addr any
283   */
284  static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
285  					  bool relax, bool reuseport_ok)
286  {
287  	kuid_t uid = sock_i_uid((struct sock *)sk);
288  	const struct net *net = sock_net(sk);
289  	struct sock_reuseport *reuseport_cb;
290  	struct inet_bind_hashbucket *head2;
291  	struct inet_bind2_bucket *tb2;
292  	bool conflict = false;
293  	bool reuseport_cb_ok;
294  
295  	rcu_read_lock();
296  	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
297  	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
298  	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
299  	rcu_read_unlock();
300  
301  	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);
302  
303  	spin_lock(&head2->lock);
304  
305  	inet_bind_bucket_for_each(tb2, &head2->chain) {
306  		if (!inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
307  			continue;
308  
309  		if (!inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,	reuseport_ok))
310  			continue;
311  
312  		conflict = true;
313  		break;
314  	}
315  
316  	spin_unlock(&head2->lock);
317  
318  	return conflict;
319  }
320  
321  /*
322   * Find an open port number for the socket.  Returns with the
323   * inet_bind_hashbucket locks held if successful.
324   */
325  static struct inet_bind_hashbucket *
326  inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
327  			struct inet_bind2_bucket **tb2_ret,
328  			struct inet_bind_hashbucket **head2_ret, int *port_ret)
329  {
330  	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
331  	int i, low, high, attempt_half, port, l3mdev;
332  	struct inet_bind_hashbucket *head, *head2;
333  	struct net *net = sock_net(sk);
334  	struct inet_bind2_bucket *tb2;
335  	struct inet_bind_bucket *tb;
336  	u32 remaining, offset;
337  	bool relax = false;
338  
339  	l3mdev = inet_sk_bound_l3mdev(sk);
340  ports_exhausted:
341  	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
342  other_half_scan:
343  	inet_sk_get_local_port_range(sk, &low, &high);
344  	high++; /* [32768, 60999] -> [32768, 61000[ */
345  	if (high - low < 4)
346  		attempt_half = 0;
347  	if (attempt_half) {
348  		int half = low + (((high - low) >> 2) << 1);
349  
350  		if (attempt_half == 1)
351  			high = half;
352  		else
353  			low = half;
354  	}
355  	remaining = high - low;
356  	if (likely(remaining > 1))
357  		remaining &= ~1U;
358  
359  	offset = get_random_u32_below(remaining);
360  	/* __inet_hash_connect() favors ports having @low parity
361  	 * We do the opposite to not pollute connect() users.
362  	 */
363  	offset |= 1U;
364  
365  other_parity_scan:
366  	port = low + offset;
367  	for (i = 0; i < remaining; i += 2, port += 2) {
368  		if (unlikely(port >= high))
369  			port -= remaining;
370  		if (inet_is_local_reserved_port(net, port))
371  			continue;
372  		head = &hinfo->bhash[inet_bhashfn(net, port,
373  						  hinfo->bhash_size)];
374  		spin_lock_bh(&head->lock);
375  		if (inet_use_bhash2_on_bind(sk)) {
376  			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
377  				goto next_port;
378  		}
379  
380  		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
381  		spin_lock(&head2->lock);
382  		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
383  		inet_bind_bucket_for_each(tb, &head->chain)
384  			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
385  				if (!inet_csk_bind_conflict(sk, tb, tb2,
386  							    relax, false))
387  					goto success;
388  				spin_unlock(&head2->lock);
389  				goto next_port;
390  			}
391  		tb = NULL;
392  		goto success;
393  next_port:
394  		spin_unlock_bh(&head->lock);
395  		cond_resched();
396  	}
397  
398  	offset--;
399  	if (!(offset & 1))
400  		goto other_parity_scan;
401  
402  	if (attempt_half == 1) {
403  		/* OK we now try the upper half of the range */
404  		attempt_half = 2;
405  		goto other_half_scan;
406  	}
407  
408  	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
409  		/* We still have a chance to connect to different destinations */
410  		relax = true;
411  		goto ports_exhausted;
412  	}
413  	return NULL;
414  success:
415  	*port_ret = port;
416  	*tb_ret = tb;
417  	*tb2_ret = tb2;
418  	*head2_ret = head2;
419  	return head;
420  }
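
/* Editorial illustration, not part of this file: a standalone sketch of the
 * range/parity arithmetic used above, with the default 32768..60999 range.
 * The bind-time search walks odd ports only, leaving the @low-parity
 * (typically even) ports to __inet_hash_connect(), per the comment above.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int low = 32768, high = 60999 + 1;		/* [32768, 61000) */
	int half = low + (((high - low) >> 2) << 1);	/* upper bound of lower half */
	unsigned int remaining = (unsigned int)(high - low) & ~1U;
	unsigned int offset = ((unsigned int)rand() % remaining) | 1U;	/* odd */

	printf("half=%d remaining=%u first candidate port=%u\n",
	       half, remaining, (unsigned int)low + offset);
	return 0;
}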
421  
422  static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
423  				     struct sock *sk)
424  {
425  	kuid_t uid = sock_i_uid(sk);
426  
427  	if (tb->fastreuseport <= 0)
428  		return 0;
429  	if (!sk->sk_reuseport)
430  		return 0;
431  	if (rcu_access_pointer(sk->sk_reuseport_cb))
432  		return 0;
433  	if (!uid_eq(tb->fastuid, uid))
434  		return 0;
435  	/* We only need to check the rcv_saddr if this tb was once marked
436  	 * without fastreuseport and then was reset, as we can only know that
437  	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
438  	 * owners list.
439  	 */
440  	if (tb->fastreuseport == FASTREUSEPORT_ANY)
441  		return 1;
442  #if IS_ENABLED(CONFIG_IPV6)
443  	if (tb->fast_sk_family == AF_INET6)
444  		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
445  					    inet6_rcv_saddr(sk),
446  					    tb->fast_rcv_saddr,
447  					    sk->sk_rcv_saddr,
448  					    tb->fast_ipv6_only,
449  					    ipv6_only_sock(sk), true, false);
450  #endif
451  	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
452  				    ipv6_only_sock(sk), true, false);
453  }
454  
455  void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
456  			       struct sock *sk)
457  {
458  	kuid_t uid = sock_i_uid(sk);
459  	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
460  
461  	if (hlist_empty(&tb->owners)) {
462  		tb->fastreuse = reuse;
463  		if (sk->sk_reuseport) {
464  			tb->fastreuseport = FASTREUSEPORT_ANY;
465  			tb->fastuid = uid;
466  			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
467  			tb->fast_ipv6_only = ipv6_only_sock(sk);
468  			tb->fast_sk_family = sk->sk_family;
469  #if IS_ENABLED(CONFIG_IPV6)
470  			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
471  #endif
472  		} else {
473  			tb->fastreuseport = 0;
474  		}
475  	} else {
476  		if (!reuse)
477  			tb->fastreuse = 0;
478  		if (sk->sk_reuseport) {
479  			/* We didn't match or we don't have fastreuseport set on
480  			 * the tb, but we have sk_reuseport set on this socket
481  			 * and we know that there are no bind conflicts with
482  			 * this socket in this tb, so reset our tb's reuseport
483  			 * settings so that any subsequent sockets that match
484  			 * our current socket will be put on the fast path.
485  			 *
486  			 * If we reset we need to set FASTREUSEPORT_STRICT so we
487  			 * do extra checking for all subsequent sk_reuseport
488  			 * socks.
489  			 */
490  			if (!sk_reuseport_match(tb, sk)) {
491  				tb->fastreuseport = FASTREUSEPORT_STRICT;
492  				tb->fastuid = uid;
493  				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
494  				tb->fast_ipv6_only = ipv6_only_sock(sk);
495  				tb->fast_sk_family = sk->sk_family;
496  #if IS_ENABLED(CONFIG_IPV6)
497  				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
498  #endif
499  			}
500  		} else {
501  			tb->fastreuseport = 0;
502  		}
503  	}
504  }
505  
506  /* Obtain a reference to a local port for the given sock;
507   * if snum is zero, select any available local port.
508   * We try to allocate an odd port (and leave even ports for connect()).
509   */
510  int inet_csk_get_port(struct sock *sk, unsigned short snum)
511  {
512  	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
513  	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
514  	bool found_port = false, check_bind_conflict = true;
515  	bool bhash_created = false, bhash2_created = false;
516  	int ret = -EADDRINUSE, port = snum, l3mdev;
517  	struct inet_bind_hashbucket *head, *head2;
518  	struct inet_bind2_bucket *tb2 = NULL;
519  	struct inet_bind_bucket *tb = NULL;
520  	bool head2_lock_acquired = false;
521  	struct net *net = sock_net(sk);
522  
523  	l3mdev = inet_sk_bound_l3mdev(sk);
524  
525  	if (!port) {
526  		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
527  		if (!head)
528  			return ret;
529  
530  		head2_lock_acquired = true;
531  
532  		if (tb && tb2)
533  			goto success;
534  		found_port = true;
535  	} else {
536  		head = &hinfo->bhash[inet_bhashfn(net, port,
537  						  hinfo->bhash_size)];
538  		spin_lock_bh(&head->lock);
539  		inet_bind_bucket_for_each(tb, &head->chain)
540  			if (inet_bind_bucket_match(tb, net, port, l3mdev))
541  				break;
542  	}
543  
544  	if (!tb) {
545  		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
546  					     head, port, l3mdev);
547  		if (!tb)
548  			goto fail_unlock;
549  		bhash_created = true;
550  	}
551  
552  	if (!found_port) {
553  		if (!hlist_empty(&tb->owners)) {
554  			if (sk->sk_reuse == SK_FORCE_REUSE ||
555  			    (tb->fastreuse > 0 && reuse) ||
556  			    sk_reuseport_match(tb, sk))
557  				check_bind_conflict = false;
558  		}
559  
560  		if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
561  			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
562  				goto fail_unlock;
563  		}
564  
565  		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
566  		spin_lock(&head2->lock);
567  		head2_lock_acquired = true;
568  		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
569  	}
570  
571  	if (!tb2) {
572  		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
573  					       net, head2, port, l3mdev, sk);
574  		if (!tb2)
575  			goto fail_unlock;
576  		bhash2_created = true;
577  	}
578  
579  	if (!found_port && check_bind_conflict) {
580  		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
581  			goto fail_unlock;
582  	}
583  
584  success:
585  	inet_csk_update_fastreuse(tb, sk);
586  
587  	if (!inet_csk(sk)->icsk_bind_hash)
588  		inet_bind_hash(sk, tb, tb2, port);
589  	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
590  	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
591  	ret = 0;
592  
593  fail_unlock:
594  	if (ret) {
595  		if (bhash_created)
596  			inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
597  		if (bhash2_created)
598  			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
599  						  tb2);
600  	}
601  	if (head2_lock_acquired)
602  		spin_unlock(&head2->lock);
603  	spin_unlock_bh(&head->lock);
604  	return ret;
605  }
606  EXPORT_SYMBOL_GPL(inet_csk_get_port);
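
/* Editorial illustration, not part of this file: binding to port 0 from
 * userspace is what reaches inet_csk_get_port() with snum == 0; the chosen
 * ephemeral port (preferably odd, per the comment above) is then visible via
 * getsockname().  Minimal sketch:
 */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in a = { .sin_family = AF_INET };	/* 0.0.0.0, port 0 */
	socklen_t alen = sizeof(a);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) ||
	    getsockname(fd, (struct sockaddr *)&a, &alen)) {
		perror("bind/getsockname");
		return 1;
	}
	printf("kernel chose port %u\n", ntohs(a.sin_port));
	return 0;
}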
607  
608  /*
609   * Wait for an incoming connection, avoid race conditions. This must be called
610   * with the socket locked.
611   */
612  static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
613  {
614  	struct inet_connection_sock *icsk = inet_csk(sk);
615  	DEFINE_WAIT(wait);
616  	int err;
617  
618  	/*
619  	 * True wake-one mechanism for incoming connections: only
620  	 * one process gets woken up, not the 'whole herd'.
621  	 * Since we do not 'race & poll' for established sockets
622  	 * anymore, the common case will execute the loop only once.
623  	 *
624  	 * Subtle issue: "add_wait_queue_exclusive()" will be added
625  	 * after any current non-exclusive waiters, and we know that
626  	 * it will always _stay_ after any new non-exclusive waiters
627  	 * because all non-exclusive waiters are added at the
628  	 * beginning of the wait-queue. As such, it's ok to "drop"
629  	 * our exclusiveness temporarily when we get woken up without
630  	 * having to remove and re-insert us on the wait queue.
631  	 */
632  	for (;;) {
633  		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
634  					  TASK_INTERRUPTIBLE);
635  		release_sock(sk);
636  		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
637  			timeo = schedule_timeout(timeo);
638  		sched_annotate_sleep();
639  		lock_sock(sk);
640  		err = 0;
641  		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
642  			break;
643  		err = -EINVAL;
644  		if (sk->sk_state != TCP_LISTEN)
645  			break;
646  		err = sock_intr_errno(timeo);
647  		if (signal_pending(current))
648  			break;
649  		err = -EAGAIN;
650  		if (!timeo)
651  			break;
652  	}
653  	finish_wait(sk_sleep(sk), &wait);
654  	return err;
655  }
656  
657  /*
658   * This will accept the next outstanding connection.
659   */
660  struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
661  {
662  	struct inet_connection_sock *icsk = inet_csk(sk);
663  	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
664  	struct request_sock *req;
665  	struct sock *newsk;
666  	int error;
667  
668  	lock_sock(sk);
669  
670  	/* We need to make sure that this socket is listening,
671  	 * and that it has something pending.
672  	 */
673  	error = -EINVAL;
674  	if (sk->sk_state != TCP_LISTEN)
675  		goto out_err;
676  
677  	/* Find already established connection */
678  	if (reqsk_queue_empty(queue)) {
679  		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
680  
681  		/* If this is a non blocking socket don't sleep */
682  		error = -EAGAIN;
683  		if (!timeo)
684  			goto out_err;
685  
686  		error = inet_csk_wait_for_connect(sk, timeo);
687  		if (error)
688  			goto out_err;
689  	}
690  	req = reqsk_queue_remove(queue, sk);
691  	newsk = req->sk;
692  
693  	if (sk->sk_protocol == IPPROTO_TCP &&
694  	    tcp_rsk(req)->tfo_listener) {
695  		spin_lock_bh(&queue->fastopenq.lock);
696  		if (tcp_rsk(req)->tfo_listener) {
697  			/* We are still waiting for the final ACK from 3WHS
698  			 * so can't free req now. Instead, we set req->sk to
699  			 * NULL to signify that the child socket is taken
700  			 * so reqsk_fastopen_remove() will free the req
701  			 * when 3WHS finishes (or is aborted).
702  			 */
703  			req->sk = NULL;
704  			req = NULL;
705  		}
706  		spin_unlock_bh(&queue->fastopenq.lock);
707  	}
708  
709  out:
710  	release_sock(sk);
711  	if (newsk && mem_cgroup_sockets_enabled) {
712  		int amt = 0;
713  
714  		/* atomically get the memory usage, set and charge the
715  		 * newsk->sk_memcg.
716  		 */
717  		lock_sock(newsk);
718  
719  		mem_cgroup_sk_alloc(newsk);
720  		if (newsk->sk_memcg) {
721  			/* The socket has not been accepted yet, no need
722  			 * to look at newsk->sk_wmem_queued.
723  			 */
724  			amt = sk_mem_pages(newsk->sk_forward_alloc +
725  					   atomic_read(&newsk->sk_rmem_alloc));
726  		}
727  
728  		if (amt)
729  			mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
730  						GFP_KERNEL | __GFP_NOFAIL);
731  
732  		release_sock(newsk);
733  	}
734  	if (req)
735  		reqsk_put(req);
736  
737  	if (newsk)
738  		inet_init_csk_locks(newsk);
739  
740  	return newsk;
741  out_err:
742  	newsk = NULL;
743  	req = NULL;
744  	*err = error;
745  	goto out;
746  }
747  EXPORT_SYMBOL(inet_csk_accept);
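
/* Editorial illustration, not part of this file: the userspace side of
 * inet_csk_accept().  A blocking accept() sleeps in
 * inet_csk_wait_for_connect(); on a non-blocking listener with an empty
 * accept queue it fails with EAGAIN, matching the -EAGAIN paths above.
 * The port number is arbitrary.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in a = { .sin_family = AF_INET, .sin_port = htons(50002) };
	int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);

	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) || listen(fd, 16)) {
		perror("bind/listen");
		return 1;
	}
	if (accept(fd, NULL, NULL) < 0 && errno == EAGAIN)
		printf("accept queue empty, would block\n");
	return 0;
}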
748  
749  /*
750   * Using different timers for retransmit, delayed acks and probes.
751   * We may wish to use just one timer maintaining a list of expire jiffies
752   * to optimize.
753   */
754  void inet_csk_init_xmit_timers(struct sock *sk,
755  			       void (*retransmit_handler)(struct timer_list *t),
756  			       void (*delack_handler)(struct timer_list *t),
757  			       void (*keepalive_handler)(struct timer_list *t))
758  {
759  	struct inet_connection_sock *icsk = inet_csk(sk);
760  
761  	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
762  	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
763  	timer_setup(&sk->sk_timer, keepalive_handler, 0);
764  	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
765  }
766  EXPORT_SYMBOL(inet_csk_init_xmit_timers);
767  
768  void inet_csk_clear_xmit_timers(struct sock *sk)
769  {
770  	struct inet_connection_sock *icsk = inet_csk(sk);
771  
772  	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
773  
774  	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
775  	sk_stop_timer(sk, &icsk->icsk_delack_timer);
776  	sk_stop_timer(sk, &sk->sk_timer);
777  }
778  EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
779  
780  void inet_csk_clear_xmit_timers_sync(struct sock *sk)
781  {
782  	struct inet_connection_sock *icsk = inet_csk(sk);
783  
784  	/* ongoing timer handlers need to acquire socket lock. */
785  	sock_not_owned_by_me(sk);
786  
787  	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
788  
789  	sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer);
790  	sk_stop_timer_sync(sk, &icsk->icsk_delack_timer);
791  	sk_stop_timer_sync(sk, &sk->sk_timer);
792  }
793  
794  void inet_csk_delete_keepalive_timer(struct sock *sk)
795  {
796  	sk_stop_timer(sk, &sk->sk_timer);
797  }
798  EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
799  
800  void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
801  {
802  	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
803  }
804  EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
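
/* Editorial illustration, not part of this file: for TCP, sk->sk_timer managed
 * by the two helpers above is the keepalive timer.  Userspace arms it by
 * enabling SO_KEEPALIVE, with per-socket tuning via TCP_KEEPIDLE,
 * TCP_KEEPINTVL and TCP_KEEPCNT (the values below are examples only).
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
	return 0;
}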
805  
806  struct dst_entry *inet_csk_route_req(const struct sock *sk,
807  				     struct flowi4 *fl4,
808  				     const struct request_sock *req)
809  {
810  	const struct inet_request_sock *ireq = inet_rsk(req);
811  	struct net *net = read_pnet(&ireq->ireq_net);
812  	struct ip_options_rcu *opt;
813  	struct rtable *rt;
814  
815  	rcu_read_lock();
816  	opt = rcu_dereference(ireq->ireq_opt);
817  
818  	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
819  			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
820  			   sk->sk_protocol, inet_sk_flowi_flags(sk),
821  			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
822  			   ireq->ir_loc_addr, ireq->ir_rmt_port,
823  			   htons(ireq->ir_num), sk->sk_uid);
824  	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
825  	rt = ip_route_output_flow(net, fl4, sk);
826  	if (IS_ERR(rt))
827  		goto no_route;
828  	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
829  		goto route_err;
830  	rcu_read_unlock();
831  	return &rt->dst;
832  
833  route_err:
834  	ip_rt_put(rt);
835  no_route:
836  	rcu_read_unlock();
837  	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
838  	return NULL;
839  }
840  EXPORT_SYMBOL_GPL(inet_csk_route_req);
841  
842  struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
843  					    struct sock *newsk,
844  					    const struct request_sock *req)
845  {
846  	const struct inet_request_sock *ireq = inet_rsk(req);
847  	struct net *net = read_pnet(&ireq->ireq_net);
848  	struct inet_sock *newinet = inet_sk(newsk);
849  	struct ip_options_rcu *opt;
850  	struct flowi4 *fl4;
851  	struct rtable *rt;
852  
853  	opt = rcu_dereference(ireq->ireq_opt);
854  	fl4 = &newinet->cork.fl.u.ip4;
855  
856  	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
857  			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
858  			   sk->sk_protocol, inet_sk_flowi_flags(sk),
859  			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
860  			   ireq->ir_loc_addr, ireq->ir_rmt_port,
861  			   htons(ireq->ir_num), sk->sk_uid);
862  	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
863  	rt = ip_route_output_flow(net, fl4, sk);
864  	if (IS_ERR(rt))
865  		goto no_route;
866  	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
867  		goto route_err;
868  	return &rt->dst;
869  
870  route_err:
871  	ip_rt_put(rt);
872  no_route:
873  	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
874  	return NULL;
875  }
876  EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
877  
878  /* Decide when to expire the request and when to resend SYN-ACK */
879  static void syn_ack_recalc(struct request_sock *req,
880  			   const int max_syn_ack_retries,
881  			   const u8 rskq_defer_accept,
882  			   int *expire, int *resend)
883  {
884  	if (!rskq_defer_accept) {
885  		*expire = req->num_timeout >= max_syn_ack_retries;
886  		*resend = 1;
887  		return;
888  	}
889  	*expire = req->num_timeout >= max_syn_ack_retries &&
890  		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
891  	/* Do not resend while waiting for data after ACK;
892  	 * start resending at the end of the deferring period to give
893  	 * a last chance for data or an ACK to create an established socket.
894  	 */
895  	*resend = !inet_rsk(req)->acked ||
896  		  req->num_timeout >= rskq_defer_accept - 1;
897  }
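
/* Editorial illustration, not part of this file: rskq_defer_accept used above
 * is driven by the TCP_DEFER_ACCEPT option on the listening socket; the kernel
 * then holds a connection back from the accept queue until data arrives or
 * the deferral period (rounded to SYN-ACK retransmission slots) expires.
 * Sketch with an example value:
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int secs = 5;	/* defer accept for up to ~5 seconds */

	setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
	return 0;
}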
898  
899  int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
900  {
901  	int err = req->rsk_ops->rtx_syn_ack(parent, req);
902  
903  	if (!err)
904  		req->num_retrans++;
905  	return err;
906  }
907  EXPORT_SYMBOL(inet_rtx_syn_ack);
908  
909  static struct request_sock *inet_reqsk_clone(struct request_sock *req,
910  					     struct sock *sk)
911  {
912  	struct sock *req_sk, *nreq_sk;
913  	struct request_sock *nreq;
914  
915  	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
916  	if (!nreq) {
917  		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
918  
919  		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
920  		sock_put(sk);
921  		return NULL;
922  	}
923  
924  	req_sk = req_to_sk(req);
925  	nreq_sk = req_to_sk(nreq);
926  
927  	memcpy(nreq_sk, req_sk,
928  	       offsetof(struct sock, sk_dontcopy_begin));
929  	memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
930  	       req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
931  
932  	sk_node_init(&nreq_sk->sk_node);
933  	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
934  #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
935  	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
936  #endif
937  	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
938  
939  	nreq->rsk_listener = sk;
940  
941  	/* We need not acquire fastopenq->lock
942  	 * because the child socket is locked in inet_csk_listen_stop().
943  	 */
944  	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
945  		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);
946  
947  	return nreq;
948  }
949  
950  static void reqsk_queue_migrated(struct request_sock_queue *queue,
951  				 const struct request_sock *req)
952  {
953  	if (req->num_timeout == 0)
954  		atomic_inc(&queue->young);
955  	atomic_inc(&queue->qlen);
956  }
957  
958  static void reqsk_migrate_reset(struct request_sock *req)
959  {
960  	req->saved_syn = NULL;
961  #if IS_ENABLED(CONFIG_IPV6)
962  	inet_rsk(req)->ipv6_opt = NULL;
963  	inet_rsk(req)->pktopts = NULL;
964  #else
965  	inet_rsk(req)->ireq_opt = NULL;
966  #endif
967  }
968  
969  /* return true if req was found in the ehash table */
970  static bool reqsk_queue_unlink(struct request_sock *req)
971  {
972  	struct sock *sk = req_to_sk(req);
973  	bool found = false;
974  
975  	if (sk_hashed(sk)) {
976  		struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
977  		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
978  
979  		spin_lock(lock);
980  		found = __sk_nulls_del_node_init_rcu(sk);
981  		spin_unlock(lock);
982  	}
983  
984  	return found;
985  }
986  
987  static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
988  					struct request_sock *req,
989  					bool from_timer)
990  {
991  	bool unlinked = reqsk_queue_unlink(req);
992  
993  	if (!from_timer && timer_delete_sync(&req->rsk_timer))
994  		reqsk_put(req);
995  
996  	if (unlinked) {
997  		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
998  		reqsk_put(req);
999  	}
1000  
1001  	return unlinked;
1002  }
1003  
1004  bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
1005  {
1006  	return __inet_csk_reqsk_queue_drop(sk, req, false);
1007  }
1008  EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
1009  
1010  void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
1011  {
1012  	inet_csk_reqsk_queue_drop(sk, req);
1013  	reqsk_put(req);
1014  }
1015  EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
1016  
1017  static void reqsk_timer_handler(struct timer_list *t)
1018  {
1019  	struct request_sock *req = from_timer(req, t, rsk_timer);
1020  	struct request_sock *nreq = NULL, *oreq = req;
1021  	struct sock *sk_listener = req->rsk_listener;
1022  	struct inet_connection_sock *icsk;
1023  	struct request_sock_queue *queue;
1024  	struct net *net;
1025  	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
1026  
1027  	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
1028  		struct sock *nsk;
1029  
1030  		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
1031  		if (!nsk)
1032  			goto drop;
1033  
1034  		nreq = inet_reqsk_clone(req, nsk);
1035  		if (!nreq)
1036  			goto drop;
1037  
1038  		/* The new timer for the cloned req can decrease the 2
1039  		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
1040  		 * hold another count to prevent use-after-free and
1041  		 * call reqsk_put() just before return.
1042  		 */
1043  		refcount_set(&nreq->rsk_refcnt, 2 + 1);
1044  		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1045  		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
1046  
1047  		req = nreq;
1048  		sk_listener = nsk;
1049  	}
1050  
1051  	icsk = inet_csk(sk_listener);
1052  	net = sock_net(sk_listener);
1053  	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
1054  		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
1055  	/* Normally all the openreqs are young and become mature
1056  	 * (i.e. converted to an established socket) before the first timeout.
1057  	 * If synack was not acknowledged for 1 second, it means
1058  	 * one of the following things: synack was lost, ack was lost,
1059  	 * rtt is high or nobody planned to ack (i.e. synflood).
1060  	 * When server is a bit loaded, queue is populated with old
1061  	 * open requests, reducing effective size of queue.
1062  	 * When server is well loaded, queue size reduces to zero
1063  	 * after several minutes of work. It is not synflood,
1064  	 * it is normal operation. The solution is pruning
1065  	 * too old entries overriding normal timeout, when
1066  	 * situation becomes dangerous.
1067  	 *
1068  	 * Essentially, we reserve half of room for young
1069  	 * embrions; and abort old ones without pity, if old
1070  	 * ones are about to clog our table.
1071  	 */
1072  	queue = &icsk->icsk_accept_queue;
1073  	qlen = reqsk_queue_len(queue);
1074  	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
1075  		int young = reqsk_queue_len_young(queue) << 1;
1076  
1077  		while (max_syn_ack_retries > 2) {
1078  			if (qlen < young)
1079  				break;
1080  			max_syn_ack_retries--;
1081  			young <<= 1;
1082  		}
1083  	}
1084  	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
1085  		       &expire, &resend);
1086  	req->rsk_ops->syn_ack_timeout(req);
1087  	if (!expire &&
1088  	    (!resend ||
1089  	     !inet_rtx_syn_ack(sk_listener, req) ||
1090  	     inet_rsk(req)->acked)) {
1091  		if (req->num_timeout++ == 0)
1092  			atomic_dec(&queue->young);
1093  		mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));
1094  
1095  		if (!nreq)
1096  			return;
1097  
1098  		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
1099  			/* delete timer */
1100  			__inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
1101  			goto no_ownership;
1102  		}
1103  
1104  		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
1105  		reqsk_migrate_reset(oreq);
1106  		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
1107  		reqsk_put(oreq);
1108  
1109  		reqsk_put(nreq);
1110  		return;
1111  	}
1112  
1113  	/* Even if we can clone the req, we may not need to retransmit any more
1114  	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
1115  	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
1116  	 */
1117  	if (nreq) {
1118  		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
1119  no_ownership:
1120  		reqsk_migrate_reset(nreq);
1121  		reqsk_queue_removed(queue, nreq);
1122  		__reqsk_free(nreq);
1123  	}
1124  
1125  drop:
1126  	__inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
1127  	reqsk_put(oreq);
1128  }
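
/* Editorial illustration, not part of this file: max_syn_ack_retries above
 * defaults to the per-netns sysctl net.ipv4.tcp_synack_retries unless the
 * listener overrides it via icsk_syn_retries (the TCP_SYNCNT socket option).
 * Reading the sysctl from userspace:
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_synack_retries", "r");
	int retries;

	if (f && fscanf(f, "%d", &retries) == 1)
		printf("tcp_synack_retries = %d\n", retries);
	if (f)
		fclose(f);
	return 0;
}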
1129  
1130  static bool reqsk_queue_hash_req(struct request_sock *req,
1131  				 unsigned long timeout)
1132  {
1133  	bool found_dup_sk = false;
1134  
1135  	if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk))
1136  		return false;
1137  
1138  	/* The timer needs to be set up after a successful insertion. */
1139  	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1140  	mod_timer(&req->rsk_timer, jiffies + timeout);
1141  
1142  	/* before letting lookups find us, make sure all req fields
1143  	 * are committed to memory and refcnt initialized.
1144  	 */
1145  	smp_wmb();
1146  	refcount_set(&req->rsk_refcnt, 2 + 1);
1147  	return true;
1148  }
1149  
1150  bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
1151  				   unsigned long timeout)
1152  {
1153  	if (!reqsk_queue_hash_req(req, timeout))
1154  		return false;
1155  
1156  	inet_csk_reqsk_queue_added(sk);
1157  	return true;
1158  }
1159  EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
1160  
1161  static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
1162  			   const gfp_t priority)
1163  {
1164  	struct inet_connection_sock *icsk = inet_csk(newsk);
1165  
1166  	if (!icsk->icsk_ulp_ops)
1167  		return;
1168  
1169  	icsk->icsk_ulp_ops->clone(req, newsk, priority);
1170  }
1171  
1172  /**
1173   *	inet_csk_clone_lock - clone an inet socket, and lock its clone
1174   *	@sk: the socket to clone
1175   *	@req: request_sock
1176   *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1177   *
1178   *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1179   */
1180  struct sock *inet_csk_clone_lock(const struct sock *sk,
1181  				 const struct request_sock *req,
1182  				 const gfp_t priority)
1183  {
1184  	struct sock *newsk = sk_clone_lock(sk, priority);
1185  
1186  	if (newsk) {
1187  		struct inet_connection_sock *newicsk = inet_csk(newsk);
1188  
1189  		inet_sk_set_state(newsk, TCP_SYN_RECV);
1190  		newicsk->icsk_bind_hash = NULL;
1191  		newicsk->icsk_bind2_hash = NULL;
1192  
1193  		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
1194  		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
1195  		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
1196  
1197  		/* listeners have SOCK_RCU_FREE, not the children */
1198  		sock_reset_flag(newsk, SOCK_RCU_FREE);
1199  
1200  		inet_sk(newsk)->mc_list = NULL;
1201  
1202  		newsk->sk_mark = inet_rsk(req)->ir_mark;
1203  		atomic64_set(&newsk->sk_cookie,
1204  			     atomic64_read(&inet_rsk(req)->ir_cookie));
1205  
1206  		newicsk->icsk_retransmits = 0;
1207  		newicsk->icsk_backoff	  = 0;
1208  		newicsk->icsk_probes_out  = 0;
1209  		newicsk->icsk_probes_tstamp = 0;
1210  
1211  		/* Deinitialize accept_queue to trap illegal accesses. */
1212  		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
1213  
1214  		inet_clone_ulp(req, newsk, priority);
1215  
1216  		security_inet_csk_clone(newsk, req);
1217  	}
1218  	return newsk;
1219  }
1220  EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
1221  
1222  /*
1223   * At this point, there should be no process reference to this
1224   * socket, and thus no user references at all.  Therefore we
1225   * can assume the socket waitqueue is inactive and nobody will
1226   * try to jump onto it.
1227   */
1228  void inet_csk_destroy_sock(struct sock *sk)
1229  {
1230  	WARN_ON(sk->sk_state != TCP_CLOSE);
1231  	WARN_ON(!sock_flag(sk, SOCK_DEAD));
1232  
1233  	/* It cannot be in hash table! */
1234  	WARN_ON(!sk_unhashed(sk));
1235  
1236  	/* If inet_sk(sk)->inet_num is nonzero, the socket must be bound */
1237  	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
1238  
1239  	sk->sk_prot->destroy(sk);
1240  
1241  	sk_stream_kill_queues(sk);
1242  
1243  	xfrm_sk_free_policy(sk);
1244  
1245  	this_cpu_dec(*sk->sk_prot->orphan_count);
1246  
1247  	sock_put(sk);
1248  }
1249  EXPORT_SYMBOL(inet_csk_destroy_sock);
1250  
1251  /* This function allows forcing the closure of a socket after the call to
1252   * tcp/dccp_create_openreq_child().
1253   */
1254  void inet_csk_prepare_forced_close(struct sock *sk)
1255  	__releases(&sk->sk_lock.slock)
1256  {
1257  	/* sk_clone_lock locked the socket and set refcnt to 2 */
1258  	bh_unlock_sock(sk);
1259  	sock_put(sk);
1260  	inet_csk_prepare_for_destroy_sock(sk);
1261  	inet_sk(sk)->inet_num = 0;
1262  }
1263  EXPORT_SYMBOL(inet_csk_prepare_forced_close);
1264  
1265  static int inet_ulp_can_listen(const struct sock *sk)
1266  {
1267  	const struct inet_connection_sock *icsk = inet_csk(sk);
1268  
1269  	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
1270  		return -EINVAL;
1271  
1272  	return 0;
1273  }
1274  
1275  int inet_csk_listen_start(struct sock *sk)
1276  {
1277  	struct inet_connection_sock *icsk = inet_csk(sk);
1278  	struct inet_sock *inet = inet_sk(sk);
1279  	int err;
1280  
1281  	err = inet_ulp_can_listen(sk);
1282  	if (unlikely(err))
1283  		return err;
1284  
1285  	reqsk_queue_alloc(&icsk->icsk_accept_queue);
1286  
1287  	sk->sk_ack_backlog = 0;
1288  	inet_csk_delack_init(sk);
1289  
1290  	/* There is a race window here: we announce ourselves listening,
1291  	 * but this transition is still not validated by get_port().
1292  	 * It is OK, because this socket enters the hash table only
1293  	 * after validation is complete.
1294  	 */
1295  	inet_sk_state_store(sk, TCP_LISTEN);
1296  	err = sk->sk_prot->get_port(sk, inet->inet_num);
1297  	if (!err) {
1298  		inet->inet_sport = htons(inet->inet_num);
1299  
1300  		sk_dst_reset(sk);
1301  		err = sk->sk_prot->hash(sk);
1302  
1303  		if (likely(!err))
1304  			return 0;
1305  	}
1306  
1307  	inet_sk_set_state(sk, TCP_CLOSE);
1308  	return err;
1309  }
1310  EXPORT_SYMBOL_GPL(inet_csk_listen_start);
1311  
1312  static void inet_child_forget(struct sock *sk, struct request_sock *req,
1313  			      struct sock *child)
1314  {
1315  	sk->sk_prot->disconnect(child, O_NONBLOCK);
1316  
1317  	sock_orphan(child);
1318  
1319  	this_cpu_inc(*sk->sk_prot->orphan_count);
1320  
1321  	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
1322  		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
1323  		BUG_ON(sk != req->rsk_listener);
1324  
1325  		/* Paranoid, to prevent race condition if
1326  		 * an inbound pkt destined for child is
1327  		 * blocked by sock lock in tcp_v4_rcv().
1328  		 * Also to satisfy an assertion in
1329  		 * tcp_v4_destroy_sock().
1330  		 */
1331  		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
1332  	}
1333  	inet_csk_destroy_sock(child);
1334  }
1335  
1336  struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
1337  				      struct request_sock *req,
1338  				      struct sock *child)
1339  {
1340  	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1341  
1342  	spin_lock(&queue->rskq_lock);
1343  	if (unlikely(sk->sk_state != TCP_LISTEN)) {
1344  		inet_child_forget(sk, req, child);
1345  		child = NULL;
1346  	} else {
1347  		req->sk = child;
1348  		req->dl_next = NULL;
1349  		if (queue->rskq_accept_head == NULL)
1350  			WRITE_ONCE(queue->rskq_accept_head, req);
1351  		else
1352  			queue->rskq_accept_tail->dl_next = req;
1353  		queue->rskq_accept_tail = req;
1354  		sk_acceptq_added(sk);
1355  	}
1356  	spin_unlock(&queue->rskq_lock);
1357  	return child;
1358  }
1359  EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
1360  
1361  struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
1362  					 struct request_sock *req, bool own_req)
1363  {
1364  	if (own_req) {
1365  		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
1366  		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
1367  
1368  		if (sk != req->rsk_listener) {
1369  			/* another listening sk has been selected,
1370  			 * migrate the req to it.
1371  			 */
1372  			struct request_sock *nreq;
1373  
1374  			/* hold a refcnt for the nreq->rsk_listener
1375  			 * which is assigned in inet_reqsk_clone()
1376  			 */
1377  			sock_hold(sk);
1378  			nreq = inet_reqsk_clone(req, sk);
1379  			if (!nreq) {
1380  				inet_child_forget(sk, req, child);
1381  				goto child_put;
1382  			}
1383  
1384  			refcount_set(&nreq->rsk_refcnt, 1);
1385  			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
1386  				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
1387  				reqsk_migrate_reset(req);
1388  				reqsk_put(req);
1389  				return child;
1390  			}
1391  
1392  			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
1393  			reqsk_migrate_reset(nreq);
1394  			__reqsk_free(nreq);
1395  		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
1396  			return child;
1397  		}
1398  	}
1399  	/* Too bad, another child took ownership of the request, undo. */
1400  child_put:
1401  	bh_unlock_sock(child);
1402  	sock_put(child);
1403  	return NULL;
1404  }
1405  EXPORT_SYMBOL(inet_csk_complete_hashdance);
1406  
1407  /*
1408   *	This routine closes sockets which have been at least partially
1409   *	opened, but not yet accepted.
1410   */
1411  void inet_csk_listen_stop(struct sock *sk)
1412  {
1413  	struct inet_connection_sock *icsk = inet_csk(sk);
1414  	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
1415  	struct request_sock *next, *req;
1416  
1417  	/* Following specs, it would be better either to send FIN
1418  	 * (and enter FIN-WAIT-1, it is normal close)
1419  	 * or to send active reset (abort).
1420  	 * Certainly, it is pretty dangerous while synflood, but it is
1421  	 * bad justification for our negligence 8)
1422  	 * To be honest, we are not able to make either
1423  	 * of the variants now.			--ANK
1424  	 */
1425  	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
1426  		struct sock *child = req->sk, *nsk;
1427  		struct request_sock *nreq;
1428  
1429  		local_bh_disable();
1430  		bh_lock_sock(child);
1431  		WARN_ON(sock_owned_by_user(child));
1432  		sock_hold(child);
1433  
1434  		nsk = reuseport_migrate_sock(sk, child, NULL);
1435  		if (nsk) {
1436  			nreq = inet_reqsk_clone(req, nsk);
1437  			if (nreq) {
1438  				refcount_set(&nreq->rsk_refcnt, 1);
1439  
1440  				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
1441  					__NET_INC_STATS(sock_net(nsk),
1442  							LINUX_MIB_TCPMIGRATEREQSUCCESS);
1443  					reqsk_migrate_reset(req);
1444  				} else {
1445  					__NET_INC_STATS(sock_net(nsk),
1446  							LINUX_MIB_TCPMIGRATEREQFAILURE);
1447  					reqsk_migrate_reset(nreq);
1448  					__reqsk_free(nreq);
1449  				}
1450  
1451  				/* inet_csk_reqsk_queue_add() has already
1452  				 * called inet_child_forget() on failure case.
1453  				 */
1454  				goto skip_child_forget;
1455  			}
1456  		}
1457  
1458  		inet_child_forget(sk, req, child);
1459  skip_child_forget:
1460  		reqsk_put(req);
1461  		bh_unlock_sock(child);
1462  		local_bh_enable();
1463  		sock_put(child);
1464  
1465  		cond_resched();
1466  	}
1467  	if (queue->fastopenq.rskq_rst_head) {
1468  		/* Free all the reqs queued in rskq_rst_head. */
1469  		spin_lock_bh(&queue->fastopenq.lock);
1470  		req = queue->fastopenq.rskq_rst_head;
1471  		queue->fastopenq.rskq_rst_head = NULL;
1472  		spin_unlock_bh(&queue->fastopenq.lock);
1473  		while (req != NULL) {
1474  			next = req->dl_next;
1475  			reqsk_put(req);
1476  			req = next;
1477  		}
1478  	}
1479  	WARN_ON_ONCE(sk->sk_ack_backlog);
1480  }
1481  EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
1482  
1483  void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
1484  {
1485  	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
1486  	const struct inet_sock *inet = inet_sk(sk);
1487  
1488  	sin->sin_family		= AF_INET;
1489  	sin->sin_addr.s_addr	= inet->inet_daddr;
1490  	sin->sin_port		= inet->inet_dport;
1491  }
1492  EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
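
/* Editorial illustration, not part of this file: the helper above fills a
 * sockaddr_in with the connected peer's address and port - the same
 * information a userspace caller sees from getpeername() on a connected
 * AF_INET socket (fd below is assumed to be already connected):
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void print_peer(int fd)
{
	struct sockaddr_in peer;
	socklen_t len = sizeof(peer);
	char buf[INET_ADDRSTRLEN];

	if (!getpeername(fd, (struct sockaddr *)&peer, &len))
		printf("peer %s:%u\n",
		       inet_ntop(AF_INET, &peer.sin_addr, buf, sizeof(buf)),
		       ntohs(peer.sin_port));
}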
1493  
1494  static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
1495  {
1496  	const struct inet_sock *inet = inet_sk(sk);
1497  	const struct ip_options_rcu *inet_opt;
1498  	__be32 daddr = inet->inet_daddr;
1499  	struct flowi4 *fl4;
1500  	struct rtable *rt;
1501  
1502  	rcu_read_lock();
1503  	inet_opt = rcu_dereference(inet->inet_opt);
1504  	if (inet_opt && inet_opt->opt.srr)
1505  		daddr = inet_opt->opt.faddr;
1506  	fl4 = &fl->u.ip4;
1507  	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
1508  				   inet->inet_saddr, inet->inet_dport,
1509  				   inet->inet_sport, sk->sk_protocol,
1510  				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1511  	if (IS_ERR(rt))
1512  		rt = NULL;
1513  	if (rt)
1514  		sk_setup_caps(sk, &rt->dst);
1515  	rcu_read_unlock();
1516  
1517  	return &rt->dst;
1518  }
1519  
1520  struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
1521  {
1522  	struct dst_entry *dst = __sk_dst_check(sk, 0);
1523  	struct inet_sock *inet = inet_sk(sk);
1524  
1525  	if (!dst) {
1526  		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1527  		if (!dst)
1528  			goto out;
1529  	}
1530  	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
1531  
1532  	dst = __sk_dst_check(sk, 0);
1533  	if (!dst)
1534  		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1535  out:
1536  	return dst;
1537  }
1538  EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
1539