// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

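/* Hash the (laddr, lport, faddr, fport) 4-tuple for the established
 * table. A boot-time random secret, mixed with net_hash_mix(), keeps
 * chain placement unpredictable across reboots and namespaces.
 */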
u32 inet_ehashfn(const struct net *net, const __be32 laddr,
		 const __u16 lport, const __be32 faddr,
		 const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL_GPL(inet_ehashfn);

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum,
						 int l3mdev)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->l3mdev    = l3mdev;
		tb->port      = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
			    unsigned short port, int l3mdev)
{
	return net_eq(ib_net(tb), net) && tb->port == port &&
		tb->l3mdev == l3mdev;
}

static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb,
				   struct net *net,
				   struct inet_bind_hashbucket *head,
				   unsigned short port, int l3mdev,
				   const struct sock *sk)
{
	write_pnet(&tb->ib_net, net);
	tb->l3mdev    = l3mdev;
	tb->port      = port;
#if IS_ENABLED(CONFIG_IPV6)
	tb->family    = sk->sk_family;
	if (sk->sk_family == AF_INET6)
		tb->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
	else
#endif
		tb->rcv_saddr = sk->sk_rcv_saddr;
	INIT_HLIST_HEAD(&tb->owners);
	INIT_HLIST_HEAD(&tb->deathrow);
	hlist_add_head(&tb->node, &head->chain);
}

struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
						   struct net *net,
						   struct inet_bind_hashbucket *head,
						   unsigned short port,
						   int l3mdev,
						   const struct sock *sk)
{
	struct inet_bind2_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb)
		inet_bind2_bucket_init(tb, net, head, port, l3mdev, sk);

	return tb;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
	if (hlist_empty(&tb->owners) && hlist_empty(&tb->deathrow)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

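/* Compare bound addresses across families: an AF_INET6 bucket holding a
 * v4-mapped address (::ffff:a.b.c.d) is treated as equal to an AF_INET
 * socket bound to a.b.c.d, hence the s6_addr32[3] comparisons below.
 */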
static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
					 const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family != tb2->family) {
		if (sk->sk_family == AF_INET)
			return ipv6_addr_v4mapped(&tb2->v6_rcv_saddr) &&
				tb2->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;

		return ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr) &&
			sk->sk_v6_rcv_saddr.s6_addr32[3] == tb2->rcv_saddr;
	}

	if (sk->sk_family == AF_INET6)
		return ipv6_addr_equal(&tb2->v6_rcv_saddr,
				       &sk->sk_v6_rcv_saddr);
#endif
	return tb2->rcv_saddr == sk->sk_rcv_saddr;
}

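/* Record that sk owns this local port: link it into the owner lists of
 * both the bhash bucket (keyed by port) and the bhash2 bucket (keyed by
 * port and address).
 */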
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, unsigned short port)
{
	inet_sk(sk)->inet_num = port;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
	sk_add_bind2_node(sk, &tb2->owners);
	inet_csk(sk)->icsk_bind2_hash = tb2;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	int bhash;

	bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size);
	head = &hashinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num);

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);

	spin_lock(&head2->lock);
	if (inet_csk(sk)->icsk_bind2_hash) {
		struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash;

		__sk_del_bind2_node(sk);
		inet_csk(sk)->icsk_bind2_hash = NULL;
		inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
	}
	spin_unlock(&head2->lock);

	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

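/* Make a child socket (e.g. a freshly created child of a listener) share
 * its listener's bind buckets for @port; see the tproxy note below for
 * the case where the child's port differs from the listener's.
 */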
int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = tcp_or_dccp_get_hashinfo(sk);
	unsigned short port = inet_sk(child)->inet_num;
	struct inet_bind_hashbucket *head, *head2;
	bool created_inet_bind_bucket = false;
	struct net *net = sock_net(sk);
	bool update_fastreuse = false;
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	int bhash, l3mdev;

	bhash = inet_bhashfn(net, port, table->bhash_size);
	head = &table->bhash[bhash];
	head2 = inet_bhashfn_portaddr(table, child, net, port);

	spin_lock(&head->lock);
	spin_lock(&head2->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	tb2 = inet_csk(sk)->icsk_bind2_hash;
	if (unlikely(!tb || !tb2)) {
		spin_unlock(&head2->lock);
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		l3mdev = inet_sk_bound_l3mdev(sk);

		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     net, head, port, l3mdev);
			if (!tb) {
				spin_unlock(&head2->lock);
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
			created_inet_bind_bucket = true;
		}
		update_fastreuse = true;

		goto bhash2_find;
	} else if (!inet_bind2_bucket_addr_match(tb2, child)) {
		l3mdev = inet_sk_bound_l3mdev(sk);

bhash2_find:
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child);
		if (!tb2) {
			tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
						       net, head2, port,
						       l3mdev, child);
			if (!tb2)
				goto error;
		}
	}
	if (update_fastreuse)
		inet_csk_update_fastreuse(tb, child);
	inet_bind_hash(child, tb, tb2, port);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	return 0;

error:
	if (created_inet_bind_bucket)
		inet_bind_bucket_destroy(table->bind_bucket_cachep, tb);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	else
#endif
		hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	return inet_lhash2_bucket(h, hash);
}

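/* Score a candidate listener against an incoming packet, as implemented
 * below: -1 means no match; a match scores 2 when bound to a device,
 * else 1, plus 1 for a true AF_INET socket and 1 more when
 * sk_incoming_cpu matches, so an exact IPv4 match on the right CPU can
 * score up to 4. Note that sk_rcv_saddr must equal daddr exactly here;
 * wildcard listeners are found by the separate INADDR_ANY lookup.
 */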
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
			!ipv6_only_sock(sk)) {
		if (sk->sk_rcv_saddr != daddr)
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;
		score = sk->sk_bound_dev_if ? 2 : 1;

		if (sk->sk_family == PF_INET)
			score++;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}

/**
 * inet_lookup_reuseport() - execute reuseport logic on AF_INET socket if necessary.
 * @net: network namespace.
 * @sk: AF_INET socket, must be in TCP_LISTEN state for TCP or TCP_CLOSE for UDP.
 * @skb: context for a potential SK_REUSEPORT program.
 * @doff: header offset.
 * @saddr: source address.
 * @sport: source port.
 * @daddr: destination address.
 * @hnum: destination port in host byte order.
 * @ehashfn: hash function used to generate the fallback hash.
 *
 * Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
 *         the selected sock or an error.
 */
struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
				   struct sk_buff *skb, int doff,
				   __be32 saddr, __be16 sport,
				   __be32 daddr, unsigned short hnum,
				   inet_ehashfn_t *ehashfn)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn,
					net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}
EXPORT_SYMBOL_GPL(inet_lookup_reuseport);

/*
 * There are some nice properties to exploit here. The BSD API
 * does not allow a listening sock to specify the remote port or the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */

/* called with rcu_read_lock() : No refcount taken on the socket */
static struct sock *inet_lhash2_lookup(struct net *net,
				struct inet_listen_hashbucket *ilb2,
				struct sk_buff *skb, int doff,
				const __be32 saddr, __be16 sport,
				const __be32 daddr, const unsigned short hnum,
				const int dif, const int sdif)
{
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	int score, hiscore = 0;

	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = inet_lookup_reuseport(net, sk, skb, doff,
						       saddr, sport, daddr, hnum, inet_ehashfn);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

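/* Give a BPF sk_lookup program a chance to pick the receiving socket.
 * If the program selects a socket and does not opt out of reuseport,
 * the ordinary reuseport selection then runs on that socket.
 */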
struct sock *inet_lookup_run_sk_lookup(struct net *net,
				       int protocol,
				       struct sk_buff *skb, int doff,
				       __be32 saddr, __be16 sport,
				       __be32 daddr, u16 hnum, const int dif,
				       inet_ehashfn_t *ehashfn)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	no_reuseport = bpf_sk_lookup_run_v4(net, protocol, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum,
					 ehashfn);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    hashinfo == net->ipv4.tcp_death_row.hashinfo) {
		result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff,
						   saddr, sport, daddr, hnum, dif,
						   inet_ehashfn);
		if (result)
			goto done;
	}

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with INADDR_ANY */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, htonl(INADDR_ANY), hnum,
				    dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

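/* Lockless (RCU) lookup in the established hash. The refcount is taken
 * with refcount_inc_not_zero() and the keys are then re-checked: with
 * sockets typically allocated from a SLAB_TYPESAFE_BY_RCU cache, the
 * entry may have been freed and reused for another connection under us,
 * in which case the reference is dropped and the walk restarts.
 */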
struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif, const int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!inet_match(net, sk, acookie,
						 ports, dif, sdif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

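/* Used by connect(): verify that the chosen 4-tuple is not already in
 * use in ehash. A conflicting TIME_WAIT entry may be recycled when
 * twsk_unique() allows it; the timewait socket is then returned through
 * *twp (if given) for the caller to dispose of.
 */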
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	int sdif = l3mdev_master_ifindex_by_index(net, dif);
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * a socket with a funny identity in the hash table.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static u64 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Searches for an existing socket in the ehash bucket list.
 * Returns true if found, false otherwise.
 */
static bool inet_ehash_lookup_by_sk(struct sock *sk,
				    struct hlist_nulls_head *list)
{
	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
	const int sdif = sk->sk_bound_dev_if;
	const int dif = sk->sk_bound_dev_if;
	const struct hlist_nulls_node *node;
	struct net *net = sock_net(sk);
	struct sock *esk;

	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);

	sk_nulls_for_each_rcu(esk, node, list) {
		if (esk->sk_hash != sk->sk_hash)
			continue;
		if (sk->sk_family == AF_INET) {
			if (unlikely(inet_match(net, esk, acookie,
						ports, dif, sdif))) {
				return true;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			if (unlikely(inet6_match(net, esk,
						 &sk->sk_v6_daddr,
						 &sk->sk_v6_rcv_saddr,
						 ports, dif, sdif))) {
				return true;
			}
		}
#endif
	}
	return false;
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 * If a duplicate socket already exists, sk is not inserted
 * and *found_dup_sk is set to true.
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_ehash_bucket *head;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	} else if (found_dup_sk) {
		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
		if (*found_dup_sk)
			ret = false;
	}

	if (ret)
		__sk_nulls_add_node_rcu(sk, list);

	spin_unlock(lock);

	return ret;
}

bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		this_cpu_inc(*sk->sk_prot->orphan_count);
		inet_sk_set_state(sk, TCP_CLOSE);
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);

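/* Join an existing SO_REUSEPORT group on this listen bucket if a
 * compatible listener (same family, device binding, bind bucket, owner
 * uid and bound address) exists, otherwise start a new group.
 */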
static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false))
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

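/* Hash a socket into the proper table: listeners go into lhash2,
 * joining a reuseport group first when requested; everything else is
 * added to the established hash via inet_ehash_nolisten().
 */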
int __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_listen_hashbucket *ilb2;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		local_bh_disable();
		inet_ehash_nolisten(sk, osk, NULL);
		local_bh_enable();
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);

	spin_lock(&ilb2->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb2);
		if (err)
			goto unlock;
	}
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		sk->sk_family == AF_INET6)
		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
	else
		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb2->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE)
		err = __inet_hash(sk, NULL);

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN) {
		struct inet_listen_hashbucket *ilb2;

		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
		/* Don't disable bottom halves while acquiring the lock to
		 * avoid circular locking dependency on PREEMPT_RT.
		 */
		spin_lock(&ilb2->lock);
		if (sk_unhashed(sk)) {
			spin_unlock(&ilb2->lock);
			return;
		}

		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_stop_listen_sock(sk);

		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock(&ilb2->lock);
	} else {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

		spin_lock_bh(lock);
		if (sk_unhashed(sk)) {
			spin_unlock_bh(lock);
			return;
		}
		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock_bh(lock);
	}
}
EXPORT_SYMBOL_GPL(inet_unhash);

static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
				    const struct net *net, unsigned short port,
				    int l3mdev, const struct sock *sk)
{
	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
	    tb->l3mdev != l3mdev)
		return false;

	return inet_bind2_bucket_addr_match(tb, sk);
}

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
				      unsigned short port, int l3mdev, const struct sock *sk)
{
	if (!net_eq(ib2_net(tb), net) || tb->port != port ||
	    tb->l3mdev != l3mdev)
		return false;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family != tb->family) {
		if (sk->sk_family == AF_INET)
			return ipv6_addr_any(&tb->v6_rcv_saddr) ||
				ipv6_addr_v4mapped_any(&tb->v6_rcv_saddr);

		return false;
	}

	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&tb->v6_rcv_saddr);
#endif
	return tb->rcv_saddr == 0;
}

/* The socket's bhash2 hashbucket spinlock must be held when this is called */
struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net,
		       unsigned short port, int l3mdev, const struct sock *sk)
{
	struct inet_bind2_bucket *bhash2 = NULL;

	inet_bind_bucket_for_each(bhash2, &head->chain)
		if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
			break;

	return bhash2;
}

struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &in6addr_any, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, 0, port);

	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}

static void inet_update_saddr(struct sock *sk, void *saddr, int family)
{
	if (family == AF_INET) {
		inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
		sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else {
		sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
	}
#endif
}

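/* Move sk to the bhash2 bucket matching its new (or reset) source
 * address. The replacement bucket is allocated up front, so the table is
 * never left half-updated under memory pressure; see the error path
 * below for the reset case where even that is impossible.
 */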
static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2, *new_tb2;
	int l3mdev = inet_sk_bound_l3mdev(sk);
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	int bhash;

	if (!inet_csk(sk)->icsk_bind2_hash) {
		/* Not bind()ed before. */
		if (reset)
			inet_reset_saddr(sk);
		else
			inet_update_saddr(sk, saddr, family);

		return 0;
	}

	/* Allocate a bind2 bucket ahead of time to avoid permanently putting
	 * the bhash2 table in an inconsistent state if a new tb2 bucket
	 * allocation fails.
	 */
	new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
	if (!new_tb2) {
		if (reset) {
			/* The (INADDR_ANY, port) bucket might have already
			 * been freed, in which case we cannot fix up
			 * icsk_bind2_hash, so we give up and unlink sk from
			 * bhash/bhash2 so as not to leave bhash2 inconsistent.
			 */
			inet_put_port(sk);
			inet_reset_saddr(sk);
		}

		return -ENOMEM;
	}

	bhash = inet_bhashfn(net, port, hinfo->bhash_size);
	head = &hinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	/* If we change saddr locklessly, another thread
	 * iterating over bhash might see a corrupted address.
	 */
	spin_lock_bh(&head->lock);

	spin_lock(&head2->lock);
	__sk_del_bind2_node(sk);
	inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
	spin_unlock(&head2->lock);

	if (reset)
		inet_reset_saddr(sk);
	else
		inet_update_saddr(sk, saddr, family);

	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	spin_lock(&head2->lock);
	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = new_tb2;
		inet_bind2_bucket_init(tb2, net, head2, port, l3mdev, sk);
	}
	sk_add_bind2_node(sk, &tb2->owners);
	inet_csk(sk)->icsk_bind2_hash = tb2;
	spin_unlock(&head2->lock);

	spin_unlock_bh(&head->lock);

	if (tb2 != new_tb2)
		kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);

	return 0;
}

int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
{
	return __inet_bhash2_update_saddr(sk, saddr, family, false);
}
EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);

void inet_bhash2_reset_saddr(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		__inet_bhash2_update_saddr(sk, NULL, 0, true);
}
EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);

/* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
 * Note that we use 32bit integers (vs RFC 'short integers')
 * because 2^16 is not a multiple of num_ephemeral and this
 * property might be used by a clever attacker.
 *
 * The RFC claims that TABLE_LENGTH=10 buckets give an improvement, but
 * attacks have since been demonstrated, so we use 65536 by default
 * instead, for stronger isolation and privacy at the expense of 256kB
 * of kernel memory.
 */
#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
static u32 *table_perturb;

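/* In outline, the double-hash walk below (a sketch of the code that
 * follows, not a separate API):
 *
 *	index  = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
 *	offset = (table_perturb[index] + (port_offset >> 32)) % remaining;
 *	port   = low + offset;
 *
 * then ports of @low parity are scanned with stride 2, followed by the
 * other parity. Each successful bind bumps table_perturb[index] so the
 * next connection hashing to the same slot starts its search elsewhere.
 */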
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u64 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_timewait_sock *tw = NULL;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	bool tb_created = false;
	u32 remaining, offset;
	int ret, i, low, high;
	int l3mdev;
	u32 index;

	if (port) {
		local_bh_disable();
		ret = check_established(death_row, sk, port, NULL);
		local_bh_enable();
		return ret;
	}

	l3mdev = inet_sk_bound_l3mdev(sk);

	inet_sk_get_local_port_range(sk, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000) */
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	get_random_sleepable_once(table_perturb,
				  INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);

	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
	offset %= remaining;

	/* In the first pass we try ports of @low parity.
	 * inet_csk_get_port() makes the opposite choice.
	 */
	offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port;
				WARN_ON(hlist_empty(&tb->owners));
				if (!check_established(death_row, sk,
						       port, &tw))
					goto ok;
				goto next_port;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port, l3mdev);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb_created = true;
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset++;
	if ((offset & 1) && remaining > 1)
		goto other_parity_scan;

	return -EADDRNOTAVAIL;

ok:
	/* Find the corresponding tb2 bucket since we need to
	 * add the socket to the bhash2 table as well
	 */
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
	spin_lock(&head2->lock);

	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net,
					       head2, port, l3mdev, sk);
		if (!tb2)
			goto error;
	}

	/* Here we want to add a little bit of randomness to the next source
	 * port that will be chosen. We use a max() with a random value here
	 * so that on low contention the randomness is maximal and on high
	 * contention it may be nonexistent.
	 */
	i = max_t(int, i, get_random_u32_below(8) * 2);
	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, tb2, port);

	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);

	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;

error:
	spin_unlock(&head2->lock);
	if (tb_created)
		inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
	spin_unlock_bh(&head->lock);
	return -ENOMEM;
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u64 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

static void init_hashinfo_lhash2(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i <= h->lhash2_mask; i++) {
		spin_lock_init(&h->lhash2[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head,
				      i + LISTENING_NULLS_BASE);
	}
}

void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
				unsigned long numentries, int scale,
				unsigned long low_limit,
				unsigned long high_limit)
{
	h->lhash2 = alloc_large_system_hash(name,
					    sizeof(*h->lhash2),
					    numentries,
					    scale,
					    0,
					    NULL,
					    &h->lhash2_mask,
					    low_limit,
					    high_limit);
	init_hashinfo_lhash2(h);

	/* this one is used for source ports of outgoing connections */
	table_perturb = alloc_large_system_hash("Table-perturb",
						sizeof(*table_perturb),
						INET_TABLE_PERTURB_SIZE,
						0, 0, NULL, NULL,
						INET_TABLE_PERTURB_SIZE,
						INET_TABLE_PERTURB_SIZE);
}

int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
{
	h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
	if (!h->lhash2)
		return -ENOMEM;

	h->lhash2_mask = INET_LHTABLE_SIZE - 1;
	/* INET_LHTABLE_SIZE must be a power of 2 */
	BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);

	init_hashinfo_lhash2(h);
	return 0;
}
EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);

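/* Size the ehash lock array. As a worked example, assuming 64-byte
 * cache lines, 4-byte spinlocks and 8 possible CPUs: 2 * 64 / 4 = 32
 * locks per cpu, times 8 and rounded up to a power of two gives 256,
 * then clamped so there is at most one lock per hash bucket.
 */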
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);

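/* Clone the global hashinfo with a private ehash for one namespace;
 * ehash_mask is simply ehash_entries - 1, so callers are expected to
 * pass a power-of-two entry count.
 */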
struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
						 unsigned int ehash_entries)
{
	struct inet_hashinfo *new_hashinfo;
	int i;

	new_hashinfo = kmemdup(hashinfo, sizeof(*hashinfo), GFP_KERNEL);
	if (!new_hashinfo)
		goto err;

	new_hashinfo->ehash = vmalloc_huge(ehash_entries * sizeof(struct inet_ehash_bucket),
					   GFP_KERNEL_ACCOUNT);
	if (!new_hashinfo->ehash)
		goto free_hashinfo;

	new_hashinfo->ehash_mask = ehash_entries - 1;

	if (inet_ehash_locks_alloc(new_hashinfo))
		goto free_ehash;

	for (i = 0; i < ehash_entries; i++)
		INIT_HLIST_NULLS_HEAD(&new_hashinfo->ehash[i].chain, i);

	new_hashinfo->pernet = true;

	return new_hashinfo;

free_ehash:
	vfree(new_hashinfo->ehash);
free_hashinfo:
	kfree(new_hashinfo);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_alloc);

void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo)
{
	if (!hashinfo->pernet)
		return;

	inet_ehash_locks_free(hashinfo);
	vfree(hashinfo->ehash);
	kfree(hashinfo);
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_free);