/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/sock_reuseport.h>

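/*
 * Hash the established-table 4-tuple. The secret is initialized lazily
 * on first use and perturbed with a per-namespace mix, so hash values
 * cannot be predicted from one netns to another.
 */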
static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
			const __u16 lport, const __be32 faddr,
			const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->port      = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		tb->num_owners = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

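/*
 * Record @snum as the socket's local port and tie the socket to its
 * bind bucket: link it on the bucket's owner list and let
 * icsk_bind_hash point back at the bucket. As for create/destroy
 * above, the caller must hold the bucket's hash chain lock.
 */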
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	tb->num_owners++;
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	tb->num_owners--;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

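/*
 * Give a freshly accepted child socket its own reference on the
 * listener's bind bucket, so the local port stays reserved for as long
 * as the child lives. Normally the listener's bucket can be reused
 * directly; the tproxy case noted below is the exception.
 */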
int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (unlikely(!tb)) {
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

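/*
 * Score a listening socket against an incoming packet: -1 means no
 * match, otherwise more specific bindings score higher. An exact match
 * on a bound address or a bound device is worth +4 each (a mismatch
 * disqualifies), an AF_INET socket beats a dual-stack AF_INET6 one,
 * and a socket last used on the current CPU gets a small bonus.
 */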
static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif)
{
	int score = -1;
	struct inet_sock *inet = inet_sk(sk);

	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
			!ipv6_only_sock(sk)) {
		__be32 rcv_saddr = inet->inet_rcv_saddr;
		score = sk->sk_family == PF_INET ? 2 : 1;
		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 4;
		}
		if (sk->sk_incoming_cpu == raw_smp_processor_id())
			score++;
	}
	return score;
}

/*
 * Don't inline this cruft. There are some nice properties to exploit here.
 * The BSD API does not allow a listening sock to specify the remote port
 * nor the remote address for the connection. So always assume those are
 * both wildcarded during the search since they can never be otherwise.
 */

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	int score, hiscore, matches = 0, reuseport = 0;
	bool select_ok = true;
	u32 phash = 0;

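	/*
	 * Lockless walk of the listening chain: keep the best-scoring
	 * socket, and for SO_REUSEPORT groups either let
	 * reuseport_select_sock() pick a socket by packet hash or
	 * load-balance pseudo-randomly among equal scorers.
	 */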
	rcu_read_lock();
begin:
	result = NULL;
	hiscore = 0;
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif);
		if (score > hiscore) {
			result = sk;
			hiscore = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				if (select_ok) {
					struct sock *sk2;
					sk2 = reuseport_select_sock(sk, phash,
								    skb, doff);
					if (sk2) {
						result = sk2;
						goto found;
					}
				}
				matches = 1;
			}
		} else if (score == hiscore && reuseport) {
			matches++;
			if (reciprocal_scale(phash, matches) == 0)
				result = sk;
			phash = next_pseudo_random32(phash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
		goto begin;
	if (result) {
found:
		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, daddr,
				  dif) < hiscore)) {
			sock_put(result);
			select_ok = false;
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share a common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!atomic_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

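/*
 * Lockless lookup in the established table: sockets are matched first
 * by their precomputed hash, then by the full 4-tuple. The refcount is
 * taken with atomic_inc_not_zero() and the match is re-verified
 * afterwards, since the entry may be recycled under us.
 */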
struct sock *__inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports, dif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	rcu_read_unlock();
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

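/*
 * Check whether binding @sk to local port @lport would make its
 * 4-tuple collide with an established or TIME_WAIT socket. A TIME_WAIT
 * socket may be recycled if twsk_unique() allows it; on success @sk is
 * inserted into the established table under the bucket lock and any
 * recycled timewait socket is unlinked from it.
 */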
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET_MATCH(sk2, net, acookie,
					 saddr, daddr, ports, dif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * a socket with a funny identity in the hash table.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

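/*
 * Derive a per-destination starting offset for the ephemeral port
 * search from (rcv_saddr, daddr, dport), so that connections to
 * different peers probe the port range in different orders.
 */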
static u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	struct inet_ehash_bucket *head;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	}
	if (ret)
		__sk_nulls_add_node_rcu(sk, list);
	spin_unlock(lock);
	return ret;
}

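/*
 * As inet_ehash_insert(), but also accounts the socket in the protocol
 * inuse counters. If the insert fails because @osk was already gone,
 * the new socket is marked dead and destroyed.
 */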
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
{
	bool ok = inet_ehash_insert(sk, osk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		percpu_counter_inc(sk->sk_prot->orphan_count);
		sk->sk_state = TCP_CLOSE;
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);

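/*
 * Join an existing SO_REUSEPORT group if a compatible socket (same
 * bind bucket, family, v6only flag, bound device, owning uid and
 * source address) already sits in this listening bucket; otherwise
 * allocate a fresh group, unless setsockopt() created one beforehand.
 */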
static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb,
				   int (*saddr_same)(const struct sock *sk1,
						     const struct sock *sk2,
						     bool match_wildcard))
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	struct sock *sk2;
	struct hlist_nulls_node *node;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each_rcu(sk2, node, &ilb->head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    saddr_same(sk, sk2, false))
			return reuseport_add_sock(sk, sk2);
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}

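/*
 * Hash a socket into the right table: non-listeners go straight into
 * the established table, listeners into the listening hash under the
 * bucket lock, joining a reuseport group first when applicable.
 */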
int __inet_hash(struct sock *sk, struct sock *osk,
		 int (*saddr_same)(const struct sock *sk1,
				   const struct sock *sk2,
				   bool match_wildcard))
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		inet_ehash_nolisten(sk, osk);
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb, saddr_same);
		if (err)
			goto unlock;
	}
	__sk_nulls_add_node_rcu(sk, &ilb->head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		err = __inet_hash(sk, NULL, ipv4_rcv_saddr_equal);
		local_bh_enable();
	}

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);

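/*
 * Remove a socket from whichever hash table it is in, taking the
 * listening bucket lock or the ehash chain lock as appropriate, and
 * detach it from its reuseport group.
 */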
void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	int done;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN)
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
	else
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock_bh(lock);
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);
	done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);

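/*
 * Bind @sk to a local port for an outgoing connection and hash it. If
 * a port is already bound, only a 4-tuple uniqueness check remains.
 * Otherwise the local port range is scanned starting at
 * @port_offset + hint, one parity at a time (inet_csk_get_port()
 * prefers the opposite parity, which keeps the two allocators from
 * fighting over the same ports), until @check_established accepts a
 * port or the range is exhausted.
 */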
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_timewait_sock *tw = NULL;
	struct inet_bind_hashbucket *head;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	int ret, i, low, high;
	static u32 hint;

	if (port) {
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		tb = inet_csk(sk)->icsk_bind_hash;
		spin_lock_bh(&head->lock);
		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
			inet_ehash_nolisten(sk, NULL);
			spin_unlock_bh(&head->lock);
			return 0;
		}
		spin_unlock(&head->lock);
		/* No definite answer... Walk the established hash table */
		ret = check_established(death_row, sk, port, NULL);
		local_bh_enable();
		return ret;
	}

	inet_get_local_port_range(net, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000) */
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = (hint + port_offset) % remaining;
	/* In first pass we try ports of @low parity.
	 * inet_csk_get_port() does the opposite choice.
	 */
	offset &= ~1U;
other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);

		/* Does not bother with rcv_saddr checks, because
		 * the established check is already unique enough.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), net) && tb->port == port) {
				if (tb->fastreuse >= 0 ||
				    tb->fastreuseport >= 0)
					goto next_port;
				WARN_ON(hlist_empty(&tb->owners));
				if (!check_established(death_row, sk,
						       port, &tw))
					goto ok;
				goto next_port;
			}
		}

		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					     net, head, port);
		if (!tb) {
			spin_unlock_bh(&head->lock);
			return -ENOMEM;
		}
		tb->fastreuse = -1;
		tb->fastreuseport = -1;
		goto ok;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset++;
	if ((offset & 1) && remaining > 1)
		goto other_parity_scan;

	return -EADDRNOTAVAIL;

ok:
	hint += i + 2;

	/* Head lock still held and bh's disabled */
	inet_bind_hash(sk, tb, port);
	if (sk_unhashed(sk)) {
		inet_sk(sk)->inet_sport = htons(port);
		inet_ehash_nolisten(sk, (struct sock *)tw);
	}
	if (tw)
		inet_twsk_bind_unhash(tw, hinfo);
	spin_unlock(&head->lock);
	if (tw)
		inet_twsk_deschedule_put(tw);
	local_bh_enable();
	return 0;
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u32 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

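/*
 * Initialize the listening hash buckets. Each chain gets a distinct
 * nulls end marker (i + LISTENING_NULLS_BASE) so that lockless walkers
 * can tell whether they finished on the chain they started on.
 */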
void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
				      i + LISTENING_NULLS_BASE);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);

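/*
 * Size the array of established-table chain locks: roughly two cache
 * lines worth of spinlocks per possible CPU, rounded up to a power of
 * two for cheap masking and capped at the number of hash buckets.
 * kmalloc is tried first, with a vmalloc fallback for large arrays.
 * If spinlock_t is empty (!CONFIG_SMP and no lock debugging), no
 * allocation is needed at all.
 */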
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
						      GFP_KERNEL | __GFP_NOWARN);
		if (!hashinfo->ehash_locks)
			hashinfo->ehash_locks = vmalloc(nblocks * locksz);

		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
727