xref: /openbmc/linux/include/net/inet_hashtables.h (revision f15cbe6f1a4b4d9df59142fc8e4abb973302cf44)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * One chain is dedicated to TIME_WAIT sockets.
 * I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
	struct hlist_head chain;
	struct hlist_head twchain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN)).
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit; if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an FTP server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
struct inet_bind_bucket {
	struct net		*ib_net;
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};
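
/*
 * Illustrative sketch of the fastreuse fast path described above; the helper
 * below is hypothetical, the real check lives in inet_csk_get_port()
 * (net/ipv4/inet_connection_sock.c).  When tb->fastreuse is positive, every
 * current owner already passed the (sk_reuse && state != TCP_LISTEN) test,
 * so a like-minded socket may take the port without walking tb->owners:
 *
 *	static inline int bind_may_share_port(const struct sock *newsk,
 *					      const struct inet_bind_bucket *tb)
 *	{
 *		if (tb->fastreuse > 0 &&
 *		    newsk->sk_reuse && newsk->sk_state != TCP_LISTEN)
 *			return 1;
 *		return 0;
 *	}
 */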

#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * TIME_WAIT sockets use a separate chain (twchain).
	 */
	struct inet_ehash_bucket	*ehash;
	rwlock_t			*ehash_locks;
	unsigned int			ehash_size;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	unsigned int			bhash_size;
	/* Note : 4 bytes padding on 64 bit arches */

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct hlist_head		listening_hash[INET_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t			lhash_lock ____cacheline_aligned;
	atomic_t			lhash_users;
	wait_queue_head_t		lhash_wait;
	struct kmem_cache		*bind_bucket_cachep;
};

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}
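
/*
 * ehash_size is a power of two, so the mask above is a cheap modulo.
 * Worked example (values are illustrative only): with ehash_size = 8192 the
 * mask is 0x1fff, so hash 0x9a3f71c2 selects bucket 0x9a3f71c2 & 0x1fff =
 * 0x11c2.
 */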

static inline rwlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
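
/*
 * The lock array is smaller than the bucket array, so one rwlock may cover
 * many buckets; always pair the two helpers above with the same hash value.
 * Typical reader pattern (condensed sketch of what
 * __inet_lookup_established() does):
 *
 *	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
 *	rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
 *
 *	read_lock(lock);
 *	... walk head->chain and head->twchain ...
 *	read_unlock(lock);
 */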

static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int i, size = 256;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif
	if (nr_pcpus >= 4)
		size = 512;
	if (nr_pcpus >= 8)
		size = 1024;
	if (nr_pcpus >= 16)
		size = 2048;
	if (nr_pcpus >= 32)
		size = 4096;
	if (sizeof(rwlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(rwlock_t) > PAGE_SIZE)
			hashinfo->ehash_locks = vmalloc(size * sizeof(rwlock_t));
		else
#endif
		hashinfo->ehash_locks =	kmalloc(size * sizeof(rwlock_t),
						GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			rwlock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = size - 1;
	return 0;
}
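
/*
 * Sizing example (illustrative, sizes vary by architecture and debug
 * options): with 16 possible CPUs the ladder above picks 2048 locks; with
 * CONFIG_PROVE_LOCKING the CPU count is pinned to 2 (so 256 locks), since
 * lockdep makes rwlock_t much larger.  On NUMA builds the array falls back
 * to vmalloc() once size * sizeof(rwlock_t) exceeds PAGE_SIZE, e.g. 2048
 * locks at 8 bytes each would be 16 KB, larger than a 4 KB page.
 */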

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	if (hashinfo->ehash_locks) {
#ifdef CONFIG_NUMA
		unsigned int size = (hashinfo->ehash_locks_mask + 1) *
							sizeof(rwlock_t);
		if (size > PAGE_SIZE)
			vfree(hashinfo->ehash_locks);
		else
#endif
		kfree(hashinfo->ehash_locks);
		hashinfo->ehash_locks = NULL;
	}
}

extern struct inet_bind_bucket *
		    inet_bind_bucket_create(struct kmem_cache *cachep,
					    struct net *net,
					    struct inet_bind_hashbucket *head,
					    const unsigned short snum);
extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
				     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(struct net *net,
		const __u16 lport, const int bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
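
/*
 * Worked example (assuming net_hash_mix(net) contributes 0, as it does when
 * network namespaces are not compiled in): with bhash_size = 65536, local
 * port 8080 lands in bhash bucket 8080; with bhash_size = 1024 it lands in
 * bucket 8080 & 1023 = 912.
 */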

extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			   const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(struct net *net, const unsigned short num)
{
	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(sock_net(sk), inet_sk(sk)->num);
}
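
/*
 * Worked example (again assuming a zero net_hash_mix() contribution): with
 * INET_LHTABLE_SIZE = 32 the listen hash is simply the local port modulo 32,
 * so listeners on ports 80 and 8080 both fall into bucket 16 and share one
 * chain.  __inet_lookup_listener() then walks that chain and prefers the
 * candidate with an exact rcv_saddr or bound device match over a pure
 * wildcard listener.
 */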

/* Caller must disable local BH processing. */
extern void __inet_inherit_port(struct sock *sk, struct sock *child);

extern void inet_put_port(struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);

/*
 * - The holder of this lock may sleep.
 * - If sleeping is not required (or the caller runs in BH context),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock) instead.
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
	/* The read_lock serializes new lock candidates against writers. */
	read_lock(&hashinfo->lhash_lock);
	atomic_inc(&hashinfo->lhash_users);
	read_unlock(&hashinfo->lhash_lock);
}

static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
	if (atomic_dec_and_test(&hashinfo->lhash_users))
		wake_up(&hashinfo->lhash_wait);
}
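
/*
 * Usage sketch (informal): a process-context reader that may sleep while
 * scanning the listening table brackets the walk with the pair above, which
 * only bumps lhash_users; writers use inet_listen_wlock(), which waits for
 * that count to drain before taking the write lock:
 *
 *	inet_listen_lock(hashinfo);
 *	sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
 *		... may sleep here ...
 *	}
 *	inet_listen_unlock(hashinfo);
 */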

extern void __inet_hash_nolisten(struct sock *sk);
extern void inet_hash(struct sock *sk);
extern void inet_unhash(struct sock *sk);

extern struct sock *__inet_lookup_listener(struct net *net,
					   struct inet_hashinfo *hashinfo,
					   const __be32 daddr,
					   const unsigned short hnum,
					   const int dif);

static inline struct sock *inet_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		__be32 daddr, __be16 dport, int dif)
{
	return __inet_lookup_listener(net, hashinfo, daddr, ntohs(dport), dif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
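
/*
 * Usage (as in __inet_lookup_established(): __sport is the packet's source
 * port, already in network byte order, and __dport is the local port in
 * host order):
 *
 *	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 *
 * The resulting 32-bit value lines up with a single load of the adjacent
 * { __be16 dport; __u16 num; } pair in struct inet_sock, so one compare
 * checks both ports at once.
 */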

#if (BITS_PER_LONG == 64)
typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)	&&	\
	 ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie))	&&	\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)	&&	\
	 ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) &&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)	&&	\
	 (inet_sk(__sk)->daddr		== (__saddr))		&&	\
	 (inet_sk(__sk)->rcv_saddr	== (__daddr))		&&	\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)	&&	\
	 (inet_twsk(__sk)->tw_daddr	== (__saddr))		&&	\
	 (inet_twsk(__sk)->tw_rcv_saddr	== (__daddr))		&&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
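
/*
 * Condensed sketch of how __inet_lookup_established()
 * (net/ipv4/inet_hashtables.c) combines the helpers above; saddr/sport here
 * are the remote end from the socket's point of view:
 *
 *	INET_ADDR_COOKIE(acookie, saddr, daddr)
 *	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 *	...
 *	sk_for_each(sk, node, &head->chain) {
 *		if (INET_MATCH(sk, net, hash, acookie,
 *			       saddr, daddr, ports, dif))
 *			goto hit;
 *	}
 *	sk_for_each(sk, node, &head->twchain) {
 *		if (INET_TW_MATCH(sk, net, hash, acookie,
 *				  saddr, daddr, ports, dif))
 *			goto hit;
 *	}
 */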

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
extern struct sock * __inet_lookup_established(struct net *net,
		struct inet_hashinfo *hashinfo,
		const __be32 saddr, const __be16 sport,
		const __be32 daddr, const u16 hnum, const int dif);

static inline struct sock *
	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif);
}

static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif)
{
	u16 hnum = ntohs(dport);
	struct sock *sk = __inet_lookup_established(net, hashinfo,
				saddr, sport, daddr, hnum, dif);

	return sk ? : __inet_lookup_listener(net, hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __inet_lookup(net, hashinfo, saddr, sport, daddr, dport, dif);
	local_bh_enable();

	return sk;
}
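
/*
 * Caller sketch (informal): softirq receive paths such as tcp_v4_rcv()
 * already run with BH disabled and call __inet_lookup() directly, passing
 * the packet's addresses and ports in network byte order:
 *
 *	sk = __inet_lookup(net, &tcp_hashinfo,
 *			   iph->saddr, th->source,
 *			   iph->daddr, th->dest, inet_iif(skb));
 *
 * Process-context callers use inet_lookup(), which wraps the same call in
 * local_bh_disable()/local_bh_enable().
 */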

extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **),
			       void (*hash)(struct sock *sk));
extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
			     struct sock *sk);
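
/*
 * Connect-time usage (informal sketch): inet_hash_connect() picks an
 * ephemeral local port for an outgoing connection, checking each candidate
 * against bhash and ehash before hashing the socket; tcp_v4_connect() calls
 * it as
 *
 *	err = inet_hash_connect(&tcp_death_row, sk);
 *
 * __inet_hash_connect() is the generic worker, parameterized by the
 * check_established and hash callbacks so other protocols and IPv6 can
 * reuse it.
 */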
#endif /* _INET_HASHTABLES_H */