/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * One chain is dedicated to TIME_WAIT sockets.
 * I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
	rwlock_t	  lock;
	struct hlist_head chain;
	struct hlist_head twchain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an FTP server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 *
 * A sketch of this fastreuse bookkeeping follows the struct below.
 */
struct inet_bind_bucket {
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};
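
/*
 * Illustrative sketch, not part of this header: the fastreuse
 * bookkeeping described above, as it would apply when a socket joins a
 * bind bucket.  The real logic lives in the bind-time code
 * (inet_csk_get_port()); the helper name here is hypothetical.
 */
static inline void inet_bind_bucket_fastreuse_sketch(
	struct inet_bind_bucket *tb, const struct sock *sk)
{
	/* Once any owner fails the reuse test, the fast path is off for
	 * good and binders must walk tb->owners the slow way.
	 */
	if (tb->fastreuse &&
	    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
}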

#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * TIME_WAIT sockets use a separate chain (twchain).
	 */
	struct inet_ehash_bucket	*ehash;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	int				bhash_size;
	unsigned int			ehash_size;

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct hlist_head		listening_hash[INET_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t			lhash_lock ____cacheline_aligned;
	atomic_t			lhash_users;
	wait_queue_head_t		lhash_wait;
	struct kmem_cache		*bind_bucket_cachep;
};

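/*
 * ehash_size is always a power of two, so masking with (ehash_size - 1)
 * picks a bucket without a modulo; the same trick is used for bhash and
 * the listening table below.
 */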
static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}

extern struct inet_bind_bucket *
		    inet_bind_bucket_create(struct kmem_cache *cachep,
					    struct inet_bind_hashbucket *head,
					    const unsigned short snum);
extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
				     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
	return lport & (bhash_size - 1);
}

extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			   const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(const unsigned short num)
{
	return num & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(inet_sk(sk)->num);
}

/* Caller must disable local BH processing. */
static inline void __inet_inherit_port(struct inet_hashinfo *table,
				       struct sock *sk, struct sock *child)
{
	const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	sk_add_bind_node(child, &tb->owners);
	inet_csk(child)->icsk_bind_hash = tb;
	spin_unlock(&head->lock);
}

static inline void inet_inherit_port(struct inet_hashinfo *table,
				     struct sock *sk, struct sock *child)
{
	local_bh_disable();
	__inet_inherit_port(table, sk, child);
	local_bh_enable();
}

extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);

/*
 * - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 * - A usage sketch follows inet_listen_unlock() below.
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
	/* read_lock synchronizes readers with candidate writers */
	read_lock(&hashinfo->lhash_lock);
	atomic_inc(&hashinfo->lhash_users);
	read_unlock(&hashinfo->lhash_lock);
}

static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
	if (atomic_dec_and_test(&hashinfo->lhash_users))
		wake_up(&hashinfo->lhash_wait);
}
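
/*
 * Illustrative walker, an assumption rather than an existing kernel
 * helper: a sleepable reader scans the listening hash bracketed by
 * inet_listen_lock()/inet_listen_unlock(), much like the /proc seq
 * walkers in net/ipv4/tcp_ipv4.c do.
 */
static inline int inet_count_listeners_sketch(struct inet_hashinfo *hashinfo)
{
	struct sock *sk;
	struct hlist_node *node;
	int i, count = 0;

	inet_listen_lock(hashinfo);	/* bump lhash_users; writers wait */
	for (i = 0; i < INET_LHTABLE_SIZE; i++)
		sk_for_each(sk, node, &hashinfo->listening_hash[i])
			count++;
	inet_listen_unlock(hashinfo);	/* last reader out wakes any writer */
	return count;
}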

static inline void __inet_hash(struct inet_hashinfo *hashinfo,
			       struct sock *sk, const int listen_possible)
{
	struct hlist_head *list;
	rwlock_t *lock;

	BUG_TRAP(sk_unhashed(sk));
	if (listen_possible && sk->sk_state == TCP_LISTEN) {
		list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
		lock = &hashinfo->lhash_lock;
		inet_listen_wlock(hashinfo);
	} else {
		struct inet_ehash_bucket *head;
		sk->sk_hash = inet_sk_ehashfn(sk);
		head = inet_ehash_bucket(hashinfo, sk->sk_hash);
		list = &head->chain;
		lock = &head->lock;
		write_lock(lock);
	}
	__sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(lock);
	if (listen_possible && sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}

static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(hashinfo, sk, 1);
		local_bh_enable();
	}
}

static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	rwlock_t *lock;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock;
		write_lock_bh(lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}

static inline int inet_iif(const struct sk_buff *skb)
{
	return ((struct rtable *)skb->dst)->rt_iif;
}

extern struct sock *__inet_lookup_listener(struct inet_hashinfo *hashinfo,
					   const __be32 daddr,
					   const unsigned short hnum,
					   const int dif);

static inline struct sock *inet_lookup_listener(struct inet_hashinfo *hashinfo,
						__be32 daddr, __be16 dport, int dif)
{
	return __inet_lookup_listener(hashinfo, daddr, ntohs(dport), dif);
}

/* Socket demux engine toys. */
/* What happens here is ugly: there's a pair of adjacent fields in
   struct inet_sock, __be16 dport followed by __u16 num.  We want to
   search by the pair, so we combine the keys into a single 32-bit value
   and compare it with the 32-bit value read from &...->dport.  Let's at
   least make sure that it's not mixed with anything else...
   On 64-bit targets we combine comparisons with a pair of adjacent
   __be32 fields in the same way.  A worked example follows the port
   macros below.
*/
typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
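
/*
 * Worked example (illustrative values): a socket connected to peer
 * port 80 from bound local port 33000 stores dport = htons(80)
 * immediately followed by num = 33000 in struct inet_sock.  A lookup
 * computes INET_COMBINED_PORTS(htons(80), 33000), which on a
 * little-endian box is ((33000 << 16) | htons(80)) and therefore equals
 * the single 32-bit load from &inet_sk(sk)->dport byte for byte.
 */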

#if (BITS_PER_LONG == 64)
typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->sk_hash == (__hash))				&&	\
	 ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie))	&&	\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
	(((__sk)->sk_hash == (__hash))				&&	\
	 ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) &&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->sk_hash == (__hash))				&&	\
	 (inet_sk(__sk)->daddr		== (__saddr))		&&	\
	 (inet_sk(__sk)->rcv_saddr	== (__daddr))		&&	\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->sk_hash == (__hash))				&&	\
	 (inet_twsk(__sk)->tw_daddr	== (__saddr))		&&	\
	 (inet_twsk(__sk)->tw_rcv_saddr	== (__daddr))		&&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
static inline struct sock *
	__inet_lookup_established(struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(&head->lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &head->twchain) {
		if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(&head->lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}

static inline struct sock *
	inet_lookup_established(struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif);
}

static inline struct sock *__inet_lookup(struct inet_hashinfo *hashinfo,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif)
{
	u16 hnum = ntohs(dport);
	struct sock *sk = __inet_lookup_established(hashinfo, saddr, sport, daddr,
						    hnum, dif);
	return sk ? : __inet_lookup_listener(hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __inet_lookup(hashinfo, saddr, sport, daddr, dport, dif);
	local_bh_enable();

	return sk;
}
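
/*
 * Usage note: receive-path code already runs with BHs disabled
 * (softirq context), so tcp_v4_rcv() calls __inet_lookup() directly,
 * roughly as follows (sketch; argument spelling varies by kernel
 * version):
 *
 *	sk = __inet_lookup(&tcp_hashinfo, iph->saddr, th->source,
 *			   iph->daddr, th->dest, inet_iif(skb));
 *
 * Process-context callers must use inet_lookup() instead, which
 * brackets the lookup with local_bh_disable()/local_bh_enable().
 */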

extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
			     struct sock *sk);
#endif /* _INET_HASHTABLES_H */