/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but we really put all sockets
 * but LISTEN ones.
 */
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-) -DaveM
 */
#define FASTREUSEPORT_ANY	1
#define FASTREUSEPORT_STRICT	2

struct inet_bind_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
	signed char		fastreuse;
	signed char		fastreuseport;
	kuid_t			fastuid;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		fast_v6_rcv_saddr;
#endif
	__be32			fast_rcv_saddr;
	unsigned short		fast_sk_family;
	bool			fast_ipv6_only;
	struct hlist_node	node;
	struct hlist_head	owners;
};

static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
	hlist_for_each_entry(tb, head, node)
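
/* Illustrative sketch only (not part of this header's API): one way the
 * fastreuse flag described in the big comment above could be maintained as
 * sockets are added to a bind bucket.  The real update, which also tracks
 * fastreuseport and fastuid, lives in the bind/listen path.
 */
static inline void inet_bind_bucket_fastreuse_sketch(struct inet_bind_bucket *tb,
						     const struct sock *sk)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->owners))
		tb->fastreuse = reuse;	/* first owner decides the flag */
	else if (!reuse)
		tb->fastreuse = 0;	/* one non-reuse socket clears it */
}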

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* Sockets can be hashed in established or listening table.
 * We must use different 'nulls' end-of-chain value for all hash buckets:
 * a socket might transition from ESTABLISHED to LISTEN state without
 * an RCU grace period.  A lookup in the ehash table needs to handle this case.
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
	spinlock_t		lock;
	unsigned int		count;
	union {
		struct hlist_head	head;
		struct hlist_nulls_head	nulls_head;
	};
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 */
	struct inet_ehash_bucket	*ehash;
	spinlock_t			*ehash_locks;
	unsigned int			ehash_mask;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct kmem_cache		*bind_bucket_cachep;
	struct inet_bind_hashbucket	*bhash;
	unsigned int			bhash_size;

	/* The 2nd listener table hashed by local port and address */
	unsigned int			lhash2_mask;
	struct inet_listen_hashbucket	*lhash2;

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * might be often dirty.
	 */
	/* All sockets in TCP_LISTEN state will be in listening_hash.
	 * This is the only table where wildcard'd TCP sockets can
	 * exist.  listening_hash is only hashed by local port number.
	 * If lhash2 is initialized, the same socket will also be hashed
	 * to lhash2 by port and address.
	 */
	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
					____cacheline_aligned_in_smp;
};

#define inet_lhash2_for_each_icsk_rcu(__icsk, list) \
	hlist_for_each_entry_rcu(__icsk, list, icsk_listen_portaddr_node)

static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
	return &h->lhash2[hash & h->lhash2_mask];
}

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	kvfree(hashinfo->ehash_locks);
	hashinfo->ehash_locks = NULL;
}

static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
					int dif, int sdif)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
				 bound_dev_if, dif, sdif);
#else
	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
#endif
}

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
			struct inet_bind_hashbucket *head,
			const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
			      struct inet_bind_bucket *tb);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
			       const u32 bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
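
/* Illustrative sketch only: how inet_bhashfn() above is meant to be combined
 * with the bhash table to find the bind bucket (and its lock) for a given
 * local port.  This mirrors what the bind path does internally.
 */
static inline struct inet_bind_hashbucket *
inet_bhash_bucket_sketch(struct inet_hashinfo *hinfo, const struct net *net,
			 unsigned short port)
{
	u32 hash = inet_bhashfn(net, port, hinfo->bhash_size);

	return &hinfo->bhash[hash];
}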

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
}

/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo_init(struct inet_hashinfo *h);
void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
			 unsigned long numentries, int scale,
			 unsigned long low_limit,
			 unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, const __be16 sport,
				    const __be32 daddr,
				    const unsigned short hnum,
				    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		struct sk_buff *skb, int doff,
		__be32 saddr, __be16 sport,
		__be32 daddr, __be16 dport, int dif, int sdif)
{
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
				      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))		&& \
	 ((__sk)->sk_addrpair == (__cookie))		&& \
	 (((__sk)->sk_bound_dev_if == (__dif))		|| \
	  ((__sk)->sk_bound_dev_if == (__sdif)))	&& \
	 net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const int __name __deprecated __attribute__((unused))

#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))		&& \
	 ((__sk)->sk_daddr == (__saddr))		&& \
	 ((__sk)->sk_rcv_saddr == (__daddr))		&& \
	 (((__sk)->sk_bound_dev_if == (__dif))		|| \
	  ((__sk)->sk_bound_dev_if == (__sdif)))	&& \
	 net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
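
/* Illustrative sketch only: how the cookie/port macros above are meant to be
 * used together when testing a single socket on an ehash chain.  The real
 * chain walk lives in __inet_lookup_established().
 */
static inline bool inet_match_sketch(struct net *net, const struct sock *sk,
				     const __be32 saddr, const __be32 daddr,
				     const __be16 sport, const u16 hnum,
				     const int dif, const int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);

	return INET_MATCH(sk, net, acookie, saddr, daddr, ports, dif, sdif);
}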

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif);

static inline struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
			const __be32 saddr, const __be16 sport,
			const __be32 daddr, const __be16 dport,
			const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif, 0);
}

static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 struct sk_buff *skb, int doff,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif, const int sdif,
					 bool *refcounted)
{
	u16 hnum = ntohs(dport);
	struct sock *sk;

	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
				       daddr, hnum, dif, sdif);
	*refcounted = true;
	if (sk)
		return sk;
	*refcounted = false;
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
				      sport, daddr, hnum, dif, sdif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			   dport, dif, 0, &refcounted);

	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
					     struct sk_buff *skb,
					     int doff,
					     const __be16 sport,
					     const __be16 dport,
					     const int sdif,
					     bool *refcounted)
{
	struct sock *sk = skb_steal_sock(skb, refcounted);
	const struct iphdr *iph = ip_hdr(skb);

	if (sk)
		return sk;

	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
			     doff, iph->saddr, sport,
			     iph->daddr, dport, inet_iif(skb), sdif,
			     refcounted);
}
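
/* Illustrative sketch only (simplified): how a receive path might consume the
 * 'refcounted' result of __inet_lookup_skb().  Listening sockets are found
 * under RCU without taking a reference, so only a refcounted lookup is paired
 * with sock_put() once the caller is done with the socket.
 */
static inline void inet_lookup_skb_sketch(struct inet_hashinfo *hashinfo,
					  struct sk_buff *skb, int doff,
					  __be16 sport, __be16 dport)
{
	bool refcounted;
	struct sock *sk;

	sk = __inet_lookup_skb(hashinfo, skb, doff, sport, dport, 0, &refcounted);
	if (!sk)
		return;

	/* ... process skb against sk here ... */

	if (refcounted)
		sock_put(sk);
}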

u32 inet6_ehashfn(const struct net *net,
		  const struct in6_addr *laddr, const u16 lport,
		  const struct in6_addr *faddr, const __be16 fport);

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_daddr = addr;	/* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_rcv_saddr = addr;	/* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			struct sock *sk, u32 port_offset,
			int (*check_established)(struct inet_timewait_death_row *,
						 struct sock *, __u16,
						 struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk);
#endif /* _INET_HASHTABLES_H */