// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

u32 inet_ehashfn(const struct net *net, const __be32 laddr,
		 const __u16 lport, const __be32 faddr,
		 const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL_GPL(inet_ehashfn);

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}
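/* Note on argument order (illustrative): sk_ehashfn() hashes the 4-tuple
 * from the socket's point of view (local address/port first), while
 * receive-path lookups such as __inet_lookup_established() below call
 * inet_ehashfn(net, daddr, hnum, saddr, sport) with the packet's
 * destination first. Both describe the same tuple, so a socket and the
 * packets it receives land in the same chain. For example, a client
 * socket for 10.0.0.1:12345 -> 10.0.0.2:80 hashes
 * (10.0.0.1, 12345, 10.0.0.2, htons(80)); an incoming reply carries
 * daddr 10.0.0.1, hnum 12345, saddr 10.0.0.2, sport htons(80).
 */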
/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum,
						 int l3mdev)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->l3mdev = l3mdev;
		tb->port = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net,
			    unsigned short port, int l3mdev)
{
	return net_eq(ib_net(tb), net) && tb->port == port &&
		tb->l3mdev == l3mdev;
}

static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb,
				   struct net *net,
				   struct inet_bind_hashbucket *head,
				   unsigned short port, int l3mdev,
				   const struct sock *sk)
{
	write_pnet(&tb->ib_net, net);
	tb->l3mdev = l3mdev;
	tb->port = port;
#if IS_ENABLED(CONFIG_IPV6)
	tb->family = sk->sk_family;
	if (sk->sk_family == AF_INET6)
		tb->v6_rcv_saddr = sk->sk_v6_rcv_saddr;
	else
#endif
		tb->rcv_saddr = sk->sk_rcv_saddr;
	INIT_HLIST_HEAD(&tb->owners);
	INIT_HLIST_HEAD(&tb->deathrow);
	hlist_add_head(&tb->node, &head->chain);
}

struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
						   struct net *net,
						   struct inet_bind_hashbucket *head,
						   unsigned short port,
						   int l3mdev,
						   const struct sock *sk)
{
	struct inet_bind2_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb)
		inet_bind2_bucket_init(tb, net, head, port, l3mdev, sk);

	return tb;
}

/* Caller must hold hashbucket lock for this tb with local BH disabled */
void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
{
	if (hlist_empty(&tb->owners) && hlist_empty(&tb->deathrow)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
					 const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family != tb2->family)
		return false;

	if (sk->sk_family == AF_INET6)
		return ipv6_addr_equal(&tb2->v6_rcv_saddr,
				       &sk->sk_v6_rcv_saddr);
#endif
	return tb2->rcv_saddr == sk->sk_rcv_saddr;
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, unsigned short port)
{
	inet_sk(sk)->inet_num = port;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
	sk_add_bind2_node(sk, &tb2->owners);
	inet_csk(sk)->icsk_bind2_hash = tb2;
}
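/* Design note (illustrative): each bound port is tracked in two tables.
 * bhash buckets (inet_bind_bucket) are keyed by (net, port, l3mdev) and
 * hold every socket bound to that port; bhash2 buckets (inet_bind2_bucket)
 * are additionally keyed by the bound address, so bind(2) conflict checks
 * against a specific address only have to walk sockets sharing that
 * address. For example, sockets bound to 192.0.2.1:80 and 198.51.100.1:80
 * share one bhash bucket but live in two different bhash2 buckets.
 */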
/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb;
	int bhash;

	bhash = inet_bhashfn(net, inet_sk(sk)->inet_num, hashinfo->bhash_size);
	head = &hashinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hashinfo, sk, net, inet_sk(sk)->inet_num);

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);

	spin_lock(&head2->lock);
	if (inet_csk(sk)->icsk_bind2_hash) {
		struct inet_bind2_bucket *tb2 = inet_csk(sk)->icsk_bind2_hash;

		__sk_del_bind2_node(sk);
		inet_csk(sk)->icsk_bind2_hash = NULL;
		inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
	}
	spin_unlock(&head2->lock);

	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);
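/* Locking order note: the bhash bucket lock (head->lock) is always taken
 * before the matching bhash2 lock (head2->lock), here as well as in
 * __inet_inherit_port() and __inet_hash_connect() below. Keeping a single
 * global order between the two bind tables is what makes the nested
 * spin_lock() calls above deadlock-free.
 */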
int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = tcp_or_dccp_get_hashinfo(sk);
	unsigned short port = inet_sk(child)->inet_num;
	struct inet_bind_hashbucket *head, *head2;
	bool created_inet_bind_bucket = false;
	struct net *net = sock_net(sk);
	bool update_fastreuse = false;
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	int bhash, l3mdev;

	bhash = inet_bhashfn(net, port, table->bhash_size);
	head = &table->bhash[bhash];
	head2 = inet_bhashfn_portaddr(table, child, net, port);

	spin_lock(&head->lock);
	spin_lock(&head2->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	tb2 = inet_csk(sk)->icsk_bind2_hash;
	if (unlikely(!tb || !tb2)) {
		spin_unlock(&head2->lock);
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		l3mdev = inet_sk_bound_l3mdev(sk);

		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here. */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (inet_bind_bucket_match(tb, net, port, l3mdev))
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     net, head, port, l3mdev);
			if (!tb) {
				spin_unlock(&head2->lock);
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
			created_inet_bind_bucket = true;
		}
		update_fastreuse = true;

		goto bhash2_find;
	} else if (!inet_bind2_bucket_addr_match(tb2, child)) {
		l3mdev = inet_sk_bound_l3mdev(sk);

bhash2_find:
		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, child);
		if (!tb2) {
			tb2 = inet_bind2_bucket_create(table->bind2_bucket_cachep,
						       net, head2, port,
						       l3mdev, child);
			if (!tb2)
				goto error;
		}
	}
	if (update_fastreuse)
		inet_csk_update_fastreuse(tb, child);
	inet_bind_hash(child, tb, tb2, port);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);

	return 0;

error:
	if (created_inet_bind_bucket)
		inet_bind_bucket_destroy(table->bind_bucket_cachep, tb);
	spin_unlock(&head2->lock);
	spin_unlock(&head->lock);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

static struct inet_listen_hashbucket *
inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	else
#endif
		hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	return inet_lhash2_bucket(h, hash);
}

static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif, const int sdif)
{
	int score = -1;

	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
	    !ipv6_only_sock(sk)) {
		if (sk->sk_rcv_saddr != daddr)
			return -1;

		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
			return -1;
		score = sk->sk_bound_dev_if ? 2 : 1;

		if (sk->sk_family == PF_INET)
			score++;
		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
			score++;
	}
	return score;
}

/**
 * inet_lookup_reuseport() - execute reuseport logic on AF_INET socket if necessary.
 * @net: network namespace.
 * @sk: AF_INET socket, must be in TCP_LISTEN state for TCP or TCP_CLOSE for UDP.
 * @skb: context for a potential SK_REUSEPORT program.
 * @doff: header offset.
 * @saddr: source address.
 * @sport: source port.
 * @daddr: destination address.
 * @hnum: destination port in host byte order.
 * @ehashfn: hash function used to generate the fallback hash.
 *
 * Return: NULL if sk doesn't have SO_REUSEPORT set, otherwise a pointer to
 *         the selected sock or an error.
 */
struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk,
				   struct sk_buff *skb, int doff,
				   __be32 saddr, __be16 sport,
				   __be32 daddr, unsigned short hnum,
				   inet_ehashfn_t *ehashfn)
{
	struct sock *reuse_sk = NULL;
	u32 phash;

	if (sk->sk_reuseport) {
		phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn,
					net, daddr, hnum, saddr, sport);
		reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
	}
	return reuse_sk;
}
EXPORT_SYMBOL_GPL(inet_lookup_reuseport);
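/* Scoring example (illustrative): for a packet to 192.0.2.1:80 arriving on
 * ifindex 3, compute_score() rates candidate listeners as follows: a
 * PF_INET listener bound to 192.0.2.1:80 and to ifindex 3 scores 3 (4 if
 * sk_incoming_cpu matches this CPU); the same listener without a device
 * binding scores 2 (3 with a CPU match); an AF_INET6 dual-stack listener
 * scores one point less, since the PF_INET bonus does not apply. Listeners
 * bound to another address or device are rejected with -1; wildcard
 * listeners are found by the separate INADDR_ANY pass in
 * __inet_lookup_listener(). inet_lhash2_lookup() below keeps the
 * highest-scoring candidate.
 */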
/*
 * There are some nice properties to exploit here. The BSD API
 * does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */

/* called with rcu_read_lock() : No refcount taken on the socket */
static struct sock *inet_lhash2_lookup(struct net *net,
				       struct inet_listen_hashbucket *ilb2,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, __be16 sport,
				       const __be32 daddr, const unsigned short hnum,
				       const int dif, const int sdif)
{
	struct sock *sk, *result = NULL;
	struct hlist_nulls_node *node;
	int score, hiscore = 0;

	sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) {
		score = compute_score(sk, net, hnum, daddr, dif, sdif);
		if (score > hiscore) {
			result = inet_lookup_reuseport(net, sk, skb, doff,
						       saddr, sport, daddr, hnum, inet_ehashfn);
			if (result)
				return result;

			result = sk;
			hiscore = score;
		}
	}

	return result;
}

struct sock *inet_lookup_run_sk_lookup(struct net *net,
				       int protocol,
				       struct sk_buff *skb, int doff,
				       __be32 saddr, __be16 sport,
				       __be32 daddr, u16 hnum, const int dif,
				       inet_ehashfn_t *ehashfn)
{
	struct sock *sk, *reuse_sk;
	bool no_reuseport;

	no_reuseport = bpf_sk_lookup_run_v4(net, protocol, saddr, sport,
					    daddr, hnum, dif, &sk);
	if (no_reuseport || IS_ERR_OR_NULL(sk))
		return sk;

	reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum,
					 ehashfn);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
}

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif, const int sdif)
{
	struct inet_listen_hashbucket *ilb2;
	struct sock *result = NULL;
	unsigned int hash2;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    hashinfo == net->ipv4.tcp_death_row.hashinfo) {
		result = inet_lookup_run_sk_lookup(net, IPPROTO_TCP, skb, doff,
						   saddr, sport, daddr, hnum, dif,
						   inet_ehashfn);
		if (result)
			goto done;
	}

	hash2 = ipv4_portaddr_hash(net, daddr, hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, daddr, hnum,
				    dif, sdif);
	if (result)
		goto done;

	/* Lookup lhash2 with INADDR_ANY */
	hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
	ilb2 = inet_lhash2_bucket(hashinfo, hash2);

	result = inet_lhash2_lookup(net, ilb2, skb, doff,
				    saddr, sport, htonl(INADDR_ANY), hnum,
				    dif, sdif);
done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!refcount_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);
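/* RCU lookup note (illustrative): the established-hash walk below runs
 * without locks, so three defenses are layered: (1) a socket whose
 * refcount already hit zero is skipped via refcount_inc_not_zero();
 * (2) the match is re-checked after taking the reference, since the
 * socket may have been recycled for a different 4-tuple in between;
 * (3) each chain ends in a "nulls" marker encoding its bucket number,
 * so if a socket was moved to another chain mid-walk,
 * get_nulls_value() != slot exposes it and the lookup restarts.
 */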
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(inet_match(net, sk, acookie, ports, dif, sdif))) {
			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!inet_match(net, sk, acookie,
						 ports, dif, sdif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart the lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	struct net *net = sock_net(sk);
	int sdif = l3mdev_master_ifindex_by_index(net, dif);
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(inet_match(net, sk2, acookie, ports, dif, sdif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see
	 * in the hash table a socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static u64 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Searches for an existing socket in the ehash bucket list.
 * Returns true if found, false otherwise.
 */
static bool inet_ehash_lookup_by_sk(struct sock *sk,
				    struct hlist_nulls_head *list)
{
	const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
	const int sdif = sk->sk_bound_dev_if;
	const int dif = sk->sk_bound_dev_if;
	const struct hlist_nulls_node *node;
	struct net *net = sock_net(sk);
	struct sock *esk;

	INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);

	sk_nulls_for_each_rcu(esk, node, list) {
		if (esk->sk_hash != sk->sk_hash)
			continue;
		if (sk->sk_family == AF_INET) {
			if (unlikely(inet_match(net, esk, acookie,
						ports, dif, sdif))) {
				return true;
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			if (unlikely(inet6_match(net, esk,
						 &sk->sk_v6_daddr,
						 &sk->sk_v6_rcv_saddr,
						 ports, dif, sdif))) {
				return true;
			}
		}
#endif
	}
	return false;
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 * If a duplicate socket already exists, sk is not inserted and
 * *found_dup_sk is set to true.
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_ehash_bucket *head;
	struct hlist_nulls_head *list;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	} else if (found_dup_sk) {
		*found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
		if (*found_dup_sk)
			ret = false;
	}

	if (ret)
		__sk_nulls_add_node_rcu(sk, list);

	spin_unlock(lock);

	return ret;
}

bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
{
	bool ok = inet_ehash_insert(sk, osk, found_dup_sk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		this_cpu_inc(*sk->sk_prot->orphan_count);
		inet_sk_set_state(sk, TCP_CLOSE);
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
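/* Usage sketch (illustrative; not a caller in this file): a caller that
 * creates a child socket, e.g. while validating a SYN cookie, typically
 * does something like:
 *
 *	bool found_dup_sk = false;
 *
 *	if (!inet_ehash_nolisten(child, NULL, &found_dup_sk))
 *		goto drop;	(child was already destroyed above)
 *
 * When @osk is non-NULL (a timewait or request socket occupying the same
 * hash slot), the old entry is unlinked instead and the duplicate check
 * is skipped.
 */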
static int inet_reuseport_add_sock(struct sock *sk,
				   struct inet_listen_hashbucket *ilb)
{
	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
	const struct hlist_nulls_node *node;
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
		if (sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
		    inet_csk(sk2)->icsk_bind_hash == tb &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false))
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
	}

	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
}

int __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_listen_hashbucket *ilb2;
	int err = 0;

	if (sk->sk_state != TCP_LISTEN) {
		local_bh_disable();
		inet_ehash_nolisten(sk, osk, NULL);
		local_bh_enable();
		return 0;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);

	spin_lock(&ilb2->lock);
	if (sk->sk_reuseport) {
		err = inet_reuseport_add_sock(sk, ilb2);
		if (err)
			goto unlock;
	}
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		__sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head);
	else
		__sk_nulls_add_node_rcu(sk, &ilb2->nulls_head);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
	spin_unlock(&ilb2->lock);

	return err;
}
EXPORT_SYMBOL(__inet_hash);

int inet_hash(struct sock *sk)
{
	int err = 0;

	if (sk->sk_state != TCP_CLOSE)
		err = __inet_hash(sk, NULL);

	return err;
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN) {
		struct inet_listen_hashbucket *ilb2;

		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
		/* Don't disable bottom halves while acquiring the lock to
		 * avoid circular locking dependency on PREEMPT_RT.
		 */
		spin_lock(&ilb2->lock);
		if (sk_unhashed(sk)) {
			spin_unlock(&ilb2->lock);
			return;
		}

		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_stop_listen_sock(sk);

		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock(&ilb2->lock);
	} else {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

		spin_lock_bh(lock);
		if (sk_unhashed(sk)) {
			spin_unlock_bh(lock);
			return;
		}
		__sk_nulls_del_node_init_rcu(sk);
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
		spin_unlock_bh(lock);
	}
}
EXPORT_SYMBOL_GPL(inet_unhash);
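/* bind2 matching note (illustrative): the helpers below compare a socket
 * against (net, port, l3mdev, address) buckets. inet_bind2_bucket_match()
 * requires an exact address match, while inet_bind2_bucket_match_addr_any()
 * matches wildcard buckets, including the dual-stack case where an AF_INET
 * socket must find an AF_INET6 bucket bound to in6addr_any: e.g. a socket
 * binding 0.0.0.0:8080 locates the bucket of a v6 socket bound to
 * [::]:8080; whether that is an actual conflict (IPV6_V6ONLY etc.) is
 * decided by the bind-conflict logic, not here.
 */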
static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
				    const struct net *net, unsigned short port,
				    int l3mdev, const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family != tb->family)
		return false;

	if (sk->sk_family == AF_INET6)
		return net_eq(ib2_net(tb), net) && tb->port == port &&
			tb->l3mdev == l3mdev &&
			ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
	else
#endif
		return net_eq(ib2_net(tb), net) && tb->port == port &&
			tb->l3mdev == l3mdev && tb->rcv_saddr == sk->sk_rcv_saddr;
}

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
				      unsigned short port, int l3mdev, const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family != tb->family) {
		if (sk->sk_family == AF_INET)
			return net_eq(ib2_net(tb), net) && tb->port == port &&
				tb->l3mdev == l3mdev &&
				ipv6_addr_any(&tb->v6_rcv_saddr);

		return false;
	}

	if (sk->sk_family == AF_INET6)
		return net_eq(ib2_net(tb), net) && tb->port == port &&
			tb->l3mdev == l3mdev &&
			ipv6_addr_any(&tb->v6_rcv_saddr);
	else
#endif
		return net_eq(ib2_net(tb), net) && tb->port == port &&
			tb->l3mdev == l3mdev && tb->rcv_saddr == 0;
}

/* The socket's bhash2 hashbucket spinlock must be held when this is called */
struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head, const struct net *net,
		       unsigned short port, int l3mdev, const struct sock *sk)
{
	struct inet_bind2_bucket *bhash2 = NULL;

	inet_bind_bucket_for_each(bhash2, &head->chain)
		if (inet_bind2_bucket_match(bhash2, net, port, l3mdev, sk))
			break;

	return bhash2;
}

struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &in6addr_any, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, 0, port);

	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}

static void inet_update_saddr(struct sock *sk, void *saddr, int family)
{
	if (family == AF_INET) {
		inet_sk(sk)->inet_saddr = *(__be32 *)saddr;
		sk_rcv_saddr_set(sk, inet_sk(sk)->inet_saddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else {
		sk->sk_v6_rcv_saddr = *(struct in6_addr *)saddr;
	}
#endif
}

static int __inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family, bool reset)
{
	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
	struct inet_bind_hashbucket *head, *head2;
	struct inet_bind2_bucket *tb2, *new_tb2;
	int l3mdev = inet_sk_bound_l3mdev(sk);
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	int bhash;

	if (!inet_csk(sk)->icsk_bind2_hash) {
		/* Not bind()ed before. */
		if (reset)
			inet_reset_saddr(sk);
		else
			inet_update_saddr(sk, saddr, family);

		return 0;
	}

	/* Allocate a bind2 bucket ahead of time to avoid permanently putting
	 * the bhash2 table in an inconsistent state if a new tb2 bucket
	 * allocation fails.
	 */
	new_tb2 = kmem_cache_alloc(hinfo->bind2_bucket_cachep, GFP_ATOMIC);
	if (!new_tb2) {
		if (reset) {
			/* The (INADDR_ANY, port) bucket might have already
			 * been freed, then we cannot fixup icsk_bind2_hash,
			 * so we give up and unlink sk from bhash/bhash2 not
			 * to leave inconsistency in bhash2.
			 */
			inet_put_port(sk);
			inet_reset_saddr(sk);
		}

		return -ENOMEM;
	}

	bhash = inet_bhashfn(net, port, hinfo->bhash_size);
	head = &hinfo->bhash[bhash];
	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	/* If we change saddr locklessly, another thread
	 * iterating over bhash might see a corrupted address.
	 */
	spin_lock_bh(&head->lock);

	spin_lock(&head2->lock);
	__sk_del_bind2_node(sk);
	inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, inet_csk(sk)->icsk_bind2_hash);
	spin_unlock(&head2->lock);

	if (reset)
		inet_reset_saddr(sk);
	else
		inet_update_saddr(sk, saddr, family);

	head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);

	spin_lock(&head2->lock);
	tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
	if (!tb2) {
		tb2 = new_tb2;
		inet_bind2_bucket_init(tb2, net, head2, port, l3mdev, sk);
	}
	sk_add_bind2_node(sk, &tb2->owners);
	inet_csk(sk)->icsk_bind2_hash = tb2;
	spin_unlock(&head2->lock);

	spin_unlock_bh(&head->lock);

	if (tb2 != new_tb2)
		kmem_cache_free(hinfo->bind2_bucket_cachep, new_tb2);

	return 0;
}

int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family)
{
	return __inet_bhash2_update_saddr(sk, saddr, family, false);
}
EXPORT_SYMBOL_GPL(inet_bhash2_update_saddr);

void inet_bhash2_reset_saddr(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		__inet_bhash2_update_saddr(sk, NULL, 0, true);
}
EXPORT_SYMBOL_GPL(inet_bhash2_reset_saddr);

/* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
 * Note that we use 32bit integers (vs RFC 'short integers')
 * because 2^16 is not a multiple of num_ephemeral and this
 * property might be used by a clever attacker.
 *
 * The RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
 * attacks were since demonstrated, thus we use 65536 by default instead
 * to really give more isolation and privacy, at the expense of 256kB
 * of kernel memory.
 */
#define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER)
static u32 *table_perturb;

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u64 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_bind_hashbucket *head, *head2;
	struct inet_timewait_sock *tw = NULL;
	int port = inet_sk(sk)->inet_num;
	struct net *net = sock_net(sk);
	struct inet_bind2_bucket *tb2;
	struct inet_bind_bucket *tb;
	bool tb_created = false;
	u32 remaining, offset;
	int ret, i, low, high;
	int l3mdev;
	u32 index;

	if (port) {
		local_bh_disable();
		ret = check_established(death_row, sk, port, NULL);
		local_bh_enable();
		return ret;
	}

	l3mdev = inet_sk_bound_l3mdev(sk);

	inet_sk_get_local_port_range(sk, &low, &high);
	high++; /* [32768, 60999] -> [32768, 61000[ */
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	get_random_sleepable_once(table_perturb,
				  INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);

	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
	offset %= remaining;
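	/* Worked example (illustrative): with the default range low = 32768,
	 * high = 61000 (after the increment above), remaining = 28232, which
	 * is already even. If table_perturb[index] + (port_offset >> 32)
	 * yields 100000, then offset = 100000 % 28232 = 15304, so the first
	 * pass scans 48072, 48074, ..., wrapping at 61000 back to 32768,
	 * stepping by 2 to stay on a single parity.
	 */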
1044 */ 1045 offset &= ~1U; 1046 other_parity_scan: 1047 port = low + offset; 1048 for (i = 0; i < remaining; i += 2, port += 2) { 1049 if (unlikely(port >= high)) 1050 port -= remaining; 1051 if (inet_is_local_reserved_port(net, port)) 1052 continue; 1053 head = &hinfo->bhash[inet_bhashfn(net, port, 1054 hinfo->bhash_size)]; 1055 spin_lock_bh(&head->lock); 1056 1057 /* Does not bother with rcv_saddr checks, because 1058 * the established check is already unique enough. 1059 */ 1060 inet_bind_bucket_for_each(tb, &head->chain) { 1061 if (inet_bind_bucket_match(tb, net, port, l3mdev)) { 1062 if (tb->fastreuse >= 0 || 1063 tb->fastreuseport >= 0) 1064 goto next_port; 1065 WARN_ON(hlist_empty(&tb->owners)); 1066 if (!check_established(death_row, sk, 1067 port, &tw)) 1068 goto ok; 1069 goto next_port; 1070 } 1071 } 1072 1073 tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, 1074 net, head, port, l3mdev); 1075 if (!tb) { 1076 spin_unlock_bh(&head->lock); 1077 return -ENOMEM; 1078 } 1079 tb_created = true; 1080 tb->fastreuse = -1; 1081 tb->fastreuseport = -1; 1082 goto ok; 1083 next_port: 1084 spin_unlock_bh(&head->lock); 1085 cond_resched(); 1086 } 1087 1088 offset++; 1089 if ((offset & 1) && remaining > 1) 1090 goto other_parity_scan; 1091 1092 return -EADDRNOTAVAIL; 1093 1094 ok: 1095 /* Find the corresponding tb2 bucket since we need to 1096 * add the socket to the bhash2 table as well 1097 */ 1098 head2 = inet_bhashfn_portaddr(hinfo, sk, net, port); 1099 spin_lock(&head2->lock); 1100 1101 tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk); 1102 if (!tb2) { 1103 tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep, net, 1104 head2, port, l3mdev, sk); 1105 if (!tb2) 1106 goto error; 1107 } 1108 1109 /* Here we want to add a little bit of randomness to the next source 1110 * port that will be chosen. We use a max() with a random here so that 1111 * on low contention the randomness is maximal and on high contention 1112 * it may be inexistent. 1113 */ 1114 i = max_t(int, i, get_random_u32_below(8) * 2); 1115 WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2); 1116 1117 /* Head lock still held and bh's disabled */ 1118 inet_bind_hash(sk, tb, tb2, port); 1119 1120 if (sk_unhashed(sk)) { 1121 inet_sk(sk)->inet_sport = htons(port); 1122 inet_ehash_nolisten(sk, (struct sock *)tw, NULL); 1123 } 1124 if (tw) 1125 inet_twsk_bind_unhash(tw, hinfo); 1126 1127 spin_unlock(&head2->lock); 1128 spin_unlock(&head->lock); 1129 1130 if (tw) 1131 inet_twsk_deschedule_put(tw); 1132 local_bh_enable(); 1133 return 0; 1134 1135 error: 1136 spin_unlock(&head2->lock); 1137 if (tb_created) 1138 inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb); 1139 spin_unlock_bh(&head->lock); 1140 return -ENOMEM; 1141 } 1142 1143 /* 1144 * Bind a port for a connect operation and hash it. 
/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u64 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

static void init_hashinfo_lhash2(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i <= h->lhash2_mask; i++) {
		spin_lock_init(&h->lhash2[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head,
				      i + LISTENING_NULLS_BASE);
	}
}

void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
				unsigned long numentries, int scale,
				unsigned long low_limit,
				unsigned long high_limit)
{
	h->lhash2 = alloc_large_system_hash(name,
					    sizeof(*h->lhash2),
					    numentries,
					    scale,
					    0,
					    NULL,
					    &h->lhash2_mask,
					    low_limit,
					    high_limit);
	init_hashinfo_lhash2(h);

	/* this one is used for source ports of outgoing connections */
	table_perturb = alloc_large_system_hash("Table-perturb",
						sizeof(*table_perturb),
						INET_TABLE_PERTURB_SIZE,
						0, 0, NULL, NULL,
						INET_TABLE_PERTURB_SIZE,
						INET_TABLE_PERTURB_SIZE);
}

int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
{
	h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
	if (!h->lhash2)
		return -ENOMEM;

	h->lhash2_mask = INET_LHTABLE_SIZE - 1;
	/* INET_LHTABLE_SIZE must be a power of 2 */
	BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);

	init_hashinfo_lhash2(h);
	return 0;
}
EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);

struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
						 unsigned int ehash_entries)
{
	struct inet_hashinfo *new_hashinfo;
	int i;

	new_hashinfo = kmemdup(hashinfo, sizeof(*hashinfo), GFP_KERNEL);
	if (!new_hashinfo)
		goto err;

	new_hashinfo->ehash = vmalloc_huge(ehash_entries * sizeof(struct inet_ehash_bucket),
					   GFP_KERNEL_ACCOUNT);
	if (!new_hashinfo->ehash)
		goto free_hashinfo;

	new_hashinfo->ehash_mask = ehash_entries - 1;

	if (inet_ehash_locks_alloc(new_hashinfo))
		goto free_ehash;

	for (i = 0; i < ehash_entries; i++)
		INIT_HLIST_NULLS_HEAD(&new_hashinfo->ehash[i].chain, i);

	new_hashinfo->pernet = true;

	return new_hashinfo;

free_ehash:
	vfree(new_hashinfo->ehash);
free_hashinfo:
	kfree(new_hashinfo);
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_alloc);
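/* Sizing note (illustrative): ehash_mask = ehash_entries - 1 only works as
 * a hash mask when ehash_entries is a power of two, which callers are
 * expected to guarantee (e.g. ehash_entries = 65536 gives mask 0xffff).
 * Similarly, inet_ehash_locks_alloc() above sizes the lock array so that
 * each possible CPU gets roughly two cache lines' worth of spinlocks,
 * rounded up to a power of two and capped at one lock per bucket: with
 * 4-byte spinlocks and 64-byte cache lines, the baseline is 32 locks
 * before the CPU-count scaling.
 */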
void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo)
{
	if (!hashinfo->pernet)
		return;

	inet_ehash_locks_free(hashinfo);
	vfree(hashinfo->ehash);
	kfree(hashinfo);
}
EXPORT_SYMBOL_GPL(inet_pernet_hashinfo_free);