/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>

static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
			const __u16 lport, const __be32 faddr,
			const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bind hash lock for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->port = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		tb->num_owners = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	tb->num_owners++;
	inet_csk(sk)->icsk_bind_hash = tb;
}
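/* Illustrative only: the typical caller pattern for the bind-bucket API
 * above, roughly what inet_csk_get_port() does (a sketch, with error
 * handling and reuse checks omitted):
 *
 *	head = &hashinfo->bhash[inet_bhashfn(net, snum, hashinfo->bhash_size)];
 *	spin_lock(&head->lock);
 *	tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
 *				     net, head, snum);
 *	if (tb)
 *		inet_bind_hash(sk, tb, snum);
 *	spin_unlock(&head->lock);
 *
 * The reference is dropped again via inet_put_port() below, which takes
 * the same bucket lock with BHs disabled and lets
 * inet_bind_bucket_destroy() free the bucket once its owner list is empty.
 */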
/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
				       hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	tb->num_owners--;
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->inet_num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);

int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	unsigned short port = inet_sk(child)->inet_num;
	const int bhash = inet_bhashfn(sock_net(sk), port,
				       table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	if (unlikely(!tb)) {
		spin_unlock(&head->lock);
		return -ENOENT;
	}
	if (tb->port != port) {
		/* NOTE: using tproxy and redirecting skbs to a proxy
		 * on a different listener port breaks the assumption
		 * that the listener socket's icsk_bind_hash is the same
		 * as that of the child socket. We have to look up or
		 * create a new bind bucket for the child here.
		 */
		inet_bind_bucket_for_each(tb, &head->chain) {
			if (net_eq(ib_net(tb), sock_net(sk)) &&
			    tb->port == port)
				break;
		}
		if (!tb) {
			tb = inet_bind_bucket_create(table->bind_bucket_cachep,
						     sock_net(sk), head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				return -ENOMEM;
			}
		}
	}
	inet_bind_hash(child, tb, port);
	spin_unlock(&head->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);

static inline int compute_score(struct sock *sk, struct net *net,
				const unsigned short hnum, const __be32 daddr,
				const int dif)
{
	int score = -1;
	struct inet_sock *inet = inet_sk(sk);

	if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
	    !ipv6_only_sock(sk)) {
		__be32 rcv_saddr = inet->inet_rcv_saddr;

		score = sk->sk_family == PF_INET ? 2 : 1;
		if (rcv_saddr) {
			if (rcv_saddr != daddr)
				return -1;
			score += 4;
		}
		if (sk->sk_bound_dev_if) {
			if (sk->sk_bound_dev_if != dif)
				return -1;
			score += 4;
		}
		if (sk->sk_incoming_cpu == raw_smp_processor_id())
			score++;
	}
	return score;
}
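/* Illustrative scoring example for compute_score() above: for a lookup
 * with daddr 192.0.2.1 and dif 2,
 *
 *	AF_INET listener on 0.0.0.0:hnum, no bound device:	score 2
 *	AF_INET6 dual-stack listener on [::]:hnum:		score 1
 *	AF_INET listener bound to 192.0.2.1:hnum:		score 2 + 4 = 6
 *	... additionally bound to ifindex 2:			score 6 + 4 = 10
 *
 * plus one more point when sk_incoming_cpu matches the current CPU. The
 * most specific listener wins, and any hard mismatch bails out with -1.
 */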
/*
 * Don't inline this cruft. There are some nice properties to exploit here.
 * The BSD API does not allow a listening socket to specify the remote port
 * nor the remote address for the connection. So always assume those are
 * both wildcarded during the search, since they can never be otherwise.
 */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 saddr, __be16 sport,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned int hash = inet_lhashfn(net, hnum);
	struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
	int score, hiscore, matches = 0, reuseport = 0;
	u32 phash = 0;

	rcu_read_lock();
begin:
	result = NULL;
	hiscore = 0;
	sk_nulls_for_each_rcu(sk, node, &ilb->head) {
		score = compute_score(sk, net, hnum, daddr, dif);
		if (score > hiscore) {
			result = sk;
			hiscore = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				phash = inet_ehashfn(net, daddr, hnum,
						     saddr, sport);
				matches = 1;
			}
		} else if (score == hiscore && reuseport) {
			matches++;
			if (reciprocal_scale(phash, matches) == 0)
				result = sk;
			phash = next_pseudo_random32(phash);
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is not the
	 * expected one, we must restart the lookup. We probably met an
	 * item that was moved to another chain.
	 */
	if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
		goto begin;
	if (result) {
		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, daddr,
						dif) < hiscore)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
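/* For reference: the SO_REUSEPORT draw above keeps each equally scored
 * socket with probability 1/matches, because reciprocal_scale() maps the
 * 32-bit hash into [0, matches) without a division. A sketch of what it
 * boils down to (see linux/kernel.h):
 *
 *	static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
 *	{
 *		return (u32)(((u64)val * ep_ro) >> 32);
 *	}
 *
 * result is replaced whenever the scaled value lands on 0, i.e. a
 * reservoir-sampling style pick made during the chain walk.
 */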
/* All sockets share a common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
	if (!atomic_dec_and_test(&sk->sk_refcnt))
		return;

	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_free(inet_twsk(sk));
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		reqsk_free(inet_reqsk(sk));
	else
		sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
	sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_nulls_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyway.
	 */
	unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
	unsigned int slot = hash & hashinfo->ehash_mask;
	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

	rcu_read_lock();
begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(INET_MATCH(sk, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
				goto out;
			if (unlikely(!INET_MATCH(sk, net, acookie,
						 saddr, daddr, ports, dif))) {
				sock_gen_put(sk);
				goto begin;
			}
			goto found;
		}
	}
	/*
	 * If the nulls value we got at the end of this lookup is not the
	 * expected one, we must restart the lookup. We probably met an
	 * item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
out:
	sk = NULL;
found:
	rcu_read_unlock();
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
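/* Background for the restart logic above: hlist_nulls chains end in an
 * encoded "nulls" marker rather than a plain NULL, so a lockless walker
 * can tell *which* chain it finished on. A sketch of the helpers
 * involved (see linux/list_nulls.h):
 *
 *	is_a_nulls(ptr)      -> (unsigned long)ptr & 1
 *	get_nulls_value(ptr) -> (unsigned long)ptr >> 1
 *
 * If an RCU walk ends on a marker that does not match the slot it
 * started from, the socket being followed was moved to another chain
 * mid-walk, and the lookup must start over from "begin".
 */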
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->inet_rcv_saddr;
	__be32 saddr = inet->inet_daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
	struct net *net = sock_net(sk);
	unsigned int hash = inet_ehashfn(net, daddr, lport,
					 saddr, inet->inet_dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_nulls_node *node;
	struct inet_timewait_sock *tw = NULL;

	spin_lock(lock);

	sk_nulls_for_each(sk2, node, &head->chain) {
		if (sk2->sk_hash != hash)
			continue;

		if (likely(INET_MATCH(sk2, net, acookie,
				      saddr, daddr, ports, dif))) {
			if (sk2->sk_state == TCP_TIME_WAIT) {
				tw = inet_twsk(sk2);
				if (twsk_unique(sk, sk2, twp))
					break;
			}
			goto not_unique;
		}
	}

	/* Must record num and sport now. Otherwise we will see in the
	 * hash table a socket with a funny identity.
	 */
	inet->inet_num = lport;
	inet->inet_sport = htons(lport);
	sk->sk_hash = hash;
	WARN_ON(!sk_unhashed(sk));
	__sk_nulls_add_node_rcu(sk, &head->chain);
	if (tw) {
		sk_nulls_del_node_init_rcu((struct sock *)tw);
		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
	}
	spin_unlock(lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	if (twp) {
		*twp = tw;
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule_put(tw);
	}
	return 0;

not_unique:
	spin_unlock(lock);
	return -EADDRNOTAVAIL;
}

static u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);

	return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
					  inet->inet_daddr,
					  inet->inet_dport);
}

/* Insert a socket into ehash, and eventually remove another one
 * (the other one can be a SYN_RECV or TIMEWAIT socket).
 */
bool inet_ehash_insert(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_nulls_head *list;
	struct inet_ehash_bucket *head;
	spinlock_t *lock;
	bool ret = true;

	WARN_ON_ONCE(!sk_unhashed(sk));

	sk->sk_hash = sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	if (osk) {
		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
		ret = sk_nulls_del_node_init_rcu(osk);
	}
	if (ret)
		__sk_nulls_add_node_rcu(sk, list);
	spin_unlock(lock);
	return ret;
}

bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
{
	bool ok = inet_ehash_insert(sk, osk);

	if (ok) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	} else {
		percpu_counter_inc(sk->sk_prot->orphan_count);
		sk->sk_state = TCP_CLOSE;
		sock_set_flag(sk, SOCK_DEAD);
		inet_csk_destroy_sock(sk);
	}
	return ok;
}
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);

void __inet_hash(struct sock *sk, struct sock *osk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_listen_hashbucket *ilb;

	if (sk->sk_state != TCP_LISTEN) {
		inet_ehash_nolisten(sk, osk);
		return;
	}
	WARN_ON(!sk_unhashed(sk));
	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

	spin_lock(&ilb->lock);
	__sk_nulls_add_node_rcu(sk, &ilb->head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	spin_unlock(&ilb->lock);
}
EXPORT_SYMBOL(__inet_hash);

void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk, NULL);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	spinlock_t *lock;
	int done;

	if (sk_unhashed(sk))
		return;

	if (sk->sk_state == TCP_LISTEN)
		lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
	else
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock_bh(lock);
	done = __sk_nulls_del_node_init_rcu(sk);
	if (done)
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);
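/* How the hashing entry points above fit together (an illustrative
 * sketch of the common call paths, not an exhaustive list):
 *
 *	listen()  -> inet_hash() -> __inet_hash()          -> listening_hash
 *	connect() -> inet_hash_connect() (below)
 *	          -> __inet_hash_connect() -> check_established()
 *	                                   -> inet_ehash_nolisten() -> ehash
 *	close()   -> inet_unhash(), then inet_put_port()
 *
 * inet_ehash_nolisten() is also the primitive that swaps a request or
 * timewait socket (osk) for the full socket under one bucket lock.
 */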
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->inet_num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		/* By starting with offset being an even number,
		 * we tend to leave about 50% of ports for other uses,
		 * like bind(0).
		 */
		offset &= ~1;

		local_bh_disable();
		for (i = 0; i < remaining; i++) {
			port = low + (i + offset) % remaining;
			if (inet_is_local_reserved_port(net, port))
				continue;
			head = &hinfo->bhash[inet_bhashfn(net, port,
							  hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks, because
			 * the established check is already unique enough.
			 */
			inet_bind_bucket_for_each(tb, &head->chain) {
				if (net_eq(ib_net(tb), net) &&
				    tb->port == port) {
					if (tb->fastreuse >= 0 ||
					    tb->fastreuseport >= 0)
						goto next_port;
					WARN_ON(hlist_empty(&tb->owners));
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
						     net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			tb->fastreuseport = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += (i + 2) & ~1;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->inet_sport = htons(port);
			inet_ehash_nolisten(sk, (struct sock *)tw);
		}
		if (tw)
			inet_twsk_bind_unhash(tw, hinfo);
		spin_unlock(&head->lock);

		if (tw)
			inet_twsk_deschedule_put(tw);

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		inet_ehash_nolisten(sk, NULL);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
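/* Worked example for the ephemeral-port walk above (numbers are
 * illustrative): with ip_local_port_range = 32768..60999, remaining is
 * 28232. If hint + port_offset = 54321, then offset &= ~1 yields 54320,
 * so the first candidate is 32768 + (54320 % 28232) = 58856, and the
 * walk then tries consecutive ports, wrapping back to low. On success,
 * hint += (i + 2) & ~1 keeps the next starting offset even, matching
 * the "leave ports for bind(0)" comment above.
 */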
/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	u32 port_offset = 0;

	if (!inet_sk(sk)->inet_num)
		port_offset = inet_sk_port_offset(sk);
	return __inet_hash_connect(death_row, sk, port_offset,
				   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

void inet_hashinfo_init(struct inet_hashinfo *h)
{
	int i;

	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
		spin_lock_init(&h->listening_hash[i].lock);
		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
				      i + LISTENING_NULLS_BASE);
	}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int locksz = sizeof(spinlock_t);
	unsigned int i, nblocks = 1;

	if (locksz != 0) {
		/* allocate 2 cache lines or at least one spinlock per cpu */
		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

		/* no more locks than number of hash buckets */
		nblocks = min(nblocks, hashinfo->ehash_mask + 1);

		hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
						      GFP_KERNEL | __GFP_NOWARN);
		if (!hashinfo->ehash_locks)
			hashinfo->ehash_locks = vmalloc(nblocks * locksz);

		if (!hashinfo->ehash_locks)
			return -ENOMEM;

		for (i = 0; i < nblocks; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = nblocks - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
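/* Sizing example for inet_ehash_locks_alloc() (illustrative figures):
 * with sizeof(spinlock_t) == 4 (no lock debugging) and
 * L1_CACHE_BYTES == 64, nblocks starts at 2 * 64 / 4 = 32 locks (two
 * cache lines' worth), is scaled by num_possible_cpus() (e.g. 8 gives
 * 256, already a power of two), and is clamped to ehash_mask + 1.
 * Since nblocks is a power of two, ehash_locks_mask = nblocks - 1 lets
 * inet_ehash_lockp() pick a lock with a simple AND of the hash.
 */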