/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
			const struct bucket_table *tbl,
			const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

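	/* The rest of the setup allocates the per-bucket lock array, prepares
	 * the walker list, seeds the per-table hash and points every bucket
	 * at its nulls marker so that empty chains end in a recognisable
	 * sentinel.
	 */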
	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

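	/* Every old bucket chain has now been drained into the new table;
	 * the old buckets all point at their nulls markers.
	 */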
	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest size
 * that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht,
			     struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
					    const void *key,
					    struct rhash_head *obj,
					    struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -E2BIG;
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	if (err == 0)
		return NULL;
	else if (err == -EAGAIN)
		return tbl;
	else
		return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	spin_lock(&ht->lock);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	spin_unlock(&ht->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&iter->ht->lock);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	spin_unlock(&ht->lock);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			return rht_obj(ht, p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters.  A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize.
 * If defined, invokes free_fn for each element to release resources.
 * Please note that RCU protected readers may still be accessing the
 * elements.  Releasing of resources must occur in a compatible manner.
 * Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete.  The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
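
/* Overall usage sketch (illustrative only, in the spirit of the
 * configuration examples in the rhashtable_init() comment): callers embed a
 * struct rhash_head in their objects and then use the _fast() helpers from
 * <linux/rhashtable.h>.  "my_obj", "my_params" and "my_free_fn" are
 * hypothetical names, not part of this file.
 *
 *	struct my_obj {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params my_params = {
 *		.head_offset		= offsetof(struct my_obj, node),
 *		.key_offset		= offsetof(struct my_obj, key),
 *		.key_len		= sizeof(u32),
 *		.automatic_shrinking	= true,
 *	};
 *
 *	err = rhashtable_init(&ht, &my_params);
 *	err = rhashtable_insert_fast(&ht, &obj->node, my_params);
 *	obj = rhashtable_lookup_fast(&ht, &key, my_params);
 *	err = rhashtable_remove_fast(&ht, &obj->node, my_params);
 *	rhashtable_free_and_destroy(&ht, my_free_fn, NULL);
 */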