/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
{
        return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
        spinlock_t *lock = rht_bucket_lock(tbl, hash);

        return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
                              gfp_t gfp)
{
        unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif

        nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
        size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

        /* Never allocate more than 0.5 locks per bucket */
        size = min_t(unsigned int, size, tbl->size >> 1);

        if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
                if (size * sizeof(spinlock_t) > PAGE_SIZE &&
                    gfp == GFP_KERNEL)
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
                else
#endif
                        tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
                                                   gfp);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&tbl->locks[i]);
        }
        tbl->locks_mask = size - 1;

        return 0;
}

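/* Worked example of the lock sizing above (illustrative only): on a machine
 * with 4 possible CPUs and the default locks_mul of 128 (BUCKET_LOCKS_PER_CPU),
 * roundup_pow_of_two(4 * 128) gives 512 locks, which is then capped at half
 * the bucket count.  A freshly allocated 64-bucket table therefore ends up
 * with min(512, 64 >> 1) == 32 bucket locks and locks_mask == 31.
 */
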
static void bucket_table_free(const struct bucket_table *tbl)
{
        if (tbl)
                kvfree(tbl->locks);

        kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
        bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets,
                                               gfp_t gfp)
{
        struct bucket_table *tbl = NULL;
        size_t size;
        int i;

        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
            gfp != GFP_KERNEL)
                tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
        if (tbl == NULL && gfp == GFP_KERNEL)
                tbl = vzalloc(size);
        if (tbl == NULL)
                return NULL;

        tbl->size = nbuckets;

        if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }

        INIT_LIST_HEAD(&tbl->walkers);

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

        return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
                                                  struct bucket_table *tbl)
{
        struct bucket_table *new_tbl;

        do {
                new_tbl = tbl;
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        } while (tbl);

        return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl = rhashtable_last_table(ht,
                rht_dereference_rcu(old_tbl->future_tbl, ht));
        struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
        int err = -ENOENT;
        struct rhash_head *head, *next, *entry;
        spinlock_t *new_bucket_lock;
        unsigned int new_hash;

        rht_for_each(entry, old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

                if (rht_is_a_nulls(next))
                        break;

                pprev = &entry->next;
        }

        if (err)
                goto out;

        new_hash = head_hashfn(ht, new_tbl, entry);

        new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

        spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
        head = rht_dereference_bucket(new_tbl->buckets[new_hash],
                                      new_tbl, new_hash);

        if (rht_is_a_nulls(head))
                INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
        else
                RCU_INIT_POINTER(entry->next, head);

        rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
        spin_unlock(new_bucket_lock);

        rcu_assign_pointer(*pprev, next);

out:
        return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
                                    unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        spinlock_t *old_bucket_lock;

        old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

        spin_lock_bh(old_bucket_lock);
        while (!rhashtable_rehash_one(ht, old_hash))
                ;
        old_tbl->rehash++;
        spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
                                    struct bucket_table *old_tbl,
                                    struct bucket_table *new_tbl)
{
        /* Protect future_tbl using the first bucket lock. */
        spin_lock_bh(old_tbl->locks);

        /* Did somebody beat us to it? */
        if (rcu_access_pointer(old_tbl->future_tbl)) {
                spin_unlock_bh(old_tbl->locks);
                return -EEXIST;
        }

        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         */
        rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

        /* Ensure the new table is visible to readers. */
        smp_wmb();

        spin_unlock_bh(old_tbl->locks);

        return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl;
        struct rhashtable_walker *walker;
        unsigned int old_hash;

        new_tbl = rht_dereference(old_tbl->future_tbl, ht);
        if (!new_tbl)
                return 0;

        for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
                rhashtable_rehash_chain(ht, old_hash);

        /* Publish the new table pointer. */
        rcu_assign_pointer(ht->tbl, new_tbl);

        spin_lock(&ht->lock);
        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;
        spin_unlock(&ht->lock);

        /* Wait for readers. All new readers will see the new
         * table, and thus no references to the old table will
         * remain.
         */
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        int err;

        ASSERT_RHT_MUTEX(ht);

        old_tbl = rhashtable_last_table(ht, old_tbl);

        new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that does not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        unsigned int size;
        int err;

        ASSERT_RHT_MUTEX(ht);

        size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
        if (size < ht->p.min_size)
                size = ht->p.min_size;

        if (old_tbl->size <= size)
                return 0;

        if (rht_dereference(old_tbl->future_tbl, ht))
                return -EEXIST;

        new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}

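/* Worked example of the resize arithmetic (illustrative only; the exact
 * thresholds live in rht_grow_above_75()/rht_shrink_below_30() in
 * <linux/rhashtable.h>): with a 64-bucket table, the deferred worker below
 * schedules an expansion once the element count exceeds roughly 75% of the
 * buckets (~48 entries) and, when automatic_shrinking is set, considers
 * shrinking once it drops below roughly 30% (~19 entries).  The shrink
 * target computed above is roundup_pow_of_two(nelems * 3 / 2) clamped to
 * p.min_size; e.g. 100 remaining elements give roundup_pow_of_two(150)
 * = 256 buckets.
 */
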
static void rht_deferred_worker(struct work_struct *work)
{
        struct rhashtable *ht;
        struct bucket_table *tbl;
        int err = 0;

        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);

        tbl = rht_dereference(ht->tbl, ht);
        tbl = rhashtable_last_table(ht, tbl);

        if (rht_grow_above_75(ht, tbl))
                rhashtable_expand(ht);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
                rhashtable_shrink(ht);

        err = rhashtable_rehash_table(ht);

        mutex_unlock(&ht->mutex);

        if (err)
                schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
                                        struct bucket_table *tbl,
                                        unsigned int hash)
{
        unsigned int elasticity = ht->elasticity;
        struct rhash_head *head;

        rht_for_each(head, tbl, hash)
                if (!--elasticity)
                        return true;

        return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
        struct bucket_table *old_tbl;
        struct bucket_table *new_tbl;
        struct bucket_table *tbl;
        unsigned int size;
        int err;

        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        tbl = rhashtable_last_table(ht, old_tbl);

        size = tbl->size;

        if (rht_grow_above_75(ht, tbl))
                size *= 2;
        /* Do not schedule more than one rehash */
        else if (old_tbl != tbl)
                return -EBUSY;

        new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
        if (new_tbl == NULL) {
                /* Schedule async resize/rehash to retry the allocation
                 * in non-atomic context.
                 */
                schedule_work(&ht->run_work);
                return -ENOMEM;
        }

        err = rhashtable_rehash_attach(ht, tbl, new_tbl);
        if (err) {
                bucket_table_free(new_tbl);
                if (err == -EEXIST)
                        err = 0;
        } else
                schedule_work(&ht->run_work);

        return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                           struct rhash_head *obj,
                           struct bucket_table *tbl)
{
        struct rhash_head *head;
        unsigned int hash;
        int err;

        tbl = rhashtable_last_table(ht, tbl);
        hash = head_hashfn(ht, tbl, obj);
        spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

        err = -EEXIST;
        if (key && rhashtable_lookup_fast(ht, key, ht->p))
                goto exit;

        err = -EAGAIN;
        if (rhashtable_check_elasticity(ht, tbl, hash) ||
            rht_grow_above_100(ht, tbl))
                goto exit;

        err = 0;

        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

        RCU_INIT_POINTER(obj->next, head);

        rcu_assign_pointer(tbl->buckets[hash], obj);

        atomic_inc(&ht->nelems);

exit:
        spin_unlock(rht_bucket_lock(tbl, hash));

        return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;

        iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
        if (!iter->walker)
                return -ENOMEM;

        mutex_lock(&ht->mutex);
        iter->walker->tbl = rht_dereference(ht->tbl, ht);
        list_add(&iter->walker->list, &iter->walker->tbl->walkers);
        mutex_unlock(&ht->mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
        mutex_lock(&iter->ht->mutex);
        if (iter->walker->tbl)
                list_del(&iter->walker->list);
        mutex_unlock(&iter->ht->mutex);
        kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
        __acquires(RCU)
{
        struct rhashtable *ht = iter->ht;

        mutex_lock(&ht->mutex);

        if (iter->walker->tbl)
                list_del(&iter->walker->list);

        rcu_read_lock();

        mutex_unlock(&ht->mutex);

        if (!iter->walker->tbl) {
                iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
                return -EAGAIN;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
        struct bucket_table *tbl = iter->walker->tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        void *obj = NULL;

        if (p) {
                p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
                goto next;
        }

        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;

                rht_for_each_rcu(p, tbl, iter->slot) {
                        if (!skip)
                                break;
                        skip--;
                }

next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        obj = rht_obj(ht, p);
                        goto out;
                }

                iter->skip = 0;
        }

        /* Ensure we see any new tables. */
        smp_rmb();

        iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (iter->walker->tbl) {
                iter->slot = 0;
                iter->skip = 0;
                return ERR_PTR(-EAGAIN);
        }

        iter->p = NULL;

out:

        return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
        __releases(RCU)
{
        struct rhashtable *ht;
        struct bucket_table *tbl = iter->walker->tbl;

        if (!tbl)
                goto out;

        ht = iter->ht;

        spin_lock(&ht->lock);
        if (tbl->rehash < tbl->size)
                list_add(&iter->walker->list, &tbl->walkers);
        else
                iter->walker->tbl = NULL;
        spin_unlock(&ht->lock);

        iter->p = NULL;

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

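/* A minimal walk sketch using the iterator API implemented above.  It
 * assumes a live table at *ht* whose elements are caller-defined
 * "struct test_obj" with an embedded "struct rhash_head node" member
 * (both names are illustrative, not part of this file):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);	[may return -EAGAIN; walk is rewound]
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))	[ERR_PTR(-EAGAIN): resized, rewound]
 *			continue;
 *		[... use obj; duplicates or misses are possible across resizes ...]
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */
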
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
        return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params)
{
        struct bucket_table *tbl;
        size_t size;

        size = HASH_DEFAULT_SIZE;

        if ((!params->key_len && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;

        if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
                return -EINVAL;

        if (params->nelem_hint)
                size = rounded_hashtable_size(params);

        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        spin_lock_init(&ht->lock);
        memcpy(&ht->p, params, sizeof(*params));

        if (params->min_size)
                ht->p.min_size = roundup_pow_of_two(params->min_size);

        if (params->max_size)
                ht->p.max_size = rounddown_pow_of_two(params->max_size);

        ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

        /* The maximum (not average) chain length grows with the
         * size of the hash table, at a rate of (log N)/(log log N).
         * The value of 16 is selected so that even if the hash
         * table grew to 2^32 you would not expect the maximum
         * chain length to exceed it unless we are under attack
         * (or extremely unlucky).
         *
         * As this limit is only to detect attacks, we don't need
         * to set it to a lower value as you'd need the chain
         * length to vastly exceed 16 to have any real effect
         * on the system.
         */
        if (!params->insecure_elasticity)
                ht->elasticity = 16;

        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

        ht->key_len = ht->p.key_len;
        if (!params->hashfn) {
                ht->p.hashfn = jhash;

                if (!(ht->key_len & (sizeof(u32) - 1))) {
                        ht->key_len /= sizeof(u32);
                        ht->p.hashfn = rhashtable_jhash2;
                }
        }

        tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;

        atomic_set(&ht->nelems, 0);

        RCU_INIT_POINTER(ht->tbl, tbl);

        INIT_WORK(&ht->run_work, rht_deferred_worker);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

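/* A minimal usage sketch for the fixed length key configuration shown in
 * Example 1 above.  It assumes the rhashtable_insert_fast(),
 * rhashtable_lookup_fast() and rhashtable_remove_fast() inline helpers from
 * <linux/rhashtable.h>, which take the rhashtable_params by value; the
 * names test_params and example() are illustrative only:
 *
 *	static struct rhashtable ht;
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *	};
 *
 *	int example(struct test_obj *obj, int key)
 *	{
 *		struct test_obj *found;
 *		int err;
 *
 *		err = rhashtable_init(&ht, &test_params);
 *		if (err)
 *			return err;
 *
 *		obj->key = key;
 *		err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *		if (err)
 *			goto out;
 *
 *		rcu_read_lock();
 *		found = rhashtable_lookup_fast(&ht, &key, test_params);
 *		[... use found; it is only guaranteed stable under RCU ...]
 *		rcu_read_unlock();
 *
 *		err = rhashtable_remove_fast(&ht, &obj->node, test_params);
 *	out:
 *		rhashtable_destroy(&ht);
 *		return err;
 *	}
 */
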
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg)
{
        const struct bucket_table *tbl;
        unsigned int i;

        cancel_work_sync(&ht->run_work);

        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
        if (free_fn) {
                for (i = 0; i < tbl->size; i++) {
                        struct rhash_head *pos, *next;

                        for (pos = rht_dereference(tbl->buckets[i], ht),
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL;
                             !rht_is_a_nulls(pos);
                             pos = next,
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL)
                                free_fn(rht_obj(ht, pos), arg);
                }
        }

        bucket_table_free(tbl);
        mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
        return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

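/* A minimal teardown sketch, assuming the elements were allocated with
 * kmalloc() and embed "struct rhash_head node" as in the examples above.
 * The callback receives the object pointer (not the rhash_head) plus the
 * opaque arg passed to rhashtable_free_and_destroy():
 *
 *	static void free_test_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_test_obj, NULL);
 */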