/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	free_bucket_spinlocks(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      unsigned int shifted,
					      unsigned int nhash)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && shifted) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
					    (i << shifted) | nhash);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				0, 0)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size, max_locks;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	else
		tbl = kvzalloc(size, gfp);

	size = nbuckets;

	if (tbl == NULL && gfp != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}
	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	max_locks = size >> 1;
	if (tbl->nest)
		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);

	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
				   ht->p.locks_mul, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}
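/*
 * Worked example of the nested-table geometry above (illustrative only,
 * assuming a 64-bit kernel with 4KiB pages): shift = PAGE_SHIFT -
 * ilog2(sizeof(void *)) = 12 - 3 = 9, so one page holds 1 << 9 = 512
 * pointers.  A bucket lookup consumes tbl->nest bits of the hash at the
 * top level and a further 9 bits per nested level, so even very large
 * tables are built entirely from page-sized GFP_ATOMIC allocations when
 * a flat kvzalloc() of the bucket array would fail.
 */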
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
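/*
 * Rehash timeline, summarising the helpers above: rhashtable_rehash_attach()
 * links the new table via old_tbl->future_tbl, so insertions immediately go
 * to the new table while lookups and removals try both.
 * rhashtable_rehash_one() then unlinks the last entry of an old chain and
 * pushes it onto its new bucket, and rhashtable_rehash_chain() bumps
 * old_tbl->rehash once a chain is empty.  When every chain has been moved,
 * rhashtable_rehash_table() publishes the new table pointer and frees the
 * old table after an RCU grace period.
 */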
static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head))))
			continue;

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
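/*
 * Usage example (illustrative sketch, not part of this file): a
 * duplicate-aware insertion from the fast path, which falls back to
 * rhashtable_insert_slow() above when it hits a rehash or an overlong
 * bucket chain.  It assumes a caller-defined struct test_obj keyed as in
 * the rhashtable_init() kernel-doc below, and uses
 * rhashtable_lookup_get_insert_fast() from <linux/rhashtable.h>, which
 * returns NULL on success, the existing object on a duplicate key, or an
 * ERR_PTR() on failure:
 *
 *	old = rhashtable_lookup_get_insert_fast(&ht, &obj->node, params);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	(e.g. -ENOMEM or -E2BIG)
 *	if (old)
 *		kfree(obj);		(key already present, keep old)
 */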
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position. Note that we take
 * the RCU lock in all cases including when we return an error. So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl && !iter->end_of_table) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
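/*
 * Usage example (illustrative sketch, not part of this file): walking all
 * entries with the iterator API, assuming a struct test_obj as in the
 * rhashtable_init() kernel-doc below.  rhashtable_walk_next() returns
 * ERR_PTR(-EAGAIN) after a resize; since the iterator has already been
 * rewound, the walker simply continues:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */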
/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond the last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk. Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void			*my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
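/*
 * Usage example (illustrative sketch, not part of this file): initialising
 * the table from Configuration Example 1 above and exercising the fast
 * paths declared in <linux/rhashtable.h>.  Lookups must run under
 * rcu_read_lock(); error handling is abbreviated:
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj, *found;
 *	int key = 1;
 *
 *	if (rhashtable_init(&ht, &params))
 *		return -ENOMEM;
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	obj->key = key;
 *	if (rhashtable_insert_fast(&ht, &obj->node, params))
 *		goto err;
 *
 *	rcu_read_lock();
 *	found = rhashtable_lookup_fast(&ht, &key, params);
 *	rcu_read_unlock();
 *
 *	rhashtable_remove_fast(&ht, &obj->node, params);
 */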
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
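/*
 * Usage example (illustrative sketch, not part of this file): tearing down
 * a table whose elements were kmalloc'ed, as in the earlier examples.
 * free_fn runs once per element, including every member of an rhlist chain:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */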
void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;
	unsigned int shifted;
	unsigned int nhash;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	nhash = index;
	shifted = tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift) ? shifted : 0, nhash);

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		nhash |= index << shifted;
		shifted += shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift) ? shifted : 0,
					  nhash);
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);