/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This allows the locking to be simplified, as locking the bucket in both
 * tables during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
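
/* Worked example of the "at most size/2 locks" rule above, with made-up
 * numbers: when an 8 bucket table is doubled to 16 buckets, old bucket 5
 * splits into new buckets 5 and 13.  The new table has at most 16/2 = 8
 * bucket locks, so both 5 & 7 and 13 & 7 select lock 5; with fewer locks
 * the two indices still collide because the lock count divides 8.  A
 * single bucket lock therefore covers every chain that one old bucket can
 * unzip into, which is what makes the "lock old, then new" rule in
 * lock_buckets() sufficient during a resize.
 */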

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

#ifdef CONFIG_PROVE_LOCKING
static void debug_dump_buckets(const struct rhashtable *ht,
			       const struct bucket_table *tbl)
{
	struct rhash_head *he;
	unsigned int i, hash;

	for (i = 0; i < tbl->size; i++) {
		pr_warn(" [Bucket %d] ", i);
		rht_for_each_rcu(he, tbl, i) {
			hash = head_hashfn(ht, tbl, he);
			pr_cont("[hash = %#x, lock = %p] ",
				hash, bucket_lock(tbl, hash));
		}
		pr_cont("\n");
	}
}

static void debug_dump_table(struct rhashtable *ht,
			     const struct bucket_table *tbl,
			     unsigned int hash)
{
	struct bucket_table *old_tbl, *future_tbl;

	pr_emerg("BUG: lock for hash %#x in table %p not held\n",
		 hash, tbl);

	rcu_read_lock();
	future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	if (future_tbl != old_tbl) {
		pr_warn("Future table %p (size: %zd)\n",
			future_tbl, future_tbl->size);
		debug_dump_buckets(ht, future_tbl);
	}

	pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
	debug_dump_buckets(ht, old_tbl);

	rcu_read_unlock();
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)				\
	do {								\
		if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) {	\
			debug_dump_table(HT, TBL, HASH);		\
			BUG();						\
		}							\
	} while (0)

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
#endif


static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift &&
		atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
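
/* Worked example with a hypothetical table of new_size = 64: the expand
 * watermark is 64 / 4 * 3 = 48 entries and the shrink watermark is
 * 64 * 3 / 10 = 19 entries, so the deferred worker only bounces between
 * grow and shrink if the population swings across that 19..48 band.
 */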

static void lock_buckets(struct bucket_table *new_tbl,
			 struct bucket_table *old_tbl, unsigned int hash)
	__acquires(old_bucket_lock)
{
	spin_lock_bh(bucket_lock(old_tbl, hash));
	if (new_tbl != old_tbl)
		spin_lock_bh_nested(bucket_lock(new_tbl, hash),
				    RHT_LOCK_NESTED);
}

static void unlock_buckets(struct bucket_table *new_tbl,
			   struct bucket_table *old_tbl, unsigned int hash)
	__releases(old_bucket_lock)
{
	if (new_tbl != old_tbl)
		spin_unlock_bh(bucket_lock(new_tbl, hash));
	spin_unlock_bh(bucket_lock(old_tbl, hash));
}

/**
 * Unlink entries on a bucket which hash to a different bucket.
 *
 * Returns true if no more work needs to be performed on the bucket.
 */
static bool hashtable_chain_unzip(struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return false;

	new_hash = head_hashfn(ht, new_tbl, p);
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same bucket as the
	 * previous node p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);

		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node p, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);

	return !rht_is_a_nulls(p);
}

static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table is picked up
	 * so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * the newly formed bucket chain (containing entries added to the
	 * future table) to that entry. Since all the entries which will end
	 * up in the new bucket appear in the same old bucket, this constructs
	 * an entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		lock_buckets(new_tbl, old_tbl, new_hash);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(ht, new_tbl, new_hash, he);
				break;
			}
		}
		unlock_buckets(new_tbl, old_tbl, new_hash);
	}

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			lock_buckets(new_tbl, old_tbl, old_hash);

			if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
						  old_hash))
				complete = false;

			unlock_buckets(new_tbl, old_tbl, old_hash);
		}
	}

	rcu_assign_pointer(ht->tbl, new_tbl);
	synchronize_rcu();

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
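
/* Worked example of the zip/unzip scheme above, with made-up numbers:
 * when an 8 bucket table is doubled to 16, old bucket 3 is the source for
 * new buckets 3 and 11 (both give rht_bucket_index(old_tbl, new_hash) == 3).
 * After the relink pass the two new buckets lead into one shared, "zipped"
 * chain, and each hashtable_chain_unzip() pass peels off the entries that
 * belong to the other new bucket until the chains are fully disjoint.
 */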

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		lock_buckets(new_tbl, tbl, new_hash);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		unlock_buckets(new_tbl, tbl, new_hash);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
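
/* Worked example with made-up sizes: shrinking from 8 buckets to 4, new
 * bucket 1 concatenates old buckets 1 and 1 + 4 = 5, since old indices
 * binary 001 and 101 both collapse to new index 01 under the halved mask.
 */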

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_work(&ht->run_work);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head;

	hash = rht_bucket_index(tbl, hash);
	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	ASSERT_BUCKET_LOCK(ht, tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(tbl, old_tbl, hash);
	__rhashtable_insert(ht, obj, tbl, hash);
	unlock_buckets(tbl, old_tbl, hash);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);

/**
 * rhashtable_remove - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *new_tbl, *old_tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he, *he2;
	unsigned int hash, new_hash;
	bool ret = false;

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);
restart:
	hash = rht_bucket_index(tbl, new_hash);
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		ASSERT_BUCKET_LOCK(ht, tbl, hash);

		if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
		    !rht_is_a_nulls(obj->next) &&
		    head_hashfn(ht, tbl, obj->next) != hash) {
			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
			rht_for_each_continue(he2, obj->next, tbl, hash) {
				if (head_hashfn(ht, tbl, he2) == hash) {
					rcu_assign_pointer(*pprev, he2);
					goto found;
				}
			}

			rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
		} else {
			rcu_assign_pointer(*pprev, obj->next);
		}

found:
		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * very rare.
	 */
	if (tbl != old_tbl) {
		tbl = old_tbl;
		goto restart;
	}

	unlock_buckets(new_tbl, old_tbl, new_hash);

	if (ret) {
		atomic_dec(&ht->nelems);
		rhashtable_wakeup_worker(ht);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht: hash table
 * @key: pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht: hash table
 * @key: the pointer to the key
 * @compare: compare function, must return true on match
 * @arg: argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *				      with compare function
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @compare: compare function, must return true on match
 * @arg: argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	u32 new_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

	lock_buckets(new_tbl, old_tbl, new_hash);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	unlock_buckets(new_tbl, old_tbl, new_hash);
	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);
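
/* Usage sketch (hypothetical caller, not taken from an in-tree user): with
 * a fixed key table configured as in the rhashtable_init() example below,
 * rhashtable_lookup_insert() gives an atomic "insert unless the key already
 * exists":
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	obj->key = 42;
 *	if (!rhashtable_lookup_insert(&ht, &obj->node))
 *		kfree(obj);	... duplicate key, entry was not linked ...
 */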

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
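
/* Usage sketch for the walker API (hypothetical caller): restart the walk
 * whenever a resize is signalled via -EAGAIN.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	... table resized, walk rewound ...
 *			break;
 *		}
 *		... use obj; it may be seen again after a rewind ...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */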

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_destroy - destroy hash table
 * @ht: the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
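
/* End-to-end usage sketch tying the API together (hypothetical caller,
 * reusing the fixed length key configuration from Configuration Example 1
 * in the rhashtable_init() comment; error handling trimmed):
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj;
 *	int key = 42;
 *
 *	params.nelem_hint = 1024;
 *	params.max_shift = 16;
 *	params.grow_decision = rht_grow_above_75;
 *	params.shrink_decision = rht_shrink_below_30;
 *	if (rhashtable_init(&ht, &params) < 0)
 *		... bail out ...
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	obj->key = key;
 *	rhashtable_insert(&ht, &obj->node);
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&ht, &key);	... the object or NULL ...
 *	rcu_read_unlock();
 *
 *	rhashtable_remove(&ht, &obj->node);
 *	synchronize_rcu();	... let concurrent RCU lookups drain ...
 *	kfree(obj);
 *	rhashtable_destroy(&ht);
 */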