Lines matching +full:array +full:-nest in lib/rhashtable.c (Linux kernel), grouped by function

// SPDX-License-Identifier: GPL-2.0-only
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
head_hashfn():
        return rht_head_hashfn(ht, tbl, he, ht->p);

lockdep_rht_mutex_is_held():
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;

lockdep_rht_bucket_is_held():
        if (unlikely(tbl->nest))
        return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
nested_table_top():
        /* The top-level bucket entry does not need RCU protection
         * because it's set at the same time as tbl->nest.
         */
        return (void *)rcu_dereference_protected(tbl->buckets[0], 1);

nested_table_free():
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        ntbl = rcu_dereference_protected(ntbl->table, 1);
nested_bucket_table_free():
        unsigned int size = tbl->size >> tbl->nest;
        unsigned int len = 1 << tbl->nest;

bucket_table_free():
        if (tbl->nest)
nested_bucket_table_alloc():
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
        if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
        tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
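The two lines above fix the geometry of a nested table: each nested level is one page of pointers (2^shift entries), and nest is the number of low hash bits consumed by the top-level array, chosen so the remaining bits divide evenly into page-sized levels. A minimal userspace sketch of that index arithmetic (the constants and the example hash are my own illustration; the same split reappears in __rht_bucket_nested() below):

#include <stdio.h>

int main(void)
{
        /* Assumes 64-bit pointers and 4 KiB pages: shift = 12 - 3 = 9,
         * so one nested level holds 1 << 9 = 512 pointers. */
        const unsigned int shift = 12 - 3;
        const unsigned int log_nbuckets = 18;      /* example: 2^18 buckets */
        /* same formula as nested_bucket_table_alloc() above */
        unsigned int nest = (log_nbuckets - 1) % shift + 1;     /* = 9 */
        unsigned int hash = 0x2a5f7;               /* arbitrary example hash */

        unsigned int top = hash & ((1u << nest) - 1);            /* top-level slot */
        unsigned int sub = (hash >> nest) & ((1u << shift) - 1); /* second level */

        printf("nest=%u top=%u sub=%u\n", nest, top, sub);
        return 0;
}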
bucket_table_alloc():
        lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
        tbl->size = size;
        rcu_head_init(&tbl->rcu);
        INIT_LIST_HEAD(&tbl->walkers);
        tbl->hash_rnd = get_random_u32();
        INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

rhashtable_last_table():
        tbl = rht_dereference_rcu(tbl->future_tbl, ht);
rhashtable_rehash_one():
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        int err = -EAGAIN;
        if (new_tbl->nest)
        err = -ENOENT;
        next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
        pprev = &entry->next;
        flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
        head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
        RCU_INIT_POINTER(entry->next, head);
        rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
rhashtable_rehash_chain():
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        if (err == -ENOENT)

rhashtable_rehash_attach():
        if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
                return -EEXIST;
rhashtable_rehash_table():
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        new_tbl = rht_dereference(old_tbl->future_tbl, ht);
        for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
        rcu_assign_pointer(ht->tbl, new_tbl);
        spin_lock(&ht->lock);
        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;
         * to check if it should not re-link the table.
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
        spin_unlock(&ht->lock);
        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;

rhashtable_rehash_alloc():
        return -ENOMEM;
rhashtable_shrink():
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * ht->mutex.
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        unsigned int nelems = atomic_read(&ht->nelems);
        if (size < ht->p.min_size)
                size = ht->p.min_size;
        if (old_tbl->size <= size)
        if (rht_dereference(old_tbl->future_tbl, ht))
                return -EEXIST;
rht_deferred_worker():
        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
        err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
        else if (tbl->nest)
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
        if (!err || err == -EEXIST) {
        mutex_unlock(&ht->mutex);
        schedule_work(&ht->run_work);
rhashtable_insert_rehash():
        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        size = tbl->size;
        err = -EBUSY;
        err = -ENOMEM;
        if (err == -EEXIST)
        schedule_work(&ht->run_work);
        if (likely(rcu_access_pointer(tbl->future_tbl)))
        if (err == -ENOMEM)
                schedule_work(&ht->run_work);
rhashtable_lookup_one():
        elasticity--;
        (ht->p.obj_cmpfn ?
         ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
        pprev = &head->next;
        if (!ht->rhlist)
        RCU_INIT_POINTER(list->next, plist);
        head = rht_dereference_bucket(head->next, tbl, hash);
        RCU_INIT_POINTER(list->rhead.next, head);
        return ERR_PTR(-EAGAIN);
        return ERR_PTR(-ENOENT);
rhashtable_insert_one():
        return ERR_PTR(-EEXIST);
        if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
        new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (PTR_ERR(data) != -ENOENT)
        return ERR_PTR(-E2BIG);
        return ERR_PTR(-EAGAIN);
        RCU_INIT_POINTER(obj->next, head);
        if (ht->rhlist) {
        RCU_INIT_POINTER(list->next, NULL);
        atomic_inc(&ht->nelems);
        schedule_work(&ht->run_work);
rhashtable_try_insert():
        new_tbl = rcu_dereference(ht->tbl);
        hash = rht_head_hashfn(ht, tbl, obj, ht->p);
        if (rcu_access_pointer(tbl->future_tbl))
        new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        data = ERR_PTR(-EAGAIN);
        if (PTR_ERR(new_tbl) != -EEXIST)
        if (PTR_ERR(data) == -EAGAIN)
                -EAGAIN);

rhashtable_insert_slow():
        } while (PTR_ERR(data) == -EAGAIN);
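The insert path above (rhashtable_lookup_one() / rhashtable_insert_one() / rhashtable_try_insert(), retried on -EAGAIN by rhashtable_insert_slow()) is what the fast-path wrappers in <linux/rhashtable.h> fall back to. A minimal usage sketch; struct item, its field names, and item_params are illustrative, not from this file:

#include <linux/rhashtable.h>

struct item {
        u32 key;
        struct rhash_head node;       /* links the item into a bucket chain */
};

static const struct rhashtable_params item_params = {
        .key_len     = sizeof(u32),
        .key_offset  = offsetof(struct item, key),
        .head_offset = offsetof(struct item, node),
};

static int add_item(struct rhashtable *ht, struct item *it)
{
        /* returns -EEXIST if an equal key is already present */
        return rhashtable_insert_fast(ht, &it->node, item_params);
}

static struct item *find_item(struct rhashtable *ht, u32 key)
{
        /* takes rcu_read_lock() internally; the result only stays valid
         * under the caller's own RCU or refcounting scheme */
        return rhashtable_lookup_fast(ht, &key, item_params);
}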
rhashtable_walk_enter():
 * rhashtable_walk_enter - Initialise an iterator
 * non-preemptable context, but cannot be called from softirq or
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;
        iter->end_of_table = 0;
        spin_lock(&ht->lock);
        iter->walker.tbl =
                rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
        list_add(&iter->walker.list, &iter->walker.tbl->walkers);
        spin_unlock(&ht->lock);
rhashtable_walk_exit():
 * rhashtable_walk_exit - Free an iterator
        spin_lock(&iter->ht->lock);
        if (iter->walker.tbl)
                list_del(&iter->walker.list);
        spin_unlock(&iter->ht->lock);
rhashtable_walk_start_check():
 * rhashtable_walk_start_check - Start a hash table walk
 * Returns -EAGAIN if resize event occurred. Note that the iterator
        struct rhashtable *ht = iter->ht;
        bool rhlist = ht->rhlist;
        spin_lock(&ht->lock);
        if (iter->walker.tbl)
                list_del(&iter->walker.list);
        spin_unlock(&ht->lock);
        if (iter->end_of_table)
        if (!iter->walker.tbl) {
                iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
                iter->slot = 0;
                iter->skip = 0;
                return -EAGAIN;
        if (iter->p && !rhlist) {
                rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
                        if (p == iter->p) {
                                iter->skip = skip;
                iter->p = NULL;
        } else if (iter->p && rhlist) {
                rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
                     list = rcu_dereference(list->next)) {
                        if (list == iter->list) {
                                iter->p = p;
                                iter->skip = skip;
                iter->p = NULL;
__rhashtable_walk_find_next():
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * Returns -EAGAIN if resize event occurred.
        struct bucket_table *tbl = iter->walker.tbl;
        struct rhlist_head *list = iter->list;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        bool rhlist = ht->rhlist;
        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;
                rht_for_each_rcu(p, tbl, iter->slot) {
                        skip--;
                        list = rcu_dereference(list->next);
                        skip--;
                iter->skip++;
                iter->p = p;
                iter->list = list;
                return rht_obj(ht, rhlist ? &list->rhead : p);
                iter->skip = 0;
        iter->p = NULL;
        iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (iter->walker.tbl) {
                iter->slot = 0;
                iter->skip = 0;
                return ERR_PTR(-EAGAIN);
        iter->end_of_table = true;
rhashtable_walk_next():
 * rhashtable_walk_next - Return the next object and advance the iterator
 * Returns -EAGAIN if resize event occurred. Note that the iterator
        struct rhlist_head *list = iter->list;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        bool rhlist = ht->rhlist;
        if (!rhlist || !(list = rcu_dereference(list->next))) {
                p = rcu_dereference(p->next);
        iter->skip++;
        iter->p = p;
        iter->list = list;
        return rht_obj(ht, rhlist ? &list->rhead : p);
        iter->skip = 0;
        iter->slot++;
rhashtable_walk_peek():
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * Returns -EAGAIN if resize event occurred. Note that the iterator
        struct rhlist_head *list = iter->list;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        return rht_obj(ht, ht->rhlist ? &list->rhead : p);
        if (iter->skip) {
                iter->skip--;
rhashtable_walk_stop():
 * rhashtable_walk_stop - Finish a hash table walk
        struct bucket_table *tbl = iter->walker.tbl;
        ht = iter->ht;
        spin_lock(&ht->lock);
        if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
                /* This bucket table is being freed, don't re-link it. */
                iter->walker.tbl = NULL;
        list_add(&iter->walker.list, &tbl->walkers);
        spin_unlock(&ht->lock);
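Together, the walker entry points above form the standard iteration pattern: enter once, then start/next/stop (possibly repeatedly), and exit when done. A sketch, reusing the illustrative struct item from earlier (error handling trimmed):

static void dump_items(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct item *it;

        rhashtable_walk_enter(ht, &iter);
        rhashtable_walk_start(&iter);            /* takes rcu_read_lock() */

        while ((it = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(it)) {
                        if (PTR_ERR(it) == -EAGAIN)
                                continue;        /* table resized mid-walk */
                        break;
                }
                pr_info("key=%u\n", it->key);
        }

        rhashtable_walk_stop(&iter);             /* drops rcu_read_lock() */
        rhashtable_walk_exit(&iter);
}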
rounded_hashtable_size():
        if (params->nelem_hint)
                retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                              (unsigned long)params->min_size);
                              (unsigned long)params->min_size);
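The hint is scaled by 4/3 so the table starts at roughly 75% maximum load, then rounded up to a power of two, never going below min_size. A worked example (my own numbers): nelem_hint = 100 gives 100 * 4 / 3 = 133 with integer division, and roundup_pow_of_two(133) = 256, so a hint of 100 elements yields a 256-bucket initial table unless min_size is larger.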
rhashtable_init():
 * rhashtable_init - initialize a new hash table
        if ((!params->key_len && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;
        mutex_init(&ht->mutex);
        spin_lock_init(&ht->lock);
        memcpy(&ht->p, params, sizeof(*params));
        if (params->min_size)
                ht->p.min_size = roundup_pow_of_two(params->min_size);
        ht->max_elems = 1u << 31;
        if (params->max_size) {
                ht->p.max_size = rounddown_pow_of_two(params->max_size);
                if (ht->p.max_size < ht->max_elems / 2)
                        ht->max_elems = ht->p.max_size * 2;
        ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
        size = rounded_hashtable_size(&ht->p);
        ht->key_len = ht->p.key_len;
        if (!params->hashfn) {
                ht->p.hashfn = jhash;
                if (!(ht->key_len & (sizeof(u32) - 1))) {
                        ht->key_len /= sizeof(u32);
                        ht->p.hashfn = rhashtable_jhash2;
        size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
        atomic_set(&ht->nelems, 0);
        RCU_INIT_POINTER(ht->tbl, tbl);
        INIT_WORK(&ht->run_work, rht_deferred_worker);
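Note the default hash selection above: with no explicit hashfn, jhash is used, and a key_len that is a multiple of sizeof(u32) switches to the word-at-a-time rhashtable_jhash2. Initialising the illustrative table from the earlier sketch (item_params has a 4-byte key, so it gets rhashtable_jhash2 automatically):

static struct rhashtable item_ht;

static int __init item_ht_setup(void)
{
        return rhashtable_init(&item_ht, &item_params);
}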
rhltable_init():
 * rhltable_init - initialize a new hash list table
        err = rhashtable_init(&hlt->ht, params);
        hlt->ht.rhlist = true;
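rhltable is the variant that tolerates duplicate keys: objects embed a struct rhlist_head (whose first member is a struct rhash_head), and equal keys hang off a single bucket entry as a list. A hedged sketch, assuming params analogous to item_params but with head_offset pointing at the rhlist_head:

struct ditem {
        u32 key;
        struct rhlist_head list;      /* rhlist_head, not rhash_head */
};

static int add_dup(struct rhltable *hlt, struct ditem *d,
                   const struct rhashtable_params params)
{
        /* unlike rhashtable_insert_fast(), duplicate keys are allowed */
        return rhltable_insert(hlt, &d->list, params);
}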
rhashtable_free_one():
        if (!ht->rhlist) {
        obj = &list->rhead;
        list = rht_dereference(list->next, ht);
rhashtable_free_and_destroy():
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * must occur in a compatible manner. Then frees the bucket array.
        cancel_work_sync(&ht->run_work);
        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
        for (i = 0; i < tbl->size; i++) {
                rht_dereference(pos->next, ht) : NULL;
                rht_dereference(pos->next, ht) : NULL)
        next_tbl = rht_dereference(tbl->future_tbl, ht);
        mutex_unlock(&ht->mutex);
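The free_fn callback is invoked once per remaining element while ht->mutex is held, before the bucket array itself is released. A teardown sketch for the illustrative item_ht:

static void item_free_cb(void *ptr, void *arg)
{
        kfree(ptr);                   /* ptr is the struct item itself */
}

static void item_ht_teardown(void)
{
        rhashtable_free_and_destroy(&item_ht, item_free_cb, NULL);
}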
__rht_bucket_nested():
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        subhash >>= tbl->nest;
        index = subhash & ((1 << shift) - 1);
rht_bucket_nested_insert():
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        hash >>= tbl->nest;
        index = hash & ((1 << shift) - 1);