/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

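/* When a large flat bucket array cannot be allocated, the table falls back
 * to a small tree of page-sized chunks. Each chunk is an array of these
 * unions: interior chunks hold pointers to further chunks, leaf chunks hold
 * the bucket heads themselves. A page holds
 * 1 << (PAGE_SHIFT - ilog2(sizeof(void *))) entries, e.g. 512 on a 64-bit
 * system with 4 KiB pages.
 */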
union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	free_bucket_spinlocks(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

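/* Try a flat bucket array first. If that fails and the allocation is not
 * allowed to sleep (anything other than plain GFP_KERNEL), fall back to a
 * nested table built from page-sized chunks.
 */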
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size, max_locks;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kvzalloc(size, gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	max_locks = size >> 1;
	if (tbl->nest)
		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);

	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
				   ht->p.locks_mul, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
		return -EEXIST;

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by
 * per-bucket locks, and concurrent RCU-protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

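/* Slow path of insertion, reached from the inline fast paths in
 * <linux/rhashtable.h> when they cannot complete (e.g. because a rehash
 * is in progress). Retries until the insert either succeeds or fails
 * with a permanent error.
 */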
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptible context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/* Need to validate that 'p' is still in the table, and
		 * if so, update 'skip'.
		 */
		struct rhash_head *p;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond the last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

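/* A minimal walker sketch (struct test_obj and its "key" member are
 * hypothetical; error handling condensed). rhashtable_walk_next() returns
 * ERR_PTR(-EAGAIN) after a resize, with the iterator already rewound, so
 * the walk can simply continue:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */
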
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is API initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

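/* A minimal end-to-end sketch using the fixed-length-key parameters from
 * Configuration Example 1 above (struct test_obj is hypothetical; error
 * handling condensed). rhashtable_insert_fast() and
 * rhashtable_lookup_fast() are the inline interfaces from
 * <linux/rhashtable.h>:
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj;
 *	int key = 1;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &params);
 *	...
 *	err = rhashtable_insert_fast(&ht, &obj->node, params);
 *	...
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&ht, &key, params);
 *	rcu_read_unlock();
 *	...
 *	rhashtable_free_and_destroy(&ht, NULL, NULL);
 */
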
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

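/* A minimal duplicate-key sketch (struct test_obj with an embedded
 * struct rhlist_head "list_node" member is hypothetical). Lookups return
 * the head of a chain of entries sharing the key, which can be traversed
 * with rhl_for_each_entry_rcu() from <linux/rhashtable.h>:
 *
 *	struct rhlist_head *list, *pos;
 *	struct test_obj *obj;
 *
 *	err = rhltable_insert(&hlt, &obj->list_node, params);
 *	...
 *	rcu_read_lock();
 *	list = rhltable_lookup(&hlt, &key, params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		pr_info("key %d\n", obj->key);
 *	rcu_read_unlock();
 */
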
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller must ensure that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

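/* Resolve @hash to its bucket head inside a nested table. Interior chunks
 * that were never allocated resolve to a static NULLS marker, so lookups
 * in unpopulated regions see an empty bucket rather than a NULL pointer.
 */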
struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
1239