xref: /openbmc/linux/lib/rhashtable.c (revision d623f60d)
1 /*
2  * Resizable, Scalable, Concurrent Hash Table
3  *
4  * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
5  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
6  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
7  *
8  * Code partially derived from nft_hash
9  * Rewritten with rehash code from br_multicast plus single list
10  * pointer as suggested by Josh Triplett
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License version 2 as
14  * published by the Free Software Foundation.
15  */
16 
17 #include <linux/atomic.h>
18 #include <linux/kernel.h>
19 #include <linux/init.h>
20 #include <linux/log2.h>
21 #include <linux/sched.h>
22 #include <linux/rculist.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/mm.h>
26 #include <linux/jhash.h>
27 #include <linux/random.h>
28 #include <linux/rhashtable.h>
29 #include <linux/err.h>
30 #include <linux/export.h>
31 
32 #define HASH_DEFAULT_SIZE	64UL
33 #define HASH_MIN_SIZE		4U
34 #define BUCKET_LOCKS_PER_CPU	32UL
35 
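/*
 * A nested bucket table is a small tree of page-sized arrays of these
 * slots: interior slots point to the next level (->table), while leaf
 * slots are the bucket heads themselves (->bucket).  Nested tables are
 * only used when bucket_table_alloc() cannot allocate a flat bucket
 * array, i.e. for atomic (non-GFP_KERNEL) allocations.
 */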
36 union nested_table {
37 	union nested_table __rcu *table;
38 	struct rhash_head __rcu *bucket;
39 };
40 
41 static u32 head_hashfn(struct rhashtable *ht,
42 		       const struct bucket_table *tbl,
43 		       const struct rhash_head *he)
44 {
45 	return rht_head_hashfn(ht, tbl, he, ht->p);
46 }
47 
48 #ifdef CONFIG_PROVE_LOCKING
49 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
50 
51 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
52 {
53 	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
54 }
55 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
56 
57 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
58 {
59 	spinlock_t *lock = rht_bucket_lock(tbl, hash);
60 
61 	return (debug_locks) ? lockdep_is_held(lock) : 1;
62 }
63 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
64 #else
65 #define ASSERT_RHT_MUTEX(HT)
66 #endif
67 
68 static void nested_table_free(union nested_table *ntbl, unsigned int size)
69 {
70 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
71 	const unsigned int len = 1 << shift;
72 	unsigned int i;
73 
74 	ntbl = rcu_dereference_raw(ntbl->table);
75 	if (!ntbl)
76 		return;
77 
78 	if (size > len) {
79 		size >>= shift;
80 		for (i = 0; i < len; i++)
81 			nested_table_free(ntbl + i, size);
82 	}
83 
84 	kfree(ntbl);
85 }
86 
87 static void nested_bucket_table_free(const struct bucket_table *tbl)
88 {
89 	unsigned int size = tbl->size >> tbl->nest;
90 	unsigned int len = 1 << tbl->nest;
91 	union nested_table *ntbl;
92 	unsigned int i;
93 
94 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
95 
96 	for (i = 0; i < len; i++)
97 		nested_table_free(ntbl + i, size);
98 
99 	kfree(ntbl);
100 }
101 
102 static void bucket_table_free(const struct bucket_table *tbl)
103 {
104 	if (tbl->nest)
105 		nested_bucket_table_free(tbl);
106 
107 	free_bucket_spinlocks(tbl->locks);
108 	kvfree(tbl);
109 }
110 
111 static void bucket_table_free_rcu(struct rcu_head *head)
112 {
113 	bucket_table_free(container_of(head, struct bucket_table, rcu));
114 }
115 
116 static union nested_table *nested_table_alloc(struct rhashtable *ht,
117 					      union nested_table __rcu **prev,
118 					      unsigned int shifted,
119 					      unsigned int nhash)
120 {
121 	union nested_table *ntbl;
122 	int i;
123 
124 	ntbl = rcu_dereference(*prev);
125 	if (ntbl)
126 		return ntbl;
127 
128 	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
129 
130 	if (ntbl && shifted) {
131 		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
132 			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
133 					    (i << shifted) | nhash);
134 	}
135 
136 	rcu_assign_pointer(*prev, ntbl);
137 
138 	return ntbl;
139 }
140 
141 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
142 						      size_t nbuckets,
143 						      gfp_t gfp)
144 {
145 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
146 	struct bucket_table *tbl;
147 	size_t size;
148 
149 	if (nbuckets < (1 << (shift + 1)))
150 		return NULL;
151 
152 	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
153 
154 	tbl = kzalloc(size, gfp);
155 	if (!tbl)
156 		return NULL;
157 
158 	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
159 				0, 0)) {
160 		kfree(tbl);
161 		return NULL;
162 	}
163 
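	/*
	 * 'nest' is the number of hash bits consumed by the top level.
	 * Assuming 4 KiB pages and 64-bit pointers, shift is 9 (512 slots
	 * per page); e.g. nbuckets = 16384 gives nest = (14 - 1) % 9 + 1 = 5,
	 * i.e. a 32-entry top level over 32 leaf pages of 512 buckets each.
	 */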
164 	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
165 
166 	return tbl;
167 }
168 
169 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
170 					       size_t nbuckets,
171 					       gfp_t gfp)
172 {
173 	struct bucket_table *tbl = NULL;
174 	size_t size, max_locks;
175 	int i;
176 
177 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
178 	if (gfp != GFP_KERNEL)
179 		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
180 	else
181 		tbl = kvzalloc(size, gfp);
182 
183 	size = nbuckets;
184 
185 	if (tbl == NULL && gfp != GFP_KERNEL) {
186 		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
187 		nbuckets = 0;
188 	}
189 	if (tbl == NULL)
190 		return NULL;
191 
192 	tbl->size = size;
193 
194 	max_locks = size >> 1;
195 	if (tbl->nest)
196 		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
197 
198 	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
199 				   ht->p.locks_mul, gfp) < 0) {
200 		bucket_table_free(tbl);
201 		return NULL;
202 	}
203 
204 	INIT_LIST_HEAD(&tbl->walkers);
205 
206 	tbl->hash_rnd = get_random_u32();
207 
208 	for (i = 0; i < nbuckets; i++)
209 		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
210 
211 	return tbl;
212 }
213 
214 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
215 						  struct bucket_table *tbl)
216 {
217 	struct bucket_table *new_tbl;
218 
219 	do {
220 		new_tbl = tbl;
221 		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
222 	} while (tbl);
223 
224 	return new_tbl;
225 }
226 
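/*
 * Move one entry out of the old bucket chain at @old_hash into the newest
 * table in the future_tbl chain.  The last entry of the old chain is
 * picked, linked at the head of its new bucket (under the new bucket
 * lock, nested inside the old one), and only then unlinked from the old
 * chain, so RCU readers always find it in at least one table.  Returns
 * -ENOENT once the old chain is empty, and -EAGAIN if the target table
 * is nested and the rehash must be redone from the worker.
 */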
227 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
228 {
229 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
230 	struct bucket_table *new_tbl = rhashtable_last_table(ht,
231 		rht_dereference_rcu(old_tbl->future_tbl, ht));
232 	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
233 	int err = -EAGAIN;
234 	struct rhash_head *head, *next, *entry;
235 	spinlock_t *new_bucket_lock;
236 	unsigned int new_hash;
237 
238 	if (new_tbl->nest)
239 		goto out;
240 
241 	err = -ENOENT;
242 
243 	rht_for_each(entry, old_tbl, old_hash) {
244 		err = 0;
245 		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
246 
247 		if (rht_is_a_nulls(next))
248 			break;
249 
250 		pprev = &entry->next;
251 	}
252 
253 	if (err)
254 		goto out;
255 
256 	new_hash = head_hashfn(ht, new_tbl, entry);
257 
258 	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
259 
260 	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
261 	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
262 				      new_tbl, new_hash);
263 
264 	RCU_INIT_POINTER(entry->next, head);
265 
266 	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
267 	spin_unlock(new_bucket_lock);
268 
269 	rcu_assign_pointer(*pprev, next);
270 
271 out:
272 	return err;
273 }
274 
275 static int rhashtable_rehash_chain(struct rhashtable *ht,
276 				    unsigned int old_hash)
277 {
278 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
279 	spinlock_t *old_bucket_lock;
280 	int err;
281 
282 	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
283 
284 	spin_lock_bh(old_bucket_lock);
285 	while (!(err = rhashtable_rehash_one(ht, old_hash)))
286 		;
287 
288 	if (err == -ENOENT) {
289 		old_tbl->rehash++;
290 		err = 0;
291 	}
292 	spin_unlock_bh(old_bucket_lock);
293 
294 	return err;
295 }
296 
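/*
 * Attach @new_tbl as the rehash target of @old_tbl.  future_tbl pointers
 * form a chain of tables; the first bucket lock of the old table
 * serialises attachment so that each table gets at most one successor.
 */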
297 static int rhashtable_rehash_attach(struct rhashtable *ht,
298 				    struct bucket_table *old_tbl,
299 				    struct bucket_table *new_tbl)
300 {
301 	/* Protect future_tbl using the first bucket lock. */
302 	spin_lock_bh(old_tbl->locks);
303 
304 	/* Did somebody beat us to it? */
305 	if (rcu_access_pointer(old_tbl->future_tbl)) {
306 		spin_unlock_bh(old_tbl->locks);
307 		return -EEXIST;
308 	}
309 
310 	/* Make insertions go into the new, empty table right away. Deletions
311 	 * and lookups will be attempted in both tables until we synchronize.
312 	 */
313 	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
314 
315 	spin_unlock_bh(old_tbl->locks);
316 
317 	return 0;
318 }
319 
320 static int rhashtable_rehash_table(struct rhashtable *ht)
321 {
322 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
323 	struct bucket_table *new_tbl;
324 	struct rhashtable_walker *walker;
325 	unsigned int old_hash;
326 	int err;
327 
328 	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
329 	if (!new_tbl)
330 		return 0;
331 
332 	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
333 		err = rhashtable_rehash_chain(ht, old_hash);
334 		if (err)
335 			return err;
336 		cond_resched();
337 	}
338 
339 	/* Publish the new table pointer. */
340 	rcu_assign_pointer(ht->tbl, new_tbl);
341 
342 	spin_lock(&ht->lock);
343 	list_for_each_entry(walker, &old_tbl->walkers, list)
344 		walker->tbl = NULL;
345 	spin_unlock(&ht->lock);
346 
347 	/* Wait for readers. All new readers will see the new
348 	 * table, and thus no references to the old table will
349 	 * remain.
350 	 */
351 	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
352 
353 	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
354 }
355 
356 static int rhashtable_rehash_alloc(struct rhashtable *ht,
357 				   struct bucket_table *old_tbl,
358 				   unsigned int size)
359 {
360 	struct bucket_table *new_tbl;
361 	int err;
362 
363 	ASSERT_RHT_MUTEX(ht);
364 
365 	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
366 	if (new_tbl == NULL)
367 		return -ENOMEM;
368 
369 	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
370 	if (err)
371 		bucket_table_free(new_tbl);
372 
373 	return err;
374 }
375 
376 /**
377  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
378  * @ht:		the hash table to shrink
379  *
380  * This function shrinks the hash table to fit, i.e., to the smallest
381  * size that would not cause it to expand right away automatically.
382  *
383  * The caller must ensure that no concurrent resizing occurs by holding
384  * ht->mutex.
385  *
386  * The caller must ensure that no concurrent table mutations take place.
387  * It is however valid to have concurrent lookups if they are RCU protected.
388  *
389  * It is valid to have concurrent insertions and deletions protected by per
390  * bucket locks or concurrent RCU protected lookups and traversals.
391  */
392 static int rhashtable_shrink(struct rhashtable *ht)
393 {
394 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
395 	unsigned int nelems = atomic_read(&ht->nelems);
396 	unsigned int size = 0;
397 
398 	if (nelems)
399 		size = roundup_pow_of_two(nelems * 3 / 2);
400 	if (size < ht->p.min_size)
401 		size = ht->p.min_size;
402 
403 	if (old_tbl->size <= size)
404 		return 0;
405 
406 	if (rht_dereference(old_tbl->future_tbl, ht))
407 		return -EEXIST;
408 
409 	return rhashtable_rehash_alloc(ht, old_tbl, size);
410 }
411 
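/*
 * Deferred resize work: grow by doubling above 75% utilisation, shrink
 * when automatic_shrinking is enabled and utilisation drops below 30%,
 * or rebuild a nested table at the same size now that GFP_KERNEL
 * allocation is possible, then perform the actual rehash.  The work is
 * rescheduled if the rehash could not be completed.
 */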
412 static void rht_deferred_worker(struct work_struct *work)
413 {
414 	struct rhashtable *ht;
415 	struct bucket_table *tbl;
416 	int err = 0;
417 
418 	ht = container_of(work, struct rhashtable, run_work);
419 	mutex_lock(&ht->mutex);
420 
421 	tbl = rht_dereference(ht->tbl, ht);
422 	tbl = rhashtable_last_table(ht, tbl);
423 
424 	if (rht_grow_above_75(ht, tbl))
425 		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
426 	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
427 		err = rhashtable_shrink(ht);
428 	else if (tbl->nest)
429 		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
430 
431 	if (!err)
432 		err = rhashtable_rehash_table(ht);
433 
434 	mutex_unlock(&ht->mutex);
435 
436 	if (err)
437 		schedule_work(&ht->run_work);
438 }
439 
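/*
 * Slow-path table allocation from the insert path, used when a chain grew
 * beyond the elasticity limit or the table exceeded 100% utilisation.
 * The new table is allocated with GFP_ATOMIC (doubled in size if we are
 * above 75%), attached, and the deferred worker is kicked to do the
 * rehash.  On allocation failure the worker is scheduled anyway so the
 * allocation can be retried with GFP_KERNEL in process context.
 */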
440 static int rhashtable_insert_rehash(struct rhashtable *ht,
441 				    struct bucket_table *tbl)
442 {
443 	struct bucket_table *old_tbl;
444 	struct bucket_table *new_tbl;
445 	unsigned int size;
446 	int err;
447 
448 	old_tbl = rht_dereference_rcu(ht->tbl, ht);
449 
450 	size = tbl->size;
451 
452 	err = -EBUSY;
453 
454 	if (rht_grow_above_75(ht, tbl))
455 		size *= 2;
456 	/* Do not schedule more than one rehash */
457 	else if (old_tbl != tbl)
458 		goto fail;
459 
460 	err = -ENOMEM;
461 
462 	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
463 	if (new_tbl == NULL)
464 		goto fail;
465 
466 	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
467 	if (err) {
468 		bucket_table_free(new_tbl);
469 		if (err == -EEXIST)
470 			err = 0;
471 	} else
472 		schedule_work(&ht->run_work);
473 
474 	return err;
475 
476 fail:
477 	/* Do not fail the insert if someone else did a rehash. */
478 	if (likely(rcu_dereference_raw(tbl->future_tbl)))
479 		return 0;
480 
481 	/* Schedule async rehash to retry allocation in process context. */
482 	if (err == -ENOMEM)
483 		schedule_work(&ht->run_work);
484 
485 	return err;
486 }
487 
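/*
 * Search the bucket at @hash for @key, looking at no more than
 * RHT_ELASTICITY entries.  Returns the existing object on a match (the
 * insert then fails with -EEXIST), NULL if @obj was linked into an
 * existing rhlist chain, -ENOENT if no match was found, or -EAGAIN if
 * the chain is too long and a rehash should be forced.
 */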
488 static void *rhashtable_lookup_one(struct rhashtable *ht,
489 				   struct bucket_table *tbl, unsigned int hash,
490 				   const void *key, struct rhash_head *obj)
491 {
492 	struct rhashtable_compare_arg arg = {
493 		.ht = ht,
494 		.key = key,
495 	};
496 	struct rhash_head __rcu **pprev;
497 	struct rhash_head *head;
498 	int elasticity;
499 
500 	elasticity = RHT_ELASTICITY;
501 	pprev = rht_bucket_var(tbl, hash);
502 	rht_for_each_continue(head, *pprev, tbl, hash) {
503 		struct rhlist_head *list;
504 		struct rhlist_head *plist;
505 
506 		elasticity--;
507 		if (!key ||
508 		    (ht->p.obj_cmpfn ?
509 		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
510 		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
511 			pprev = &head->next;
512 			continue;
513 		}
514 
515 		if (!ht->rhlist)
516 			return rht_obj(ht, head);
517 
518 		list = container_of(obj, struct rhlist_head, rhead);
519 		plist = container_of(head, struct rhlist_head, rhead);
520 
521 		RCU_INIT_POINTER(list->next, plist);
522 		head = rht_dereference_bucket(head->next, tbl, hash);
523 		RCU_INIT_POINTER(list->rhead.next, head);
524 		rcu_assign_pointer(*pprev, obj);
525 
526 		return NULL;
527 	}
528 
529 	if (elasticity <= 0)
530 		return ERR_PTR(-EAGAIN);
531 
532 	return ERR_PTR(-ENOENT);
533 }
534 
535 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
536 						  struct bucket_table *tbl,
537 						  unsigned int hash,
538 						  struct rhash_head *obj,
539 						  void *data)
540 {
541 	struct rhash_head __rcu **pprev;
542 	struct bucket_table *new_tbl;
543 	struct rhash_head *head;
544 
545 	if (!IS_ERR_OR_NULL(data))
546 		return ERR_PTR(-EEXIST);
547 
548 	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
549 		return ERR_CAST(data);
550 
551 	new_tbl = rcu_dereference(tbl->future_tbl);
552 	if (new_tbl)
553 		return new_tbl;
554 
555 	if (PTR_ERR(data) != -ENOENT)
556 		return ERR_CAST(data);
557 
558 	if (unlikely(rht_grow_above_max(ht, tbl)))
559 		return ERR_PTR(-E2BIG);
560 
561 	if (unlikely(rht_grow_above_100(ht, tbl)))
562 		return ERR_PTR(-EAGAIN);
563 
564 	pprev = rht_bucket_insert(ht, tbl, hash);
565 	if (!pprev)
566 		return ERR_PTR(-ENOMEM);
567 
568 	head = rht_dereference_bucket(*pprev, tbl, hash);
569 
570 	RCU_INIT_POINTER(obj->next, head);
571 	if (ht->rhlist) {
572 		struct rhlist_head *list;
573 
574 		list = container_of(obj, struct rhlist_head, rhead);
575 		RCU_INIT_POINTER(list->next, NULL);
576 	}
577 
578 	rcu_assign_pointer(*pprev, obj);
579 
580 	atomic_inc(&ht->nelems);
581 	if (rht_grow_above_75(ht, tbl))
582 		schedule_work(&ht->run_work);
583 
584 	return NULL;
585 }
586 
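/*
 * Insert @obj, starting from the oldest table whose target bucket has not
 * been rehashed yet (tbl->rehash counts fully rehashed buckets).  If the
 * lookup/insert step reports a newer table via future_tbl, the insertion
 * is repeated there under a nested bucket lock so that the object ends up
 * in the newest table.
 */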
587 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
588 				   struct rhash_head *obj)
589 {
590 	struct bucket_table *new_tbl;
591 	struct bucket_table *tbl;
592 	unsigned int hash;
593 	spinlock_t *lock;
594 	void *data;
595 
596 	tbl = rcu_dereference(ht->tbl);
597 
598 	/* All insertions must grab the oldest table containing
599 	 * the hashed bucket that is yet to be rehashed.
600 	 */
601 	for (;;) {
602 		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
603 		lock = rht_bucket_lock(tbl, hash);
604 		spin_lock_bh(lock);
605 
606 		if (tbl->rehash <= hash)
607 			break;
608 
609 		spin_unlock_bh(lock);
610 		tbl = rcu_dereference(tbl->future_tbl);
611 	}
612 
613 	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
614 	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
615 	if (PTR_ERR(new_tbl) != -EEXIST)
616 		data = ERR_CAST(new_tbl);
617 
618 	while (!IS_ERR_OR_NULL(new_tbl)) {
619 		tbl = new_tbl;
620 		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
621 		spin_lock_nested(rht_bucket_lock(tbl, hash),
622 				 SINGLE_DEPTH_NESTING);
623 
624 		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
625 		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
626 		if (PTR_ERR(new_tbl) != -EEXIST)
627 			data = ERR_CAST(new_tbl);
628 
629 		spin_unlock(rht_bucket_lock(tbl, hash));
630 	}
631 
632 	spin_unlock_bh(lock);
633 
634 	if (PTR_ERR(data) == -EAGAIN)
635 		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
636 			       -EAGAIN);
637 
638 	return data;
639 }
640 
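/*
 * Out-of-line fallback used by the __rhashtable_insert_fast() inlines in
 * <linux/rhashtable.h> when the fast path cannot complete, e.g. while a
 * rehash is pending or the table needs to grow.
 */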
641 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
642 			     struct rhash_head *obj)
643 {
644 	void *data;
645 
646 	do {
647 		rcu_read_lock();
648 		data = rhashtable_try_insert(ht, key, obj);
649 		rcu_read_unlock();
650 	} while (PTR_ERR(data) == -EAGAIN);
651 
652 	return data;
653 }
654 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
655 
656 /**
657  * rhashtable_walk_enter - Initialise an iterator
658  * @ht:		Table to walk over
659  * @iter:	Hash table Iterator
660  *
661  * This function prepares a hash table walk.
662  *
663  * Note that if you restart a walk after rhashtable_walk_stop you
664  * may see the same object twice.  Also, you may miss objects if
665  * there are removals in between rhashtable_walk_stop and the next
666  * call to rhashtable_walk_start.
667  *
668  * For a completely stable walk you should construct your own data
669  * structure outside the hash table.
670  *
671  * This function may be called from any process context, including
672  * non-preemptable context, but cannot be called from softirq or
673  * hardirq context.
674  *
675  * You must call rhashtable_walk_exit after this function returns.
676  */
677 void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
678 {
679 	iter->ht = ht;
680 	iter->p = NULL;
681 	iter->slot = 0;
682 	iter->skip = 0;
683 	iter->end_of_table = 0;
684 
685 	spin_lock(&ht->lock);
686 	iter->walker.tbl =
687 		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
688 	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
689 	spin_unlock(&ht->lock);
690 }
691 EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
692 
693 /**
694  * rhashtable_walk_exit - Free an iterator
695  * @iter:	Hash table Iterator
696  *
697  * This function frees resources allocated by rhashtable_walk_enter.
698  */
699 void rhashtable_walk_exit(struct rhashtable_iter *iter)
700 {
701 	spin_lock(&iter->ht->lock);
702 	if (iter->walker.tbl)
703 		list_del(&iter->walker.list);
704 	spin_unlock(&iter->ht->lock);
705 }
706 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
707 
708 /**
709  * rhashtable_walk_start_check - Start a hash table walk
710  * @iter:	Hash table iterator
711  *
712  * Start a hash table walk at the current iterator position.  Note that we take
713  * the RCU lock in all cases including when we return an error.  So you must
714  * always call rhashtable_walk_stop to clean up.
715  *
716  * Returns zero if successful.
717  *
718  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
719  * will rewind back to the beginning and you may use it immediately
720  * by calling rhashtable_walk_next.
721  *
722  * rhashtable_walk_start is defined as an inline variant that returns
723  * void. This is preferred in cases where the caller would ignore
724  * resize events and always continue.
725  */
726 int rhashtable_walk_start_check(struct rhashtable_iter *iter)
727 	__acquires(RCU)
728 {
729 	struct rhashtable *ht = iter->ht;
730 	bool rhlist = ht->rhlist;
731 
732 	rcu_read_lock();
733 
734 	spin_lock(&ht->lock);
735 	if (iter->walker.tbl)
736 		list_del(&iter->walker.list);
737 	spin_unlock(&ht->lock);
738 
739 	if (iter->end_of_table)
740 		return 0;
741 	if (!iter->walker.tbl) {
742 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
743 		iter->slot = 0;
744 		iter->skip = 0;
745 		return -EAGAIN;
746 	}
747 
748 	if (iter->p && !rhlist) {
749 		/*
750 		 * We need to validate that 'p' is still in the table, and
751 		 * if so, update 'skip'
752 		 */
753 		struct rhash_head *p;
754 		int skip = 0;
755 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
756 			skip++;
757 			if (p == iter->p) {
758 				iter->skip = skip;
759 				goto found;
760 			}
761 		}
762 		iter->p = NULL;
763 	} else if (iter->p && rhlist) {
764 		/* Need to validate that 'list' is still in the table, and
765 		 * if so, update 'skip' and 'p'.
766 		 */
767 		struct rhash_head *p;
768 		struct rhlist_head *list;
769 		int skip = 0;
770 		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
771 			for (list = container_of(p, struct rhlist_head, rhead);
772 			     list;
773 			     list = rcu_dereference(list->next)) {
774 				skip++;
775 				if (list == iter->list) {
776 					iter->p = p;
777 					iter->skip = skip;
778 					goto found;
779 				}
780 			}
781 		}
782 		iter->p = NULL;
783 	}
784 found:
785 	return 0;
786 }
787 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
788 
789 /**
790  * __rhashtable_walk_find_next - Find the next element in a table (or the first
791  * one in case of a new walk).
792  *
793  * @iter:	Hash table iterator
794  *
795  * Returns the found object or NULL when the end of the table is reached.
796  *
797  * Returns -EAGAIN if a resize event occurred.
798  */
799 static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
800 {
801 	struct bucket_table *tbl = iter->walker.tbl;
802 	struct rhlist_head *list = iter->list;
803 	struct rhashtable *ht = iter->ht;
804 	struct rhash_head *p = iter->p;
805 	bool rhlist = ht->rhlist;
806 
807 	if (!tbl)
808 		return NULL;
809 
810 	for (; iter->slot < tbl->size; iter->slot++) {
811 		int skip = iter->skip;
812 
813 		rht_for_each_rcu(p, tbl, iter->slot) {
814 			if (rhlist) {
815 				list = container_of(p, struct rhlist_head,
816 						    rhead);
817 				do {
818 					if (!skip)
819 						goto next;
820 					skip--;
821 					list = rcu_dereference(list->next);
822 				} while (list);
823 
824 				continue;
825 			}
826 			if (!skip)
827 				break;
828 			skip--;
829 		}
830 
831 next:
832 		if (!rht_is_a_nulls(p)) {
833 			iter->skip++;
834 			iter->p = p;
835 			iter->list = list;
836 			return rht_obj(ht, rhlist ? &list->rhead : p);
837 		}
838 
839 		iter->skip = 0;
840 	}
841 
842 	iter->p = NULL;
843 
844 	/* Ensure we see any new tables. */
845 	smp_rmb();
846 
847 	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
848 	if (iter->walker.tbl) {
849 		iter->slot = 0;
850 		iter->skip = 0;
851 		return ERR_PTR(-EAGAIN);
852 	} else {
853 		iter->end_of_table = true;
854 	}
855 
856 	return NULL;
857 }
858 
859 /**
860  * rhashtable_walk_next - Return the next object and advance the iterator
861  * @iter:	Hash table iterator
862  *
863  * Note that you must call rhashtable_walk_stop when you are finished
864  * with the walk.
865  *
866  * Returns the next object or NULL when the end of the table is reached.
867  *
868  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
869  * will rewind back to the beginning and you may continue to use it.
870  */
871 void *rhashtable_walk_next(struct rhashtable_iter *iter)
872 {
873 	struct rhlist_head *list = iter->list;
874 	struct rhashtable *ht = iter->ht;
875 	struct rhash_head *p = iter->p;
876 	bool rhlist = ht->rhlist;
877 
878 	if (p) {
879 		if (!rhlist || !(list = rcu_dereference(list->next))) {
880 			p = rcu_dereference(p->next);
881 			list = container_of(p, struct rhlist_head, rhead);
882 		}
883 		if (!rht_is_a_nulls(p)) {
884 			iter->skip++;
885 			iter->p = p;
886 			iter->list = list;
887 			return rht_obj(ht, rhlist ? &list->rhead : p);
888 		}
889 
890 		/* At the end of this slot, switch to next one and then find
891 		 * next entry from that point.
892 		 */
893 		iter->skip = 0;
894 		iter->slot++;
895 	}
896 
897 	return __rhashtable_walk_find_next(iter);
898 }
899 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
900 
901 /**
902  * rhashtable_walk_peek - Return the next object but don't advance the iterator
903  * @iter:	Hash table iterator
904  *
905  * Returns the next object or NULL when the end of the table is reached.
906  *
907  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
908  * will rewind back to the beginning and you may continue to use it.
909  */
910 void *rhashtable_walk_peek(struct rhashtable_iter *iter)
911 {
912 	struct rhlist_head *list = iter->list;
913 	struct rhashtable *ht = iter->ht;
914 	struct rhash_head *p = iter->p;
915 
916 	if (p)
917 		return rht_obj(ht, ht->rhlist ? &list->rhead : p);
918 
919 	/* No object found in current iter, find next one in the table. */
920 
921 	if (iter->skip) {
922 		/* A nonzero skip value points to the next entry in the table
923 		 * beyond the last one that was found. Decrement skip so
924 		 * we find the current value. __rhashtable_walk_find_next
925 		 * will restore the original value of skip assuming that
926 		 * the table hasn't changed.
927 		 */
928 		iter->skip--;
929 	}
930 
931 	return __rhashtable_walk_find_next(iter);
932 }
933 EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
934 
935 /**
936  * rhashtable_walk_stop - Finish a hash table walk
937  * @iter:	Hash table iterator
938  *
939  * Finish a hash table walk.  Does not reset the iterator to the start of the
940  * hash table.
941  */
942 void rhashtable_walk_stop(struct rhashtable_iter *iter)
943 	__releases(RCU)
944 {
945 	struct rhashtable *ht;
946 	struct bucket_table *tbl = iter->walker.tbl;
947 
948 	if (!tbl)
949 		goto out;
950 
951 	ht = iter->ht;
952 
953 	spin_lock(&ht->lock);
954 	if (tbl->rehash < tbl->size)
955 		list_add(&iter->walker.list, &tbl->walkers);
956 	else
957 		iter->walker.tbl = NULL;
958 	spin_unlock(&ht->lock);
959 
960 out:
961 	rcu_read_unlock();
962 }
963 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
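/*
 * Typical walker usage (a sketch only; "struct test_obj" and "my_ht" are
 * hypothetical and not defined in this file):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	 resize: iterator was rewound
 *			break;
 *		}
 *		 ... use obj; it is only protected by RCU here ...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */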
964 
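/*
 * E.g. a nelem_hint of 100 gives roundup_pow_of_two(100 * 4 / 3) = 256
 * buckets, keeping the hinted number of elements below the 75% grow
 * threshold.
 */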
965 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
966 {
967 	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
968 		   (unsigned long)params->min_size);
969 }
970 
971 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
972 {
973 	return jhash2(key, length, seed);
974 }
975 
976 /**
977  * rhashtable_init - initialize a new hash table
978  * @ht:		hash table to be initialized
979  * @params:	configuration parameters
980  *
981  * Initializes a new hash table based on the provided configuration
982  * parameters. A table can be configured either with a variable or
983  * fixed length key:
984  *
985  * Configuration Example 1: Fixed length keys
986  * struct test_obj {
987  *	int			key;
988  *	void *			my_member;
989  *	struct rhash_head	node;
990  * };
991  *
992  * struct rhashtable_params params = {
993  *	.head_offset = offsetof(struct test_obj, node),
994  *	.key_offset = offsetof(struct test_obj, key),
995  *	.key_len = sizeof(int),
996  *	.hashfn = jhash,
997  *	.nulls_base = (1U << RHT_BASE_SHIFT),
998  * };
999  *
1000  * Configuration Example 2: Variable length keys
1001  * struct test_obj {
1002  *	[...]
1003  *	struct rhash_head	node;
1004  * };
1005  *
1006  * u32 my_hash_fn(const void *data, u32 len, u32 seed)
1007  * {
1008  *	struct test_obj *obj = data;
1009  *
1010  *	return [... hash ...];
1011  * }
1012  *
1013  * struct rhashtable_params params = {
1014  *	.head_offset = offsetof(struct test_obj, node),
1015  *	.hashfn = jhash,
1016  *	.obj_hashfn = my_hash_fn,
1017  * };
1018  */
1019 int rhashtable_init(struct rhashtable *ht,
1020 		    const struct rhashtable_params *params)
1021 {
1022 	struct bucket_table *tbl;
1023 	size_t size;
1024 
1025 	size = HASH_DEFAULT_SIZE;
1026 
1027 	if ((!params->key_len && !params->obj_hashfn) ||
1028 	    (params->obj_hashfn && !params->obj_cmpfn))
1029 		return -EINVAL;
1030 
1031 	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
1032 		return -EINVAL;
1033 
1034 	memset(ht, 0, sizeof(*ht));
1035 	mutex_init(&ht->mutex);
1036 	spin_lock_init(&ht->lock);
1037 	memcpy(&ht->p, params, sizeof(*params));
1038 
1039 	if (params->min_size)
1040 		ht->p.min_size = roundup_pow_of_two(params->min_size);
1041 
1042 	/* Cap total entries at 2^31 to avoid nelems overflow. */
1043 	ht->max_elems = 1u << 31;
1044 
1045 	if (params->max_size) {
1046 		ht->p.max_size = rounddown_pow_of_two(params->max_size);
1047 		if (ht->p.max_size < ht->max_elems / 2)
1048 			ht->max_elems = ht->p.max_size * 2;
1049 	}
1050 
1051 	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1052 
1053 	if (params->nelem_hint)
1054 		size = rounded_hashtable_size(&ht->p);
1055 
1056 	if (params->locks_mul)
1057 		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
1058 	else
1059 		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
1060 
1061 	ht->key_len = ht->p.key_len;
1062 	if (!params->hashfn) {
1063 		ht->p.hashfn = jhash;
1064 
1065 		if (!(ht->key_len & (sizeof(u32) - 1))) {
1066 			ht->key_len /= sizeof(u32);
1067 			ht->p.hashfn = rhashtable_jhash2;
1068 		}
1069 	}
1070 
1071 	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
1072 	if (tbl == NULL)
1073 		return -ENOMEM;
1074 
1075 	atomic_set(&ht->nelems, 0);
1076 
1077 	RCU_INIT_POINTER(ht->tbl, tbl);
1078 
1079 	INIT_WORK(&ht->run_work, rht_deferred_worker);
1080 
1081 	return 0;
1082 }
1083 EXPORT_SYMBOL_GPL(rhashtable_init);
1084 
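/*
 * An rhltable is an rhashtable in which objects embed a struct rhlist_head
 * instead of a struct rhash_head, so several objects may share one key:
 * duplicates are chained off the bucket entry via rhlist_head.next.
 */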
1085 /**
1086  * rhltable_init - initialize a new hash list table
1087  * @hlt:	hash list table to be initialized
1088  * @params:	configuration parameters
1089  *
1090  * Initializes a new hash list table.
1091  *
1092  * See documentation for rhashtable_init.
1093  */
1094 int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
1095 {
1096 	int err;
1097 
1098 	/* No rhlist NULLs marking for now. */
1099 	if (params->nulls_base)
1100 		return -EINVAL;
1101 
1102 	err = rhashtable_init(&hlt->ht, params);
1103 	hlt->ht.rhlist = true;
1104 	return err;
1105 }
1106 EXPORT_SYMBOL_GPL(rhltable_init);
1107 
1108 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
1109 				void (*free_fn)(void *ptr, void *arg),
1110 				void *arg)
1111 {
1112 	struct rhlist_head *list;
1113 
1114 	if (!ht->rhlist) {
1115 		free_fn(rht_obj(ht, obj), arg);
1116 		return;
1117 	}
1118 
1119 	list = container_of(obj, struct rhlist_head, rhead);
1120 	do {
1121 		obj = &list->rhead;
1122 		list = rht_dereference(list->next, ht);
1123 		free_fn(rht_obj(ht, obj), arg);
1124 	} while (list);
1125 }
1126 
1127 /**
1128  * rhashtable_free_and_destroy - free elements and destroy hash table
1129  * @ht:		the hash table to destroy
1130  * @free_fn:	callback to release resources of element
1131  * @arg:	pointer passed to free_fn
1132  *
1133  * Stops any pending async resize. If defined, invokes free_fn for each
1134  * element to release its resources. Please note that RCU protected
1135  * readers may still be accessing the elements. Releasing of resources
1136  * must occur in a compatible manner. Then frees the bucket array.
1137  *
1138  * This function will eventually sleep to wait for an async resize
1139  * to complete. The caller is responsible for ensuring that no further
1140  * write operations occur in parallel.
1141  */
1142 void rhashtable_free_and_destroy(struct rhashtable *ht,
1143 				 void (*free_fn)(void *ptr, void *arg),
1144 				 void *arg)
1145 {
1146 	struct bucket_table *tbl;
1147 	unsigned int i;
1148 
1149 	cancel_work_sync(&ht->run_work);
1150 
1151 	mutex_lock(&ht->mutex);
1152 	tbl = rht_dereference(ht->tbl, ht);
1153 	if (free_fn) {
1154 		for (i = 0; i < tbl->size; i++) {
1155 			struct rhash_head *pos, *next;
1156 
1157 			cond_resched();
1158 			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
1159 			     next = !rht_is_a_nulls(pos) ?
1160 					rht_dereference(pos->next, ht) : NULL;
1161 			     !rht_is_a_nulls(pos);
1162 			     pos = next,
1163 			     next = !rht_is_a_nulls(pos) ?
1164 					rht_dereference(pos->next, ht) : NULL)
1165 				rhashtable_free_one(ht, pos, free_fn, arg);
1166 		}
1167 	}
1168 
1169 	bucket_table_free(tbl);
1170 	mutex_unlock(&ht->mutex);
1171 }
1172 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
1173 
1174 void rhashtable_destroy(struct rhashtable *ht)
1175 {
1176 	return rhashtable_free_and_destroy(ht, NULL, NULL);
1177 }
1178 EXPORT_SYMBOL_GPL(rhashtable_destroy);
1179 
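/*
 * Look up the bucket head for @hash in a nested table: the low tbl->nest
 * bits index the top level, then 'shift' bits per further level.  If an
 * intermediate table has not been allocated, the address of a static
 * nulls bucket is returned, so the caller simply sees an empty bucket.
 */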
1180 struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
1181 					    unsigned int hash)
1182 {
1183 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1184 	static struct rhash_head __rcu *rhnull =
1185 		(struct rhash_head __rcu *)NULLS_MARKER(0);
1186 	unsigned int index = hash & ((1 << tbl->nest) - 1);
1187 	unsigned int size = tbl->size >> tbl->nest;
1188 	unsigned int subhash = hash;
1189 	union nested_table *ntbl;
1190 
1191 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
1192 	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
1193 	subhash >>= tbl->nest;
1194 
1195 	while (ntbl && size > (1 << shift)) {
1196 		index = subhash & ((1 << shift) - 1);
1197 		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
1198 						  tbl, hash);
1199 		size >>= shift;
1200 		subhash >>= shift;
1201 	}
1202 
1203 	if (!ntbl)
1204 		return &rhnull;
1205 
1206 	return &ntbl[subhash].bucket;
1207 
1208 }
1209 EXPORT_SYMBOL_GPL(rht_bucket_nested);
1210 
1211 struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
1212 						   struct bucket_table *tbl,
1213 						   unsigned int hash)
1214 {
1215 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1216 	unsigned int index = hash & ((1 << tbl->nest) - 1);
1217 	unsigned int size = tbl->size >> tbl->nest;
1218 	union nested_table *ntbl;
1219 	unsigned int shifted;
1220 	unsigned int nhash;
1221 
1222 	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
1223 	hash >>= tbl->nest;
1224 	nhash = index;
1225 	shifted = tbl->nest;
1226 	ntbl = nested_table_alloc(ht, &ntbl[index].table,
1227 				  size <= (1 << shift) ? shifted : 0, nhash);
1228 
1229 	while (ntbl && size > (1 << shift)) {
1230 		index = hash & ((1 << shift) - 1);
1231 		size >>= shift;
1232 		hash >>= shift;
1233 		nhash |= index << shifted;
1234 		shifted += shift;
1235 		ntbl = nested_table_alloc(ht, &ntbl[index].table,
1236 					  size <= (1 << shift) ? shifted : 0,
1237 					  nhash);
1238 	}
1239 
1240 	if (!ntbl)
1241 		return NULL;
1242 
1243 	return &ntbl[hash].bucket;
1244 
1245 }
1246 EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
1247