xref: /openbmc/linux/lib/rhashtable.c (revision 174cd4b1)
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

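/* A nested bucket table is a page-sized tree: an interior page holds
 * pointers to deeper pages (->table), while a leaf page holds the bucket
 * chain heads themselves (->bucket).  Which member of the union is live
 * is implied by the remaining depth rather than stored explicitly.
 */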
union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (tbl->nest)
		size = min(size, 1U << tbl->nest);

	if (sizeof(spinlock_t) != 0) {
		tbl->locks = NULL;
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
#endif
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;

		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
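
/* Worked example of the sizing above (an illustration, not extra logic):
 * on a 16-CPU machine with the default locks_mul of 32, size starts at
 * roundup_pow_of_two(16 * 32) = 512.  A table of 256 buckets then clamps
 * it to 256 >> 1 = 128 locks, i.e. one lock per two buckets, and
 * locks_mask becomes 127 so a bucket's lock is locks[hash & 127].
 */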

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      unsigned int shifted,
					      unsigned int nhash)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && shifted) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
					    (i << shifted) | nhash);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				0, 0)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}
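
/* Worked example of the nest computation (illustrative only): with 4 KiB
 * pages and 8-byte pointers, shift is 12 - 3 = 9, so each page holds 512
 * entries.  For nbuckets = 2^18, nest = (18 - 1) % 9 + 1 = 9, giving a
 * 512-entry top level over one layer of 512-entry leaf pages; the
 * remaining 18 - 9 = 9 hash bits index the bucket within a leaf.
 */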

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);

	size = nbuckets;

	if (tbl == NULL && gfp != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}
	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to the smallest size that would
 * not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by
 * per-bucket locks, as well as concurrent RCU protected lookups and
 * traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = ht->elasticity;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head))))
			continue;

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
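
/* A minimal usage sketch of the walker API above ("struct my_obj" and
 * process() are hypothetical; error handling is trimmed).  A resize may
 * interrupt the walk at any time; rhashtable_walk_next then returns
 * -EAGAIN with the iterator rewound, so the walk simply continues from
 * the start of the new table:
 *
 *	struct rhashtable_iter iter;
 *	struct my_obj *obj;
 *
 *	rhashtable_walk_enter(ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		process(obj);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */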

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
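
/* Worked example for the helper above (illustration only): with
 * nelem_hint = 100 and min_size = 64, 100 * 4 / 3 = 133 rounds up to a
 * 256-bucket initial table, keeping the expected load below the 75%
 * grow threshold enforced by rht_grow_above_75().
 */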

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
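
/* A minimal end-to-end sketch using the fixed-length-key configuration
 * from Example 1 above together with the fast-path helpers declared in
 * <linux/rhashtable.h>.  "my_ht" is a hypothetical struct rhashtable and
 * error handling is elided for brevity:
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	struct test_obj *found;
 *	int key = 42;
 *
 *	rhashtable_init(&my_ht, &params);
 *	obj->key = key;
 *	rhashtable_insert_fast(&my_ht, &obj->node, params);
 *	found = rhashtable_lookup_fast(&my_ht, &key, params);
 *	rhashtable_remove_fast(&my_ht, &obj->node, params);
 */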

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);
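
/* A brief sketch of the rhlist flavour, which allows several objects to
 * share one key.  Objects embed a struct rhlist_head instead of a
 * struct rhash_head; "my_hlt", "my_obj", its "list_node" member, "key"
 * and process() are all hypothetical:
 *
 *	struct rhlist_head *list, *pos;
 *	struct my_obj *obj;
 *
 *	rhltable_insert(&my_hlt, &obj->list_node, params);
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&my_hlt, &key, params);
 *	rhl_for_each_entry_rcu(obj, pos, list, list_node)
 *		process(obj);	(visits every object sharing this key)
 *	rcu_read_unlock();
 */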

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops any pending async resize. If free_fn is defined, it is invoked
 * for each element to release its resources. Please note that RCU
 * protected readers may still be accessing the elements; releasing of
 * resources must occur in a compatible manner. The bucket array is then
 * freed.
 *
 * This function may sleep to wait for an async resize to complete. The
 * caller is responsible for ensuring that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
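
/* A typical free_fn is trivial: the ptr argument already points at the
 * element itself (rht_obj() has been applied), so kmalloc'd objects can
 * simply be freed.  "my_ht" is hypothetical:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&my_ht, free_obj, NULL);
 */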

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
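
/* Worked lookup through a nested table (illustration only; 64-bit with
 * 4 KiB pages, so shift = 9): for tbl->size = 2^18 and nest = 9, the low
 * 9 bits of the hash pick one of 512 top-level slots and the next 9 bits
 * index the bucket within that leaf page; a deeper tree would consume
 * further 9-bit slices of the hash the same way.
 */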

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;
	unsigned int shifted;
	unsigned int nhash;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	nhash = index;
	shifted = tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift) ? shifted : 0, nhash);

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		nhash |= index << shifted;
		shifted += shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift) ? shifted : 0,
					  nhash);
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
1179