xref: /openbmc/linux/lib/rhashtable.c (revision 275876e2)
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
{
	return ht->p.mutex_is_held();
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 __hashfn(const struct rhashtable *ht, const void *key,
		      u32 len, u32 hsize)
{
	u32 h;

	h = ht->p.hashfn(key, len, ht->p.hash_rnd);

	return h & (hsize - 1);
}
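
/* Note: hsize is always a power of two here (initial sizes are rounded
 * up, expansion doubles, shrinking halves), so "h & (hsize - 1)" is
 * equivalent to "h % hsize" but avoids a division. For example, with
 * hsize = 64 the mask is 0x3f and the low six hash bits select the
 * bucket.
 */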

/**
 * rhashtable_hashfn - compute hash for key of given length
 * @ht:		hash table to compute the hash for
 * @key:	pointer to key
 * @len:	length of key
 *
 * Computes the hash value using the hash function provided in the 'hashfn'
 * of struct rhashtable_params. The returned value is guaranteed to be
 * smaller than the number of buckets in the hash table.
 */
u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	return __hashfn(ht, key, len, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_hashfn);

static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
{
	if (unlikely(!ht->p.key_len)) {
		u32 h;

		h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);

		return h & (hsize - 1);
	}

	return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
}

/**
 * rhashtable_obj_hashfn - compute hash for hashed object
 * @ht:		hash table to compute the hash for
 * @ptr:	pointer to hashed object
 *
 * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
 * on whether the hash table is set up to work with a fixed length key.
 * The returned value is guaranteed to be smaller than the number of
 * buckets in the hash table.
 */
u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	return obj_hashfn(ht, ptr, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct rhash_head *he, u32 hsize)
{
	return obj_hashfn(ht, rht_obj(ht, he), hsize);
}

static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
{
	struct bucket_table *tbl;
	size_t size;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, flags);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	return tbl;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	kvfree(tbl);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return ht->nelems > (new_size / 4 * 3);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return ht->nelems < (new_size * 3 / 10);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
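
/* These predicates are wired in via struct rhashtable_params. A minimal
 * sketch (hypothetical my_obj type and my_mutex_is_held() helper, shown
 * for illustration only):
 *
 *	static struct rhashtable_params my_params = {
 *		.nelem_hint	= 128,
 *		.head_offset	= offsetof(struct my_obj, node),
 *		.key_offset	= offsetof(struct my_obj, key),
 *		.key_len	= sizeof(u32),
 *		.hashfn		= arch_fast_hash,
 *		.mutex_is_held	= &my_mutex_is_held,
 *		.grow_decision	= rht_grow_above_75,
 *		.shrink_decision = rht_shrink_below_30,
 *	};
 *
 * With both decisions set, rhashtable_insert() and the remove functions
 * resize the table automatically around the 75%/30% load marks.
 */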

static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl, size_t n)
{
	struct rhash_head *he, *p, *next;
	unsigned int h;

	/* Old bucket empty, no work needed. */
	p = rht_dereference(old_tbl->buckets[n], ht);
	if (!p)
		return;

	/* Advance p one or more times until it reaches the last node
	 * of the initial run of entries that all hash to the same new
	 * bucket. The node after p is the first entry that hashes to
	 * a different new bucket, and becomes the new head of the old
	 * bucket.
	 */
	h = head_hashfn(ht, p, new_tbl->size);
	rht_for_each(he, p->next, ht) {
		if (head_hashfn(ht, he, new_tbl->size) != h)
			break;
		p = he;
	}
	RCU_INIT_POINTER(old_tbl->buckets[n], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node p, or NULL if no such node exists.
	 */
	next = NULL;
	if (he) {
		rht_for_each(he, he->next, ht) {
			if (head_hashfn(ht, he, new_tbl->size) == h) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	RCU_INIT_POINTER(p->next, next);
}
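
/* Illustration (not from the original source): suppose the table grows
 * from 2 to 4 buckets and old bucket 0 holds the zipped chain
 *
 *	A(0) -> B(0) -> C(2) -> D(0) -> E(2)
 *
 * where the number in parentheses is each entry's bucket in the new
 * table. One unzip pass advances the old bucket head past the run
 * A -> B, then links B directly to D (the next entry hashing to new
 * bucket 0), leaving C -> D -> E as the remaining chain in old bucket
 * 0. Repeated passes, each separated by a grace period in
 * rhashtable_expand(), fully separate the two chains.
 */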

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 * @flags:	allocation flags
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int i, h;
	bool complete;

	ASSERT_RHT_MUTEX(ht);

	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
		return 0;

	new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
	if (new_tbl == NULL)
		return -ENOMEM;

	ht->shift++;

	/* For each new bucket, search the corresponding old bucket
	 * for the first entry that hashes to the new bucket, and
	 * link the new bucket to that entry. Since all the entries
	 * which will end up in the new bucket appear in the same
	 * old bucket, this constructs an entirely valid new hash
	 * table, but with multiple buckets "zipped" together into a
	 * single imprecise chain.
	 */
	for (i = 0; i < new_tbl->size; i++) {
		h = i & (old_tbl->size - 1);
		rht_for_each(he, old_tbl->buckets[h], ht) {
			if (head_hashfn(ht, he, new_tbl->size) == i) {
				RCU_INIT_POINTER(new_tbl->buckets[i], he);
				break;
			}
		}
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	do {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (i = 0; i < old_tbl->size; i++) {
			hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
			if (old_tbl->buckets[i] != NULL)
				complete = false;
		}
	} while (!complete);

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 * @flags:	allocation flags
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
{
	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev;
	unsigned int i;

	ASSERT_RHT_MUTEX(ht);

	if (tbl->size <= HASH_MIN_SIZE)
		return 0;

	ntbl = bucket_table_alloc(tbl->size / 2, flags);
	if (ntbl == NULL)
		return -ENOMEM;

	ht->shift--;

	/* Link each bucket in the new table to the first bucket
	 * in the old table that contains entries which will hash
	 * to the new bucket.
	 */
	for (i = 0; i < ntbl->size; i++) {
		ntbl->buckets[i] = tbl->buckets[i];

		/* Walk to the end of the chain and append the chain
		 * of the second old bucket that maps to this new
		 * bucket, i.e. old bucket i + ntbl->size.
		 */
		for (pprev = &ntbl->buckets[i]; *pprev != NULL;
		     pprev = &rht_dereference(*pprev, ht)->next)
			;
		RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, ntbl);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
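
/* Illustration (not from the original source): shrinking from 8 to 4
 * buckets folds pairs of old buckets together, since masking with the
 * smaller size maps both old bucket i and old bucket i + 4 to new
 * bucket i. New bucket 1, for example, starts with old chain 1 and has
 * old chain 5 appended to its tail before the new table is published.
 */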

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @flags:	allocation flags (table expansion)
 *
 * Will automatically grow the table via rhashtable_expand() if the
 * grow_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
		       gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
	u32 hash;

	ASSERT_RHT_MUTEX(ht);

	hash = head_hashfn(ht, obj, tbl->size);
	RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
	rcu_assign_pointer(tbl->buckets[hash], obj);
	ht->nelems++;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
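
/* A minimal insertion sketch (hypothetical my_obj, my_table and
 * my_mutex, shown for illustration only). The caller serializes
 * mutations, e.g. with the mutex that mutex_is_held() reports on:
 *
 *	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = key;
 *	mutex_lock(&my_mutex);
 *	rhashtable_insert(&my_table, &obj->node, GFP_KERNEL);
 *	mutex_unlock(&my_mutex);
 */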

/**
 * rhashtable_remove_pprev - remove object from hash table given previous element
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @pprev:	pointer to previous element
 * @flags:	allocation flags (table expansion)
 *
 * Identical to rhashtable_remove() but the caller is already aware of the
 * element in front of the element to be deleted. This is particularly
 * useful for deletion when combined with walking or lookup.
 */
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
			     struct rhash_head __rcu **pprev, gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	RCU_INIT_POINTER(*pprev, obj->next);
	ht->nelems--;

	if (ht->p.shrink_decision &&
	    ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
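
/* rhashtable_remove() below demonstrates the intended pattern: walk the
 * bucket chain while keeping pprev pointing at the link that leads to
 * the current element, then hand that link to this function on a match.
 */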

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @flags:	allocation flags (table expansion)
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
		       gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	u32 h;

	ASSERT_RHT_MUTEX(ht);

	h = head_hashfn(ht, obj, tbl->size);

	pprev = &tbl->buckets[h];
	rht_for_each(he, tbl->buckets[h], ht) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rhashtable_remove_pprev(ht, he, pprev, flags);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed length key hash tables
 * (key_len parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 */
void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
{
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct rhash_head *he;
	u32 h;

	BUG_ON(!ht->p.key_len);

	h = __hashfn(ht, key, ht->p.key_len, tbl->size);
	rht_for_each_rcu(he, tbl->buckets[h], ht) {
		if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
			   ht->p.key_len))
			continue;
		return (void *) he - ht->p.head_offset;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
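
/* A minimal lookup sketch (hypothetical my_obj, my_table and
 * use_object(), shown for illustration only). The RCU read lock pins
 * the object only for the duration of the critical section:
 *
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_table, &key);
 *	if (obj)
 *		use_object(obj);
 *	rcu_read_unlock();
 */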

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @hash:	hash value of desired entry
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct rhash_head *he;

	if (unlikely(hash >= tbl->size))
		return NULL;

	rht_for_each_rcu(he, tbl->buckets[hash], ht) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		return (void *) he - ht->p.head_offset;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
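
/* A compare-function sketch (hypothetical my_obj and my_table, shown
 * for illustration only). The callback receives the object itself, not
 * the rhash_head, and rhashtable_hashfn() yields a suitable bucket
 * index for the hash argument:
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		struct my_obj *obj = ptr;
 *		u32 *key = arg;
 *
 *		return obj->key == *key;
 *	}
 *
 *	obj = rhashtable_lookup_compare(&my_table,
 *					rhashtable_hashfn(&my_table, &key,
 *							  sizeof(key)),
 *					my_cmp, &key);
 */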

static size_t rounded_hashtable_size(unsigned int nelem)
{
	return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
}
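
/* For example, a nelem_hint of 100 gives 100 * 4 / 3 = 133 buckets,
 * rounded up to the next power of two, 256, keeping the initial load
 * factor at or below 75%.
 */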

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = arch_fast_hash,
 *	.mutex_is_held = &my_mutex_is_held,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = arch_fast_hash,
 *	.obj_hashfn = my_hash_fn,
 *	.mutex_is_held = &my_mutex_is_held,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params->nelem_hint);

	tbl = bucket_table_alloc(size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	memset(ht, 0, sizeof(*ht));
	ht->shift = ilog2(tbl->size);
	memcpy(&ht->p, params, sizeof(*params));
	RCU_INIT_POINTER(ht->tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array.
 */
void rhashtable_destroy(const struct rhashtable *ht)
{
	const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

	bucket_table_free(tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
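
/* Note that rhashtable_destroy() frees only the bucket array, not the
 * objects themselves. A caller-side teardown sketch (hypothetical
 * my_obj and my_table, for illustration only), mirroring the error
 * path of the self test below and safe once mutations are excluded:
 *
 *	struct bucket_table *tbl = rht_dereference(my_table.tbl, &my_table);
 *	struct my_obj *obj, *next;
 *	unsigned int i;
 *
 *	for (i = 0; i < tbl->size; i++)
 *		rht_for_each_entry_safe(obj, next, tbl->buckets[i],
 *					&my_table, node)
 *			kfree(obj);
 *	rhashtable_destroy(&my_table);
 */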

/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

static int test_mutex_is_held(void)
{
	return 1;
}

struct test_obj {
	void			*ptr;
	int			value;
	struct rhash_head	node;
};

static int __init test_rht_lookup(struct rhashtable *ht)
{
	unsigned int i;

	for (i = 0; i < TEST_ENTRIES * 2; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		u32 key = i;

		obj = rhashtable_lookup(ht, &key);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->ptr != TEST_PTR || obj->value != i) {
				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
					obj->ptr, TEST_PTR, obj->value, i);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void test_bucket_stats(struct rhashtable *ht,
			      struct bucket_table *tbl,
			      bool quiet)
{
	unsigned int cnt, i, total = 0;
	struct test_obj *obj;

	for (i = 0; i < tbl->size; i++) {
		cnt = 0;

		if (!quiet)
			pr_info(" [%#4x/%zu]", i, tbl->size);

		rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
			cnt++;
			total++;
			if (!quiet)
				pr_cont(" [%p],", obj);
		}

		if (!quiet)
			pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
				i, tbl->buckets[i], cnt);
	}

	pr_info("  Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
		total, ht->nelems, TEST_ENTRIES);
}

static int __init test_rhashtable(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct test_obj *obj, *next;
	int err;
	unsigned int i;

	/*
	 * Insertion Test:
	 * Insert TEST_ENTRIES into table with all keys even numbers
	 */
	pr_info("  Adding %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		struct test_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			err = -ENOMEM;
			goto error;
		}

		obj->ptr = TEST_PTR;
		obj->value = i * 2;

		rhashtable_insert(ht, &obj->node, GFP_KERNEL);
	}

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	test_bucket_stats(ht, tbl, true);
	test_rht_lookup(ht);
	rcu_read_unlock();

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table expansion iteration %u...\n", i);
		rhashtable_expand(ht, GFP_KERNEL);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table shrinkage iteration %u...\n", i);
		rhashtable_shrink(ht, GFP_KERNEL);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	pr_info("  Deleting %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		u32 key = i * 2;

		obj = rhashtable_lookup(ht, &key);
		BUG_ON(!obj);

		rhashtable_remove(ht, &obj->node, GFP_KERNEL);
		kfree(obj);
	}

	return 0;

error:
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
			kfree(obj);

	return err;
}

static int __init test_rht_init(void)
{
	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint = TEST_HT_SIZE,
		.head_offset = offsetof(struct test_obj, node),
		.key_offset = offsetof(struct test_obj, value),
		.key_len = sizeof(int),
		.hashfn = arch_fast_hash,
		.mutex_is_held = &test_mutex_is_held,
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	int err;

	pr_info("Running resizable hashtable tests...\n");

	err = rhashtable_init(&ht, &params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		return err;
	}

	err = test_rhashtable(&ht);

	rhashtable_destroy(&ht);

	return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */