/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
{
	return ht->p.mutex_is_held();
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 __hashfn(const struct rhashtable *ht, const void *key,
		      u32 len, u32 hsize)
{
	u32 h;

	h = ht->p.hashfn(key, len, ht->p.hash_rnd);

	return h & (hsize - 1);
}

/**
 * rhashtable_hashfn - compute hash for key of given length
 * @ht:		hash table to compute for
 * @key:	pointer to key
 * @len:	length of key
 *
 * Computes the hash value using the hash function provided in the 'hashfn'
 * member of struct rhashtable_params. The returned value is guaranteed to
 * be smaller than the number of buckets in the hash table.
 */
u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	return __hashfn(ht, key, len, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_hashfn);

static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
{
	if (unlikely(!ht->p.key_len)) {
		u32 h;

		h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);

		return h & (hsize - 1);
	}

	return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
}

/**
 * rhashtable_obj_hashfn - compute hash for hashed object
 * @ht:		hash table to compute for
 * @ptr:	pointer to hashed object
 *
 * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
 * on whether the hash table is set up to work with a fixed length key. The
 * returned value is guaranteed to be smaller than the number of buckets in
 * the hash table.
 */
u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

	return obj_hashfn(ht, ptr, tbl->size);
}
EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct rhash_head *he, u32 hsize)
{
	return obj_hashfn(ht, rht_obj(ht, he), hsize);
}

static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
{
	struct bucket_table *tbl;
	size_t size;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, flags);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	return tbl;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	kvfree(tbl);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return ht->nelems > (new_size / 4 * 3);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return ht->nelems < (new_size * 3 / 10);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
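
/* An illustrative wiring sketch (hypothetical caller): the two decision
 * helpers above are meant to be plugged into struct rhashtable_params,
 * e.g.:
 *
 *	struct rhashtable_params params = {
 *		...
 *		.grow_decision		= rht_grow_above_75,
 *		.shrink_decision	= rht_shrink_below_30,
 *	};
 *
 * With a 64-bucket table, rht_grow_above_75() fires once nelems exceeds
 * 48 (64 / 4 * 3), and rht_shrink_below_30() fires once nelems drops
 * below 19 (64 * 3 / 10).
 */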

static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl, size_t n)
{
	struct rhash_head *he, *p, *next;
	unsigned int h;

	/* Old bucket empty, no work needed. */
	p = rht_dereference(old_tbl->buckets[n], ht);
	if (!p)
		return;

	/* Advance p along the old bucket's chain, past every node that
	 * hashes to the same new bucket as the chain head; p ends up
	 * at the last such node.
	 */
	h = head_hashfn(ht, p, new_tbl->size);
	rht_for_each(he, p->next, ht) {
		if (head_hashfn(ht, he, new_tbl->size) != h)
			break;
		p = he;
	}
	RCU_INIT_POINTER(old_tbl->buckets[n], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node p, or NULL if no such node exists.
	 */
	next = NULL;
	if (he) {
		rht_for_each(he, he->next, ht) {
			if (head_hashfn(ht, he, new_tbl->size) == h) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket
	 */
	RCU_INIT_POINTER(p->next, next);
}
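
/* A worked example of one unzip pass (illustrative): suppose old bucket 0
 * of a 2-bucket table is split into new buckets 0 and 2 of a 4-bucket
 * table, and its chain is A -> B -> C -> D, where A and C rehash to new
 * bucket 0 while B and D rehash to new bucket 2. After rhashtable_expand()
 * links the new buckets, both point into the zipped chain:
 *
 *	new[0]: A -> B -> C -> D
 *	new[2]: B -> C -> D
 *
 * One call to hashtable_chain_unzip() on old bucket 0 advances the old
 * bucket head past A to B and links A directly to C:
 *
 *	new[0]: A -> C -> D
 *	new[2]: B -> C -> D
 *
 * Repeated passes, each separated by a grace period, eventually leave
 * every chain holding only entries that hash to its own bucket, and the
 * old bucket drains to NULL.
 */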

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 * @flags:	allocation flags
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int i, h;
	bool complete;

	ASSERT_RHT_MUTEX(ht);

	if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
		return 0;

	new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
	if (new_tbl == NULL)
		return -ENOMEM;

	ht->shift++;

	/* For each new bucket, search the corresponding old bucket
	 * for the first entry that hashes to the new bucket, and
	 * link the new bucket to that entry. Since all the entries
	 * which will end up in the new bucket appear in the same
	 * old bucket, this constructs an entirely valid new hash
	 * table, but with multiple buckets "zipped" together into a
	 * single imprecise chain.
	 */
	for (i = 0; i < new_tbl->size; i++) {
		h = i & (old_tbl->size - 1);
		rht_for_each(he, old_tbl->buckets[h], ht) {
			if (head_hashfn(ht, he, new_tbl->size) == i) {
				RCU_INIT_POINTER(new_tbl->buckets[i], he);
				break;
			}
		}
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	do {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* Unzip one step for each bucket in the old table
		 * (each of which still holds entries belonging to
		 * multiple buckets of the new table), and check
		 * whether any old bucket still has entries left.
		 */
		complete = true;
		for (i = 0; i < old_tbl->size; i++) {
			hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
			if (old_tbl->buckets[i] != NULL)
				complete = false;
		}
	} while (!complete);

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
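
/* A minimal usage sketch (hypothetical caller; my_ht and my_table_mutex
 * are caller-defined): resizing must be serialized against other
 * mutations by the caller's own lock, i.e. the one reported by the
 * mutex_is_held() callback in struct rhashtable_params:
 *
 *	mutex_lock(&my_table_mutex);
 *	err = rhashtable_expand(&my_ht, GFP_KERNEL);
 *	mutex_unlock(&my_table_mutex);
 *
 * rhashtable_shrink() is called the same way. Both may invoke
 * synchronize_rcu() and therefore must not be called from atomic context.
 */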

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 * @flags:	allocation flags
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 */
int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
{
	struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev;
	unsigned int i;

	ASSERT_RHT_MUTEX(ht);

	if (ht->shift <= ht->p.min_shift)
		return 0;

	ntbl = bucket_table_alloc(tbl->size / 2, flags);
	if (ntbl == NULL)
		return -ENOMEM;

	ht->shift--;

	/* Each new bucket absorbs the two old buckets whose entries
	 * hash to it: old bucket i and old bucket i + ntbl->size.
	 */
	for (i = 0; i < ntbl->size; i++) {
		ntbl->buckets[i] = tbl->buckets[i];

		/* Walk to the end of the chain taken over from old
		 * bucket i, then append the chain of old bucket
		 * i + ntbl->size, whose entries also hash to new
		 * bucket i.
		 */
		for (pprev = &ntbl->buckets[i]; *pprev != NULL;
		     pprev = &rht_dereference(*pprev, ht)->next)
			;
		RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, ntbl);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @flags:	allocation flags (table expansion)
 *
 * Will automatically grow the table via rhashtable_expand() if the
 * grow_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
		       gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
	u32 hash;

	ASSERT_RHT_MUTEX(ht);

	hash = head_hashfn(ht, obj, tbl->size);
	RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
	rcu_assign_pointer(tbl->buckets[hash], obj);
	ht->nelems++;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
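
/* A minimal insertion sketch (hypothetical caller, reusing the
 * struct test_obj layout from the rhashtable_init() example below;
 * my_ht and my_table_mutex are caller-defined):
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 23;
 *	mutex_lock(&my_table_mutex);
 *	rhashtable_insert(&my_ht, &obj->node, GFP_KERNEL);
 *	mutex_unlock(&my_table_mutex);
 *
 * The object must stay alive until it has been removed again and an RCU
 * grace period has elapsed, since readers may still hold a reference.
 */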

/**
 * rhashtable_remove_pprev - remove object from hash table given previous element
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @pprev:	pointer to previous element
 * @flags:	allocation flags (table expansion)
 *
 * Identical to rhashtable_remove() but the caller is already aware of the
 * element in front of the element to be deleted. This is particularly
 * useful for deletion when combined with walking or lookup.
 */
void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
			     struct rhash_head __rcu **pprev, gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

	ASSERT_RHT_MUTEX(ht);

	RCU_INIT_POINTER(*pprev, obj->next);
	ht->nelems--;

	if (ht->p.shrink_decision &&
	    ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht, flags);
}
EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
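
/* A walk-and-delete sketch (hypothetical caller, should_delete() being a
 * caller-supplied predicate): while holding the table mutex, carry a
 * pprev cursor so a matching entry can be unlinked without re-walking
 * the chain, mirroring what rhashtable_remove() does below:
 *
 *	pprev = &tbl->buckets[h];
 *	rht_for_each(he, tbl->buckets[h], ht) {
 *		if (!should_delete(rht_obj(ht, he))) {
 *			pprev = &he->next;
 *			continue;
 *		}
 *		rhashtable_remove_pprev(ht, he, pprev, GFP_KERNEL);
 *		break;
 *	}
 */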

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @flags:	allocation flags (table expansion)
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
		       gfp_t flags)
{
	struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	u32 h;

	ASSERT_RHT_MUTEX(ht);

	h = head_hashfn(ht, obj, tbl->size);

	pprev = &tbl->buckets[h];
	rht_for_each(he, tbl->buckets[h], ht) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rhashtable_remove_pprev(ht, he, pprev, flags);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 */
void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
{
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct rhash_head *he;
	u32 h;

	BUG_ON(!ht->p.key_len);

	h = __hashfn(ht, key, ht->p.key_len, tbl->size);
	rht_for_each_rcu(he, tbl->buckets[h], ht) {
		if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
			   ht->p.key_len))
			continue;
		return rht_obj(ht, he);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
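
/* A minimal lookup sketch (hypothetical caller; my_ht is caller-defined):
 * lookups require only rcu_read_lock(), not the table mutex, and the
 * returned object may only be dereferenced inside the read-side critical
 * section:
 *
 *	u32 key = 23;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_ht, &key);
 *	if (obj)
 *		pr_info("found %d\n", obj->key);
 *	rcu_read_unlock();
 */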

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @hash:	hash value of desired entry
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hash mutations as long as the lookup is
 * guarded by rcu_read_lock(). The caller must take care of this.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct rhash_head *he;

	if (unlikely(hash >= tbl->size))
		return NULL;

	rht_for_each_rcu(he, tbl->buckets[hash], ht) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		return rht_obj(ht, he);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
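
/* A compare-function sketch (hypothetical; my_cmp and my_ht are
 * caller-defined): useful when equality cannot be expressed as a flat
 * memcmp() over key_len bytes:
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		struct test_obj *obj = ptr;
 *
 *		return obj->key == *(int *)arg;
 *	}
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_compare(&my_ht,
 *					rhashtable_hashfn(&my_ht, &key,
 *							  sizeof(key)),
 *					my_cmp, &key);
 *	rcu_read_unlock();
 *
 * rhashtable_hashfn() already reduces the hash to a bucket index, which
 * is what the 'hash' argument expects.
 */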

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}
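
/* Worked example: with nelem_hint = 100, the hint is scaled for a 75%
 * load factor, 100 * 4 / 3 = 133 slots, and rounded up to the next power
 * of two, giving 256 buckets (subject to the min_shift floor).
 */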

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = arch_fast_hash,
 *	.mutex_is_held = &my_mutex_is_held,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = arch_fast_hash,
 *	.obj_hashfn = my_hash_fn,
 *	.mutex_is_held = &my_mutex_is_held,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	tbl = bucket_table_alloc(size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	memset(ht, 0, sizeof(*ht));
	ht->shift = ilog2(tbl->size);
	memcpy(&ht->p, params, sizeof(*params));
	RCU_INIT_POINTER(ht->tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not RCU safe; the caller must
 * make sure that no resizing can happen, by unpublishing the hash table
 * and waiting for a quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(const struct rhashtable *ht)
{
	bucket_table_free(ht->tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
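
/* A teardown sketch (hypothetical caller; my_ht and my_table_mutex are
 * caller-defined): stop publishing the table to new users, free the
 * entries under the table mutex, wait for readers to drain, then release
 * the bucket array:
 *
 *	mutex_lock(&my_table_mutex);
 *	... unlink and free all objects, e.g. via rht_for_each_entry_safe() ...
 *	mutex_unlock(&my_table_mutex);
 *	synchronize_rcu();
 *	rhashtable_destroy(&my_ht);
 */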

/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

static int test_mutex_is_held(void)
{
	return 1;
}

struct test_obj {
	void			*ptr;
	int			value;
	struct rhash_head	node;
};

static int __init test_rht_lookup(struct rhashtable *ht)
{
	unsigned int i;

	for (i = 0; i < TEST_ENTRIES * 2; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		u32 key = i;

		obj = rhashtable_lookup(ht, &key);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->ptr != TEST_PTR || obj->value != i) {
				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
					obj->ptr, TEST_PTR, obj->value, i);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void test_bucket_stats(struct rhashtable *ht,
			      struct bucket_table *tbl,
			      bool quiet)
{
	unsigned int cnt, i, total = 0;
	struct test_obj *obj;

	for (i = 0; i < tbl->size; i++) {
		cnt = 0;

		if (!quiet)
			pr_info(" [%#4x/%zu]", i, tbl->size);

		rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
			cnt++;
			total++;
			if (!quiet)
				pr_cont(" [%p],", obj);
		}

		if (!quiet)
			pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
				i, tbl->buckets[i], cnt);
	}

	pr_info("  Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
		total, ht->nelems, TEST_ENTRIES);
}

static int __init test_rhashtable(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct test_obj *obj, *next;
	int err;
	unsigned int i;

	/*
	 * Insertion Test:
	 * Insert TEST_ENTRIES entries into the table; all keys are even
	 * numbers.
	 */
	pr_info("  Adding %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		struct test_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			err = -ENOMEM;
			goto error;
		}

		obj->ptr = TEST_PTR;
		obj->value = i * 2;

		rhashtable_insert(ht, &obj->node, GFP_KERNEL);
	}

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	test_bucket_stats(ht, tbl, true);
	test_rht_lookup(ht);
	rcu_read_unlock();

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table expansion iteration %u...\n", i);
		rhashtable_expand(ht, GFP_KERNEL);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table shrinkage iteration %u...\n", i);
		rhashtable_shrink(ht, GFP_KERNEL);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	pr_info("  Deleting %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		u32 key = i * 2;

		obj = rhashtable_lookup(ht, &key);
		BUG_ON(!obj);

		rhashtable_remove(ht, &obj->node, GFP_KERNEL);
		kfree(obj);
	}

	return 0;

error:
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
			kfree(obj);

	return err;
}

static int __init test_rht_init(void)
{
	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint = TEST_HT_SIZE,
		.head_offset = offsetof(struct test_obj, node),
		.key_offset = offsetof(struct test_obj, value),
		.key_len = sizeof(int),
		.hashfn = arch_fast_hash,
		.mutex_is_held = &test_mutex_is_held,
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	int err;

	pr_info("Running resizable hashtable tests...\n");

	err = rhashtable_init(&ht, &params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		return err;
	}

	err = test_rhashtable(&ht);

	rhashtable_destroy(&ht);

	return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */