// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 */

/**************************************************************************
 * Self Test
 **************************************************************************/
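
/*
 * Typical usage (the module file name is assumed to be test_rhashtable.ko;
 * all parameters are optional and documented below):
 *
 *   insmod test_rhashtable.ko parm_entries=10000 runs=2 tcount=4
 *
 * Results are reported through printk, so check the kernel log afterwards.
 */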

#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#define MAX_ENTRIES	1000000
#define TEST_INSERT_FAIL INT_MAX
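
/*
 * An object's value.id is set to TEST_INSERT_FAIL when it is not (or no
 * longer) expected to be in the table, so the lookup helpers know which
 * keys must miss.
 */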

static int parm_entries = 50000;
module_param(parm_entries, int, 0);
MODULE_PARM_DESC(parm_entries, "Number of entries to add (default: 50000)");

static int runs = 4;
module_param(runs, int, 0);
MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");

static int max_size = 0;
module_param(max_size, int, 0);
MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)");

static bool shrinking = false;
module_param(shrinking, bool, 0);
MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)");

static int size = 8;
module_param(size, int, 0);
MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");

static int tcount = 10;
module_param(tcount, int, 0);
MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");

static bool enomem_retry = false;
module_param(enomem_retry, bool, 0);
MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)");

struct test_obj_val {
	int	id;
	int	tid;
};

struct test_obj {
	struct test_obj_val	value;
	struct rhash_head	node;
};

struct test_obj_rhl {
	struct test_obj_val	value;
	struct rhlist_head	list_node;
};

struct thread_data {
	unsigned int entries;
	int id;
	struct task_struct *task;
	struct test_obj *objs;
};

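/*
 * Hash and compare callbacks for the duplicate-insertion test: the hash
 * only uses value.id modulo 10, so at most ten distinct hash values exist
 * and collisions are guaranteed, and the compare also ignores value.tid,
 * making objects that differ only in tid duplicates of the same key.
 */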
static u32 my_hashfn(const void *data, u32 len, u32 seed)
{
	const struct test_obj_rhl *obj = data;

	return (obj->value.id % 10);
}

static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct test_obj_rhl *test_obj = obj;
	const struct test_obj_val *val = arg->key;

	return test_obj->value.id - val->id;
}

static struct rhashtable_params test_rht_params = {
	.head_offset = offsetof(struct test_obj, node),
	.key_offset = offsetof(struct test_obj, value),
	.key_len = sizeof(struct test_obj_val),
	.hashfn = jhash,
};

static struct rhashtable_params test_rht_params_dup = {
	.head_offset = offsetof(struct test_obj_rhl, list_node),
	.key_offset = offsetof(struct test_obj_rhl, value),
	.key_len = sizeof(struct test_obj_val),
	.hashfn = jhash,
	.obj_hashfn = my_hashfn,
	.obj_cmpfn = my_cmpfn,
	.nelem_hint = 128,
	.automatic_shrinking = false,
};

static atomic_t startup_count;
static DECLARE_WAIT_QUEUE_HEAD(startup_wait);

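/*
 * Insert one object, retrying as long as the table reports -EBUSY (a
 * resize is in progress).  With enomem_retry set, -ENOMEM is retried too.
 * Returns the number of retries on success or a negative errno on failure.
 */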
static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
                        const struct rhashtable_params params)
{
	int err, retries = -1, enomem_retries = 0;

	do {
		retries++;
		cond_resched();
		err = rhashtable_insert_fast(ht, &obj->node, params);
		if (err == -ENOMEM && enomem_retry) {
			enomem_retries++;
			err = -EBUSY;
		}
	} while (err == -EBUSY);

	if (enomem_retries)
		pr_info(" %u insertions retried after -ENOMEM\n",
			enomem_retries);

	return err ? : retries;
}

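/*
 * Look up every key from 0 to entries - 1.  Only even ids were inserted
 * (the insert loop stores i * 2), so odd keys are expected to miss.  The
 * caller holds rcu_read_lock() so that cond_resched_rcu() can safely drop
 * and re-take it.
 */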
static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array,
				  unsigned int entries)
{
	unsigned int i;

	for (i = 0; i < entries; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		struct test_obj_val key = {
			.id = i,
		};

		if (array[i / 2].value.id == TEST_INSERT_FAIL)
			expected = false;

		obj = rhashtable_lookup_fast(ht, &key, test_rht_params);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key.id);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key.id);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->value.id != i) {
				pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
					obj->value.id, i);
				return -EINVAL;
			}
		}

		cond_resched_rcu();
	}

	return 0;
}

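/*
 * Walk the whole table with the rhashtable_walk API and count the entries
 * seen.  -EAGAIN from rhashtable_walk_next() means the walk raced with a
 * resize; the number of such events is reported as "table-jumps".  The
 * total must match both ht->nelems and the expected number of entries.
 */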
static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
{
	unsigned int total = 0, chain_len = 0;
	struct rhashtable_iter hti;
	struct rhash_head *pos;

	rhashtable_walk_enter(ht, &hti);
	rhashtable_walk_start(&hti);

	while ((pos = rhashtable_walk_next(&hti))) {
		if (PTR_ERR(pos) == -EAGAIN) {
			pr_info("Info: encountered resize\n");
			chain_len++;
			continue;
		} else if (IS_ERR(pos)) {
			pr_warn("Test failed: rhashtable_walk_next() error: %ld\n",
				PTR_ERR(pos));
			break;
		}

		total++;
	}

	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",
		total, atomic_read(&ht->nelems), entries, chain_len);

	if (total != atomic_read(&ht->nelems) || total != entries)
		pr_warn("Test failed: Total count mismatch ^^^\n");
}

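/*
 * One timed pass of the plain rhashtable test: insert 'entries' objects
 * with even keys, verify lookups and the walker, then remove everything
 * again.  Returns the elapsed time in nanoseconds, or a negative errno if
 * an insertion failed.
 */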
static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array,
				  unsigned int entries)
{
	struct test_obj *obj;
	int err;
	unsigned int i, insert_retries = 0;
	s64 start, end;

	/*
	 * Insertion Test:
	 * Insert 'entries' objects into the table, using only even keys
	 */
	pr_info("  Adding %d keys\n", entries);
	start = ktime_get_ns();
	for (i = 0; i < entries; i++) {
		struct test_obj *obj = &array[i];

		obj->value.id = i * 2;
		err = insert_retry(ht, obj, test_rht_params);
		if (err > 0)
			insert_retries += err;
		else if (err)
			return err;
	}

	if (insert_retries)
		pr_info("  %u insertions retried due to memory pressure\n",
			insert_retries);

	test_bucket_stats(ht, entries);
	rcu_read_lock();
	test_rht_lookup(ht, array, entries);
	rcu_read_unlock();

	test_bucket_stats(ht, entries);

	pr_info("  Deleting %d keys\n", entries);
	for (i = 0; i < entries; i++) {
		struct test_obj_val key = {
			.id = i * 2,
		};

		if (array[i].value.id != TEST_INSERT_FAIL) {
			obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
			BUG_ON(!obj);

			rhashtable_remove_fast(ht, &obj->node, test_rht_params);
		}

		cond_resched();
	}

	end = ktime_get_ns();
	pr_info("  Duration of test: %lld ns\n", end - start);

	return end - start;
}

static struct rhashtable ht;
static struct rhltable rhlt;

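/*
 * rhlist (duplicate-key) test: every object gets the same random key, so
 * they all end up chained on a single rhlist entry.  The obj_in_table
 * bitmap tracks which objects are currently supposed to be in the table
 * while random add/remove operations are applied.
 */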
static int __init test_rhltable(unsigned int entries)
{
	struct test_obj_rhl *rhl_test_objects;
	unsigned long *obj_in_table;
	unsigned int i, j, k;
	int ret, err;

	if (entries == 0)
		entries = 1;

	rhl_test_objects = vzalloc(array_size(entries,
					      sizeof(*rhl_test_objects)));
	if (!rhl_test_objects)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_in_table = vzalloc(array_size(sizeof(unsigned long),
					  BITS_TO_LONGS(entries)));
	if (!obj_in_table)
		goto out_free;

	err = rhltable_init(&rhlt, &test_rht_params);
	if (WARN_ON(err))
		goto out_free;

	k = prandom_u32();
	ret = 0;
	for (i = 0; i < entries; i++) {
		rhl_test_objects[i].value.id = k;
		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		if (WARN(err, "error %d on element %d\n", err, i))
			break;
		if (err == 0)
			set_bit(i, obj_in_table);
	}

	if (err)
		ret = err;

	pr_info("test %d add/delete pairs into rhlist\n", entries);
	for (i = 0; i < entries; i++) {
		struct rhlist_head *h, *pos;
		struct test_obj_rhl *obj;
		struct test_obj_val key = {
			.id = k,
		};
		bool found;

		rcu_read_lock();
		h = rhltable_lookup(&rhlt, &key, test_rht_params);
		if (WARN(!h, "key not found during iteration %d of %d", i, entries)) {
			rcu_read_unlock();
			break;
		}

		if (i) {
			j = i - 1;
			rhl_for_each_entry_rcu(obj, pos, h, list_node) {
				if (WARN(pos == &rhl_test_objects[j].list_node, "old element found, should be gone"))
					break;
			}
		}

		cond_resched_rcu();

		found = false;

		rhl_for_each_entry_rcu(obj, pos, h, list_node) {
			if (pos == &rhl_test_objects[i].list_node) {
				found = true;
				break;
			}
		}

		rcu_read_unlock();

		if (WARN(!found, "element %d not found", i))
			break;

		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		WARN(err, "rhltable_remove: err %d for iteration %d\n", err, i);
		if (err == 0)
			clear_bit(i, obj_in_table);
	}

	if (ret == 0 && err)
		ret = err;

	for (i = 0; i < entries; i++) {
		WARN(test_bit(i, obj_in_table), "elem %d allegedly still present", i);

		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node,
				      test_rht_params);
		if (WARN(err, "error %d on element %d\n", err, i))
			break;
		if (err == 0)
			set_bit(i, obj_in_table);
	}

	pr_info("test %d random rhlist add/delete operations\n", entries);
	for (j = 0; j < entries; j++) {
		u32 i = prandom_u32_max(entries);
		u32 prand = prandom_u32();

		cond_resched();

		if (prand == 0)
			prand = prandom_u32();

		if (prand & 1) {
			prand >>= 1;
			continue;
		}

		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		if (test_bit(i, obj_in_table)) {
			clear_bit(i, obj_in_table);
			if (WARN(err, "cannot remove element at slot %d", i))
				continue;
		} else {
			if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d",
			     i, err, -ENOENT))
				continue;
		}

		if (prand & 1) {
			prand >>= 1;
			continue;
		}

		err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		if (err == 0) {
			if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i))
				continue;
		} else {
			if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i))
				continue;
		}

		if (prand & 1) {
			prand >>= 1;
			continue;
		}

		i = prandom_u32_max(entries);
		if (test_bit(i, obj_in_table)) {
			err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
			WARN(err, "cannot remove element at slot %d", i);
			if (err == 0)
				clear_bit(i, obj_in_table);
		} else {
			err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
			WARN(err, "failed to insert object %d", i);
			if (err == 0)
				set_bit(i, obj_in_table);
		}
	}

	for (i = 0; i < entries; i++) {
		cond_resched();
		err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
		if (test_bit(i, obj_in_table)) {
			if (WARN(err, "cannot remove element at slot %d", i))
				continue;
		} else {
			if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d",
				 err, -ENOENT))
				continue;
		}
	}

	rhltable_destroy(&rhlt);
out_free:
	vfree(rhl_test_objects);
	vfree(obj_in_table);
	return ret;
}

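/*
 * max_size test: initialise the table with a small max_size, fill it up to
 * ht.max_elems, and verify that one further insertion is rejected with
 * -E2BIG.
 */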
static int __init test_rhashtable_max(struct test_obj *array,
				      unsigned int entries)
{
	unsigned int i, insert_retries = 0;
	int err;

	test_rht_params.max_size = roundup_pow_of_two(entries / 8);
	err = rhashtable_init(&ht, &test_rht_params);
	if (err)
		return err;

	for (i = 0; i < ht.max_elems; i++) {
		struct test_obj *obj = &array[i];

		obj->value.id = i * 2;
		err = insert_retry(&ht, obj, test_rht_params);
		if (err > 0)
			insert_retries += err;
		else if (err)
			return err;
	}

	err = insert_retry(&ht, &array[ht.max_elems], test_rht_params);
	if (err == -E2BIG) {
		err = 0;
	} else {
		pr_info("insert element %u should have failed with %d, got %d\n",
				ht.max_elems, -E2BIG, err);
		if (err == 0)
			err = -1;
	}

	rhashtable_destroy(&ht);

	return err;
}

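/*
 * Dump the bucket chains of an rhltable into a string buffer and return
 * the number of elements seen.  ht->mutex is held so the non-RCU
 * dereferences below do not trigger RCU/lockdep warnings.
 */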
static unsigned int __init print_ht(struct rhltable *rhlt)
{
	struct rhashtable *ht;
	const struct bucket_table *tbl;
	char buff[512] = "";
	int offset = 0;
	unsigned int i, cnt = 0;

	ht = &rhlt->ht;
	/* Take the mutex to avoid RCU warning */
	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++) {
		struct rhash_head *pos, *next;
		struct test_obj_rhl *p;

		pos = rht_ptr_exclusive(tbl->buckets + i);
		next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;

		if (!rht_is_a_nulls(pos)) {
			offset += sprintf(buff + offset, "\nbucket[%d] -> ", i);
		}

		while (!rht_is_a_nulls(pos)) {
			struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
			offset += sprintf(buff + offset, "[[");
			do {
				pos = &list->rhead;
				list = rht_dereference(list->next, ht);
				p = rht_obj(ht, pos);

				offset += sprintf(buff + offset, " val %d (tid=%d)%s", p->value.id, p->value.tid,
					list? ", " : " ");
				cnt++;
			} while (list);
			pos = next;
			next = !rht_is_a_nulls(pos) ?
				rht_dereference(pos->next, ht) : NULL;

			offset += sprintf(buff + offset, "]]%s", !rht_is_a_nulls(pos) ? " -> " : "");
		}
	}
	printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
	mutex_unlock(&ht->mutex);

	return cnt;
}

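/*
 * Insert cnt objects (which may share the same key) into a fresh rhltable,
 * either through rhltable_insert() or through the rhashtable_insert_slow()
 * path, and then use print_ht() to check that every duplicate is still
 * reachable.
 */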
static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
				  int cnt, bool slow)
{
	struct rhltable *rhlt;
	unsigned int i, ret;
	const char *key;
	int err = 0;

	rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
	if (WARN_ON(!rhlt))
		return -EINVAL;

	err = rhltable_init(rhlt, &test_rht_params_dup);
	if (WARN_ON(err)) {
		kfree(rhlt);
		return err;
	}

	for (i = 0; i < cnt; i++) {
		rhl_test_objects[i].value.tid = i;
		key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
		key += test_rht_params_dup.key_offset;

		if (slow) {
			err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
							     &rhl_test_objects[i].list_node.rhead));
			if (err == -EAGAIN)
				err = 0;
		} else
			err = rhltable_insert(rhlt,
					      &rhl_test_objects[i].list_node,
					      test_rht_params_dup);
		if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
			goto skip_print;
	}

	ret = print_ht(rhlt);
	WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");

skip_print:
	rhltable_destroy(rhlt);
	kfree(rhlt);

	return 0;
}

static int __init test_insert_duplicates_run(void)
{
	struct test_obj_rhl rhl_test_objects[3] = {};

	pr_info("test inserting duplicates\n");

	/* two different values that map to same bucket */
	rhl_test_objects[0].value.id = 1;
	rhl_test_objects[1].value.id = 21;

	/* and another duplicate with the same value as [0],
	 * which will be second on the bucket list */
	rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;

	test_insert_dup(rhl_test_objects, 2, false);
	test_insert_dup(rhl_test_objects, 3, false);
	test_insert_dup(rhl_test_objects, 2, true);
	test_insert_dup(rhl_test_objects, 3, true);

	return 0;
}

static int thread_lookup_test(struct thread_data *tdata)
{
	unsigned int entries = tdata->entries;
	int i, err = 0;

	for (i = 0; i < entries; i++) {
		struct test_obj *obj;
		struct test_obj_val key = {
			.id = i,
			.tid = tdata->id,
		};

		obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
		if (obj && (tdata->objs[i].value.id == TEST_INSERT_FAIL)) {
			pr_err("  found unexpected object %d-%d\n", key.tid, key.id);
			err++;
		} else if (!obj && (tdata->objs[i].value.id != TEST_INSERT_FAIL)) {
			pr_err("  object %d-%d not found!\n", key.tid, key.id);
			err++;
		} else if (obj && memcmp(&obj->value, &key, sizeof(key))) {
			pr_err("  wrong object returned (got %d-%d, expected %d-%d)\n",
			       obj->value.tid, obj->value.id, key.tid, key.id);
			err++;
		}

		cond_resched();
	}
	return err;
}

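/*
 * Worker thread for the concurrency test: wait until the main thread has
 * released all workers at once (startup_count reaching -1), insert this
 * thread's objects keyed by (id, tid), verify them, and then remove them
 * in several passes with decreasing stride, re-checking lookups after each
 * pass.
 */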
static int threadfunc(void *data)
{
	int i, step, err = 0, insert_retries = 0;
	struct thread_data *tdata = data;

	if (atomic_dec_and_test(&startup_count))
		wake_up(&startup_wait);
	if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) {
		pr_err("  thread[%d]: interrupted\n", tdata->id);
		goto out;
	}

	for (i = 0; i < tdata->entries; i++) {
		tdata->objs[i].value.id = i;
		tdata->objs[i].value.tid = tdata->id;
		err = insert_retry(&ht, &tdata->objs[i], test_rht_params);
		if (err > 0) {
			insert_retries += err;
		} else if (err) {
			pr_err("  thread[%d]: rhashtable_insert_fast failed\n",
			       tdata->id);
			goto out;
		}
	}
	if (insert_retries)
		pr_info("  thread[%d]: %u insertions retried due to memory pressure\n",
			tdata->id, insert_retries);

	err = thread_lookup_test(tdata);
	if (err) {
		pr_err("  thread[%d]: rhashtable_lookup_test failed\n",
		       tdata->id);
		goto out;
	}

	for (step = 10; step > 0; step--) {
		for (i = 0; i < tdata->entries; i += step) {
			if (tdata->objs[i].value.id == TEST_INSERT_FAIL)
				continue;
			err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
			                             test_rht_params);
			if (err) {
				pr_err("  thread[%d]: rhashtable_remove_fast failed\n",
				       tdata->id);
				goto out;
			}
			tdata->objs[i].value.id = TEST_INSERT_FAIL;

			cond_resched();
		}
		err = thread_lookup_test(tdata);
		if (err) {
			pr_err("  thread[%d]: rhashtable_lookup_test (2) failed\n",
			       tdata->id);
			goto out;
		}
	}
out:
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return err;
}

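/*
 * Module init: run the timed single-threaded passes, the max_size test and
 * the duplicate-insertion test, then spawn tcount threads for the
 * concurrent test and finish with the rhlist test.  Failures are reported
 * through the kernel log.
 */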
static int __init test_rht_init(void)
{
	unsigned int entries;
	int i, err, started_threads = 0, failed_threads = 0;
	u64 total_time = 0;
	struct thread_data *tdata;
	struct test_obj *objs;

	if (parm_entries < 0)
		parm_entries = 1;

	entries = min(parm_entries, MAX_ENTRIES);

	test_rht_params.automatic_shrinking = shrinking;
	test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries);
	test_rht_params.nelem_hint = size;

	objs = vzalloc(array_size(sizeof(struct test_obj),
				  test_rht_params.max_size + 1));
	if (!objs)
		return -ENOMEM;

	pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
		size, max_size, shrinking);

	for (i = 0; i < runs; i++) {
		s64 time;

		pr_info("Test %02d:\n", i);
		memset(objs, 0, test_rht_params.max_size * sizeof(struct test_obj));

		err = rhashtable_init(&ht, &test_rht_params);
		if (err < 0) {
			pr_warn("Test failed: Unable to initialize hashtable: %d\n",
				err);
			continue;
		}

		time = test_rhashtable(&ht, objs, entries);
		rhashtable_destroy(&ht);
		if (time < 0) {
			vfree(objs);
			pr_warn("Test failed: return code %lld\n", time);
			return -EINVAL;
		}

		total_time += time;
	}

	pr_info("test if it's possible to exceed max_size %d: %s\n",
			test_rht_params.max_size, test_rhashtable_max(objs, entries) == 0 ?
			"no, ok" : "YES, failed");
	vfree(objs);

	do_div(total_time, runs);
	pr_info("Average test time: %llu\n", total_time);

	test_insert_duplicates_run();

	if (!tcount)
		return 0;

	pr_info("Testing concurrent rhashtable access from %d threads\n",
	        tcount);
	atomic_set(&startup_count, tcount);
	tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
	if (!tdata)
		return -ENOMEM;
	objs  = vzalloc(array3_size(sizeof(struct test_obj), tcount, entries));
	if (!objs) {
		vfree(tdata);
		return -ENOMEM;
	}

	test_rht_params.max_size = max_size ? :
	                           roundup_pow_of_two(tcount * entries);
	err = rhashtable_init(&ht, &test_rht_params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		vfree(tdata);
		vfree(objs);
		return -EINVAL;
	}
	for (i = 0; i < tcount; i++) {
		tdata[i].id = i;
		tdata[i].entries = entries;
		tdata[i].objs = objs + i * entries;
		tdata[i].task = kthread_run(threadfunc, &tdata[i],
		                            "rhashtable_thread[%d]", i);
		if (IS_ERR(tdata[i].task)) {
			pr_err(" kthread_run failed for thread %d\n", i);
			atomic_dec(&startup_count);
		} else {
			started_threads++;
		}
	}
	if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0))
		pr_err("  wait_event interruptible failed\n");
	/* count is 0 now, set it to -1 and wake up all threads together */
	atomic_dec(&startup_count);
	wake_up_all(&startup_wait);
	for (i = 0; i < tcount; i++) {
		if (IS_ERR(tdata[i].task))
			continue;
		if ((err = kthread_stop(tdata[i].task))) {
			pr_warn("Test failed: thread %d returned: %d\n",
			        i, err);
			failed_threads++;
		}
	}
	rhashtable_destroy(&ht);
	vfree(tdata);
	vfree(objs);

	/*
	 * rhltable_remove is very expensive; the default values can cause the
	 * test to run for 2 minutes or more, so use a smaller number instead.
	 */
	err = test_rhltable(entries / 16);
	pr_info("Started %d threads, %d failed, rhltable test returns %d\n",
	        started_threads, failed_threads, err);
	return 0;
}

static void __exit test_rht_exit(void)
{
}

module_init(test_rht_init);
module_exit(test_rht_exit);

MODULE_LICENSE("GPL v2");