1 /* 2 * Resizable, Scalable, Concurrent Hash Table 3 * 4 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> 5 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 12 /************************************************************************** 13 * Self Test 14 **************************************************************************/ 15 16 #include <linux/init.h> 17 #include <linux/jhash.h> 18 #include <linux/kernel.h> 19 #include <linux/kthread.h> 20 #include <linux/module.h> 21 #include <linux/rcupdate.h> 22 #include <linux/rhashtable.h> 23 #include <linux/slab.h> 24 #include <linux/sched.h> 25 #include <linux/random.h> 26 #include <linux/vmalloc.h> 27 #include <linux/wait.h> 28 29 #define MAX_ENTRIES 1000000 30 #define TEST_INSERT_FAIL INT_MAX 31 32 static int parm_entries = 50000; 33 module_param(parm_entries, int, 0); 34 MODULE_PARM_DESC(parm_entries, "Number of entries to add (default: 50000)"); 35 36 static int runs = 4; 37 module_param(runs, int, 0); 38 MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)"); 39 40 static int max_size = 0; 41 module_param(max_size, int, 0); 42 MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)"); 43 44 static bool shrinking = false; 45 module_param(shrinking, bool, 0); 46 MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)"); 47 48 static int size = 8; 49 module_param(size, int, 0); 50 MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)"); 51 52 static int tcount = 10; 53 module_param(tcount, int, 0); 54 MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)"); 55 56 static bool enomem_retry = false; 57 module_param(enomem_retry, bool, 0); 58 MODULE_PARM_DESC(enomem_retry, "Retry insert even if -ENOMEM was returned (default: off)"); 59 60 struct test_obj_val { 61 int id; 62 int tid; 63 }; 64 65 struct test_obj { 66 struct test_obj_val value; 67 struct rhash_head node; 68 }; 69 70 struct test_obj_rhl { 71 struct test_obj_val value; 72 struct rhlist_head list_node; 73 }; 74 75 struct thread_data { 76 unsigned int entries; 77 int id; 78 struct task_struct *task; 79 struct test_obj *objs; 80 }; 81 82 static u32 my_hashfn(const void *data, u32 len, u32 seed) 83 { 84 const struct test_obj_rhl *obj = data; 85 86 return (obj->value.id % 10); 87 } 88 89 static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj) 90 { 91 const struct test_obj_rhl *test_obj = obj; 92 const struct test_obj_val *val = arg->key; 93 94 return test_obj->value.id - val->id; 95 } 96 97 static struct rhashtable_params test_rht_params = { 98 .head_offset = offsetof(struct test_obj, node), 99 .key_offset = offsetof(struct test_obj, value), 100 .key_len = sizeof(struct test_obj_val), 101 .hashfn = jhash, 102 }; 103 104 static struct rhashtable_params test_rht_params_dup = { 105 .head_offset = offsetof(struct test_obj_rhl, list_node), 106 .key_offset = offsetof(struct test_obj_rhl, value), 107 .key_len = sizeof(struct test_obj_val), 108 .hashfn = jhash, 109 .obj_hashfn = my_hashfn, 110 .obj_cmpfn = my_cmpfn, 111 .nelem_hint = 128, 112 .automatic_shrinking = false, 113 }; 114 115 static atomic_t startup_count; 116 static DECLARE_WAIT_QUEUE_HEAD(startup_wait); 117 118 static int insert_retry(struct rhashtable *ht, struct test_obj *obj, 119 const struct rhashtable_params params) 120 { 121 int err, retries = -1, enomem_retries = 0; 122 123 do { 124 retries++; 125 cond_resched(); 126 err = rhashtable_insert_fast(ht, &obj->node, params); 127 if (err == -ENOMEM && enomem_retry) { 128 enomem_retries++; 129 err = -EBUSY; 130 } 131 } while (err == -EBUSY); 132 133 if (enomem_retries) 134 pr_info(" %u insertions retried after -ENOMEM\n", 135 enomem_retries); 136 137 return err ? : retries; 138 } 139 140 static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array, 141 unsigned int entries) 142 { 143 unsigned int i; 144 145 for (i = 0; i < entries; i++) { 146 struct test_obj *obj; 147 bool expected = !(i % 2); 148 struct test_obj_val key = { 149 .id = i, 150 }; 151 152 if (array[i / 2].value.id == TEST_INSERT_FAIL) 153 expected = false; 154 155 obj = rhashtable_lookup_fast(ht, &key, test_rht_params); 156 157 if (expected && !obj) { 158 pr_warn("Test failed: Could not find key %u\n", key.id); 159 return -ENOENT; 160 } else if (!expected && obj) { 161 pr_warn("Test failed: Unexpected entry found for key %u\n", 162 key.id); 163 return -EEXIST; 164 } else if (expected && obj) { 165 if (obj->value.id != i) { 166 pr_warn("Test failed: Lookup value mismatch %u!=%u\n", 167 obj->value.id, i); 168 return -EINVAL; 169 } 170 } 171 172 cond_resched_rcu(); 173 } 174 175 return 0; 176 } 177 178 static void test_bucket_stats(struct rhashtable *ht, unsigned int entries) 179 { 180 unsigned int total = 0, chain_len = 0; 181 struct rhashtable_iter hti; 182 struct rhash_head *pos; 183 184 rhashtable_walk_enter(ht, &hti); 185 rhashtable_walk_start(&hti); 186 187 while ((pos = rhashtable_walk_next(&hti))) { 188 if (PTR_ERR(pos) == -EAGAIN) { 189 pr_info("Info: encountered resize\n"); 190 chain_len++; 191 continue; 192 } else if (IS_ERR(pos)) { 193 pr_warn("Test failed: rhashtable_walk_next() error: %ld\n", 194 PTR_ERR(pos)); 195 break; 196 } 197 198 total++; 199 } 200 201 rhashtable_walk_stop(&hti); 202 rhashtable_walk_exit(&hti); 203 204 pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n", 205 total, atomic_read(&ht->nelems), entries, chain_len); 206 207 if (total != atomic_read(&ht->nelems) || total != entries) 208 pr_warn("Test failed: Total count mismatch ^^^"); 209 } 210 211 static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array, 212 unsigned int entries) 213 { 214 struct test_obj *obj; 215 int err; 216 unsigned int i, insert_retries = 0; 217 s64 start, end; 218 219 /* 220 * Insertion Test: 221 * Insert entries into table with all keys even numbers 222 */ 223 pr_info(" Adding %d keys\n", entries); 224 start = ktime_get_ns(); 225 for (i = 0; i < entries; i++) { 226 struct test_obj *obj = &array[i]; 227 228 obj->value.id = i * 2; 229 err = insert_retry(ht, obj, test_rht_params); 230 if (err > 0) 231 insert_retries += err; 232 else if (err) 233 return err; 234 } 235 236 if (insert_retries) 237 pr_info(" %u insertions retried due to memory pressure\n", 238 insert_retries); 239 240 test_bucket_stats(ht, entries); 241 rcu_read_lock(); 242 test_rht_lookup(ht, array, entries); 243 rcu_read_unlock(); 244 245 test_bucket_stats(ht, entries); 246 247 pr_info(" Deleting %d keys\n", entries); 248 for (i = 0; i < entries; i++) { 249 struct test_obj_val key = { 250 .id = i * 2, 251 }; 252 253 if (array[i].value.id != TEST_INSERT_FAIL) { 254 obj = rhashtable_lookup_fast(ht, &key, test_rht_params); 255 BUG_ON(!obj); 256 257 rhashtable_remove_fast(ht, &obj->node, test_rht_params); 258 } 259 260 cond_resched(); 261 } 262 263 end = ktime_get_ns(); 264 pr_info(" Duration of test: %lld ns\n", end - start); 265 266 return end - start; 267 } 268 269 static struct rhashtable ht; 270 static struct rhltable rhlt; 271 272 static int __init test_rhltable(unsigned int entries) 273 { 274 struct test_obj_rhl *rhl_test_objects; 275 unsigned long *obj_in_table; 276 unsigned int i, j, k; 277 int ret, err; 278 279 if (entries == 0) 280 entries = 1; 281 282 rhl_test_objects = vzalloc(array_size(entries, 283 sizeof(*rhl_test_objects))); 284 if (!rhl_test_objects) 285 return -ENOMEM; 286 287 ret = -ENOMEM; 288 obj_in_table = vzalloc(array_size(sizeof(unsigned long), 289 BITS_TO_LONGS(entries))); 290 if (!obj_in_table) 291 goto out_free; 292 293 err = rhltable_init(&rhlt, &test_rht_params); 294 if (WARN_ON(err)) 295 goto out_free; 296 297 k = prandom_u32(); 298 ret = 0; 299 for (i = 0; i < entries; i++) { 300 rhl_test_objects[i].value.id = k; 301 err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, 302 test_rht_params); 303 if (WARN(err, "error %d on element %d\n", err, i)) 304 break; 305 if (err == 0) 306 set_bit(i, obj_in_table); 307 } 308 309 if (err) 310 ret = err; 311 312 pr_info("test %d add/delete pairs into rhlist\n", entries); 313 for (i = 0; i < entries; i++) { 314 struct rhlist_head *h, *pos; 315 struct test_obj_rhl *obj; 316 struct test_obj_val key = { 317 .id = k, 318 }; 319 bool found; 320 321 rcu_read_lock(); 322 h = rhltable_lookup(&rhlt, &key, test_rht_params); 323 if (WARN(!h, "key not found during iteration %d of %d", i, entries)) { 324 rcu_read_unlock(); 325 break; 326 } 327 328 if (i) { 329 j = i - 1; 330 rhl_for_each_entry_rcu(obj, pos, h, list_node) { 331 if (WARN(pos == &rhl_test_objects[j].list_node, "old element found, should be gone")) 332 break; 333 } 334 } 335 336 cond_resched_rcu(); 337 338 found = false; 339 340 rhl_for_each_entry_rcu(obj, pos, h, list_node) { 341 if (pos == &rhl_test_objects[i].list_node) { 342 found = true; 343 break; 344 } 345 } 346 347 rcu_read_unlock(); 348 349 if (WARN(!found, "element %d not found", i)) 350 break; 351 352 err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); 353 WARN(err, "rhltable_remove: err %d for iteration %d\n", err, i); 354 if (err == 0) 355 clear_bit(i, obj_in_table); 356 } 357 358 if (ret == 0 && err) 359 ret = err; 360 361 for (i = 0; i < entries; i++) { 362 WARN(test_bit(i, obj_in_table), "elem %d allegedly still present", i); 363 364 err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, 365 test_rht_params); 366 if (WARN(err, "error %d on element %d\n", err, i)) 367 break; 368 if (err == 0) 369 set_bit(i, obj_in_table); 370 } 371 372 pr_info("test %d random rhlist add/delete operations\n", entries); 373 for (j = 0; j < entries; j++) { 374 u32 i = prandom_u32_max(entries); 375 u32 prand = prandom_u32(); 376 377 cond_resched(); 378 379 if (prand == 0) 380 prand = prandom_u32(); 381 382 if (prand & 1) { 383 prand >>= 1; 384 continue; 385 } 386 387 err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); 388 if (test_bit(i, obj_in_table)) { 389 clear_bit(i, obj_in_table); 390 if (WARN(err, "cannot remove element at slot %d", i)) 391 continue; 392 } else { 393 if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d", 394 i, err, -ENOENT)) 395 continue; 396 } 397 398 if (prand & 1) { 399 prand >>= 1; 400 continue; 401 } 402 403 err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); 404 if (err == 0) { 405 if (WARN(test_and_set_bit(i, obj_in_table), "succeeded to insert same object %d", i)) 406 continue; 407 } else { 408 if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i)) 409 continue; 410 } 411 412 if (prand & 1) { 413 prand >>= 1; 414 continue; 415 } 416 417 i = prandom_u32_max(entries); 418 if (test_bit(i, obj_in_table)) { 419 err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); 420 WARN(err, "cannot remove element at slot %d", i); 421 if (err == 0) 422 clear_bit(i, obj_in_table); 423 } else { 424 err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); 425 WARN(err, "failed to insert object %d", i); 426 if (err == 0) 427 set_bit(i, obj_in_table); 428 } 429 } 430 431 for (i = 0; i < entries; i++) { 432 cond_resched(); 433 err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); 434 if (test_bit(i, obj_in_table)) { 435 if (WARN(err, "cannot remove element at slot %d", i)) 436 continue; 437 } else { 438 if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d", 439 err, -ENOENT)) 440 continue; 441 } 442 } 443 444 rhltable_destroy(&rhlt); 445 out_free: 446 vfree(rhl_test_objects); 447 vfree(obj_in_table); 448 return ret; 449 } 450 451 static int __init test_rhashtable_max(struct test_obj *array, 452 unsigned int entries) 453 { 454 unsigned int i, insert_retries = 0; 455 int err; 456 457 test_rht_params.max_size = roundup_pow_of_two(entries / 8); 458 err = rhashtable_init(&ht, &test_rht_params); 459 if (err) 460 return err; 461 462 for (i = 0; i < ht.max_elems; i++) { 463 struct test_obj *obj = &array[i]; 464 465 obj->value.id = i * 2; 466 err = insert_retry(&ht, obj, test_rht_params); 467 if (err > 0) 468 insert_retries += err; 469 else if (err) 470 return err; 471 } 472 473 err = insert_retry(&ht, &array[ht.max_elems], test_rht_params); 474 if (err == -E2BIG) { 475 err = 0; 476 } else { 477 pr_info("insert element %u should have failed with %d, got %d\n", 478 ht.max_elems, -E2BIG, err); 479 if (err == 0) 480 err = -1; 481 } 482 483 rhashtable_destroy(&ht); 484 485 return err; 486 } 487 488 static unsigned int __init print_ht(struct rhltable *rhlt) 489 { 490 struct rhashtable *ht; 491 const struct bucket_table *tbl; 492 char buff[512] = ""; 493 unsigned int i, cnt = 0; 494 495 ht = &rhlt->ht; 496 /* Take the mutex to avoid RCU warning */ 497 mutex_lock(&ht->mutex); 498 tbl = rht_dereference(ht->tbl, ht); 499 for (i = 0; i < tbl->size; i++) { 500 struct rhash_head *pos, *next; 501 struct test_obj_rhl *p; 502 503 pos = rht_dereference(tbl->buckets[i], ht); 504 next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; 505 506 if (!rht_is_a_nulls(pos)) { 507 sprintf(buff, "%s\nbucket[%d] -> ", buff, i); 508 } 509 510 while (!rht_is_a_nulls(pos)) { 511 struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead); 512 sprintf(buff, "%s[[", buff); 513 do { 514 pos = &list->rhead; 515 list = rht_dereference(list->next, ht); 516 p = rht_obj(ht, pos); 517 518 sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid, 519 list? ", " : " "); 520 cnt++; 521 } while (list); 522 523 pos = next, 524 next = !rht_is_a_nulls(pos) ? 525 rht_dereference(pos->next, ht) : NULL; 526 527 sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : ""); 528 } 529 } 530 printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff); 531 mutex_unlock(&ht->mutex); 532 533 return cnt; 534 } 535 536 static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, 537 int cnt, bool slow) 538 { 539 struct rhltable *rhlt; 540 unsigned int i, ret; 541 const char *key; 542 int err = 0; 543 544 rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL); 545 if (WARN_ON(!rhlt)) 546 return -EINVAL; 547 548 err = rhltable_init(rhlt, &test_rht_params_dup); 549 if (WARN_ON(err)) { 550 kfree(rhlt); 551 return err; 552 } 553 554 for (i = 0; i < cnt; i++) { 555 rhl_test_objects[i].value.tid = i; 556 key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead); 557 key += test_rht_params_dup.key_offset; 558 559 if (slow) { 560 err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key, 561 &rhl_test_objects[i].list_node.rhead)); 562 if (err == -EAGAIN) 563 err = 0; 564 } else 565 err = rhltable_insert(rhlt, 566 &rhl_test_objects[i].list_node, 567 test_rht_params_dup); 568 if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) 569 goto skip_print; 570 } 571 572 ret = print_ht(rhlt); 573 WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); 574 575 skip_print: 576 rhltable_destroy(rhlt); 577 kfree(rhlt); 578 579 return 0; 580 } 581 582 static int __init test_insert_duplicates_run(void) 583 { 584 struct test_obj_rhl rhl_test_objects[3] = {}; 585 586 pr_info("test inserting duplicates\n"); 587 588 /* two different values that map to same bucket */ 589 rhl_test_objects[0].value.id = 1; 590 rhl_test_objects[1].value.id = 21; 591 592 /* and another duplicate with same as [0] value 593 * which will be second on the bucket list */ 594 rhl_test_objects[2].value.id = rhl_test_objects[0].value.id; 595 596 test_insert_dup(rhl_test_objects, 2, false); 597 test_insert_dup(rhl_test_objects, 3, false); 598 test_insert_dup(rhl_test_objects, 2, true); 599 test_insert_dup(rhl_test_objects, 3, true); 600 601 return 0; 602 } 603 604 static int thread_lookup_test(struct thread_data *tdata) 605 { 606 unsigned int entries = tdata->entries; 607 int i, err = 0; 608 609 for (i = 0; i < entries; i++) { 610 struct test_obj *obj; 611 struct test_obj_val key = { 612 .id = i, 613 .tid = tdata->id, 614 }; 615 616 obj = rhashtable_lookup_fast(&ht, &key, test_rht_params); 617 if (obj && (tdata->objs[i].value.id == TEST_INSERT_FAIL)) { 618 pr_err(" found unexpected object %d-%d\n", key.tid, key.id); 619 err++; 620 } else if (!obj && (tdata->objs[i].value.id != TEST_INSERT_FAIL)) { 621 pr_err(" object %d-%d not found!\n", key.tid, key.id); 622 err++; 623 } else if (obj && memcmp(&obj->value, &key, sizeof(key))) { 624 pr_err(" wrong object returned (got %d-%d, expected %d-%d)\n", 625 obj->value.tid, obj->value.id, key.tid, key.id); 626 err++; 627 } 628 629 cond_resched(); 630 } 631 return err; 632 } 633 634 static int threadfunc(void *data) 635 { 636 int i, step, err = 0, insert_retries = 0; 637 struct thread_data *tdata = data; 638 639 if (atomic_dec_and_test(&startup_count)) 640 wake_up(&startup_wait); 641 if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) { 642 pr_err(" thread[%d]: interrupted\n", tdata->id); 643 goto out; 644 } 645 646 for (i = 0; i < tdata->entries; i++) { 647 tdata->objs[i].value.id = i; 648 tdata->objs[i].value.tid = tdata->id; 649 err = insert_retry(&ht, &tdata->objs[i], test_rht_params); 650 if (err > 0) { 651 insert_retries += err; 652 } else if (err) { 653 pr_err(" thread[%d]: rhashtable_insert_fast failed\n", 654 tdata->id); 655 goto out; 656 } 657 } 658 if (insert_retries) 659 pr_info(" thread[%d]: %u insertions retried due to memory pressure\n", 660 tdata->id, insert_retries); 661 662 err = thread_lookup_test(tdata); 663 if (err) { 664 pr_err(" thread[%d]: rhashtable_lookup_test failed\n", 665 tdata->id); 666 goto out; 667 } 668 669 for (step = 10; step > 0; step--) { 670 for (i = 0; i < tdata->entries; i += step) { 671 if (tdata->objs[i].value.id == TEST_INSERT_FAIL) 672 continue; 673 err = rhashtable_remove_fast(&ht, &tdata->objs[i].node, 674 test_rht_params); 675 if (err) { 676 pr_err(" thread[%d]: rhashtable_remove_fast failed\n", 677 tdata->id); 678 goto out; 679 } 680 tdata->objs[i].value.id = TEST_INSERT_FAIL; 681 682 cond_resched(); 683 } 684 err = thread_lookup_test(tdata); 685 if (err) { 686 pr_err(" thread[%d]: rhashtable_lookup_test (2) failed\n", 687 tdata->id); 688 goto out; 689 } 690 } 691 out: 692 while (!kthread_should_stop()) { 693 set_current_state(TASK_INTERRUPTIBLE); 694 schedule(); 695 } 696 return err; 697 } 698 699 static int __init test_rht_init(void) 700 { 701 unsigned int entries; 702 int i, err, started_threads = 0, failed_threads = 0; 703 u64 total_time = 0; 704 struct thread_data *tdata; 705 struct test_obj *objs; 706 707 if (parm_entries < 0) 708 parm_entries = 1; 709 710 entries = min(parm_entries, MAX_ENTRIES); 711 712 test_rht_params.automatic_shrinking = shrinking; 713 test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries); 714 test_rht_params.nelem_hint = size; 715 716 objs = vzalloc(array_size(sizeof(struct test_obj), 717 test_rht_params.max_size + 1)); 718 if (!objs) 719 return -ENOMEM; 720 721 pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n", 722 size, max_size, shrinking); 723 724 for (i = 0; i < runs; i++) { 725 s64 time; 726 727 pr_info("Test %02d:\n", i); 728 memset(objs, 0, test_rht_params.max_size * sizeof(struct test_obj)); 729 730 err = rhashtable_init(&ht, &test_rht_params); 731 if (err < 0) { 732 pr_warn("Test failed: Unable to initialize hashtable: %d\n", 733 err); 734 continue; 735 } 736 737 time = test_rhashtable(&ht, objs, entries); 738 rhashtable_destroy(&ht); 739 if (time < 0) { 740 vfree(objs); 741 pr_warn("Test failed: return code %lld\n", time); 742 return -EINVAL; 743 } 744 745 total_time += time; 746 } 747 748 pr_info("test if its possible to exceed max_size %d: %s\n", 749 test_rht_params.max_size, test_rhashtable_max(objs, entries) == 0 ? 750 "no, ok" : "YES, failed"); 751 vfree(objs); 752 753 do_div(total_time, runs); 754 pr_info("Average test time: %llu\n", total_time); 755 756 test_insert_duplicates_run(); 757 758 if (!tcount) 759 return 0; 760 761 pr_info("Testing concurrent rhashtable access from %d threads\n", 762 tcount); 763 atomic_set(&startup_count, tcount); 764 tdata = vzalloc(array_size(tcount, sizeof(struct thread_data))); 765 if (!tdata) 766 return -ENOMEM; 767 objs = vzalloc(array3_size(sizeof(struct test_obj), tcount, entries)); 768 if (!objs) { 769 vfree(tdata); 770 return -ENOMEM; 771 } 772 773 test_rht_params.max_size = max_size ? : 774 roundup_pow_of_two(tcount * entries); 775 err = rhashtable_init(&ht, &test_rht_params); 776 if (err < 0) { 777 pr_warn("Test failed: Unable to initialize hashtable: %d\n", 778 err); 779 vfree(tdata); 780 vfree(objs); 781 return -EINVAL; 782 } 783 for (i = 0; i < tcount; i++) { 784 tdata[i].id = i; 785 tdata[i].entries = entries; 786 tdata[i].objs = objs + i * entries; 787 tdata[i].task = kthread_run(threadfunc, &tdata[i], 788 "rhashtable_thrad[%d]", i); 789 if (IS_ERR(tdata[i].task)) { 790 pr_err(" kthread_run failed for thread %d\n", i); 791 atomic_dec(&startup_count); 792 } else { 793 started_threads++; 794 } 795 } 796 if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0)) 797 pr_err(" wait_event interruptible failed\n"); 798 /* count is 0 now, set it to -1 and wake up all threads together */ 799 atomic_dec(&startup_count); 800 wake_up_all(&startup_wait); 801 for (i = 0; i < tcount; i++) { 802 if (IS_ERR(tdata[i].task)) 803 continue; 804 if ((err = kthread_stop(tdata[i].task))) { 805 pr_warn("Test failed: thread %d returned: %d\n", 806 i, err); 807 failed_threads++; 808 } 809 } 810 rhashtable_destroy(&ht); 811 vfree(tdata); 812 vfree(objs); 813 814 /* 815 * rhltable_remove is very expensive, default values can cause test 816 * to run for 2 minutes or more, use a smaller number instead. 817 */ 818 err = test_rhltable(entries / 16); 819 pr_info("Started %d threads, %d failed, rhltable test returns %d\n", 820 started_threads, failed_threads, err); 821 return 0; 822 } 823 824 static void __exit test_rht_exit(void) 825 { 826 } 827 828 module_init(test_rht_init); 829 module_exit(test_rht_exit); 830 831 MODULE_LICENSE("GPL v2"); 832