// SPDX-License-Identifier: GPL-2.0+
/*
 * Common functions for in-kernel torture tests.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <linux/ktime.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include "rcu/rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

static bool disable_onoff_at_boot;
module_param(disable_onoff_at_boot, bool, 0444);

static bool ftrace_dump_at_shutdown;
module_param(ftrace_dump_at_shutdown, bool, 0444);

static char *torture_type;
static int verbose;

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with torture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Variables for online-offline handling.  Only present if CPU hotplug
 * is enabled; otherwise the online-offline machinery does nothing.
 */

static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
static torture_ofl_func *onoff_f;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;

/*
 * Attempt to take a CPU offline.  Return false if the CPU is already
 * offline or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
		     unsigned long *sum_offl, int *min_offl, int *max_offl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;
	if (num_online_cpus() <= 1)
		return false;  /* Can't offline the last CPU. */

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: offlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_offl_attempts)++;
	ret = remove_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_offl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offline %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offlined %d\n",
				 torture_type, cpu);
		if (onoff_f)
			onoff_f();
		(*n_offl_successes)++;
		delta = jiffies - starttime;
		*sum_offl += delta;
		if (*min_offl < 0) {
			*min_offl = delta;
			*max_offl = delta;
		}
		if (*min_offl > delta)
			*min_offl = delta;
		if (*max_offl < delta)
			*max_offl = delta;
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_offline);

/*
 * Attempt to bring a CPU online.  Return false if the CPU is already
 * online or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
		    unsigned long *sum_onl, int *min_onl, int *max_onl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: onlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_onl_attempts)++;
	ret = add_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_onl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: online %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: onlined %d\n",
				 torture_type, cpu);
		(*n_onl_successes)++;
		delta = jiffies - starttime;
		*sum_onl += delta;
		if (*min_onl < 0) {
			*min_onl = delta;
			*max_onl = delta;
		}
		if (*min_onl > delta)
			*min_onl = delta;
		if (*max_onl < delta)
			*max_onl = delta;
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_online);
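
/*
 * Illustrative sketch: clients normally let the torture_onoff() kthread
 * below drive torture_offline() and torture_online(), but nothing
 * prevents driving them directly.  The my_* statistics here are
 * hypothetical per-caller counters, not names from this file:
 *
 *	static long my_attempts, my_successes;
 *	static unsigned long my_sum;
 *	static int my_min = -1, my_max;
 *
 *	if (!torture_offline(cpu, &my_attempts, &my_successes,
 *			     &my_sum, &my_min, &my_max))
 *		pr_info("CPU %d already offline or not hotpluggable\n", cpu);
 */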

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
torture_onoff(void *arg)
{
	int cpu;
	int maxcpu = -1;
	DEFINE_TORTURE_RANDOM(rand);
	int ret;

	VERBOSE_TOROUT_STRING("torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (!IS_MODULE(CONFIG_TORTURE_TEST)) {
		for_each_possible_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			ret = add_cpu(cpu);
			if (ret && verbose) {
				pr_alert("%s" TORTURE_FLAG
					 "%s: Initial online %d: errno %d\n",
					 __func__, torture_type, cpu, ret);
			}
		}
	}

	if (maxcpu == 0) {
		VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
		goto stop;
	}

	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff);
		VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
	}
	while (!torture_must_stop()) {
		if (disable_onoff_at_boot && !rcu_inkernel_boot_has_ended()) {
			schedule_timeout_interruptible(HZ / 10);
			continue;
		}
		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
		if (!torture_offline(cpu,
				     &n_offline_attempts, &n_offline_successes,
				     &sum_offline, &min_offline, &max_offline))
			torture_online(cpu,
				       &n_online_attempts, &n_online_successes,
				       &sum_online, &min_online, &max_online);
		schedule_timeout_interruptible(onoff_interval);
	}

stop:
	torture_kthread_stopping("torture_onoff");
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Initiate online-offline handling.
 */
int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f)
{
#ifdef CONFIG_HOTPLUG_CPU
	onoff_holdoff = ooholdoff;
	onoff_interval = oointerval;
	onoff_f = f;
	if (onoff_interval <= 0)
		return 0;
	return torture_create_kthread(torture_onoff, NULL, onoff_task);
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return 0;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_init);
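
/*
 * Illustrative usage, with hypothetical values: request hotplug testing
 * every 10 seconds after a 30-second holdoff and with no post-offline
 * callback.  Both the holdoff and the interval are in jiffies, and an
 * interval <= 0 disables hotplug testing entirely:
 *
 *	ret = torture_onoff_init(30 * HZ, 10 * HZ, NULL);
 *	if (ret)
 *		return ret;
 */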

/*
 * Clean up after online/offline testing.
 */
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}

/*
 * Print online/offline testing statistics.
 */
void torture_onoff_stats(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		n_online_successes, n_online_attempts,
		n_offline_successes, n_offline_attempts,
		min_online, max_online,
		min_offline, max_offline,
		sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);

/*
 * Were all the online/offline operations successful?  Returns true if
 * any online or offline operation failed.
 */
bool torture_onoff_failures(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	return n_online_successes != n_online_attempts ||
	       n_offline_successes != n_offline_attempts;
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return false;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_failures);

#define TORTURE_RANDOM_MULT	39916801  /* prime */
#define TORTURE_RANDOM_ADD	479001701 /* prime */
#define TORTURE_RANDOM_REFRESH	10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from local_clock().
 */
unsigned long
torture_random(struct torture_random_state *trsp)
{
	if (--trsp->trs_count < 0) {
		trsp->trs_state += (unsigned long)local_clock();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
		TORTURE_RANDOM_ADD;
	return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);
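
/*
 * Illustrative usage: each kthread keeps its own state, typically via
 * DEFINE_TORTURE_RANDOM().  To pick a random CPU the way torture_onoff()
 * does above, discarding the weak low-order bits:
 *
 *	DEFINE_TORTURE_RANDOM(rand);
 *	int cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
 *
 * This generator is for load generation only, and is emphatically not
 * suitable for cryptographic use.
 */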

/*
 * Variables for shuffling.  The idea is to ensure that each CPU stays
 * idle for an extended period to test interactions with dyntick idle,
 * as well as interactions with any per-CPU variables.
 */
struct shuffle_task {
	struct list_head st_l;
	struct task_struct *st_t;
};

static long shuffle_interval;	/* In jiffies. */
static struct task_struct *shuffler_task;
static cpumask_var_t shuffle_tmp_mask;
static int shuffle_idle_cpu;	/* Force all torture tasks off this CPU */
static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
static DEFINE_MUTEX(shuffle_task_mutex);

/*
 * Register a task to be shuffled.  If there is no memory, just splat
 * and don't bother registering.
 */
void torture_shuffle_task_register(struct task_struct *tp)
{
	struct shuffle_task *stp;

	if (WARN_ON_ONCE(tp == NULL))
		return;
	stp = kmalloc(sizeof(*stp), GFP_KERNEL);
	if (WARN_ON_ONCE(stp == NULL))
		return;
	stp->st_t = tp;
	mutex_lock(&shuffle_task_mutex);
	list_add(&stp->st_l, &shuffle_task_list);
	mutex_unlock(&shuffle_task_mutex);
}
EXPORT_SYMBOL_GPL(torture_shuffle_task_register);

/*
 * Unregister all tasks, for example, at the end of the torture run.
 */
static void torture_shuffle_task_unregister_all(void)
{
	struct shuffle_task *stp;
	struct shuffle_task *p;

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
		list_del(&stp->st_l);
		kfree(stp);
	}
	mutex_unlock(&shuffle_task_mutex);
}

/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
 * A special case is when shuffle_idle_cpu = -1, in which case we allow
 * the tasks to run on all CPUs.
 */
static void torture_shuffle_tasks(void)
{
	struct shuffle_task *stp;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
	if (shuffle_idle_cpu >= nr_cpu_ids)
		shuffle_idle_cpu = -1;
	else
		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry(stp, &shuffle_task_list, st_l)
		set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
	mutex_unlock(&shuffle_task_mutex);

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle in turn and shut off its timer tick.  This is meant
 * to test RCU's support for such tickless-idle CPUs.
 */
static int torture_shuffle(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval);
		torture_shuffle_tasks();
		torture_shutdown_absorb("torture_shuffle");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_shuffle");
	return 0;
}

/*
 * Start the shuffler, with shuffint in jiffies.
 */
int torture_shuffle_init(long shuffint)
{
	shuffle_interval = shuffint;

	shuffle_idle_cpu = -1;

	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
		VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
		return -ENOMEM;
	}

	/* Create the shuffler thread */
	return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);
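
/*
 * Illustrative usage, with a hypothetical interval: shuffle the
 * registered tasks every 3 seconds.  Kthreads created through
 * torture_create_kthread() are registered for shuffling automatically:
 *
 *	ret = torture_shuffle_init(3 * HZ);
 *	if (ret)
 *		return ret;	// Mask allocation or kthread creation failed.
 */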

/*
 * Stop the shuffling.
 */
static void torture_shuffle_cleanup(void)
{
	torture_shuffle_task_unregister_all();
	if (shuffler_task) {
		VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;
}

/*
 * Variables for auto-shutdown.  This allows "lights out" torture runs
 * to be fully scripted.
 */
static struct task_struct *shutdown_task;
static ktime_t shutdown_time;		/* time to system shutdown. */
static void (*torture_shutdown_hook)(void);

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
void torture_shutdown_absorb(const char *title)
{
	while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice("torture thread %s parking due to system shutdown\n",
			  title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);

/*
 * Cause the torture test to shut down the system after the test has
 * run for the time specified by the shutdown_secs parameter.
 */
static int torture_shutdown(void *arg)
{
	ktime_t ktime_snap;

	VERBOSE_TOROUT_STRING("torture_shutdown task started");
	ktime_snap = ktime_get();
	while (ktime_before(ktime_snap, shutdown_time) &&
	       !torture_must_stop()) {
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_shutdown task: %llu ms remaining\n",
				 torture_type,
				 ktime_ms_delta(shutdown_time, ktime_snap));
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&shutdown_time, HRTIMER_MODE_ABS);
		ktime_snap = ktime_get();
	}
	if (torture_must_stop()) {
		torture_kthread_stopping("torture_shutdown");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	if (torture_shutdown_hook)
		torture_shutdown_hook();
	else
		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
	if (ftrace_dump_at_shutdown)
		rcu_ftrace_dump(DUMP_ALL);
	kernel_power_off();	/* Shut down the system. */
	return 0;
}

/*
 * Start up the shutdown task.
 */
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
	torture_shutdown_hook = cleanup;
	if (ssecs > 0) {
		shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0));
		return torture_create_kthread(torture_shutdown, NULL,
					      shutdown_task);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);
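
/*
 * Illustrative usage, with hypothetical values: power off the system
 * 300 seconds from now, invoking a hypothetical my_cleanup() hook just
 * before shutdown.  An ssecs value <= 0 disables auto-shutdown:
 *
 *	ret = torture_shutdown_init(300, my_cleanup);
 *	if (ret)
 *		return ret;
 */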

/*
 * Detect and respond to a system shutdown.
 */
static int torture_shutdown_notify(struct notifier_block *unused1,
				   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
	} else {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
	}
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block torture_shutdown_nb = {
	.notifier_call = torture_shutdown_notify,
};

/*
 * Shut down the shutdown task.  Say what???  Heh!  This can happen if
 * the torture module gets an rmmod before the shutdown time arrives.  ;-)
 */
static void torture_shutdown_cleanup(void)
{
	unregister_reboot_notifier(&torture_shutdown_nb);
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
}

/*
 * Variables for stuttering, which means to periodically pause and
 * restart testing in order to catch bugs that appear when load is
 * suddenly applied to or removed from the system.
 */
static struct task_struct *stutter_task;
static int stutter_pause_test;
static int stutter;
static int stutter_gap;

/*
 * Block until the stutter interval ends.  This must be called periodically
 * by all running kthreads that need to be subject to stuttering.
 */
bool stutter_wait(const char *title)
{
	int spt;
	bool ret = false;

	cond_resched_tasks_rcu_qs();
	spt = READ_ONCE(stutter_pause_test);
	for (; spt; spt = READ_ONCE(stutter_pause_test)) {
		ret = true;
		if (spt == 1) {
			schedule_timeout_interruptible(1);
		} else if (spt == 2) {
			while (READ_ONCE(stutter_pause_test))
				cond_resched();
		} else {
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		}
		torture_shutdown_absorb(title);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(stutter_wait);
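
/*
 * Illustrative usage: a torture kthread's main loop calls stutter_wait()
 * once per pass, where the hypothetical do_one_torture_op() stands in
 * for the test's real work:
 *
 *	do {
 *		do_one_torture_op();
 *		stutter_wait("my_torture_task");
 *	} while (!torture_must_stop());
 *
 * The return value indicates whether the caller was actually paused,
 * which callers can use to reset per-interval state.
 */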

/*
 * Cause the torture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int torture_stutter(void *arg)
{
	int wtime;

	VERBOSE_TOROUT_STRING("torture_stutter task started");
	do {
		if (!torture_must_stop() && stutter > 1) {
			wtime = stutter;
			if (stutter > HZ + 1) {
				WRITE_ONCE(stutter_pause_test, 1);
				wtime = stutter - HZ - 1;
				schedule_timeout_interruptible(wtime);
				wtime = HZ + 1;
			}
			WRITE_ONCE(stutter_pause_test, 2);
			schedule_timeout_interruptible(wtime);
		}
		WRITE_ONCE(stutter_pause_test, 0);
		if (!torture_must_stop())
			schedule_timeout_interruptible(stutter_gap);
		torture_shutdown_absorb("torture_stutter");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_stutter");
	return 0;
}

/*
 * Initialize and kick off the torture_stutter kthread.
 */
int torture_stutter_init(const int s, const int sgap)
{
	stutter = s;
	stutter_gap = sgap;
	return torture_create_kthread(torture_stutter, NULL, stutter_task);
}
EXPORT_SYMBOL_GPL(torture_stutter_init);

/*
 * Cleanup after the torture_stutter kthread.
 */
static void torture_stutter_cleanup(void)
{
	if (!stutter_task)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
	kthread_stop(stutter_task);
	stutter_task = NULL;
}

/*
 * Initialize torture module.  Please note that this is -not- invoked via
 * the usual module_init() mechanism, but rather by an explicit call from
 * the client torture module.  This call must be paired with a later
 * torture_init_end().
 */
bool torture_init_begin(char *ttype, int v)
{
	mutex_lock(&fullstop_mutex);
	if (torture_type != NULL) {
		pr_alert("torture_init_begin: Refusing %s init: %s running.\n",
			 ttype, torture_type);
		pr_alert("torture_init_begin: One torture test at a time!\n");
		mutex_unlock(&fullstop_mutex);
		return false;
	}
	torture_type = ttype;
	verbose = v;
	fullstop = FULLSTOP_DONTSTOP;
	return true;
}
EXPORT_SYMBOL_GPL(torture_init_begin);

/*
 * Tell the torture module that initialization is complete.
 */
void torture_init_end(void)
{
	mutex_unlock(&fullstop_mutex);
	register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);
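
/*
 * Illustrative init-time pairing for a hypothetical client module (the
 * name "my_torture" and the client-side verbose parameter are made up):
 * every client brackets its setup with these two calls, backing out if
 * some other torture test already owns the facility:
 *
 *	static int __init my_torture_init(void)
 *	{
 *		if (!torture_init_begin("my_torture", verbose))
 *			return -EBUSY;
 *		// ... create kthreads, register callbacks ...
 *		torture_init_end();
 *		return 0;
 *	}
 */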

/*
 * Clean up torture module.  Please note that this is -not- invoked via
 * the usual module_exit() mechanism, but rather by an explicit call from
 * the client torture module.  Returns true if a race with system shutdown
 * is detected; otherwise, all kthreads started by functions in this file
 * will be shut down.
 *
 * This must be called before the caller starts shutting down its own
 * kthreads.
 *
 * Both torture_cleanup_begin() and torture_cleanup_end() must be paired,
 * in order to correctly perform the cleanup.  They are separated because
 * client kthreads may still need to reference torture_type, so it is set
 * to NULL only after all other cleanup has completed.
 */
bool torture_cleanup_begin(void)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		return true;
	}
	WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
	mutex_unlock(&fullstop_mutex);
	torture_shutdown_cleanup();
	torture_shuffle_cleanup();
	torture_stutter_cleanup();
	torture_onoff_cleanup();
	return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup_begin);

void torture_cleanup_end(void)
{
	mutex_lock(&fullstop_mutex);
	torture_type = NULL;
	mutex_unlock(&fullstop_mutex);
}
EXPORT_SYMBOL_GPL(torture_cleanup_end);

/*
 * Is it time for the current torture test to stop?
 */
bool torture_must_stop(void)
{
	return torture_must_stop_irq() || kthread_should_stop();
}
EXPORT_SYMBOL_GPL(torture_must_stop);

/*
 * Is it time for the current torture test to stop?  This is the irq-safe
 * version, hence no check for kthread_should_stop().
 */
bool torture_must_stop_irq(void)
{
	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_must_stop_irq);

/*
 * Each kthread must wait for kthread_should_stop() before returning from
 * its top-level function, otherwise segfaults ensue.  This function
 * prints a "stopping" message and waits for kthread_should_stop(), and
 * should be called from all torture kthreads immediately prior to
 * returning.
 */
void torture_kthread_stopping(char *title)
{
	char buf[128];

	snprintf(buf, sizeof(buf), "Stopping %s", title);
	VERBOSE_TOROUT_STRING(buf);
	while (!kthread_should_stop()) {
		torture_shutdown_absorb(title);
		schedule_timeout_uninterruptible(1);
	}
}
EXPORT_SYMBOL_GPL(torture_kthread_stopping);

/*
 * Create a generic torture kthread that is immediately runnable.  If you
 * need the kthread to be stopped so that you can do something to it before
 * it starts, you will need to open-code your own.
 */
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
			    char *f, struct task_struct **tp)
{
	int ret = 0;

	VERBOSE_TOROUT_STRING(m);
	*tp = kthread_run(fn, arg, "%s", s);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		VERBOSE_TOROUT_ERRSTRING(f);
		*tp = NULL;
	}
	torture_shuffle_task_register(*tp);
	return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);

/*
 * Stop a generic kthread, emitting a message.
 */
void _torture_stop_kthread(char *m, struct task_struct **tp)
{
	if (*tp == NULL)
		return;
	VERBOSE_TOROUT_STRING(m);
	kthread_stop(*tp);
	*tp = NULL;
}
EXPORT_SYMBOL_GPL(_torture_stop_kthread);
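
/*
 * Illustrative kthread lifecycle for a hypothetical my_func()/my_task:
 * the torture_create_kthread() and torture_stop_kthread() macros in
 * linux/torture.h generate the message strings and invoke the two
 * functions above:
 *
 *	static struct task_struct *my_task;
 *
 *	ret = torture_create_kthread(my_func, NULL, my_task);
 *	...
 *	torture_stop_kthread(my_func, my_task);
 *
 * where my_func() finishes by calling torture_kthread_stopping("my_func")
 * so that it parks safely until kthread_stop() is invoked.
 */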