// SPDX-License-Identifier: GPL-2.0+
/*
 * Common functions for in-kernel torture tests.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <linux/ktime.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include "rcu/rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

static bool disable_onoff_at_boot;
module_param(disable_onoff_at_boot, bool, 0444);

static bool ftrace_dump_at_shutdown;
module_param(ftrace_dump_at_shutdown, bool, 0444);

static char *torture_type;
static int verbose;

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with torture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);

/*
 * Schedule a high-resolution-timer sleep in nanoseconds, with a 32-bit
 * nanosecond random fuzz.  This function and its friends desynchronize
 * testing from the timer wheel.
 */
int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp)
{
	ktime_t hto = baset_ns;

	if (trsp && fuzzt_ns)	/* Guard against zero-fuzz division. */
		hto += (torture_random(trsp) >> 3) % fuzzt_ns;
	set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_hrtimeout(&hto, HRTIMER_MODE_REL);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_ns);

/*
 * Schedule a high-resolution-timer sleep in microseconds, with a 32-bit
 * nanosecond (not microsecond!) random fuzz.
 */
int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp)
{
	ktime_t baset_ns = baset_us * NSEC_PER_USEC;

	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_us);

/*
 * Schedule a high-resolution-timer sleep in milliseconds, with a 32-bit
 * microsecond (not millisecond!) random fuzz.
 */
int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp)
{
	ktime_t baset_ns = baset_ms * NSEC_PER_MSEC;
	u32 fuzzt_ns;

	if ((u32)~0U / NSEC_PER_USEC < fuzzt_us)
		fuzzt_ns = (u32)~0U;
	else
		fuzzt_ns = fuzzt_us * NSEC_PER_USEC;
	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_ms);

/*
 * Schedule a high-resolution-timer sleep in jiffies, with an
 * implied one-jiffy random fuzz.  This is intended to replace calls to
 * schedule_timeout_interruptible() and friends.
 */
int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp)
{
	ktime_t baset_ns = jiffies_to_nsecs(baset_j);

	return torture_hrtimeout_ns(baset_ns, jiffies_to_nsecs(1), trsp);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_jiffies);

/*
 * Schedule a high-resolution-timer sleep in seconds, with a 32-bit
 * millisecond (not second!) random fuzz.
 */
int torture_hrtimeout_s(u32 baset_s, u32 fuzzt_ms, struct torture_random_state *trsp)
{
	ktime_t baset_ns = baset_s * NSEC_PER_SEC;
	u32 fuzzt_ns;

	if ((u32)~0U / NSEC_PER_MSEC < fuzzt_ms)
		fuzzt_ns = (u32)~0U;
	else
		fuzzt_ns = fuzzt_ms * NSEC_PER_MSEC;
	return torture_hrtimeout_ns(baset_ns, fuzzt_ns, trsp);
}
EXPORT_SYMBOL_GPL(torture_hrtimeout_s);
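
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * torture kthread might use the torture_hrtimeout_*() family to sleep
 * roughly ten milliseconds per pass, with up to a millisecond of fuzz:
 *
 *	static int example_loop(void *arg)
 *	{
 *		DEFINE_TORTURE_RANDOM(rand);
 *
 *		while (!torture_must_stop())
 *			torture_hrtimeout_ms(10, 1000, &rand); // 1000us fuzz.
 *		torture_kthread_stopping("example_loop");
 *		return 0;
 *	}
 */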

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Variables for online-offline handling.  These are used only if
 * CPU hotplug is enabled; otherwise this code is compiled out.
 */

static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
static torture_ofl_func *onoff_f;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;

/*
 * Attempt to take a CPU offline.  Return false if the CPU is already
 * offline or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
		     unsigned long *sum_offl, int *min_offl, int *max_offl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;
	if (num_online_cpus() <= 1)
		return false;  /* Can't offline the last CPU. */

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: offlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_offl_attempts)++;
	ret = remove_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_offl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offline %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offlined %d\n",
				 torture_type, cpu);
		if (onoff_f)
			onoff_f();
		(*n_offl_successes)++;
		delta = jiffies - starttime;
		*sum_offl += delta;
		if (*min_offl < 0) {
			*min_offl = delta;
			*max_offl = delta;
		}
		if (*min_offl > delta)
			*min_offl = delta;
		if (*max_offl < delta)
			*max_offl = delta;
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_offline);

/*
 * Attempt to bring a CPU online.  Return false if the CPU is already
 * online or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
		    unsigned long *sum_onl, int *min_onl, int *max_onl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: onlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_onl_attempts)++;
	ret = add_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_onl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: online %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: onlined %d\n",
				 torture_type, cpu);
		(*n_onl_successes)++;
		delta = jiffies - starttime;
		*sum_onl += delta;
		if (*min_onl < 0) {
			*min_onl = delta;
			*max_onl = delta;
		}
		if (*min_onl > delta)
			*min_onl = delta;
		if (*max_onl < delta)
			*max_onl = delta;
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_online);

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by onoff_interval.
 */
static int
torture_onoff(void *arg)
{
	int cpu;
	int maxcpu = -1;
	DEFINE_TORTURE_RANDOM(rand);
	int ret;

	VERBOSE_TOROUT_STRING("torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (!IS_MODULE(CONFIG_TORTURE_TEST)) {
		for_each_possible_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			ret = add_cpu(cpu);
			if (ret && verbose) {
				pr_alert("%s" TORTURE_FLAG
					 "%s: Initial online %d: errno %d\n",
					 __func__, torture_type, cpu, ret);
			}
		}
	}

	if (maxcpu == 0) {
		VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
		goto stop;
	}

	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff);
		VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
	}
	while (!torture_must_stop()) {
		if (disable_onoff_at_boot && !rcu_inkernel_boot_has_ended()) {
			schedule_timeout_interruptible(HZ / 10);
			continue;
		}
		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
		if (!torture_offline(cpu,
				     &n_offline_attempts, &n_offline_successes,
				     &sum_offline, &min_offline, &max_offline))
			torture_online(cpu,
				       &n_online_attempts, &n_online_successes,
				       &sum_online, &min_online, &max_online);
		schedule_timeout_interruptible(onoff_interval);
	}

stop:
	torture_kthread_stopping("torture_onoff");
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Initiate online-offline handling.
 */
int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f)
{
#ifdef CONFIG_HOTPLUG_CPU
	onoff_holdoff = ooholdoff;
	onoff_interval = oointerval;
	onoff_f = f;
	if (onoff_interval <= 0)
		return 0;
	return torture_create_kthread(torture_onoff, NULL, onoff_task);
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return 0;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_init);
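
/*
 * Example (illustrative sketch): a client module might start
 * CPU-hotplug testing from its init function, using hypothetical
 * onoff_holdoff (seconds) and onoff_interval (jiffies) parameters:
 *
 *	ret = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, NULL);
 *	if (ret)
 *		return ret;
 *
 * A non-NULL third argument supplies a torture_ofl_func to be invoked
 * after each successful offline operation, and an oointerval <= 0
 * disables the testing without creating the kthread.
 */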

/*
 * Clean up after online/offline testing.
 */
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}

/*
 * Print online/offline testing statistics.
 */
void torture_onoff_stats(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		n_online_successes, n_online_attempts,
		n_offline_successes, n_offline_attempts,
		min_online, max_online,
		min_offline, max_offline,
		sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);

/*
 * Were all the online/offline operations successful?
 */
bool torture_onoff_failures(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	return n_online_successes != n_online_attempts ||
	       n_offline_successes != n_offline_attempts;
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return false;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_failures);

#define TORTURE_RANDOM_MULT	39916801  /* prime */
#define TORTURE_RANDOM_ADD	479001701 /* prime */
#define TORTURE_RANDOM_REFRESH	10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from local_clock().
 */
unsigned long
torture_random(struct torture_random_state *trsp)
{
	if (--trsp->trs_count < 0) {
		trsp->trs_state += (unsigned long)local_clock();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
		TORTURE_RANDOM_ADD;
	return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);
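
/*
 * Example (illustrative sketch): callers in this file discard a few
 * low-order bits of the returned value before reducing it modulo the
 * desired range, as in this hypothetical CPU selection:
 *
 *	DEFINE_TORTURE_RANDOM(rand);
 *	int cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
 *
 * Here maxcpu is assumed to hold the largest online CPU number, as in
 * torture_onoff() above.
 */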

/*
 * Variables for shuffling.  The idea is to ensure that each CPU stays
 * idle for an extended period to test interactions with dyntick idle,
 * as well as interactions with any per-CPU variables.
 */
struct shuffle_task {
	struct list_head st_l;
	struct task_struct *st_t;
};

static long shuffle_interval;	/* In jiffies. */
static struct task_struct *shuffler_task;
static cpumask_var_t shuffle_tmp_mask;
static int shuffle_idle_cpu;	/* Force all torture tasks off this CPU */
static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
static DEFINE_MUTEX(shuffle_task_mutex);

/*
 * Register a task to be shuffled.  If there is no memory, just splat
 * and don't bother registering.
 */
void torture_shuffle_task_register(struct task_struct *tp)
{
	struct shuffle_task *stp;

	if (WARN_ON_ONCE(tp == NULL))
		return;
	stp = kmalloc(sizeof(*stp), GFP_KERNEL);
	if (WARN_ON_ONCE(stp == NULL))
		return;
	stp->st_t = tp;
	mutex_lock(&shuffle_task_mutex);
	list_add(&stp->st_l, &shuffle_task_list);
	mutex_unlock(&shuffle_task_mutex);
}
EXPORT_SYMBOL_GPL(torture_shuffle_task_register);

/*
 * Unregister all tasks, for example, at the end of the torture run.
 */
static void torture_shuffle_task_unregister_all(void)
{
	struct shuffle_task *stp;
	struct shuffle_task *p;

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
		list_del(&stp->st_l);
		kfree(stp);
	}
	mutex_unlock(&shuffle_task_mutex);
}

/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
 * A special case is when shuffle_idle_cpu = -1, in which case we allow
 * the tasks to run on all CPUs.
 */
static void torture_shuffle_tasks(void)
{
	struct shuffle_task *stp;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
	if (shuffle_idle_cpu >= nr_cpu_ids)
		shuffle_idle_cpu = -1;
	else
		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry(stp, &shuffle_task_list, st_l)
		set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
	mutex_unlock(&shuffle_task_mutex);

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test RCU's support for such tickless idle CPUs.
 */
static int torture_shuffle(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval);
		torture_shuffle_tasks();
		torture_shutdown_absorb("torture_shuffle");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_shuffle");
	return 0;
}

/*
 * Start the shuffler, with shuffint in jiffies.
 */
int torture_shuffle_init(long shuffint)
{
	shuffle_interval = shuffint;

	shuffle_idle_cpu = -1;

	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
		VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
		return -ENOMEM;
	}

	/* Create the shuffler thread */
	return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);
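
/*
 * Example (illustrative sketch): a client module might enable task
 * shuffling from its init function, with a hypothetical
 * shuffle_interval parameter in seconds:
 *
 *	if (shuffle_interval > 0) {
 *		ret = torture_shuffle_init(shuffle_interval * HZ);
 *		if (ret)
 *			return ret;
 *	}
 *
 * Kthreads created through torture_create_kthread() are registered
 * for shuffling automatically via torture_shuffle_task_register().
 */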

/*
 * Stop the shuffling.
 */
static void torture_shuffle_cleanup(void)
{
	torture_shuffle_task_unregister_all();
	if (shuffler_task) {
		VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;
}

/*
 * Variables for auto-shutdown.  This allows "lights out" torture runs
 * to be fully scripted.
 */
static struct task_struct *shutdown_task;
static ktime_t shutdown_time;		/* time to system shutdown. */
static void (*torture_shutdown_hook)(void);

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
void torture_shutdown_absorb(const char *title)
{
	while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice("torture thread %s parking due to system shutdown\n",
			  title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);

/*
 * Cause the torture test to shut down the system after the test has
 * run for the time specified by the shutdown_secs parameter.
 */
static int torture_shutdown(void *arg)
{
	ktime_t ktime_snap;

	VERBOSE_TOROUT_STRING("torture_shutdown task started");
	ktime_snap = ktime_get();
	while (ktime_before(ktime_snap, shutdown_time) &&
	       !torture_must_stop()) {
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_shutdown task: %llu ms remaining\n",
				 torture_type,
				 ktime_ms_delta(shutdown_time, ktime_snap));
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&shutdown_time, HRTIMER_MODE_ABS);
		ktime_snap = ktime_get();
	}
	if (torture_must_stop()) {
		torture_kthread_stopping("torture_shutdown");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	if (torture_shutdown_hook)
		torture_shutdown_hook();
	else
		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
	if (ftrace_dump_at_shutdown)
		rcu_ftrace_dump(DUMP_ALL);
	kernel_power_off();	/* Shut down the system. */
	return 0;
}

/*
 * Start up the shutdown task.
 */
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
	torture_shutdown_hook = cleanup;
	if (ssecs > 0) {
		shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0));
		return torture_create_kthread(torture_shutdown, NULL,
					     shutdown_task);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);
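
/*
 * Example (illustrative sketch): a client module might schedule a
 * system shutdown shutdown_secs seconds into the test, with a
 * hypothetical cleanup hook of its own:
 *
 *	static void example_cleanup(void)
 *	{
 *		// Client-specific teardown before power-off.
 *	}
 *
 *	ret = torture_shutdown_init(shutdown_secs, example_cleanup);
 *
 * A non-positive shutdown_secs leaves auto-shutdown disabled.
 */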

/*
 * Detect and respond to a system shutdown.
 */
static int torture_shutdown_notify(struct notifier_block *unused1,
				   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
	} else {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
	}
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block torture_shutdown_nb = {
	.notifier_call = torture_shutdown_notify,
};

/*
 * Shut down the shutdown task.  Say what???  Heh!  This can happen if
 * the torture module gets an rmmod before the shutdown time arrives.  ;-)
 */
static void torture_shutdown_cleanup(void)
{
	unregister_reboot_notifier(&torture_shutdown_nb);
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
}

/*
 * Variables for stuttering, which means to periodically pause and
 * restart testing in order to catch bugs that appear when load is
 * suddenly applied to or removed from the system.
 */
static struct task_struct *stutter_task;
static int stutter_pause_test;
static int stutter;
static int stutter_gap;

/*
 * Block until the stutter interval ends.  This must be called periodically
 * by all running kthreads that need to be subject to stuttering.
 */
bool stutter_wait(const char *title)
{
	ktime_t delay;
	unsigned int i = 0;
	bool ret = false;
	int spt;

	cond_resched_tasks_rcu_qs();
	spt = READ_ONCE(stutter_pause_test);
	for (; spt; spt = READ_ONCE(stutter_pause_test)) {
		if (!ret) {
			sched_set_normal(current, MAX_NICE);
			ret = true;
		}
		if (spt == 1) {
			schedule_timeout_interruptible(1);
		} else if (spt == 2) {
			while (READ_ONCE(stutter_pause_test)) {
				if (!(i++ & 0xffff)) {
					set_current_state(TASK_INTERRUPTIBLE);
					delay = 10 * NSEC_PER_USEC;
					schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
				}
				cond_resched();
			}
		} else {
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		}
		torture_shutdown_absorb(title);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(stutter_wait);

/*
 * Cause the torture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int torture_stutter(void *arg)
{
	ktime_t delay;
	DEFINE_TORTURE_RANDOM(rand);
	int wtime;

	VERBOSE_TOROUT_STRING("torture_stutter task started");
	do {
		if (!torture_must_stop() && stutter > 1) {
			wtime = stutter;
			if (stutter > 2) {
				WRITE_ONCE(stutter_pause_test, 1);
				wtime = stutter - 3;
				delay = ktime_divns(NSEC_PER_SEC * wtime, HZ);
				delay += (torture_random(&rand) >> 3) % NSEC_PER_MSEC;
				set_current_state(TASK_INTERRUPTIBLE);
				schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
				wtime = 2;
			}
			WRITE_ONCE(stutter_pause_test, 2);
			delay = ktime_divns(NSEC_PER_SEC * wtime, HZ);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
		}
		WRITE_ONCE(stutter_pause_test, 0);
		if (!torture_must_stop())
			schedule_timeout_interruptible(stutter_gap);
		torture_shutdown_absorb("torture_stutter");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_stutter");
	return 0;
}

/*
 * Initialize and kick off the torture_stutter kthread.
 */
int torture_stutter_init(const int s, const int sgap)
{
	stutter = s;
	stutter_gap = sgap;
	return torture_create_kthread(torture_stutter, NULL, stutter_task);
}
EXPORT_SYMBOL_GPL(torture_stutter_init);
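
/*
 * Example (illustrative sketch): a hypothetical client kthread polls
 * stutter_wait() once per pass, and the client's init function starts
 * the stutter kthread with hypothetical interval and gap values, both
 * in jiffies:
 *
 *	static int example_worker(void *arg)
 *	{
 *		do {
 *			example_do_one_pass(); // Hypothetical helper.
 *			stutter_wait("example_worker");
 *		} while (!torture_must_stop());
 *		torture_kthread_stopping("example_worker");
 *		return 0;
 *	}
 *
 *	// In the init function, with a hypothetical stutter in seconds:
 *	ret = torture_stutter_init(stutter * HZ, stutter * HZ);
 */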

/*
 * Cleanup after the torture_stutter kthread.
 */
static void torture_stutter_cleanup(void)
{
	if (!stutter_task)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
	kthread_stop(stutter_task);
	stutter_task = NULL;
}

/*
 * Initialize torture module.  Please note that this is -not- invoked via
 * the usual module_init() mechanism, but rather by an explicit call from
 * the client torture module.  This call must be paired with a later
 * torture_init_end().
 */
bool torture_init_begin(char *ttype, int v)
{
	mutex_lock(&fullstop_mutex);
	if (torture_type != NULL) {
		pr_alert("torture_init_begin: Refusing %s init: %s running.\n",
			 ttype, torture_type);
		pr_alert("torture_init_begin: One torture test at a time!\n");
		mutex_unlock(&fullstop_mutex);
		return false;
	}
	torture_type = ttype;
	verbose = v;
	fullstop = FULLSTOP_DONTSTOP;
	return true;
}
EXPORT_SYMBOL_GPL(torture_init_begin);

/*
 * Tell the torture module that initialization is complete.
 */
void torture_init_end(void)
{
	mutex_unlock(&fullstop_mutex);
	register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);

/*
 * Clean up torture module.  Please note that this is -not- invoked via
 * the usual module_exit() mechanism, but rather by an explicit call from
 * the client torture module.  Returns true if a race with system shutdown
 * is detected; otherwise, all kthreads started by functions in this file
 * will be shut down.
 *
 * This must be called before the caller starts shutting down its own
 * kthreads.
 *
 * Both torture_cleanup_begin() and torture_cleanup_end() must be called,
 * in that order, to correctly perform the cleanup.  They are separate
 * because other kthreads might still need to reference torture_type,
 * which is therefore set to NULL only after all other cleanup calls
 * have completed.
 */
bool torture_cleanup_begin(void)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		return true;
	}
	WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
	mutex_unlock(&fullstop_mutex);
	torture_shutdown_cleanup();
	torture_shuffle_cleanup();
	torture_stutter_cleanup();
	torture_onoff_cleanup();
	return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup_begin);

void torture_cleanup_end(void)
{
	mutex_lock(&fullstop_mutex);
	torture_type = NULL;
	mutex_unlock(&fullstop_mutex);
}
EXPORT_SYMBOL_GPL(torture_cleanup_end);
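
/*
 * Example (illustrative sketch): the expected init/cleanup pairing in
 * a hypothetical client module:
 *
 *	static int __init example_torture_init(void)
 *	{
 *		if (!torture_init_begin("example", verbose))
 *			return -EBUSY;
 *		// ... create kthreads and start onoff/shuffle/stutter ...
 *		torture_init_end();
 *		return 0;
 *	}
 *
 *	static void example_torture_cleanup(void)
 *	{
 *		if (torture_cleanup_begin())
 *			return;	// Racing with system shutdown.
 *		// ... stop the client's own kthreads ...
 *		torture_cleanup_end();
 *	}
 */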

/*
 * Is it time for the current torture test to stop?
 */
bool torture_must_stop(void)
{
	return torture_must_stop_irq() || kthread_should_stop();
}
EXPORT_SYMBOL_GPL(torture_must_stop);

/*
 * Is it time for the current torture test to stop?  This is the irq-safe
 * version, hence no check for kthread_should_stop().
 */
bool torture_must_stop_irq(void)
{
	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_must_stop_irq);

/*
 * Each kthread must wait for kthread_should_stop() before returning from
 * its top-level function, otherwise segfaults ensue.  This function
 * prints a "stopping" message and waits for kthread_should_stop(), and
 * should be called from all torture kthreads immediately prior to
 * returning.
 */
void torture_kthread_stopping(char *title)
{
	char buf[128];

	snprintf(buf, sizeof(buf), "Stopping %s", title);
	VERBOSE_TOROUT_STRING(buf);
	while (!kthread_should_stop()) {
		torture_shutdown_absorb(title);
		schedule_timeout_uninterruptible(1);
	}
}
EXPORT_SYMBOL_GPL(torture_kthread_stopping);

/*
 * Create a generic torture kthread that is immediately runnable.  If you
 * need the kthread to be stopped so that you can do something to it before
 * it starts, you will need to open-code your own.
 */
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
			    char *f, struct task_struct **tp)
{
	int ret = 0;

	VERBOSE_TOROUT_STRING(m);
	*tp = kthread_run(fn, arg, "%s", s);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		VERBOSE_TOROUT_ERRSTRING(f);
		*tp = NULL;
	}
	torture_shuffle_task_register(*tp);
	return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);
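
/*
 * Example (illustrative sketch): this function is normally invoked via
 * the torture_create_kthread() wrapper macro in include/linux/torture.h,
 * which generates the name and message strings from the function name:
 *
 *	torture_create_kthread(example_worker, NULL, example_task);
 *
 * and pairs with a later:
 *
 *	torture_stop_kthread(example_worker, example_task);
 */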

/*
 * Stop a generic kthread, emitting a message.
 */
void _torture_stop_kthread(char *m, struct task_struct **tp)
{
	if (*tp == NULL)
		return;
	VERBOSE_TOROUT_STRING(m);
	kthread_stop(*tp);
	*tp = NULL;
}
EXPORT_SYMBOL_GPL(_torture_stop_kthread);
923