xref: /openbmc/linux/kernel/rcu/rcutorture.c (revision fe7498ef)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update module-based torture test facility
4  *
5  * Copyright (C) IBM Corporation, 2005, 2006
6  *
7  * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
8  *	  Josh Triplett <josh@joshtriplett.org>
9  *
10  * See also:  Documentation/RCU/torture.rst
11  */
12 
13 #define pr_fmt(fmt) fmt
14 
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/kthread.h>
20 #include <linux/err.h>
21 #include <linux/spinlock.h>
22 #include <linux/smp.h>
23 #include <linux/rcupdate_wait.h>
24 #include <linux/interrupt.h>
25 #include <linux/sched/signal.h>
26 #include <uapi/linux/sched/types.h>
27 #include <linux/atomic.h>
28 #include <linux/bitops.h>
29 #include <linux/completion.h>
30 #include <linux/moduleparam.h>
31 #include <linux/percpu.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <linux/freezer.h>
35 #include <linux/cpu.h>
36 #include <linux/delay.h>
37 #include <linux/stat.h>
38 #include <linux/srcu.h>
39 #include <linux/slab.h>
40 #include <linux/trace_clock.h>
41 #include <asm/byteorder.h>
42 #include <linux/torture.h>
43 #include <linux/vmalloc.h>
44 #include <linux/sched/debug.h>
45 #include <linux/sched/sysctl.h>
46 #include <linux/oom.h>
47 #include <linux/tick.h>
48 #include <linux/rcupdate_trace.h>
49 
50 #include "rcu.h"
51 
52 MODULE_LICENSE("GPL");
53 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
54 
55 /* Bits for ->extendables field, extendables param, and related definitions. */
56 #define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
57 #define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
58 #define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
59 #define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
60 #define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
61 #define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
62 #define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
63 #define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
64 #define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
65 #define RCUTORTURE_MAX_EXTEND	 \
66 	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
67 	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
68 #define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
69 					/* Must be power of two minus one. */
70 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
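/*
 * Editorial sketch (not in the original file): the reader-state word
 * packs the RCUTORTURE_RDR_* flag bits into its low byte and the SRCU
 * read-lock index above RCUTORTURE_RDR_SHIFT.  For example, a reader
 * holding srcu_read_lock() index 1 with bh and preemption disabled
 * would be encoded and decoded as follows:
 *
 *	int readstate = (1 << RCUTORTURE_RDR_SHIFT) |
 *			RCUTORTURE_RDR_BH | RCUTORTURE_RDR_PREEMPT;
 *	int srcu_idx = readstate >> RCUTORTURE_RDR_SHIFT;	// == 1
 *	int flags = readstate & RCUTORTURE_RDR_MASK;		// == 0x05
 */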
71 
72 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
73 	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
74 torture_param(int, fqs_duration, 0,
75 	      "Duration of fqs bursts (us), 0 to disable");
76 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
77 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
78 torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
79 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
80 torture_param(int, fwd_progress_holdoff, 60,
81 	      "Time between forward-progress tests (s)");
82 torture_param(bool, fwd_progress_need_resched, 1,
83 	      "Hide cond_resched() behind need_resched()");
84 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
85 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
86 torture_param(bool, gp_normal, false,
87 	     "Use normal (non-expedited) GP wait primitives");
88 torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
89 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
90 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
91 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
92 torture_param(int, n_barrier_cbs, 0,
93 	     "# of callbacks/kthreads for barrier testing");
94 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
95 torture_param(int, nreaders, -1, "Number of RCU reader threads");
96 torture_param(int, object_debug, 0,
97 	     "Enable debug-object double call_rcu() testing");
98 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
99 torture_param(int, onoff_interval, 0,
100 	     "Time between CPU hotplugs (jiffies), 0=disable");
101 torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
102 torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
103 torture_param(int, read_exit_delay, 13,
104 	      "Delay between read-then-exit episodes (s)");
105 torture_param(int, read_exit_burst, 16,
106 	      "# of read-then-exit bursts per episode, zero to disable");
107 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
108 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
109 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
110 torture_param(int, stall_cpu_holdoff, 10,
111 	     "Time to wait before starting stall (s).");
112 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
113 torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
114 torture_param(int, stall_gp_kthread, 0,
115 	      "Grace-period kthread stall duration (s).");
116 torture_param(int, stat_interval, 60,
117 	     "Number of seconds between stats printk()s");
118 torture_param(int, stutter, 5, "Number of seconds to run/halt test");
119 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
120 torture_param(int, test_boost_duration, 4,
121 	     "Duration of each boost test, seconds.");
122 torture_param(int, test_boost_interval, 7,
123 	     "Interval between boost tests, seconds.");
124 torture_param(bool, test_no_idle_hz, true,
125 	     "Test support for tickless idle CPUs");
126 torture_param(int, verbose, 1,
127 	     "Enable verbose debugging printk()s");
128 
129 static char *torture_type = "rcu";
130 module_param(torture_type, charp, 0444);
131 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
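/*
 * Editorial note (not in the original file): each torture_param()
 * invocation above is, approximately, shorthand for the open-coded
 * pattern used here for torture_type: a static variable plus a
 * read-only module parameter.  A hedged sketch of the expansion:
 *
 *	static int nreaders = -1;
 *	module_param(nreaders, int, 0444);
 *	MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
 */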
132 
133 static int nrealnocbers;
134 static int nrealreaders;
135 static struct task_struct *writer_task;
136 static struct task_struct **fakewriter_tasks;
137 static struct task_struct **reader_tasks;
138 static struct task_struct **nocb_tasks;
139 static struct task_struct *stats_task;
140 static struct task_struct *fqs_task;
141 static struct task_struct *boost_tasks[NR_CPUS];
142 static struct task_struct *stall_task;
143 static struct task_struct *fwd_prog_task;
144 static struct task_struct **barrier_cbs_tasks;
145 static struct task_struct *barrier_task;
146 static struct task_struct *read_exit_task;
147 
148 #define RCU_TORTURE_PIPE_LEN 10
149 
150 // Mailbox-like structure to check RCU global memory ordering.
151 struct rcu_torture_reader_check {
152 	unsigned long rtc_myloops;
153 	int rtc_chkrdr;
154 	unsigned long rtc_chkloops;
155 	int rtc_ready;
156 	struct rcu_torture_reader_check *rtc_assigner;
157 } ____cacheline_internodealigned_in_smp;
158 
159 // Update-side data structure used to check RCU readers.
160 struct rcu_torture {
161 	struct rcu_head rtort_rcu;
162 	int rtort_pipe_count;
163 	struct list_head rtort_free;
164 	int rtort_mbtest;
165 	struct rcu_torture_reader_check *rtort_chkp;
166 };
167 
168 static LIST_HEAD(rcu_torture_freelist);
169 static struct rcu_torture __rcu *rcu_torture_current;
170 static unsigned long rcu_torture_current_version;
171 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
172 static DEFINE_SPINLOCK(rcu_torture_lock);
173 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
174 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
175 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
176 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
177 static atomic_t n_rcu_torture_alloc;
178 static atomic_t n_rcu_torture_alloc_fail;
179 static atomic_t n_rcu_torture_free;
180 static atomic_t n_rcu_torture_mberror;
181 static atomic_t n_rcu_torture_mbchk_fail;
182 static atomic_t n_rcu_torture_mbchk_tries;
183 static atomic_t n_rcu_torture_error;
184 static long n_rcu_torture_barrier_error;
185 static long n_rcu_torture_boost_ktrerror;
186 static long n_rcu_torture_boost_rterror;
187 static long n_rcu_torture_boost_failure;
188 static long n_rcu_torture_boosts;
189 static atomic_long_t n_rcu_torture_timers;
190 static long n_barrier_attempts;
191 static long n_barrier_successes; /* did rcu_barrier test succeed? */
192 static unsigned long n_read_exits;
193 static struct list_head rcu_torture_removed;
194 static unsigned long shutdown_jiffies;
195 static unsigned long start_gp_seq;
196 static atomic_long_t n_nocb_offload;
197 static atomic_long_t n_nocb_deoffload;
198 
199 static int rcu_torture_writer_state;
200 #define RTWS_FIXED_DELAY	0
201 #define RTWS_DELAY		1
202 #define RTWS_REPLACE		2
203 #define RTWS_DEF_FREE		3
204 #define RTWS_EXP_SYNC		4
205 #define RTWS_COND_GET		5
206 #define RTWS_COND_SYNC		6
207 #define RTWS_POLL_GET		7
208 #define RTWS_POLL_WAIT		8
209 #define RTWS_SYNC		9
210 #define RTWS_STUTTER		10
211 #define RTWS_STOPPING		11
212 static const char * const rcu_torture_writer_state_names[] = {
213 	"RTWS_FIXED_DELAY",
214 	"RTWS_DELAY",
215 	"RTWS_REPLACE",
216 	"RTWS_DEF_FREE",
217 	"RTWS_EXP_SYNC",
218 	"RTWS_COND_GET",
219 	"RTWS_COND_SYNC",
220 	"RTWS_POLL_GET",
221 	"RTWS_POLL_WAIT",
222 	"RTWS_SYNC",
223 	"RTWS_STUTTER",
224 	"RTWS_STOPPING",
225 };
226 
227 /* Record reader segment types and duration for first failing read. */
228 struct rt_read_seg {
229 	int rt_readstate;
230 	unsigned long rt_delay_jiffies;
231 	unsigned long rt_delay_ms;
232 	unsigned long rt_delay_us;
233 	bool rt_preempted;
234 };
235 static int err_segs_recorded;
236 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
237 static int rt_read_nsegs;
238 
239 static const char *rcu_torture_writer_state_getname(void)
240 {
241 	unsigned int i = READ_ONCE(rcu_torture_writer_state);
242 
243 	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
244 		return "???";
245 	return rcu_torture_writer_state_names[i];
246 }
247 
248 #ifdef CONFIG_RCU_TRACE
249 static u64 notrace rcu_trace_clock_local(void)
250 {
251 	u64 ts = trace_clock_local();
252 
253 	(void)do_div(ts, NSEC_PER_USEC);
254 	return ts;
255 }
256 #else /* #ifdef CONFIG_RCU_TRACE */
257 static u64 notrace rcu_trace_clock_local(void)
258 {
259 	return 0ULL;
260 }
261 #endif /* #else #ifdef CONFIG_RCU_TRACE */
262 
263 /*
264  * Stop aggressive CPU-hog tests a bit before the end of the test in order
265  * to avoid interfering with test shutdown.
266  */
267 static bool shutdown_time_arrived(void)
268 {
269 	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
270 }
271 
272 static unsigned long boost_starttime;	/* jiffies of next boost test start. */
273 static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
274 					/*  and boost task create/destroy. */
275 static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
276 static bool barrier_phase;		/* Test phase. */
277 static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
278 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
279 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
280 
281 static bool rcu_fwd_cb_nodelay;		/* Shorten rcu_read_delay() delays. */
282 
283 /*
284  * Allocate an element from the rcu_tortures pool.
285  */
286 static struct rcu_torture *
287 rcu_torture_alloc(void)
288 {
289 	struct list_head *p;
290 
291 	spin_lock_bh(&rcu_torture_lock);
292 	if (list_empty(&rcu_torture_freelist)) {
293 		atomic_inc(&n_rcu_torture_alloc_fail);
294 		spin_unlock_bh(&rcu_torture_lock);
295 		return NULL;
296 	}
297 	atomic_inc(&n_rcu_torture_alloc);
298 	p = rcu_torture_freelist.next;
299 	list_del_init(p);
300 	spin_unlock_bh(&rcu_torture_lock);
301 	return container_of(p, struct rcu_torture, rtort_free);
302 }
303 
304 /*
305  * Free an element to the rcu_tortures pool.
306  */
307 static void
308 rcu_torture_free(struct rcu_torture *p)
309 {
310 	atomic_inc(&n_rcu_torture_free);
311 	spin_lock_bh(&rcu_torture_lock);
312 	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
313 	spin_unlock_bh(&rcu_torture_lock);
314 }
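/*
 * Editorial sketch (not in the original file): typical pool usage by
 * the update side, assuming a grace period elapses before the free:
 *
 *	struct rcu_torture *rp = rcu_torture_alloc();
 *
 *	if (rp) {
 *		rp->rtort_pipe_count = 0;
 *		rp->rtort_mbtest = 1;
 *		rcu_assign_pointer(rcu_torture_current, rp);
 *	}
 *	// ... later, once rp has been replaced and a grace
 *	// period has elapsed:
 *	rcu_torture_free(rp);	// back onto rcu_torture_freelist
 */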
315 
316 /*
317  * Operations vector for selecting different types of tests.
318  */
319 
320 struct rcu_torture_ops {
321 	int ttype;
322 	void (*init)(void);
323 	void (*cleanup)(void);
324 	int (*readlock)(void);
325 	void (*read_delay)(struct torture_random_state *rrsp,
326 			   struct rt_read_seg *rtrsp);
327 	void (*readunlock)(int idx);
328 	int (*readlock_held)(void);
329 	unsigned long (*get_gp_seq)(void);
330 	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
331 	void (*deferred_free)(struct rcu_torture *p);
332 	void (*sync)(void);
333 	void (*exp_sync)(void);
334 	unsigned long (*get_gp_state)(void);
335 	unsigned long (*start_gp_poll)(void);
336 	bool (*poll_gp_state)(unsigned long oldstate);
337 	void (*cond_sync)(unsigned long oldstate);
338 	call_rcu_func_t call;
339 	void (*cb_barrier)(void);
340 	void (*fqs)(void);
341 	void (*stats)(void);
342 	void (*gp_kthread_dbg)(void);
343 	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
344 	int (*stall_dur)(void);
345 	int irq_capable;
346 	int can_boost;
347 	int extendables;
348 	int slow_gps;
349 	const char *name;
350 };
351 
352 static struct rcu_torture_ops *cur_ops;
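/*
 * Editorial sketch (not in the original file): the test code stays
 * flavor-agnostic by calling through cur_ops, checking optional hooks
 * for NULL before use:
 *
 *	int idx = cur_ops->readlock();
 *	// ... read-side critical section ...
 *	cur_ops->readunlock(idx);
 *	if (cur_ops->sync)
 *		cur_ops->sync();	// flavor-specific grace-period wait
 */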
353 
354 /*
355  * Definitions for rcu torture testing.
356  */
357 
358 static int torture_readlock_not_held(void)
359 {
360 	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
361 }
362 
363 static int rcu_torture_read_lock(void) __acquires(RCU)
364 {
365 	rcu_read_lock();
366 	return 0;
367 }
368 
369 static void
370 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
371 {
372 	unsigned long started;
373 	unsigned long completed;
374 	const unsigned long shortdelay_us = 200;
375 	unsigned long longdelay_ms = 300;
376 	unsigned long long ts;
377 
378 	/* We want a short delay sometimes to make a reader delay the grace
379 	 * period, and we want a long delay occasionally to trigger
380 	 * force_quiescent_state. */
381 
382 	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
383 	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
384 		started = cur_ops->get_gp_seq();
385 		ts = rcu_trace_clock_local();
386 		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
387 			longdelay_ms = 5; /* Avoid triggering BH limits. */
388 		mdelay(longdelay_ms);
389 		rtrsp->rt_delay_ms = longdelay_ms;
390 		completed = cur_ops->get_gp_seq();
391 		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
392 					  started, completed);
393 	}
394 	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
395 		udelay(shortdelay_us);
396 		rtrsp->rt_delay_us = shortdelay_us;
397 	}
398 	if (!preempt_count() &&
399 	    !(torture_random(rrsp) % (nrealreaders * 500))) {
400 		torture_preempt_schedule();  /* QS only if preemptible. */
401 		rtrsp->rt_preempted = true;
402 	}
403 }
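/*
 * Editorial note (not in the original file): with the constants above
 * and, say, nrealreaders == 4, a long delay fires roughly once per
 * 4 * 2000 * 300 == 2,400,000 calls and a short delay roughly once per
 * 4 * 2 * 200 == 1,600 calls, so delayed readers stay rare.
 */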
404 
405 static void rcu_torture_read_unlock(int idx) __releases(RCU)
406 {
407 	rcu_read_unlock();
408 }
409 
410 /*
411  * Update callback in the pipe.  This should be invoked after a grace period.
412  */
413 static bool
414 rcu_torture_pipe_update_one(struct rcu_torture *rp)
415 {
416 	int i;
417 	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
418 
419 	if (rtrcp) {
420 		WRITE_ONCE(rp->rtort_chkp, NULL);
421 		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
422 	}
423 	i = READ_ONCE(rp->rtort_pipe_count);
424 	if (i > RCU_TORTURE_PIPE_LEN)
425 		i = RCU_TORTURE_PIPE_LEN;
426 	atomic_inc(&rcu_torture_wcount[i]);
427 	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
428 	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
429 		rp->rtort_mbtest = 0;
430 		return true;
431 	}
432 	return false;
433 }
434 
435 /*
436  * Update all callbacks in the pipe.  Suitable for synchronous grace-period
437  * primitives.
438  */
439 static void
440 rcu_torture_pipe_update(struct rcu_torture *old_rp)
441 {
442 	struct rcu_torture *rp;
443 	struct rcu_torture *rp1;
444 
445 	if (old_rp)
446 		list_add(&old_rp->rtort_free, &rcu_torture_removed);
447 	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
448 		if (rcu_torture_pipe_update_one(rp)) {
449 			list_del(&rp->rtort_free);
450 			rcu_torture_free(rp);
451 		}
452 	}
453 }
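/*
 * Editorial sketch (not in the original file): a synchronous updater
 * pairs a grace-period wait with a pipe update, for example:
 *
 *	synchronize_rcu();			// pre-existing readers done
 *	rcu_torture_pipe_update(old_rp);	// age pipe, free aged-out elements
 */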
454 
455 static void
456 rcu_torture_cb(struct rcu_head *p)
457 {
458 	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
459 
460 	if (torture_must_stop_irq()) {
461 		/* Test is ending, just drop callbacks on the floor. */
462 		/* The next initialization will pick up the pieces. */
463 		return;
464 	}
465 	if (rcu_torture_pipe_update_one(rp))
466 		rcu_torture_free(rp);
467 	else
468 		cur_ops->deferred_free(rp);
469 }
470 
471 static unsigned long rcu_no_completed(void)
472 {
473 	return 0;
474 }
475 
476 static void rcu_torture_deferred_free(struct rcu_torture *p)
477 {
478 	call_rcu(&p->rtort_rcu, rcu_torture_cb);
479 }
480 
481 static void rcu_sync_torture_init(void)
482 {
483 	INIT_LIST_HEAD(&rcu_torture_removed);
484 }
485 
486 static struct rcu_torture_ops rcu_ops = {
487 	.ttype			= RCU_FLAVOR,
488 	.init			= rcu_sync_torture_init,
489 	.readlock		= rcu_torture_read_lock,
490 	.read_delay		= rcu_read_delay,
491 	.readunlock		= rcu_torture_read_unlock,
492 	.readlock_held		= torture_readlock_not_held,
493 	.get_gp_seq		= rcu_get_gp_seq,
494 	.gp_diff		= rcu_seq_diff,
495 	.deferred_free		= rcu_torture_deferred_free,
496 	.sync			= synchronize_rcu,
497 	.exp_sync		= synchronize_rcu_expedited,
498 	.get_gp_state		= get_state_synchronize_rcu,
499 	.start_gp_poll		= start_poll_synchronize_rcu,
500 	.poll_gp_state		= poll_state_synchronize_rcu,
501 	.cond_sync		= cond_synchronize_rcu,
502 	.call			= call_rcu,
503 	.cb_barrier		= rcu_barrier,
504 	.fqs			= rcu_force_quiescent_state,
505 	.stats			= NULL,
506 	.gp_kthread_dbg		= show_rcu_gp_kthreads,
507 	.check_boost_failed	= rcu_check_boost_fail,
508 	.stall_dur		= rcu_jiffies_till_stall_check,
509 	.irq_capable		= 1,
510 	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
511 	.extendables		= RCUTORTURE_MAX_EXTEND,
512 	.name			= "rcu"
513 };
514 
515 /*
516  * Don't even think about trying any of these in real life!!!
517  * The names include "busted", and they really mean it!
518  * The only purpose of these functions is to provide a buggy RCU
519  * implementation to make sure that rcutorture correctly emits
520  * buggy-RCU error messages.
521  */
522 static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
523 {
524 	/* This is a deliberate bug for testing purposes only! */
525 	rcu_torture_cb(&p->rtort_rcu);
526 }
527 
528 static void synchronize_rcu_busted(void)
529 {
530 	/* This is a deliberate bug for testing purposes only! */
531 }
532 
533 static void
534 call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
535 {
536 	/* This is a deliberate bug for testing purposes only! */
537 	func(head);
538 }
539 
540 static struct rcu_torture_ops rcu_busted_ops = {
541 	.ttype		= INVALID_RCU_FLAVOR,
542 	.init		= rcu_sync_torture_init,
543 	.readlock	= rcu_torture_read_lock,
544 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
545 	.readunlock	= rcu_torture_read_unlock,
546 	.readlock_held	= torture_readlock_not_held,
547 	.get_gp_seq	= rcu_no_completed,
548 	.deferred_free	= rcu_busted_torture_deferred_free,
549 	.sync		= synchronize_rcu_busted,
550 	.exp_sync	= synchronize_rcu_busted,
551 	.call		= call_rcu_busted,
552 	.cb_barrier	= NULL,
553 	.fqs		= NULL,
554 	.stats		= NULL,
555 	.irq_capable	= 1,
556 	.name		= "busted"
557 };
558 
559 /*
560  * Definitions for srcu torture testing.
561  */
562 
563 DEFINE_STATIC_SRCU(srcu_ctl);
564 static struct srcu_struct srcu_ctld;
565 static struct srcu_struct *srcu_ctlp = &srcu_ctl;
566 
567 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
568 {
569 	return srcu_read_lock(srcu_ctlp);
570 }
571 
572 static void
573 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
574 {
575 	long delay;
576 	const long uspertick = 1000000 / HZ;
577 	const long longdelay = 10;
578 
579 	/* We want there to be long-running readers, but not all the time. */
580 
581 	delay = torture_random(rrsp) %
582 		(nrealreaders * 2 * longdelay * uspertick);
583 	if (!delay && in_task()) {
584 		schedule_timeout_interruptible(longdelay);
585 		rtrsp->rt_delay_jiffies = longdelay;
586 	} else {
587 		rcu_read_delay(rrsp, rtrsp);
588 	}
589 }
590 
591 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
592 {
593 	srcu_read_unlock(srcu_ctlp, idx);
594 }
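/*
 * Editorial sketch (not in the original file): unlike rcu_read_lock(),
 * srcu_read_lock() returns an index that must be passed back on unlock,
 * which is why the ops vector threads an int through readlock() and
 * readunlock():
 *
 *	int idx = srcu_read_lock(srcu_ctlp);
 *	// ... read-side critical section ...
 *	srcu_read_unlock(srcu_ctlp, idx);
 */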
595 
596 static int torture_srcu_read_lock_held(void)
597 {
598 	return srcu_read_lock_held(srcu_ctlp);
599 }
600 
601 static unsigned long srcu_torture_completed(void)
602 {
603 	return srcu_batches_completed(srcu_ctlp);
604 }
605 
606 static void srcu_torture_deferred_free(struct rcu_torture *rp)
607 {
608 	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
609 }
610 
611 static void srcu_torture_synchronize(void)
612 {
613 	synchronize_srcu(srcu_ctlp);
614 }
615 
616 static unsigned long srcu_torture_get_gp_state(void)
617 {
618 	return get_state_synchronize_srcu(srcu_ctlp);
619 }
620 
621 static unsigned long srcu_torture_start_gp_poll(void)
622 {
623 	return start_poll_synchronize_srcu(srcu_ctlp);
624 }
625 
626 static bool srcu_torture_poll_gp_state(unsigned long oldstate)
627 {
628 	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
629 }
630 
631 static void srcu_torture_call(struct rcu_head *head,
632 			      rcu_callback_t func)
633 {
634 	call_srcu(srcu_ctlp, head, func);
635 }
636 
637 static void srcu_torture_barrier(void)
638 {
639 	srcu_barrier(srcu_ctlp);
640 }
641 
642 static void srcu_torture_stats(void)
643 {
644 	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
645 }
646 
647 static void srcu_torture_synchronize_expedited(void)
648 {
649 	synchronize_srcu_expedited(srcu_ctlp);
650 }
651 
652 static struct rcu_torture_ops srcu_ops = {
653 	.ttype		= SRCU_FLAVOR,
654 	.init		= rcu_sync_torture_init,
655 	.readlock	= srcu_torture_read_lock,
656 	.read_delay	= srcu_read_delay,
657 	.readunlock	= srcu_torture_read_unlock,
658 	.readlock_held	= torture_srcu_read_lock_held,
659 	.get_gp_seq	= srcu_torture_completed,
660 	.deferred_free	= srcu_torture_deferred_free,
661 	.sync		= srcu_torture_synchronize,
662 	.exp_sync	= srcu_torture_synchronize_expedited,
663 	.get_gp_state	= srcu_torture_get_gp_state,
664 	.start_gp_poll	= srcu_torture_start_gp_poll,
665 	.poll_gp_state	= srcu_torture_poll_gp_state,
666 	.call		= srcu_torture_call,
667 	.cb_barrier	= srcu_torture_barrier,
668 	.stats		= srcu_torture_stats,
669 	.irq_capable	= 1,
670 	.name		= "srcu"
671 };
672 
673 static void srcu_torture_init(void)
674 {
675 	rcu_sync_torture_init();
676 	WARN_ON(init_srcu_struct(&srcu_ctld));
677 	srcu_ctlp = &srcu_ctld;
678 }
679 
680 static void srcu_torture_cleanup(void)
681 {
682 	cleanup_srcu_struct(&srcu_ctld);
683 	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
684 }
685 
686 /* As above, but dynamically allocated. */
687 static struct rcu_torture_ops srcud_ops = {
688 	.ttype		= SRCU_FLAVOR,
689 	.init		= srcu_torture_init,
690 	.cleanup	= srcu_torture_cleanup,
691 	.readlock	= srcu_torture_read_lock,
692 	.read_delay	= srcu_read_delay,
693 	.readunlock	= srcu_torture_read_unlock,
694 	.readlock_held	= torture_srcu_read_lock_held,
695 	.get_gp_seq	= srcu_torture_completed,
696 	.deferred_free	= srcu_torture_deferred_free,
697 	.sync		= srcu_torture_synchronize,
698 	.exp_sync	= srcu_torture_synchronize_expedited,
699 	.call		= srcu_torture_call,
700 	.cb_barrier	= srcu_torture_barrier,
701 	.stats		= srcu_torture_stats,
702 	.irq_capable	= 1,
703 	.name		= "srcud"
704 };
705 
706 /* As above, but broken due to inappropriate reader extension. */
707 static struct rcu_torture_ops busted_srcud_ops = {
708 	.ttype		= SRCU_FLAVOR,
709 	.init		= srcu_torture_init,
710 	.cleanup	= srcu_torture_cleanup,
711 	.readlock	= srcu_torture_read_lock,
712 	.read_delay	= rcu_read_delay,
713 	.readunlock	= srcu_torture_read_unlock,
714 	.readlock_held	= torture_srcu_read_lock_held,
715 	.get_gp_seq	= srcu_torture_completed,
716 	.deferred_free	= srcu_torture_deferred_free,
717 	.sync		= srcu_torture_synchronize,
718 	.exp_sync	= srcu_torture_synchronize_expedited,
719 	.call		= srcu_torture_call,
720 	.cb_barrier	= srcu_torture_barrier,
721 	.stats		= srcu_torture_stats,
722 	.irq_capable	= 1,
723 	.extendables	= RCUTORTURE_MAX_EXTEND,
724 	.name		= "busted_srcud"
725 };
726 
727 /*
728  * Definitions for RCU-tasks torture testing.
729  */
730 
731 static int tasks_torture_read_lock(void)
732 {
733 	return 0;
734 }
735 
736 static void tasks_torture_read_unlock(int idx)
737 {
738 }
739 
740 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
741 {
742 	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
743 }
744 
745 static void synchronize_rcu_mult_test(void)
746 {
747 	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
748 }
749 
750 static struct rcu_torture_ops tasks_ops = {
751 	.ttype		= RCU_TASKS_FLAVOR,
752 	.init		= rcu_sync_torture_init,
753 	.readlock	= tasks_torture_read_lock,
754 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
755 	.readunlock	= tasks_torture_read_unlock,
756 	.get_gp_seq	= rcu_no_completed,
757 	.deferred_free	= rcu_tasks_torture_deferred_free,
758 	.sync		= synchronize_rcu_tasks,
759 	.exp_sync	= synchronize_rcu_mult_test,
760 	.call		= call_rcu_tasks,
761 	.cb_barrier	= rcu_barrier_tasks,
762 	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
763 	.fqs		= NULL,
764 	.stats		= NULL,
765 	.irq_capable	= 1,
766 	.slow_gps	= 1,
767 	.name		= "tasks"
768 };
769 
770 /*
771  * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
772  * This implementation does not necessarily work well with CPU hotplug.
773  */
774 
775 static void synchronize_rcu_trivial(void)
776 {
777 	int cpu;
778 
779 	for_each_online_cpu(cpu) {
780 		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
781 		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
782 	}
783 }
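/*
 * Editorial note (not in the original file): binding the current task
 * to each online CPU in turn forces a context switch on every CPU, and
 * a context switch is a quiescent state for CONFIG_PREEMPTION=n RCU,
 * so a full grace period has elapsed by the time the loop finishes.
 */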
784 
785 static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
786 {
787 	preempt_disable();
788 	return 0;
789 }
790 
791 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
792 {
793 	preempt_enable();
794 }
795 
796 static struct rcu_torture_ops trivial_ops = {
797 	.ttype		= RCU_TRIVIAL_FLAVOR,
798 	.init		= rcu_sync_torture_init,
799 	.readlock	= rcu_torture_read_lock_trivial,
800 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
801 	.readunlock	= rcu_torture_read_unlock_trivial,
802 	.readlock_held	= torture_readlock_not_held,
803 	.get_gp_seq	= rcu_no_completed,
804 	.sync		= synchronize_rcu_trivial,
805 	.exp_sync	= synchronize_rcu_trivial,
806 	.fqs		= NULL,
807 	.stats		= NULL,
808 	.irq_capable	= 1,
809 	.name		= "trivial"
810 };
811 
812 /*
813  * Definitions for rude RCU-tasks torture testing.
814  */
815 
816 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
817 {
818 	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
819 }
820 
821 static struct rcu_torture_ops tasks_rude_ops = {
822 	.ttype		= RCU_TASKS_RUDE_FLAVOR,
823 	.init		= rcu_sync_torture_init,
824 	.readlock	= rcu_torture_read_lock_trivial,
825 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
826 	.readunlock	= rcu_torture_read_unlock_trivial,
827 	.get_gp_seq	= rcu_no_completed,
828 	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
829 	.sync		= synchronize_rcu_tasks_rude,
830 	.exp_sync	= synchronize_rcu_tasks_rude,
831 	.call		= call_rcu_tasks_rude,
832 	.cb_barrier	= rcu_barrier_tasks_rude,
833 	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
834 	.fqs		= NULL,
835 	.stats		= NULL,
836 	.irq_capable	= 1,
837 	.name		= "tasks-rude"
838 };
839 
840 /*
841  * Definitions for tracing RCU-tasks torture testing.
842  */
843 
844 static int tasks_tracing_torture_read_lock(void)
845 {
846 	rcu_read_lock_trace();
847 	return 0;
848 }
849 
850 static void tasks_tracing_torture_read_unlock(int idx)
851 {
852 	rcu_read_unlock_trace();
853 }
854 
855 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
856 {
857 	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
858 }
859 
860 static struct rcu_torture_ops tasks_tracing_ops = {
861 	.ttype		= RCU_TASKS_TRACING_FLAVOR,
862 	.init		= rcu_sync_torture_init,
863 	.readlock	= tasks_tracing_torture_read_lock,
864 	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
865 	.readunlock	= tasks_tracing_torture_read_unlock,
866 	.readlock_held	= rcu_read_lock_trace_held,
867 	.get_gp_seq	= rcu_no_completed,
868 	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
869 	.sync		= synchronize_rcu_tasks_trace,
870 	.exp_sync	= synchronize_rcu_tasks_trace,
871 	.call		= call_rcu_tasks_trace,
872 	.cb_barrier	= rcu_barrier_tasks_trace,
873 	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
874 	.fqs		= NULL,
875 	.stats		= NULL,
876 	.irq_capable	= 1,
877 	.slow_gps	= 1,
878 	.name		= "tasks-tracing"
879 };
880 
881 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
882 {
883 	if (!cur_ops->gp_diff)
884 		return new - old;
885 	return cur_ops->gp_diff(new, old);
886 }
887 
888 /*
889  * RCU torture priority-boost testing.  Runs one real-time thread per
890  * CPU for moderate bursts, repeatedly starting grace periods and waiting
891  * for them to complete.  If a given grace period takes too long, we assume
892  * that priority inversion has occurred.
893  */
894 
895 static int old_rt_runtime = -1;
896 
897 static void rcu_torture_disable_rt_throttle(void)
898 {
899 	/*
900 	 * Disable RT throttling so that rcutorture's boost threads don't get
901 	 * throttled. This is possible only if rcutorture is built-in;
902 	 * otherwise, the user must do this manually by setting the
903 	 * sched_rt_period_us and sched_rt_runtime sysctls.
904 	 */
905 	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
906 		return;
907 
908 	old_rt_runtime = sysctl_sched_rt_runtime;
909 	sysctl_sched_rt_runtime = -1;
910 }
911 
912 static void rcu_torture_enable_rt_throttle(void)
913 {
914 	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
915 		return;
916 
917 	sysctl_sched_rt_runtime = old_rt_runtime;
918 	old_rt_runtime = -1;
919 }
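/*
 * Editorial note (not in the original file): for the modular case, the
 * equivalent manual step is presumably something like:
 *
 *	# echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 */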
920 
921 static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
922 {
923 	int cpu;
924 	static int dbg_done;
925 	unsigned long end = jiffies;
926 	bool gp_done;
927 	unsigned long j;
928 	static unsigned long last_persist;
929 	unsigned long lp;
930 	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;
931 
932 	if (end - *start > mininterval) {
933 		// Recheck after checking time to avoid false positives.
934 		smp_mb(); // Time check before grace-period check.
935 		if (cur_ops->poll_gp_state(gp_state))
936 			return false; // passed, though perhaps just barely
937 		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
938 			// At most one persisted message per boost test.
939 			j = jiffies;
940 			lp = READ_ONCE(last_persist);
941 			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
942 				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
943 			return false; // passed on a technicality
944 		}
945 		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
946 		n_rcu_torture_boost_failure++;
947 		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
948 			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
949 				current->rt_priority, gp_state, end - *start);
950 			cur_ops->gp_kthread_dbg();
951 			// Recheck after print to flag grace period ending during splat.
952 			gp_done = cur_ops->poll_gp_state(gp_state);
953 			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
954 				gp_done ? "ended already" : "still pending");
955 
956 		}
957 
958 		return true; // failed
959 	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
960 		*start = jiffies;
961 	}
962 
963 	return false; // passed
964 }
965 
966 static int rcu_torture_boost(void *arg)
967 {
968 	unsigned long endtime;
969 	unsigned long gp_state;
970 	unsigned long gp_state_time;
971 	unsigned long oldstarttime;
972 
973 	VERBOSE_TOROUT_STRING("rcu_torture_boost started");
974 
975 	/* Set real-time priority. */
976 	sched_set_fifo_low(current);
977 
978 	/* Each pass through the following loop does one boost-test cycle. */
979 	do {
980 		bool failed = false; // Test failed already in this test interval
981 		bool gp_initiated = false;
982 
983 		if (kthread_should_stop())
984 			goto checkwait;
985 
986 		/* Wait for the next test interval. */
987 		oldstarttime = boost_starttime;
988 		while (time_before(jiffies, oldstarttime)) {
989 			schedule_timeout_interruptible(oldstarttime - jiffies);
990 			if (stutter_wait("rcu_torture_boost"))
991 				sched_set_fifo_low(current);
992 			if (torture_must_stop())
993 				goto checkwait;
994 		}
995 
996 		// Do one boost-test interval.
997 		endtime = oldstarttime + test_boost_duration * HZ;
998 		while (time_before(jiffies, endtime)) {
999 			// Has current GP gone too long?
1000 			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1001 				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
1002 			// If we don't have a grace period in flight, start one.
1003 			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
1004 				gp_state = cur_ops->start_gp_poll();
1005 				gp_initiated = true;
1006 				gp_state_time = jiffies;
1007 			}
1008 			if (stutter_wait("rcu_torture_boost")) {
1009 				sched_set_fifo_low(current);
1010 				// If the grace period already ended,
1011 				// we don't know when that happened, so
1012 				// start over.
1013 				if (cur_ops->poll_gp_state(gp_state))
1014 					gp_initiated = false;
1015 			}
1016 			if (torture_must_stop())
1017 				goto checkwait;
1018 		}
1019 
1020 		// In case the grace period extended beyond the end of the loop.
1021 		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1022 			rcu_torture_boost_failed(gp_state, &gp_state_time);
1023 
1024 		/*
1025 		 * Set the start time of the next test interval.
1026 		 * Yes, this is vulnerable to long delays, but such
1027 		 * delays simply cause a false negative for the next
1028 		 * interval.  Besides, we are running at RT priority,
1029 		 * so delays should be relatively rare.
1030 		 */
1031 		while (oldstarttime == boost_starttime && !kthread_should_stop()) {
1032 			if (mutex_trylock(&boost_mutex)) {
1033 				if (oldstarttime == boost_starttime) {
1034 					boost_starttime = jiffies + test_boost_interval * HZ;
1035 					n_rcu_torture_boosts++;
1036 				}
1037 				mutex_unlock(&boost_mutex);
1038 				break;
1039 			}
1040 			schedule_timeout_uninterruptible(1);
1041 		}
1042 
1043 		/* Go do the stutter. */
1044 checkwait:	if (stutter_wait("rcu_torture_boost"))
1045 			sched_set_fifo_low(current);
1046 	} while (!torture_must_stop());
1047 
1048 	/* Clean up and exit. */
1049 	while (!kthread_should_stop()) {
1050 		torture_shutdown_absorb("rcu_torture_boost");
1051 		schedule_timeout_uninterruptible(1);
1052 	}
1053 	torture_kthread_stopping("rcu_torture_boost");
1054 	return 0;
1055 }
1056 
1057 /*
1058  * RCU torture force-quiescent-state kthread.  Repeatedly induces
1059  * bursts of calls to force_quiescent_state(), increasing the probability
1060  * of occurrence of some important types of race conditions.
1061  */
1062 static int
1063 rcu_torture_fqs(void *arg)
1064 {
1065 	unsigned long fqs_resume_time;
1066 	int fqs_burst_remaining;
1067 	int oldnice = task_nice(current);
1068 
1069 	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
1070 	do {
1071 		fqs_resume_time = jiffies + fqs_stutter * HZ;
1072 		while (time_before(jiffies, fqs_resume_time) &&
1073 		       !kthread_should_stop()) {
1074 			schedule_timeout_interruptible(1);
1075 		}
1076 		fqs_burst_remaining = fqs_duration;
1077 		while (fqs_burst_remaining > 0 &&
1078 		       !kthread_should_stop()) {
1079 			cur_ops->fqs();
1080 			udelay(fqs_holdoff);
1081 			fqs_burst_remaining -= fqs_holdoff;
1082 		}
1083 		if (stutter_wait("rcu_torture_fqs"))
1084 			sched_set_normal(current, oldnice);
1085 	} while (!torture_must_stop());
1086 	torture_kthread_stopping("rcu_torture_fqs");
1087 	return 0;
1088 }
1089 
1090 // Used by writers to randomly choose from the available grace-period
1091 // primitives.  The only purpose of the initialization is to size the array.
1092 static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC };
1093 static int nsynctypes;
1094 
1095 /*
1096  * Determine which grace-period primitives are available.
1097  */
1098 static void rcu_torture_write_types(void)
1099 {
1100 	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
1101 	bool gp_poll1 = gp_poll, gp_sync1 = gp_sync;
1102 
1103 	/* Initialize synctype[] array.  If none set, take default. */
1104 	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1)
1105 		gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true;
1106 	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
1107 		synctype[nsynctypes++] = RTWS_COND_GET;
1108 		pr_info("%s: Testing conditional GPs.\n", __func__);
1109 	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
1110 		pr_alert("%s: gp_cond without primitives.\n", __func__);
1111 	}
1112 	if (gp_exp1 && cur_ops->exp_sync) {
1113 		synctype[nsynctypes++] = RTWS_EXP_SYNC;
1114 		pr_info("%s: Testing expedited GPs.\n", __func__);
1115 	} else if (gp_exp && !cur_ops->exp_sync) {
1116 		pr_alert("%s: gp_exp without primitives.\n", __func__);
1117 	}
1118 	if (gp_normal1 && cur_ops->deferred_free) {
1119 		synctype[nsynctypes++] = RTWS_DEF_FREE;
1120 		pr_info("%s: Testing asynchronous GPs.\n", __func__);
1121 	} else if (gp_normal && !cur_ops->deferred_free) {
1122 		pr_alert("%s: gp_normal without primitives.\n", __func__);
1123 	}
1124 	if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
1125 		synctype[nsynctypes++] = RTWS_POLL_GET;
1126 		pr_info("%s: Testing polling GPs.\n", __func__);
1127 	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
1128 		pr_alert("%s: gp_poll without primitives.\n", __func__);
1129 	}
1130 	if (gp_sync1 && cur_ops->sync) {
1131 		synctype[nsynctypes++] = RTWS_SYNC;
1132 		pr_info("%s: Testing normal GPs.\n", __func__);
1133 	} else if (gp_sync && !cur_ops->sync) {
1134 		pr_alert("%s: gp_sync without primitives.\n", __func__);
1135 	}
1136 }
1137 
1138 /*
1139  * RCU torture writer kthread.  Repeatedly substitutes a new structure
1140  * for that pointed to by rcu_torture_current, freeing the old structure
1141  * after a series of grace periods (the "pipeline").
1142  */
1143 static int
1144 rcu_torture_writer(void *arg)
1145 {
1146 	bool boot_ended;
1147 	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
1148 	unsigned long cookie;
1149 	int expediting = 0;
1150 	unsigned long gp_snap;
1151 	int i;
1152 	int idx;
1153 	int oldnice = task_nice(current);
1154 	struct rcu_torture *rp;
1155 	struct rcu_torture *old_rp;
1156 	static DEFINE_TORTURE_RANDOM(rand);
1157 	bool stutter_waited;
1158 
1159 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1160 	if (!can_expedite)
1161 		pr_alert("%s" TORTURE_FLAG
1162 			 " GP expediting controlled from boot/sysfs for %s.\n",
1163 			 torture_type, cur_ops->name);
1164 	if (WARN_ONCE(nsynctypes == 0,
1165 		      "rcu_torture_writer: No update-side primitives.\n")) {
1166 		/*
1167 		 * No update-side primitives, so don't try updating.
1168 		 * The resulting test won't be testing much, hence the
1169 		 * above WARN_ONCE().
1170 		 */
1171 		rcu_torture_writer_state = RTWS_STOPPING;
1172 		torture_kthread_stopping("rcu_torture_writer");
1173 	}
1174 
1175 	do {
1176 		rcu_torture_writer_state = RTWS_FIXED_DELAY;
1177 		torture_hrtimeout_us(500, 1000, &rand);
1178 		rp = rcu_torture_alloc();
1179 		if (rp == NULL)
1180 			continue;
1181 		rp->rtort_pipe_count = 0;
1182 		rcu_torture_writer_state = RTWS_DELAY;
1183 		udelay(torture_random(&rand) & 0x3ff);
1184 		rcu_torture_writer_state = RTWS_REPLACE;
1185 		old_rp = rcu_dereference_check(rcu_torture_current,
1186 					       current == writer_task);
1187 		rp->rtort_mbtest = 1;
1188 		rcu_assign_pointer(rcu_torture_current, rp);
1189 		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1190 		if (old_rp) {
1191 			i = old_rp->rtort_pipe_count;
1192 			if (i > RCU_TORTURE_PIPE_LEN)
1193 				i = RCU_TORTURE_PIPE_LEN;
1194 			atomic_inc(&rcu_torture_wcount[i]);
1195 			WRITE_ONCE(old_rp->rtort_pipe_count,
1196 				   old_rp->rtort_pipe_count + 1);
1197 			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1198 				idx = cur_ops->readlock();
1199 				cookie = cur_ops->get_gp_state();
1200 				WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
1201 					  cur_ops->poll_gp_state(cookie),
1202 					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1203 					  __func__,
1204 					  rcu_torture_writer_state_getname(),
1205 					  rcu_torture_writer_state,
1206 					  cookie, cur_ops->get_gp_state());
1207 				cur_ops->readunlock(idx);
1208 			}
1209 			switch (synctype[torture_random(&rand) % nsynctypes]) {
1210 			case RTWS_DEF_FREE:
1211 				rcu_torture_writer_state = RTWS_DEF_FREE;
1212 				cur_ops->deferred_free(old_rp);
1213 				break;
1214 			case RTWS_EXP_SYNC:
1215 				rcu_torture_writer_state = RTWS_EXP_SYNC;
1216 				cur_ops->exp_sync();
1217 				rcu_torture_pipe_update(old_rp);
1218 				break;
1219 			case RTWS_COND_GET:
1220 				rcu_torture_writer_state = RTWS_COND_GET;
1221 				gp_snap = cur_ops->get_gp_state();
1222 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1223 				rcu_torture_writer_state = RTWS_COND_SYNC;
1224 				cur_ops->cond_sync(gp_snap);
1225 				rcu_torture_pipe_update(old_rp);
1226 				break;
1227 			case RTWS_POLL_GET:
1228 				rcu_torture_writer_state = RTWS_POLL_GET;
1229 				gp_snap = cur_ops->start_gp_poll();
1230 				rcu_torture_writer_state = RTWS_POLL_WAIT;
1231 				while (!cur_ops->poll_gp_state(gp_snap))
1232 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1233 								  &rand);
1234 				rcu_torture_pipe_update(old_rp);
1235 				break;
1236 			case RTWS_SYNC:
1237 				rcu_torture_writer_state = RTWS_SYNC;
1238 				cur_ops->sync();
1239 				rcu_torture_pipe_update(old_rp);
1240 				break;
1241 			default:
1242 				WARN_ON_ONCE(1);
1243 				break;
1244 			}
1245 		}
1246 		WRITE_ONCE(rcu_torture_current_version,
1247 			   rcu_torture_current_version + 1);
1248 		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
1249 		if (can_expedite &&
1250 		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1251 			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1252 			if (expediting >= 0)
1253 				rcu_expedite_gp();
1254 			else
1255 				rcu_unexpedite_gp();
1256 			if (++expediting > 3)
1257 				expediting = -expediting;
1258 		} else if (!can_expedite) { /* Disabled during boot, recheck. */
1259 			can_expedite = !rcu_gp_is_expedited() &&
1260 				       !rcu_gp_is_normal();
1261 		}
1262 		rcu_torture_writer_state = RTWS_STUTTER;
1263 		boot_ended = rcu_inkernel_boot_has_ended();
1264 		stutter_waited = stutter_wait("rcu_torture_writer");
1265 		if (stutter_waited &&
1266 		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
1267 		    !cur_ops->slow_gps &&
1268 		    !torture_must_stop() &&
1269 		    boot_ended)
1270 			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1271 				if (list_empty(&rcu_tortures[i].rtort_free) &&
1272 				    rcu_access_pointer(rcu_torture_current) !=
1273 				    &rcu_tortures[i]) {
1274 					rcu_ftrace_dump(DUMP_ALL);
1275 					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1276 				}
1277 		if (stutter_waited)
1278 			sched_set_normal(current, oldnice);
1279 	} while (!torture_must_stop());
1280 	rcu_torture_current = NULL;  // Let stats task know that we are done.
1281 	/* Reset expediting back to unexpedited. */
1282 	if (expediting > 0)
1283 		expediting = -expediting;
1284 	while (can_expedite && expediting++ < 0)
1285 		rcu_unexpedite_gp();
1286 	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1287 	if (!can_expedite)
1288 		pr_alert("%s" TORTURE_FLAG
1289 			 " Dynamic grace-period expediting was disabled.\n",
1290 			 torture_type);
1291 	rcu_torture_writer_state = RTWS_STOPPING;
1292 	torture_kthread_stopping("rcu_torture_writer");
1293 	return 0;
1294 }
1295 
1296 /*
1297  * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
1298  * delay between calls.
1299  */
1300 static int
1301 rcu_torture_fakewriter(void *arg)
1302 {
1303 	unsigned long gp_snap;
1304 	DEFINE_TORTURE_RANDOM(rand);
1305 
1306 	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1307 	set_user_nice(current, MAX_NICE);
1308 
1309 	do {
1310 		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
1311 		if (cur_ops->cb_barrier != NULL &&
1312 		    torture_random(&rand) % (nfakewriters * 8) == 0) {
1313 			cur_ops->cb_barrier();
1314 		} else {
1315 			switch (synctype[torture_random(&rand) % nsynctypes]) {
1316 			case RTWS_DEF_FREE:
1317 				break;
1318 			case RTWS_EXP_SYNC:
1319 				cur_ops->exp_sync();
1320 				break;
1321 			case RTWS_COND_GET:
1322 				gp_snap = cur_ops->get_gp_state();
1323 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1324 				cur_ops->cond_sync(gp_snap);
1325 				break;
1326 			case RTWS_POLL_GET:
1327 				gp_snap = cur_ops->start_gp_poll();
1328 				while (!cur_ops->poll_gp_state(gp_snap)) {
1329 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1330 								  &rand);
1331 				}
1332 				break;
1333 			case RTWS_SYNC:
1334 				cur_ops->sync();
1335 				break;
1336 			default:
1337 				WARN_ON_ONCE(1);
1338 				break;
1339 			}
1340 		}
1341 		stutter_wait("rcu_torture_fakewriter");
1342 	} while (!torture_must_stop());
1343 
1344 	torture_kthread_stopping("rcu_torture_fakewriter");
1345 	return 0;
1346 }
1347 
1348 static void rcu_torture_timer_cb(struct rcu_head *rhp)
1349 {
1350 	kfree(rhp);
1351 }
1352 
1353 // Set up and carry out testing of RCU's global memory ordering
1354 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1355 					struct torture_random_state *trsp)
1356 {
1357 	unsigned long loops;
1358 	int noc = torture_num_online_cpus();
1359 	int rdrchked;
1360 	int rdrchker;
1361 	struct rcu_torture_reader_check *rtrcp; // Me.
1362 	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
1363 	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
1364 	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
1365 
1366 	if (myid < 0)
1367 		return; // Don't try this from timer handlers.
1368 
1369 	// Increment my counter.
1370 	rtrcp = &rcu_torture_reader_mbchk[myid];
1371 	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1372 
1373 	// Attempt to assign someone else some checking work.
1374 	rdrchked = torture_random(trsp) % nrealreaders;
1375 	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1376 	rdrchker = torture_random(trsp) % nrealreaders;
1377 	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1378 	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1379 	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1380 	    !READ_ONCE(rtp->rtort_chkp) &&
1381 	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1382 		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1383 		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1384 		rtrcp->rtc_chkrdr = rdrchked;
1385 		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1386 		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1387 		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1388 			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1389 	}
1390 
1391 	// If assigned some completed work, do it!
1392 	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1393 	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1394 		return; // No work or work not yet ready.
1395 	rdrchked = rtrcp_assigner->rtc_chkrdr;
1396 	if (WARN_ON_ONCE(rdrchked < 0))
1397 		return;
1398 	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1399 	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1400 	atomic_inc(&n_rcu_torture_mbchk_tries);
1401 	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1402 		atomic_inc(&n_rcu_torture_mbchk_fail);
1403 	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1404 	rtrcp_assigner->rtc_ready = 0;
1405 	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1406 	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1407 }
1408 
1409 /*
1410  * Do one extension of an RCU read-side critical section using the
1411  * current reader state in readstate (set to zero for initial entry
1412  * to extended critical section), set the new state as specified by
1413  * newstate (set to zero for final exit from extended critical section),
1414  * and random-number-generator state in trsp.  If this is neither the
1415  * beginning or end of the critical section and if there was actually a
1416  * change, do a ->read_delay().
1417  */
1418 static void rcutorture_one_extend(int *readstate, int newstate,
1419 				  struct torture_random_state *trsp,
1420 				  struct rt_read_seg *rtrsp)
1421 {
1422 	unsigned long flags;
1423 	int idxnew = -1;
1424 	int idxold = *readstate;
1425 	int statesnew = ~*readstate & newstate;
1426 	int statesold = *readstate & ~newstate;
1427 
1428 	WARN_ON_ONCE(idxold < 0);
1429 	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
1430 	rtrsp->rt_readstate = newstate;
1431 
1432 	/* First, put new protection in place to avoid critical-section gap. */
1433 	if (statesnew & RCUTORTURE_RDR_BH)
1434 		local_bh_disable();
1435 	if (statesnew & RCUTORTURE_RDR_RBH)
1436 		rcu_read_lock_bh();
1437 	if (statesnew & RCUTORTURE_RDR_IRQ)
1438 		local_irq_disable();
1439 	if (statesnew & RCUTORTURE_RDR_PREEMPT)
1440 		preempt_disable();
1441 	if (statesnew & RCUTORTURE_RDR_SCHED)
1442 		rcu_read_lock_sched();
1443 	if (statesnew & RCUTORTURE_RDR_RCU)
1444 		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1445 
1446 	/*
1447 	 * Next, remove old protection, in decreasing order of strength
1448 	 * to avoid unlock paths that aren't safe in the stronger
1449 	 * context. Namely: BH cannot be re-enabled while interrupts are
1450 	 * disabled. Additionally, PREEMPT_RT requires that BH be enabled
1451 	 * in preemptible context.
1452 	 */
1453 	if (statesold & RCUTORTURE_RDR_IRQ)
1454 		local_irq_enable();
1455 	if (statesold & RCUTORTURE_RDR_PREEMPT)
1456 		preempt_enable();
1457 	if (statesold & RCUTORTURE_RDR_SCHED)
1458 		rcu_read_unlock_sched();
1459 	if (statesold & RCUTORTURE_RDR_BH)
1460 		local_bh_enable();
1461 	if (statesold & RCUTORTURE_RDR_RBH)
1462 		rcu_read_unlock_bh();
1463 	if (statesold & RCUTORTURE_RDR_RCU) {
1464 		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
1465 
1466 		if (lockit)
1467 			raw_spin_lock_irqsave(&current->pi_lock, flags);
1468 		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
1469 		if (lockit)
1470 			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1471 	}
1472 
1473 	/* Delay if neither beginning nor end and there was a change. */
1474 	if ((statesnew || statesold) && *readstate && newstate)
1475 		cur_ops->read_delay(trsp, rtrsp);
1476 
1477 	/* Update the reader state. */
1478 	if (idxnew == -1)
1479 		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1480 	WARN_ON_ONCE(idxnew < 0);
1481 	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1482 	*readstate = idxnew | newstate;
1483 	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1484 	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1485 }
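/*
 * Editorial sketch (not in the original file): a reader enters, extends,
 * and finally exits a critical section via successive calls (in real
 * usage each step records into its own rt_read_seg):
 *
 *	int rs = 0;
 *
 *	rcutorture_one_extend(&rs, RCUTORTURE_RDR_RCU, trsp, rtrsp);	// enter
 *	rcutorture_one_extend(&rs, RCUTORTURE_RDR_RCU |
 *				   RCUTORTURE_RDR_BH, trsp, rtrsp);	// add bh
 *	rcutorture_one_extend(&rs, 0, trsp, rtrsp);			// exit
 */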
1486 
1487 /* Return the biggest extendables mask given current RCU and boot parameters. */
1488 static int rcutorture_extend_mask_max(void)
1489 {
1490 	int mask;
1491 
1492 	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1493 	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1494 	mask = mask | RCUTORTURE_RDR_RCU;
1495 	return mask;
1496 }
1497 
1498 /* Return a random protection state mask, but with at least one bit set. */
1499 static int
1500 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1501 {
1502 	int mask = rcutorture_extend_mask_max();
1503 	unsigned long randmask1 = torture_random(trsp) >> 8;
1504 	unsigned long randmask2 = randmask1 >> 3;
1505 	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1506 	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
1507 	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1508 
1509 	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
1510 	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
1511 	if (!(randmask1 & 0x7))
1512 		mask = mask & randmask2;
1513 	else
1514 		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1515 
1516 	/*
1517 	 * Can't enable bh w/irq disabled.
1518 	 */
1519 	if (mask & RCUTORTURE_RDR_IRQ)
1520 		mask |= oldmask & bhs;
1521 
1522 	/*
1523 	 * Ideally these sequences would be detected in debug builds
1524 	 * (regardless of RT), but until then don't stop testing
1525 	 * them on non-RT.
1526 	 */
1527 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1528 		/* Can't modify BH in atomic context */
1529 		if (oldmask & preempts_irq)
1530 			mask &= ~bhs;
1531 		if ((oldmask | mask) & preempts_irq)
1532 			mask |= oldmask & bhs;
1533 	}
1534 
1535 	return mask ?: RCUTORTURE_RDR_RCU;
1536 }
1537 
1538 /*
1539  * Do a randomly selected number of extensions of an existing RCU read-side
1540  * critical section.
1541  */
1542 static struct rt_read_seg *
1543 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1544 		       struct rt_read_seg *rtrsp)
1545 {
1546 	int i;
1547 	int j;
1548 	int mask = rcutorture_extend_mask_max();
1549 
1550 	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1551 	if (!((mask - 1) & mask))
1552 		return rtrsp;  /* Current RCU reader not extendable. */
1553 	/* Bias towards larger numbers of loops. */
1554 	i = (torture_random(trsp) >> 3);
1555 	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1556 	for (j = 0; j < i; j++) {
1557 		mask = rcutorture_extend_mask(*readstate, trsp);
1558 		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1559 	}
1560 	return &rtrsp[j];
1561 }
1562 
1563 /*
1564  * Do one read-side critical section, returning false if there was
1565  * no data to read.  Can be invoked both from process context and
1566  * from a timer handler.
1567  */
1568 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
1569 {
1570 	unsigned long cookie;
1571 	int i;
1572 	unsigned long started;
1573 	unsigned long completed;
1574 	int newstate;
1575 	struct rcu_torture *p;
1576 	int pipe_count;
1577 	int readstate = 0;
1578 	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1579 	struct rt_read_seg *rtrsp = &rtseg[0];
1580 	struct rt_read_seg *rtrsp1;
1581 	unsigned long long ts;
1582 
1583 	WARN_ON_ONCE(!rcu_is_watching());
1584 	newstate = rcutorture_extend_mask(readstate, trsp);
1585 	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1586 	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1587 		cookie = cur_ops->get_gp_state();
1588 	started = cur_ops->get_gp_seq();
1589 	ts = rcu_trace_clock_local();
1590 	p = rcu_dereference_check(rcu_torture_current,
1591 				  !cur_ops->readlock_held || cur_ops->readlock_held());
1592 	if (p == NULL) {
1593 		/* Wait for rcu_torture_writer to get underway */
1594 		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1595 		return false;
1596 	}
1597 	if (p->rtort_mbtest == 0)
1598 		atomic_inc(&n_rcu_torture_mberror);
1599 	rcu_torture_reader_do_mbchk(myid, p, trsp);
1600 	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1601 	preempt_disable();
1602 	pipe_count = READ_ONCE(p->rtort_pipe_count);
1603 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1604 		/* Should not happen, but... */
1605 		pipe_count = RCU_TORTURE_PIPE_LEN;
1606 	}
1607 	completed = cur_ops->get_gp_seq();
1608 	if (pipe_count > 1) {
1609 		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1610 					  ts, started, completed);
1611 		rcu_ftrace_dump(DUMP_ALL);
1612 	}
1613 	__this_cpu_inc(rcu_torture_count[pipe_count]);
1614 	completed = rcutorture_seq_diff(completed, started);
1615 	if (completed > RCU_TORTURE_PIPE_LEN) {
1616 		/* Should not happen, but... */
1617 		completed = RCU_TORTURE_PIPE_LEN;
1618 	}
1619 	__this_cpu_inc(rcu_torture_batch[completed]);
1620 	preempt_enable();
1621 	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1622 		WARN_ONCE(cur_ops->poll_gp_state(cookie),
1623 			  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
1624 			  __func__,
1625 			  rcu_torture_writer_state_getname(),
1626 			  rcu_torture_writer_state,
1627 			  cookie, cur_ops->get_gp_state());
1628 	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1629 	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
1630 	// This next splat is expected behavior if the leakpointer module
1631 	// parameter is set, especially for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
1632 	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
1633 
1634 	/* If error or close call, record the sequence of reader protections. */
1635 	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1636 		i = 0;
1637 		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1638 			err_segs[i++] = *rtrsp1;
1639 		rt_read_nsegs = i;
1640 	}
1641 
1642 	return true;
1643 }
1644 
1645 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1646 
1647 /*
1648  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
1649  * incrementing the corresponding element of the pipeline array.  The
1650  * counter in the element should never be greater than 1; otherwise, the
1651  * RCU implementation is broken.
1652  */
1653 static void rcu_torture_timer(struct timer_list *unused)
1654 {
1655 	atomic_long_inc(&n_rcu_torture_timers);
1656 	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
1657 
1658 	/* Test call_rcu() invocation from interrupt handler. */
1659 	if (cur_ops->call) {
1660 		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1661 
1662 		if (rhp)
1663 			cur_ops->call(rhp, rcu_torture_timer_cb);
1664 	}
1665 }
1666 
1667 /*
1668  * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
1669  * incrementing the corresponding element of the pipeline array.  The
1670  * counter in the element should never be greater than 1; otherwise, the
1671  * RCU implementation is broken.
1672  */
1673 static int
1674 rcu_torture_reader(void *arg)
1675 {
1676 	unsigned long lastsleep = jiffies;
1677 	long myid = (long)arg;
1678 	int mynumonline = myid;
1679 	DEFINE_TORTURE_RANDOM(rand);
1680 	struct timer_list t;
1681 
1682 	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1683 	set_user_nice(current, MAX_NICE);
1684 	if (irqreader && cur_ops->irq_capable)
1685 		timer_setup_on_stack(&t, rcu_torture_timer, 0);
1686 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
1687 	do {
1688 		if (irqreader && cur_ops->irq_capable) {
1689 			if (!timer_pending(&t))
1690 				mod_timer(&t, jiffies + 1);
1691 		}
1692 		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
1693 			schedule_timeout_interruptible(HZ);
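		/*
		 * Sleep briefly at least once every ten jiffies so that
		 * this reader kthread does not indefinitely monopolize
		 * its CPU.
		 */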
1694 		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
1695 			torture_hrtimeout_us(500, 1000, &rand);
1696 			lastsleep = jiffies + 10;
1697 		}
1698 		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
1699 			schedule_timeout_interruptible(HZ / 5);
1700 		stutter_wait("rcu_torture_reader");
1701 	} while (!torture_must_stop());
1702 	if (irqreader && cur_ops->irq_capable) {
1703 		del_timer_sync(&t);
1704 		destroy_timer_on_stack(&t);
1705 	}
1706 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
1707 	torture_kthread_stopping("rcu_torture_reader");
1708 	return 0;
1709 }
1710 
1711 /*
1712  * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
1713  * increase race probabilities and fuzzes the interval between toggles.
1714  */
1715 static int rcu_nocb_toggle(void *arg)
1716 {
1717 	int cpu;
1718 	int maxcpu = -1;
1719 	int oldnice = task_nice(current);
1720 	long r;
1721 	DEFINE_TORTURE_RANDOM(rand);
1722 	ktime_t toggle_delay;
1723 	unsigned long toggle_fuzz;
1724 	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
1725 
1726 	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
1727 	while (!rcu_inkernel_boot_has_ended())
1728 		schedule_timeout_interruptible(HZ / 10);
1729 	for_each_online_cpu(cpu)
1730 		maxcpu = cpu;
1731 	WARN_ON(maxcpu < 0);
1732 	if (toggle_interval > ULONG_MAX)
1733 		toggle_fuzz = ULONG_MAX >> 3;
1734 	else
1735 		toggle_fuzz = toggle_interval >> 3;
1736 	if (toggle_fuzz <= 0)
1737 		toggle_fuzz = NSEC_PER_USEC;
1738 	do {
1739 		r = torture_random(&rand);
1740 		cpu = (r >> 4) % (maxcpu + 1);
1741 		if (r & 0x1) {
1742 			rcu_nocb_cpu_offload(cpu);
1743 			atomic_long_inc(&n_nocb_offload);
1744 		} else {
1745 			rcu_nocb_cpu_deoffload(cpu);
1746 			atomic_long_inc(&n_nocb_deoffload);
1747 		}
1748 		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
1749 		set_current_state(TASK_INTERRUPTIBLE);
1750 		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
1751 		if (stutter_wait("rcu_nocb_toggle"))
1752 			sched_set_normal(current, oldnice);
1753 	} while (!torture_must_stop());
1754 	torture_kthread_stopping("rcu_nocb_toggle");
1755 	return 0;
1756 }
1757 
1758 /*
1759  * Print torture statistics.  The caller must ensure that there is only
1760  * one call to this function at a given time!!!  This is normally
1761  * accomplished by relying on the module system to have only one copy
1762  * of the module loaded, and then by giving the rcu_torture_stats
1763  * kthread full control (or the init/cleanup functions when the
1764  * rcu_torture_stats kthread is not running).
1765  */
1766 static void
1767 rcu_torture_stats_print(void)
1768 {
1769 	int cpu;
1770 	int i;
1771 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1772 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1773 	struct rcu_torture *rtcp;
1774 	static unsigned long rtcv_snap = ULONG_MAX;
1775 	static bool splatted;
1776 	struct task_struct *wtp;
1777 
1778 	for_each_possible_cpu(cpu) {
1779 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1780 			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
1781 			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
1782 		}
1783 	}
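	/*
	 * Locate the highest-numbered non-empty pipeline bucket.  Buckets
	 * 0 and 1 are expected; a nonzero count in any later bucket means
	 * that some reader saw data that had passed through more than one
	 * grace period, that is, a too-short grace period.
	 */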
1784 	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1785 		if (pipesummary[i] != 0)
1786 			break;
1787 	}
1788 
1789 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1790 	rtcp = rcu_access_pointer(rcu_torture_current);
1791 	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1792 		rtcp,
1793 		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
1794 		rcu_torture_current_version,
1795 		list_empty(&rcu_torture_freelist),
1796 		atomic_read(&n_rcu_torture_alloc),
1797 		atomic_read(&n_rcu_torture_alloc_fail),
1798 		atomic_read(&n_rcu_torture_free));
1799 	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
1800 		atomic_read(&n_rcu_torture_mberror),
1801 		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
1802 		n_rcu_torture_barrier_error,
1803 		n_rcu_torture_boost_ktrerror,
1804 		n_rcu_torture_boost_rterror);
1805 	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1806 		n_rcu_torture_boost_failure,
1807 		n_rcu_torture_boosts,
1808 		atomic_long_read(&n_rcu_torture_timers));
1809 	torture_onoff_stats();
1810 	pr_cont("barrier: %ld/%ld:%ld ",
1811 		data_race(n_barrier_successes),
1812 		data_race(n_barrier_attempts),
1813 		data_race(n_rcu_torture_barrier_error));
1814 	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
1815 	pr_cont("nocb-toggles: %ld:%ld\n",
1816 		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
1817 
1818 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1819 	if (atomic_read(&n_rcu_torture_mberror) ||
1820 	    atomic_read(&n_rcu_torture_mbchk_fail) ||
1821 	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
1822 	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
1823 	    i > 1) {
1824 		pr_cont("%s", "!!! ");
1825 		atomic_inc(&n_rcu_torture_error);
1826 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
1827 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
1828 		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
1829 		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
1830 		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
1831 		WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
1832 		WARN_ON_ONCE(i > 1); // Too-short grace period
1833 	}
1834 	pr_cont("Reader Pipe: ");
1835 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1836 		pr_cont(" %ld", pipesummary[i]);
1837 	pr_cont("\n");
1838 
1839 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1840 	pr_cont("Reader Batch: ");
1841 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1842 		pr_cont(" %ld", batchsummary[i]);
1843 	pr_cont("\n");
1844 
1845 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1846 	pr_cont("Free-Block Circulation: ");
1847 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1848 		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
1849 	}
1850 	pr_cont("\n");
1851 
1852 	if (cur_ops->stats)
1853 		cur_ops->stats();
1854 	if (rtcv_snap == rcu_torture_current_version &&
1855 	    rcu_access_pointer(rcu_torture_current) &&
1856 	    !rcu_stall_is_suppressed()) {
1857 		int __maybe_unused flags = 0;
1858 		unsigned long __maybe_unused gp_seq = 0;
1859 
1860 		rcutorture_get_gp_data(cur_ops->ttype,
1861 				       &flags, &gp_seq);
1862 		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
1863 					&flags, &gp_seq);
1864 		wtp = READ_ONCE(writer_task);
1865 		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
1866 			 rcu_torture_writer_state_getname(),
1867 			 rcu_torture_writer_state, gp_seq, flags,
1868 			 wtp == NULL ? ~0U : wtp->__state,
1869 			 wtp == NULL ? -1 : (int)task_cpu(wtp));
1870 		if (!splatted && wtp) {
1871 			sched_show_task(wtp);
1872 			splatted = true;
1873 		}
1874 		if (cur_ops->gp_kthread_dbg)
1875 			cur_ops->gp_kthread_dbg();
1876 		rcu_ftrace_dump(DUMP_ALL);
1877 	}
1878 	rtcv_snap = rcu_torture_current_version;
1879 }
1880 
1881 /*
1882  * Periodically prints torture statistics, if periodic statistics printing
1883  * was specified via the stat_interval module parameter.
1884  */
1885 static int
1886 rcu_torture_stats(void *arg)
1887 {
1888 	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
1889 	do {
1890 		schedule_timeout_interruptible(stat_interval * HZ);
1891 		rcu_torture_stats_print();
1892 		torture_shutdown_absorb("rcu_torture_stats");
1893 	} while (!torture_must_stop());
1894 	torture_kthread_stopping("rcu_torture_stats");
1895 	return 0;
1896 }
1897 
1898 /* Test mem_dump_obj() and friends.  */
1899 static void rcu_torture_mem_dump_obj(void)
1900 {
1901 	struct rcu_head *rhp;
1902 	struct kmem_cache *kcp;
1903 	static int z;
1904 
1905 	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
1906 	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
1907 	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
1908 	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
1909 	mem_dump_obj(ZERO_SIZE_PTR);
1910 	pr_alert("mem_dump_obj(NULL):");
1911 	mem_dump_obj(NULL);
1912 	pr_alert("mem_dump_obj(%px):", &rhp);
1913 	mem_dump_obj(&rhp);
1914 	pr_alert("mem_dump_obj(%px):", rhp);
1915 	mem_dump_obj(rhp);
1916 	pr_alert("mem_dump_obj(%px):", &rhp->func);
1917 	mem_dump_obj(&rhp->func);
1918 	pr_alert("mem_dump_obj(%px):", &z);
1919 	mem_dump_obj(&z);
1920 	kmem_cache_free(kcp, rhp);
1921 	kmem_cache_destroy(kcp);
1922 	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
1923 	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
1924 	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
1925 	mem_dump_obj(rhp);
1926 	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
1927 	mem_dump_obj(&rhp->func);
1928 	kfree(rhp);
1929 	rhp = vmalloc(4096);
1930 	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
1931 	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
1932 	mem_dump_obj(rhp);
1933 	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
1934 	mem_dump_obj(&rhp->func);
1935 	vfree(rhp);
1936 }
1937 
1938 static void
1939 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
1940 {
1941 	pr_alert("%s" TORTURE_FLAG
1942 		 "--- %s: nreaders=%d nfakewriters=%d "
1943 		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1944 		 "shuffle_interval=%d stutter=%d irqreader=%d "
1945 		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1946 		 "test_boost=%d/%d test_boost_interval=%d "
1947 		 "test_boost_duration=%d shutdown_secs=%d "
1948 		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
1949 		 "stall_cpu_block=%d "
1950 		 "n_barrier_cbs=%d "
1951 		 "onoff_interval=%d onoff_holdoff=%d "
1952 		 "read_exit_delay=%d read_exit_burst=%d "
1953 		 "nocbs_nthreads=%d nocbs_toggle=%d\n",
1954 		 torture_type, tag, nrealreaders, nfakewriters,
1955 		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1956 		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1957 		 test_boost, cur_ops->can_boost,
1958 		 test_boost_interval, test_boost_duration, shutdown_secs,
1959 		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
1960 		 stall_cpu_block,
1961 		 n_barrier_cbs,
1962 		 onoff_interval, onoff_holdoff,
1963 		 read_exit_delay, read_exit_burst,
1964 		 nocbs_nthreads, nocbs_toggle);
1965 }
1966 
1967 static int rcutorture_booster_cleanup(unsigned int cpu)
1968 {
1969 	struct task_struct *t;
1970 
1971 	if (boost_tasks[cpu] == NULL)
1972 		return 0;
1973 	mutex_lock(&boost_mutex);
1974 	t = boost_tasks[cpu];
1975 	boost_tasks[cpu] = NULL;
1976 	rcu_torture_enable_rt_throttle();
1977 	mutex_unlock(&boost_mutex);
1978 
1979 	/* This must be outside of the mutex, otherwise deadlock! */
1980 	torture_stop_kthread(rcu_torture_boost, t);
1981 	return 0;
1982 }
1983 
1984 static int rcutorture_booster_init(unsigned int cpu)
1985 {
1986 	int retval;
1987 
1988 	if (boost_tasks[cpu] != NULL)
1989 		return 0;  /* Already created, nothing more to do. */
1990 
1991 	/* Don't allow time recalculation while creating a new task. */
1992 	mutex_lock(&boost_mutex);
1993 	rcu_torture_disable_rt_throttle();
1994 	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1995 	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1996 						  cpu_to_node(cpu),
1997 						  "rcu_torture_boost");
1998 	if (IS_ERR(boost_tasks[cpu])) {
1999 		retval = PTR_ERR(boost_tasks[cpu]);
2000 		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
2001 		n_rcu_torture_boost_ktrerror++;
2002 		boost_tasks[cpu] = NULL;
2003 		mutex_unlock(&boost_mutex);
2004 		return retval;
2005 	}
2006 	kthread_bind(boost_tasks[cpu], cpu);
2007 	wake_up_process(boost_tasks[cpu]);
2008 	mutex_unlock(&boost_mutex);
2009 	return 0;
2010 }
2011 
2012 /*
2013  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
2014  * induces a CPU stall for the time specified by stall_cpu.
2015  */
2016 static int rcu_torture_stall(void *args)
2017 {
2018 	int idx;
2019 	unsigned long stop_at;
2020 
2021 	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
2022 	if (stall_cpu_holdoff > 0) {
2023 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2024 		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2025 		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2026 	}
2027 	if (!kthread_should_stop() && stall_gp_kthread > 0) {
2028 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2029 		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2030 		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2031 			if (kthread_should_stop())
2032 				break;
2033 			schedule_timeout_uninterruptible(HZ);
2034 		}
2035 	}
2036 	if (!kthread_should_stop() && stall_cpu > 0) {
2037 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2038 		stop_at = ktime_get_seconds() + stall_cpu;
2039 		/* RCU CPU stall is expected behavior in the following code. */
2040 		idx = cur_ops->readlock();
2041 		if (stall_cpu_irqsoff)
2042 			local_irq_disable();
2043 		else if (!stall_cpu_block)
2044 			preempt_disable();
2045 		pr_alert("%s start on CPU %d.\n",
2046 			  __func__, raw_smp_processor_id());
2047 		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
2048 				    stop_at))
2049 			if (stall_cpu_block) {
2050 #ifdef CONFIG_PREEMPTION
2051 				preempt_schedule();
2052 #else
2053 				schedule_timeout_uninterruptible(HZ);
2054 #endif
2055 			}
2056 		if (stall_cpu_irqsoff)
2057 			local_irq_enable();
2058 		else if (!stall_cpu_block)
2059 			preempt_enable();
2060 		cur_ops->readunlock(idx);
2061 	}
2062 	pr_alert("%s end.\n", __func__);
2063 	torture_shutdown_absorb("rcu_torture_stall");
2064 	while (!kthread_should_stop())
2065 		schedule_timeout_interruptible(10 * HZ);
2066 	return 0;
2067 }
2068 
2069 /* Spawn CPU-stall kthread, if stall_cpu specified. */
2070 static int __init rcu_torture_stall_init(void)
2071 {
2072 	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2073 		return 0;
2074 	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2075 }
2076 
2077 /* State structure for forward-progress self-propagating RCU callback. */
2078 struct fwd_cb_state {
2079 	struct rcu_head rh;
2080 	int stop;
2081 };
2082 
2083 /*
2084  * Forward-progress self-propagating RCU callback function.  Because
2085  * callbacks run from softirq, this function executes within an implicit
2086  * RCU read-side critical section.
2087  */
2088 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2089 {
2090 	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2091 
2092 	if (READ_ONCE(fcsp->stop)) {
2093 		WRITE_ONCE(fcsp->stop, 2);
2094 		return;
2095 	}
2096 	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2097 }
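/*
 * Usage sketch, mirroring rcu_torture_fwd_prog_nr() below:
 *
 *	struct fwd_cb_state fcs;
 *
 *	init_rcu_head_on_stack(&fcs.rh);
 *	WRITE_ONCE(fcs.stop, 0);
 *	cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); // Start self-propagation.
 *	...
 *	WRITE_ONCE(fcs.stop, 1);  // Ask the callback to stop requeueing itself.
 *	cur_ops->sync();	  // Wait for the in-flight invocation to finish.
 *	cur_ops->cb_barrier();	  // Wait for any final requeued instance.
 *	WARN_ON(READ_ONCE(fcs.stop) != 2); // Callback acknowledged the stop.
 *	destroy_rcu_head_on_stack(&fcs.rh);
 */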
2098 
2099 /* State for continuous-flood RCU callbacks. */
2100 struct rcu_fwd_cb {
2101 	struct rcu_head rh;
2102 	struct rcu_fwd_cb *rfc_next;
2103 	struct rcu_fwd *rfc_rfp;
2104 	int rfc_gps;
2105 };
2106 
2107 #define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
2108 #define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
2109 #define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
2110 #define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
2111 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
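/*
 * Worked example for N_LAUNDERS_HIST: 2 * (8 * HZ) / (HZ / 10) == 160
 * buckets for typical HZ values, each covering one tenth of a second,
 * for sixteen seconds of history -- twice MAX_FWD_CB_JIFFIES, which
 * leaves headroom for slow post-test callback processing.
 */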
2112 
2113 struct rcu_launder_hist {
2114 	long n_launders;
2115 	unsigned long launder_gp_seq;
2116 };
2117 
2118 struct rcu_fwd {
2119 	spinlock_t rcu_fwd_lock;
2120 	struct rcu_fwd_cb *rcu_fwd_cb_head;
2121 	struct rcu_fwd_cb **rcu_fwd_cb_tail;
2122 	long n_launders_cb;
2123 	unsigned long rcu_fwd_startat;
2124 	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2125 	unsigned long rcu_launder_gp_seq_start;
2126 };
2127 
2128 static DEFINE_MUTEX(rcu_fwd_mutex);
2129 static struct rcu_fwd *rcu_fwds;
2130 static bool rcu_fwd_emergency_stop;
2131 
2132 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2133 {
2134 	unsigned long gps;
2135 	unsigned long gps_old;
2136 	int i;
2137 	int j;
2138 
2139 	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2140 		if (rfp->n_launders_hist[i].n_launders > 0)
2141 			break;
2142 	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
2143 		 __func__, jiffies - rfp->rcu_fwd_startat);
2144 	gps_old = rfp->rcu_launder_gp_seq_start;
2145 	for (j = 0; j <= i; j++) {
2146 		gps = rfp->n_launders_hist[j].launder_gp_seq;
2147 		pr_cont(" %ds/%d: %ld:%ld",
2148 			j + 1, FWD_CBS_HIST_DIV,
2149 			rfp->n_launders_hist[j].n_launders,
2150 			rcutorture_seq_diff(gps, gps_old));
2151 		gps_old = gps;
2152 	}
2153 	pr_cont("\n");
2154 }
2155 
2156 /* Callback function for continuous-flood RCU callbacks. */
2157 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2158 {
2159 	unsigned long flags;
2160 	int i;
2161 	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2162 	struct rcu_fwd_cb **rfcpp;
2163 	struct rcu_fwd *rfp = rfcp->rfc_rfp;
2164 
2165 	rfcp->rfc_next = NULL;
2166 	rfcp->rfc_gps++;
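	/*
	 * Classic tail-pointer enqueue: ->rcu_fwd_cb_tail always points
	 * to the pointer to be filled in next (the list head when the
	 * list is empty), so appending is O(1) with no special case for
	 * an empty list.
	 */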
2167 	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2168 	rfcpp = rfp->rcu_fwd_cb_tail;
2169 	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2170 	WRITE_ONCE(*rfcpp, rfcp);
2171 	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2172 	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2173 	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2174 		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2175 	rfp->n_launders_hist[i].n_launders++;
2176 	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2177 	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2178 }
2179 
2180 // Give the scheduler a chance, even on nohz_full CPUs.
2181 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2182 {
2183 	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2184 		// Real call_rcu() floods hit userspace, so emulate that.
2185 		if (need_resched() || (iter & 0xfff))
2186 			schedule();
2187 		return;
2188 	}
2189 	// No userspace emulation: CB invocation throttles call_rcu()
2190 	cond_resched();
2191 }
2192 
2193 /*
2194  * Free all callbacks on the rcu_fwd_cb_head list, either because the
2195  * test is over or because we hit an OOM event.
2196  */
2197 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2198 {
2199 	unsigned long flags;
2200 	unsigned long freed = 0;
2201 	struct rcu_fwd_cb *rfcp;
2202 
2203 	for (;;) {
2204 		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2205 		rfcp = rfp->rcu_fwd_cb_head;
2206 		if (!rfcp) {
2207 			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2208 			break;
2209 		}
2210 		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2211 		if (!rfp->rcu_fwd_cb_head)
2212 			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2213 		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2214 		kfree(rfcp);
2215 		freed++;
2216 		rcu_torture_fwd_prog_cond_resched(freed);
2217 		if (tick_nohz_full_enabled()) {
2218 			local_irq_save(flags);
2219 			rcu_momentary_dyntick_idle();
2220 			local_irq_restore(flags);
2221 		}
2222 	}
2223 	return freed;
2224 }
2225 
2226 /* Carry out need_resched()/cond_resched() forward-progress testing. */
2227 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2228 				    int *tested, int *tested_tries)
2229 {
2230 	unsigned long cver;
2231 	unsigned long dur;
2232 	struct fwd_cb_state fcs;
2233 	unsigned long gps;
2234 	int idx;
2235 	int sd;
2236 	int sd4;
2237 	bool selfpropcb = false;
2238 	unsigned long stopat;
2239 	static DEFINE_TORTURE_RANDOM(trs);
2240 
2241 	if (!cur_ops->sync)
2242 		return; // Cannot do need_resched() forward progress testing without ->sync.
2243 	if (cur_ops->call && cur_ops->cb_barrier) {
2244 		init_rcu_head_on_stack(&fcs.rh);
2245 		selfpropcb = true;
2246 	}
2247 
2248 	/* Tight loop containing cond_resched(). */
2249 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2250 	cur_ops->sync(); /* Later readers see above write. */
2251 	if (selfpropcb) {
2252 		WRITE_ONCE(fcs.stop, 0);
2253 		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2254 	}
2255 	cver = READ_ONCE(rcu_torture_current_version);
2256 	gps = cur_ops->get_gp_seq();
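	/*
	 * Pick a random duration between about 1/fwd_progress_div of an
	 * RCU CPU stall timeout and just short of a full one.  With the
	 * default 21-second stall timeout and fwd_progress_div == 4,
	 * that is roughly 5.25 to 21 seconds: long enough to require
	 * forward progress, but short enough to normally avoid an
	 * actual stall splat.
	 */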
2257 	sd = cur_ops->stall_dur() + 1;
2258 	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2259 	dur = sd4 + torture_random(&trs) % (sd - sd4);
2260 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2261 	stopat = rfp->rcu_fwd_startat + dur;
2262 	while (time_before(jiffies, stopat) &&
2263 	       !shutdown_time_arrived() &&
2264 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2265 		idx = cur_ops->readlock();
2266 		udelay(10);
2267 		cur_ops->readunlock(idx);
2268 		if (!fwd_progress_need_resched || need_resched())
2269 			cond_resched();
2270 	}
2271 	(*tested_tries)++;
2272 	if (!time_before(jiffies, stopat) &&
2273 	    !shutdown_time_arrived() &&
2274 	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2275 		(*tested)++;
2276 		cver = READ_ONCE(rcu_torture_current_version) - cver;
2277 		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2278 		WARN_ON(!cver && gps < 2);
2279 		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
2280 	}
2281 	if (selfpropcb) {
2282 		WRITE_ONCE(fcs.stop, 1);
2283 		cur_ops->sync(); /* Wait for running CB to complete. */
2284 		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2285 	}
2286 
2287 	if (selfpropcb) {
2288 		WARN_ON(READ_ONCE(fcs.stop) != 2);
2289 		destroy_rcu_head_on_stack(&fcs.rh);
2290 	}
2291 	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2292 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2293 }
2294 
2295 /* Carry out call_rcu() forward-progress testing. */
2296 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2297 {
2298 	unsigned long cver;
2299 	unsigned long flags;
2300 	unsigned long gps;
2301 	int i;
2302 	long n_launders;
2303 	long n_launders_cb_snap;
2304 	long n_launders_sa;
2305 	long n_max_cbs;
2306 	long n_max_gps;
2307 	struct rcu_fwd_cb *rfcp;
2308 	struct rcu_fwd_cb *rfcpn;
2309 	unsigned long stopat;
2310 	unsigned long stoppedat;
2311 
2312 	if (READ_ONCE(rcu_fwd_emergency_stop))
2313 		return; /* Get out of the way quickly, no GP wait! */
2314 	if (!cur_ops->call)
2315 		return; /* Can't do call_rcu() fwd prog without ->call. */
2316 
2317 	/* Loop continuously posting RCU callbacks. */
2318 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2319 	cur_ops->sync(); /* Later readers see above write. */
2320 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2321 	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2322 	n_launders = 0;
2323 	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2324 	n_launders_sa = 0;
2325 	n_max_cbs = 0;
2326 	n_max_gps = 0;
2327 	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2328 		rfp->n_launders_hist[i].n_launders = 0;
2329 	cver = READ_ONCE(rcu_torture_current_version);
2330 	gps = cur_ops->get_gp_seq();
2331 	rfp->rcu_launder_gp_seq_start = gps;
2332 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2333 	while (time_before(jiffies, stopat) &&
2334 	       !shutdown_time_arrived() &&
2335 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2336 		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2337 		rfcpn = NULL;
2338 		if (rfcp)
2339 			rfcpn = READ_ONCE(rfcp->rfc_next);
2340 		if (rfcpn) {
2341 			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2342 			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2343 				break;
2344 			rfp->rcu_fwd_cb_head = rfcpn;
2345 			n_launders++;
2346 			n_launders_sa++;
2347 		} else {
2348 			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2349 			if (WARN_ON_ONCE(!rfcp)) {
2350 				schedule_timeout_interruptible(1);
2351 				continue;
2352 			}
2353 			n_max_cbs++;
2354 			n_launders_sa = 0;
2355 			rfcp->rfc_gps = 0;
2356 			rfcp->rfc_rfp = rfp;
2357 		}
2358 		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2359 		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2360 		if (tick_nohz_full_enabled()) {
2361 			local_irq_save(flags);
2362 			rcu_momentary_dyntick_idle();
2363 			local_irq_restore(flags);
2364 		}
2365 	}
2366 	stoppedat = jiffies;
2367 	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2368 	cver = READ_ONCE(rcu_torture_current_version) - cver;
2369 	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2370 	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2371 	(void)rcu_torture_fwd_prog_cbfree(rfp);
2372 
2373 	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2374 	    !shutdown_time_arrived()) {
2375 		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2376 		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2377 			 __func__,
2378 			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2379 			 n_launders + n_max_cbs - n_launders_cb_snap,
2380 			 n_launders, n_launders_sa,
2381 			 n_max_gps, n_max_cbs, cver, gps);
2382 		rcu_torture_fwd_cb_hist(rfp);
2383 	}
2384 	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2385 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2386 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2387 }
2388 
2389 
2390 /*
2391  * OOM notifier.  This prints diagnostic information for the current
2392  * forward-progress test, then stops it and frees its flood of callbacks.
2393  */
2394 static int rcutorture_oom_notify(struct notifier_block *self,
2395 				 unsigned long notused, void *nfreed)
2396 {
2397 	struct rcu_fwd *rfp;
2398 
2399 	mutex_lock(&rcu_fwd_mutex);
2400 	rfp = rcu_fwds;
2401 	if (!rfp) {
2402 		mutex_unlock(&rcu_fwd_mutex);
2403 		return NOTIFY_OK;
2404 	}
2405 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2406 	     __func__);
2407 	rcu_torture_fwd_cb_hist(rfp);
2408 	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
2409 	WRITE_ONCE(rcu_fwd_emergency_stop, true);
2410 	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2411 	pr_info("%s: Freed %lu RCU callbacks.\n",
2412 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2413 	rcu_barrier();
2414 	pr_info("%s: Freed %lu RCU callbacks.\n",
2415 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2416 	rcu_barrier();
2417 	pr_info("%s: Freed %lu RCU callbacks.\n",
2418 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2419 	smp_mb(); /* Frees before return to avoid redoing OOM. */
2420 	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2421 	pr_info("%s returning after OOM processing.\n", __func__);
2422 	mutex_unlock(&rcu_fwd_mutex);
2423 	return NOTIFY_OK;
2424 }
2425 
2426 static struct notifier_block rcutorture_oom_nb = {
2427 	.notifier_call = rcutorture_oom_notify
2428 };
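/*
 * The above notifier is registered by rcu_torture_fwd_prog_init() and
 * unregistered by rcu_torture_fwd_prog_cleanup(), so it is active only
 * while forward-progress testing is set up.
 */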
2429 
2430 /* Carry out grace-period forward-progress testing. */
2431 static int rcu_torture_fwd_prog(void *args)
2432 {
2433 	int oldnice = task_nice(current);
2434 	struct rcu_fwd *rfp = args;
2435 	int tested = 0;
2436 	int tested_tries = 0;
2437 
2438 	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2439 	rcu_bind_current_to_nocb();
2440 	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2441 		set_user_nice(current, MAX_NICE);
2442 	do {
2443 		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2444 		WRITE_ONCE(rcu_fwd_emergency_stop, false);
2445 		if (!IS_ENABLED(CONFIG_TINY_RCU) ||
2446 		    rcu_inkernel_boot_has_ended())
2447 			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2448 		if (rcu_inkernel_boot_has_ended())
2449 			rcu_torture_fwd_prog_cr(rfp);
2450 
2451 		/* Avoid slow periods, better to test when busy. */
2452 		if (stutter_wait("rcu_torture_fwd_prog"))
2453 			sched_set_normal(current, oldnice);
2454 	} while (!torture_must_stop());
2455 	/* Short runs might not contain a valid forward-progress attempt. */
2456 	WARN_ON(!tested && tested_tries >= 5);
2457 	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2458 	torture_kthread_stopping("rcu_torture_fwd_prog");
2459 	return 0;
2460 }
2461 
2462 /* If forward-progress checking is requested and feasible, spawn the thread. */
2463 static int __init rcu_torture_fwd_prog_init(void)
2464 {
2465 	struct rcu_fwd *rfp;
2466 
2467 	if (!fwd_progress)
2468 		return 0; /* Not requested, so don't do it. */
2469 	if ((!cur_ops->sync && !cur_ops->call) ||
2470 	    !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) {
2471 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2472 		return 0;
2473 	}
2474 	if (stall_cpu > 0) {
2475 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2476 		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2477 			return -EINVAL; /* In module, can fail back to user. */
2478 		WARN_ON(1); /* Make sure rcutorture notices conflict. */
2479 		return 0;
2480 	}
2481 	if (fwd_progress_holdoff <= 0)
2482 		fwd_progress_holdoff = 1;
2483 	if (fwd_progress_div <= 0)
2484 		fwd_progress_div = 4;
2485 	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
2486 	if (!rfp)
2487 		return -ENOMEM;
2488 	spin_lock_init(&rfp->rcu_fwd_lock);
2489 	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2490 	mutex_lock(&rcu_fwd_mutex);
2491 	rcu_fwds = rfp;
2492 	mutex_unlock(&rcu_fwd_mutex);
2493 	register_oom_notifier(&rcutorture_oom_nb);
2494 	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
2495 }
2496 
2497 static void rcu_torture_fwd_prog_cleanup(void)
2498 {
2499 	struct rcu_fwd *rfp;
2500 
2501 	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
2502 	rfp = rcu_fwds;
2503 	mutex_lock(&rcu_fwd_mutex);
2504 	rcu_fwds = NULL;
2505 	mutex_unlock(&rcu_fwd_mutex);
2506 	unregister_oom_notifier(&rcutorture_oom_nb);
2507 	kfree(rfp);
2508 }
2509 
2510 /* Callback function for RCU barrier testing. */
2511 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
2512 {
2513 	atomic_inc(&barrier_cbs_invoked);
2514 }
2515 
2516 /* IPI handler used to post a callback on the desired CPU, if it is online. */
2517 static void rcu_torture_barrier1cb(void *rcu_void)
2518 {
2519 	struct rcu_head *rhp = rcu_void;
2520 
2521 	cur_ops->call(rhp, rcu_torture_barrier_cbf);
2522 }
2523 
2524 /* kthread function to register callbacks used to test RCU barriers. */
2525 static int rcu_torture_barrier_cbs(void *arg)
2526 {
2527 	long myid = (long)arg;
2528 	bool lastphase = false;
2529 	bool newphase;
2530 	struct rcu_head rcu;
2531 
2532 	init_rcu_head_on_stack(&rcu);
2533 	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
2534 	set_user_nice(current, MAX_NICE);
2535 	do {
2536 		wait_event(barrier_cbs_wq[myid],
2537 			   (newphase =
2538 			    smp_load_acquire(&barrier_phase)) != lastphase ||
2539 			   torture_must_stop());
2540 		lastphase = newphase;
2541 		if (torture_must_stop())
2542 			break;
2543 		/*
2544 		 * The above smp_load_acquire() ensures barrier_phase load
2545 		 * is ordered before the following ->call().
2546 		 */
2547 		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
2548 					     &rcu, 1)) {
2549 			// IPI failed, so use direct call from current CPU.
2550 			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
2551 		}
2552 		if (atomic_dec_and_test(&barrier_cbs_count))
2553 			wake_up(&barrier_wq);
2554 	} while (!torture_must_stop());
2555 	if (cur_ops->cb_barrier != NULL)
2556 		cur_ops->cb_barrier();
2557 	destroy_rcu_head_on_stack(&rcu);
2558 	torture_kthread_stopping("rcu_torture_barrier_cbs");
2559 	return 0;
2560 }
2561 
2562 /* kthread function to drive and coordinate RCU barrier testing. */
2563 static int rcu_torture_barrier(void *arg)
2564 {
2565 	int i;
2566 
2567 	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
2568 	do {
2569 		atomic_set(&barrier_cbs_invoked, 0);
2570 		atomic_set(&barrier_cbs_count, n_barrier_cbs);
2571 		/* Ensure barrier_phase ordered after prior assignments. */
2572 		smp_store_release(&barrier_phase, !barrier_phase);
2573 		for (i = 0; i < n_barrier_cbs; i++)
2574 			wake_up(&barrier_cbs_wq[i]);
2575 		wait_event(barrier_wq,
2576 			   atomic_read(&barrier_cbs_count) == 0 ||
2577 			   torture_must_stop());
2578 		if (torture_must_stop())
2579 			break;
2580 		n_barrier_attempts++;
2581 		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
2582 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2583 			n_rcu_torture_barrier_error++;
2584 			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2585 			       atomic_read(&barrier_cbs_invoked),
2586 			       n_barrier_cbs);
2587 			WARN_ON(1);
2588 			// Wait manually for the remaining callbacks
2589 			i = 0;
2590 			do {
2591 				if (WARN_ON(i++ > HZ))
2592 					i = INT_MIN;
2593 				schedule_timeout_interruptible(1);
2594 				cur_ops->cb_barrier();
2595 			} while (atomic_read(&barrier_cbs_invoked) !=
2596 				 n_barrier_cbs &&
2597 				 !torture_must_stop());
2598 			smp_mb(); // Can't trust ordering if broken.
2599 			if (!torture_must_stop())
2600 				pr_err("Recovered: barrier_cbs_invoked = %d\n",
2601 				       atomic_read(&barrier_cbs_invoked));
2602 		} else {
2603 			n_barrier_successes++;
2604 		}
2605 		schedule_timeout_interruptible(HZ / 10);
2606 	} while (!torture_must_stop());
2607 	torture_kthread_stopping("rcu_torture_barrier");
2608 	return 0;
2609 }
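/*
 * To summarize the protocol driven above: each pass flips barrier_phase
 * and wakes the rcu_torture_barrier_cbs kthreads, each of which posts
 * one callback, preferably from its own CPU.  Once all n_barrier_cbs
 * callbacks are posted, a single cur_ops->cb_barrier() call must not
 * return until every one of them has been invoked; any shortfall in
 * barrier_cbs_invoked therefore indicates a broken rcu_barrier()-like
 * primitive.
 */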
2610 
2611 /* Initialize RCU barrier testing. */
2612 static int rcu_torture_barrier_init(void)
2613 {
2614 	int i;
2615 	int ret;
2616 
2617 	if (n_barrier_cbs <= 0)
2618 		return 0;
2619 	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
2620 		pr_alert("%s" TORTURE_FLAG
2621 			 " Call or barrier ops missing for %s,\n",
2622 			 torture_type, cur_ops->name);
2623 		pr_alert("%s" TORTURE_FLAG
2624 			 " RCU barrier testing omitted from run.\n",
2625 			 torture_type);
2626 		return 0;
2627 	}
2628 	atomic_set(&barrier_cbs_count, 0);
2629 	atomic_set(&barrier_cbs_invoked, 0);
2630 	barrier_cbs_tasks =
2631 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
2632 			GFP_KERNEL);
2633 	barrier_cbs_wq =
2634 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
2635 	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
2636 		return -ENOMEM;
2637 	for (i = 0; i < n_barrier_cbs; i++) {
2638 		init_waitqueue_head(&barrier_cbs_wq[i]);
2639 		ret = torture_create_kthread(rcu_torture_barrier_cbs,
2640 					     (void *)(long)i,
2641 					     barrier_cbs_tasks[i]);
2642 		if (ret)
2643 			return ret;
2644 	}
2645 	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
2646 }
2647 
2648 /* Clean up after RCU barrier testing. */
2649 static void rcu_torture_barrier_cleanup(void)
2650 {
2651 	int i;
2652 
2653 	torture_stop_kthread(rcu_torture_barrier, barrier_task);
2654 	if (barrier_cbs_tasks != NULL) {
2655 		for (i = 0; i < n_barrier_cbs; i++)
2656 			torture_stop_kthread(rcu_torture_barrier_cbs,
2657 					     barrier_cbs_tasks[i]);
2658 		kfree(barrier_cbs_tasks);
2659 		barrier_cbs_tasks = NULL;
2660 	}
2661 	if (barrier_cbs_wq != NULL) {
2662 		kfree(barrier_cbs_wq);
2663 		barrier_cbs_wq = NULL;
2664 	}
2665 }
2666 
2667 static bool rcu_torture_can_boost(void)
2668 {
2669 	static int boost_warn_once;
2670 	int prio;
2671 
2672 	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2673 		return false;
2674 	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
2675 		return false;
2676 
2677 	prio = rcu_get_gp_kthreads_prio();
2678 	if (!prio)
2679 		return false;
2680 
2681 	if (prio < 2) {
2682 		if (boost_warn_once == 1)
2683 			return false;
2684 
2685 		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
2686 		boost_warn_once = 1;
2687 		return false;
2688 	}
2689 
2690 	return true;
2691 }
2692 
2693 static bool read_exit_child_stop;
2694 static bool read_exit_child_stopped;
2695 static wait_queue_head_t read_exit_wq;
2696 
2697 // Child kthread which just does an rcutorture reader and exits.
2698 static int rcu_torture_read_exit_child(void *trsp_in)
2699 {
2700 	struct torture_random_state *trsp = trsp_in;
2701 
2702 	set_user_nice(current, MAX_NICE);
2703 	// Minimize time between reading and exiting.
2704 	while (!kthread_should_stop())
2705 		schedule_timeout_uninterruptible(1);
2706 	(void)rcu_torture_one_read(trsp, -1);
2707 	return 0;
2708 }
2709 
2710 // Parent kthread which creates and destroys read-exit child kthreads.
2711 static int rcu_torture_read_exit(void *unused)
2712 {
2713 	int count = 0;
2714 	bool errexit = false;
2715 	int i;
2716 	struct task_struct *tsp;
2717 	DEFINE_TORTURE_RANDOM(trs);
2718 
2719 	// Allocate and initialize.
2720 	set_user_nice(current, MAX_NICE);
2721 	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
2722 
2723 	// Each pass through this loop does one read-exit episode.
2724 	do {
2725 		if (++count > read_exit_burst) {
2726 			VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
2727 			rcu_barrier(); // Wait for task_struct free, avoid OOM.
2728 			for (i = 0; i < read_exit_delay; i++) {
2729 				schedule_timeout_uninterruptible(HZ);
2730 				if (READ_ONCE(read_exit_child_stop))
2731 					break;
2732 			}
2733 			if (!READ_ONCE(read_exit_child_stop))
2734 				VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
2735 			count = 0;
2736 		}
2737 		if (READ_ONCE(read_exit_child_stop))
2738 			break;
2739 		// Spawn child.
2740 		tsp = kthread_run(rcu_torture_read_exit_child,
2741 				     &trs, "%s",
2742 				     "rcu_torture_read_exit_child");
2743 		if (IS_ERR(tsp)) {
2744 			VERBOSE_TOROUT_ERRSTRING("out of memory");
2745 			errexit = true;
2746 			tsp = NULL;
2747 			break;
2748 		}
2749 		cond_resched();
2750 		kthread_stop(tsp);
2751 		n_read_exits++;
2752 		stutter_wait("rcu_torture_read_exit");
2753 	} while (!errexit && !READ_ONCE(read_exit_child_stop));
2754 
2755 	// Clean up and exit.
2756 	smp_store_release(&read_exit_child_stopped, true); // After reaping.
2757 	smp_mb(); // Store before wakeup.
2758 	wake_up(&read_exit_wq);
2759 	while (!torture_must_stop())
2760 		schedule_timeout_uninterruptible(1);
2761 	torture_kthread_stopping("rcu_torture_read_exit");
2762 	return 0;
2763 }
2764 
2765 static int rcu_torture_read_exit_init(void)
2766 {
2767 	if (read_exit_burst <= 0)
2768 		return 0;
2769 	init_waitqueue_head(&read_exit_wq);
2770 	read_exit_child_stop = false;
2771 	read_exit_child_stopped = false;
2772 	return torture_create_kthread(rcu_torture_read_exit, NULL,
2773 				      read_exit_task);
2774 }
2775 
2776 static void rcu_torture_read_exit_cleanup(void)
2777 {
2778 	if (!read_exit_task)
2779 		return;
2780 	WRITE_ONCE(read_exit_child_stop, true);
2781 	smp_mb(); // Above write before wait.
2782 	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
2783 	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
2784 }
2785 
2786 static enum cpuhp_state rcutor_hp;
2787 
2788 static void
2789 rcu_torture_cleanup(void)
2790 {
2791 	int firsttime;
2792 	int flags = 0;
2793 	unsigned long gp_seq = 0;
2794 	int i;
2795 
2796 	if (torture_cleanup_begin()) {
2797 		if (cur_ops->cb_barrier != NULL)
2798 			cur_ops->cb_barrier();
2799 		return;
2800 	}
2801 	if (!cur_ops) {
2802 		torture_cleanup_end();
2803 		return;
2804 	}
2805 
2806 	if (cur_ops->gp_kthread_dbg)
2807 		cur_ops->gp_kthread_dbg();
2808 	rcu_torture_read_exit_cleanup();
2809 	rcu_torture_barrier_cleanup();
2810 	rcu_torture_fwd_prog_cleanup();
2811 	torture_stop_kthread(rcu_torture_stall, stall_task);
2812 	torture_stop_kthread(rcu_torture_writer, writer_task);
2813 
2814 	if (nocb_tasks) {
2815 		for (i = 0; i < nrealnocbers; i++)
2816 			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
2817 		kfree(nocb_tasks);
2818 		nocb_tasks = NULL;
2819 	}
2820 
2821 	if (reader_tasks) {
2822 		for (i = 0; i < nrealreaders; i++)
2823 			torture_stop_kthread(rcu_torture_reader,
2824 					     reader_tasks[i]);
2825 		kfree(reader_tasks);
2826 		reader_tasks = NULL;
2827 	}
2828 	kfree(rcu_torture_reader_mbchk);
2829 	rcu_torture_reader_mbchk = NULL;
2830 
2831 	if (fakewriter_tasks) {
2832 		for (i = 0; i < nfakewriters; i++)
2833 			torture_stop_kthread(rcu_torture_fakewriter,
2834 					     fakewriter_tasks[i]);
2835 		kfree(fakewriter_tasks);
2836 		fakewriter_tasks = NULL;
2837 	}
2838 
2839 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2840 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2841 	pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
2842 		 cur_ops->name, (long)gp_seq, flags,
2843 		 rcutorture_seq_diff(gp_seq, start_gp_seq));
2844 	torture_stop_kthread(rcu_torture_stats, stats_task);
2845 	torture_stop_kthread(rcu_torture_fqs, fqs_task);
2846 	if (rcu_torture_can_boost() && rcutor_hp >= 0)
2847 		cpuhp_remove_state(rcutor_hp);
2848 
2849 	/*
2850 	 * Wait for all RCU callbacks to fire, then do torture-type-specific
2851 	 * cleanup operations.
2852 	 */
2853 	if (cur_ops->cb_barrier != NULL)
2854 		cur_ops->cb_barrier();
2855 	if (cur_ops->cleanup != NULL)
2856 		cur_ops->cleanup();
2857 
2858 	rcu_torture_mem_dump_obj();
2859 
2860 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
2861 
2862 	if (err_segs_recorded) {
2863 		pr_alert("Failure/close-call rcutorture reader segments:\n");
2864 		if (rt_read_nsegs == 0)
2865 			pr_alert("\t: No segments recorded!!!\n");
2866 		firsttime = 1;
2867 		for (i = 0; i < rt_read_nsegs; i++) {
2868 			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2869 			if (err_segs[i].rt_delay_jiffies != 0) {
2870 				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2871 					err_segs[i].rt_delay_jiffies);
2872 				firsttime = 0;
2873 			}
2874 			if (err_segs[i].rt_delay_ms != 0) {
2875 				pr_cont("%s%ldms", firsttime ? "" : "+",
2876 					err_segs[i].rt_delay_ms);
2877 				firsttime = 0;
2878 			}
2879 			if (err_segs[i].rt_delay_us != 0) {
2880 				pr_cont("%s%ldus", firsttime ? "" : "+",
2881 					err_segs[i].rt_delay_us);
2882 				firsttime = 0;
2883 			}
2884 			pr_cont("%s\n",
2885 				err_segs[i].rt_preempted ? "preempted" : "");
2886 
2887 		}
2888 	}
2889 	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
2890 		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
2891 	else if (torture_onoff_failures())
2892 		rcu_torture_print_module_parms(cur_ops,
2893 					       "End of test: RCU_HOTPLUG");
2894 	else
2895 		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
2896 	torture_cleanup_end();
2897 }
2898 
2899 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2900 static void rcu_torture_leak_cb(struct rcu_head *rhp)
2901 {
2902 }
2903 
2904 static void rcu_torture_err_cb(struct rcu_head *rhp)
2905 {
2906 	/*
2907 	 * This -might- happen due to race conditions, but is unlikely.
2908 	 * The scenario that leads to it is that the first of the pair
2909 	 * of duplicate callbacks is queued, someone else starts a grace
2910 	 * period that includes that callback, and the second of the
2911 	 * pair must therefore wait for the next grace period.  Unlikely,
2912 	 * but it can happen.  If it does happen, the debug-objects
2913 	 * subsystem won't have splatted.
2914 	 */
2915 	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
2916 }
2917 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2918 
2919 /*
2920  * Verify that a duplicate call_rcu() causes debug-objects to complain, but
2921  * only if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
2922  * cannot be carried out.
2923  */
2924 static void rcu_test_debug_objects(void)
2925 {
2926 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2927 	struct rcu_head rh1;
2928 	struct rcu_head rh2;
2929 	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2930 
2931 	init_rcu_head_on_stack(&rh1);
2932 	init_rcu_head_on_stack(&rh2);
2933 	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
2934 
2935 	/* Try to queue the rh2 pair of callbacks for the same grace period. */
2936 	preempt_disable(); /* Prevent preemption from interrupting test. */
2937 	rcu_read_lock(); /* Make it impossible to finish a grace period. */
2938 	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
2939 	local_irq_disable(); /* Make it harder to start a new grace period. */
2940 	call_rcu(&rh2, rcu_torture_leak_cb);
2941 	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2942 	if (rhp) {
2943 		call_rcu(rhp, rcu_torture_leak_cb);
2944 		call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
2945 	}
2946 	local_irq_enable();
2947 	rcu_read_unlock();
2948 	preempt_enable();
2949 
2950 	/* Wait for them all to get done so we can safely return. */
2951 	rcu_barrier();
2952 	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
2953 	destroy_rcu_head_on_stack(&rh1);
2954 	destroy_rcu_head_on_stack(&rh2);
2955 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2956 	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
2957 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2958 }
2959 
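/*
 * Handed to torture_onoff_init() as a hook that runs during CPU-hotplug
 * operations: every 0x1000th invocation incurs a full synchronous grace
 * period, exercising the synchronous primitives against CPU hotplug
 * without unduly slowing down the hotplug operations themselves.
 */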
2960 static void rcutorture_sync(void)
2961 {
2962 	static unsigned long n;
2963 
2964 	if (cur_ops->sync && !(++n & 0xfff))
2965 		cur_ops->sync();
2966 }
2967 
2968 static int __init
2969 rcu_torture_init(void)
2970 {
2971 	long i;
2972 	int cpu;
2973 	int firsterr = 0;
2974 	int flags = 0;
2975 	unsigned long gp_seq = 0;
2976 	static struct rcu_torture_ops *torture_ops[] = {
2977 		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
2978 		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
2979 		&tasks_tracing_ops, &trivial_ops,
2980 	};
2981 
2982 	if (!torture_init_begin(torture_type, verbose))
2983 		return -EBUSY;
2984 
2985 	/* Process args and tell the world that the torturer is on the job. */
2986 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
2987 		cur_ops = torture_ops[i];
2988 		if (strcmp(torture_type, cur_ops->name) == 0)
2989 			break;
2990 	}
2991 	if (i == ARRAY_SIZE(torture_ops)) {
2992 		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2993 			 torture_type);
2994 		pr_alert("rcu-torture types:");
2995 		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
2996 			pr_cont(" %s", torture_ops[i]->name);
2997 		pr_cont("\n");
2998 		firsterr = -EINVAL;
2999 		cur_ops = NULL;
3000 		goto unwind;
3001 	}
3002 	if (cur_ops->fqs == NULL && fqs_duration != 0) {
3003 		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
3004 		fqs_duration = 0;
3005 	}
3006 	if (cur_ops->init)
3007 		cur_ops->init();
3008 
3009 	if (nreaders >= 0) {
3010 		nrealreaders = nreaders;
3011 	} else {
3012 		nrealreaders = num_online_cpus() - 2 - nreaders;
3013 		if (nrealreaders <= 0)
3014 			nrealreaders = 1;
3015 	}
3016 	rcu_torture_print_module_parms(cur_ops, "Start of test");
3017 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
3018 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
3019 	start_gp_seq = gp_seq;
3020 	pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
3021 		 cur_ops->name, (long)gp_seq, flags);
3022 
3023 	/* Set up the freelist. */
3024 
3025 	INIT_LIST_HEAD(&rcu_torture_freelist);
3026 	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
3027 		rcu_tortures[i].rtort_mbtest = 0;
3028 		list_add_tail(&rcu_tortures[i].rtort_free,
3029 			      &rcu_torture_freelist);
3030 	}
3031 
3032 	/* Initialize the statistics so that each run gets its own numbers. */
3033 
3034 	rcu_torture_current = NULL;
3035 	rcu_torture_current_version = 0;
3036 	atomic_set(&n_rcu_torture_alloc, 0);
3037 	atomic_set(&n_rcu_torture_alloc_fail, 0);
3038 	atomic_set(&n_rcu_torture_free, 0);
3039 	atomic_set(&n_rcu_torture_mberror, 0);
3040 	atomic_set(&n_rcu_torture_mbchk_fail, 0);
3041 	atomic_set(&n_rcu_torture_mbchk_tries, 0);
3042 	atomic_set(&n_rcu_torture_error, 0);
3043 	n_rcu_torture_barrier_error = 0;
3044 	n_rcu_torture_boost_ktrerror = 0;
3045 	n_rcu_torture_boost_rterror = 0;
3046 	n_rcu_torture_boost_failure = 0;
3047 	n_rcu_torture_boosts = 0;
3048 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
3049 		atomic_set(&rcu_torture_wcount[i], 0);
3050 	for_each_possible_cpu(cpu) {
3051 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
3052 			per_cpu(rcu_torture_count, cpu)[i] = 0;
3053 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
3054 		}
3055 	}
3056 	err_segs_recorded = 0;
3057 	rt_read_nsegs = 0;
3058 
3059 	/* Start up the kthreads. */
3060 
3061 	rcu_torture_write_types();
3062 	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
3063 					  writer_task);
3064 	if (torture_init_error(firsterr))
3065 		goto unwind;
3066 	if (nfakewriters > 0) {
3067 		fakewriter_tasks = kcalloc(nfakewriters,
3068 					   sizeof(fakewriter_tasks[0]),
3069 					   GFP_KERNEL);
3070 		if (fakewriter_tasks == NULL) {
3071 			VERBOSE_TOROUT_ERRSTRING("out of memory");
3072 			firsterr = -ENOMEM;
3073 			goto unwind;
3074 		}
3075 	}
3076 	for (i = 0; i < nfakewriters; i++) {
3077 		firsterr = torture_create_kthread(rcu_torture_fakewriter,
3078 						  NULL, fakewriter_tasks[i]);
3079 		if (torture_init_error(firsterr))
3080 			goto unwind;
3081 	}
3082 	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
3083 			       GFP_KERNEL);
3084 	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
3085 					   GFP_KERNEL);
3086 	if (!reader_tasks || !rcu_torture_reader_mbchk) {
3087 		VERBOSE_TOROUT_ERRSTRING("out of memory");
3088 		firsterr = -ENOMEM;
3089 		goto unwind;
3090 	}
3091 	for (i = 0; i < nrealreaders; i++) {
3092 		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
3093 		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
3094 						  reader_tasks[i]);
3095 		if (torture_init_error(firsterr))
3096 			goto unwind;
3097 	}
3098 	nrealnocbers = nocbs_nthreads;
3099 	if (WARN_ON(nrealnocbers < 0))
3100 		nrealnocbers = 1;
3101 	if (WARN_ON(nocbs_toggle < 0))
3102 		nocbs_toggle = HZ;
3103 	if (nrealnocbers > 0) {
3104 		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
3105 		if (nocb_tasks == NULL) {
3106 			VERBOSE_TOROUT_ERRSTRING("out of memory");
3107 			firsterr = -ENOMEM;
3108 			goto unwind;
3109 		}
3110 	} else {
3111 		nocb_tasks = NULL;
3112 	}
3113 	for (i = 0; i < nrealnocbers; i++) {
3114 		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
3115 		if (torture_init_error(firsterr))
3116 			goto unwind;
3117 	}
3118 	if (stat_interval > 0) {
3119 		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
3120 						  stats_task);
3121 		if (torture_init_error(firsterr))
3122 			goto unwind;
3123 	}
3124 	if (test_no_idle_hz && shuffle_interval > 0) {
3125 		firsterr = torture_shuffle_init(shuffle_interval * HZ);
3126 		if (torture_init_error(firsterr))
3127 			goto unwind;
3128 	}
3129 	if (stutter < 0)
3130 		stutter = 0;
3131 	if (stutter) {
3132 		int t;
3133 
3134 		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
3135 		firsterr = torture_stutter_init(stutter * HZ, t);
3136 		if (torture_init_error(firsterr))
3137 			goto unwind;
3138 	}
3139 	if (fqs_duration < 0)
3140 		fqs_duration = 0;
3141 	if (fqs_duration) {
3142 		/* Create the fqs thread */
3143 		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
3144 						  fqs_task);
3145 		if (torture_init_error(firsterr))
3146 			goto unwind;
3147 	}
3148 	if (test_boost_interval < 1)
3149 		test_boost_interval = 1;
3150 	if (test_boost_duration < 2)
3151 		test_boost_duration = 2;
3152 	if (rcu_torture_can_boost()) {
3153 
3154 		boost_starttime = jiffies + test_boost_interval * HZ;
3155 
3156 		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
3157 					     rcutorture_booster_init,
3158 					     rcutorture_booster_cleanup);
3159 		rcutor_hp = firsterr;
3160 		if (torture_init_error(firsterr))
3161 			goto unwind;
3162 
3163 		// Testing RCU priority boosting requires that rcutorture do
3164 		// some serious abuse.  Counter this by running ksoftirqd
3165 		// at higher priority.
3166 		if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
3167 			for_each_online_cpu(cpu) {
3168 				struct sched_param sp;
3169 				struct task_struct *t;
3170 
3171 				t = per_cpu(ksoftirqd, cpu);
3172 				WARN_ON_ONCE(!t);
3173 				sp.sched_priority = 2;
3174 				sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3175 			}
3176 		}
3177 	}
3178 	shutdown_jiffies = jiffies + shutdown_secs * HZ;
3179 	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
3180 	if (torture_init_error(firsterr))
3181 		goto unwind;
3182 	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
3183 				      rcutorture_sync);
3184 	if (torture_init_error(firsterr))
3185 		goto unwind;
3186 	firsterr = rcu_torture_stall_init();
3187 	if (torture_init_error(firsterr))
3188 		goto unwind;
3189 	firsterr = rcu_torture_fwd_prog_init();
3190 	if (torture_init_error(firsterr))
3191 		goto unwind;
3192 	firsterr = rcu_torture_barrier_init();
3193 	if (torture_init_error(firsterr))
3194 		goto unwind;
3195 	firsterr = rcu_torture_read_exit_init();
3196 	if (torture_init_error(firsterr))
3197 		goto unwind;
3198 	if (object_debug)
3199 		rcu_test_debug_objects();
3200 	torture_init_end();
3201 	return 0;
3202 
3203 unwind:
3204 	torture_init_end();
3205 	rcu_torture_cleanup();
3206 	if (shutdown_secs) {
3207 		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
3208 		kernel_power_off();
3209 	}
3210 	return firsterr;
3211 }
3212 
3213 module_init(rcu_torture_init);
3214 module_exit(rcu_torture_cleanup);
3215