1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Read-Copy Update module-based torture test facility
4  *
5  * Copyright (C) IBM Corporation, 2005, 2006
6  *
7  * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
8  *	  Josh Triplett <josh@joshtriplett.org>
9  *
10  * See also:  Documentation/RCU/torture.rst
11  */
12 
13 #define pr_fmt(fmt) fmt
14 
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/kthread.h>
20 #include <linux/err.h>
21 #include <linux/spinlock.h>
22 #include <linux/smp.h>
23 #include <linux/rcupdate_wait.h>
24 #include <linux/interrupt.h>
25 #include <linux/sched/signal.h>
26 #include <uapi/linux/sched/types.h>
27 #include <linux/atomic.h>
28 #include <linux/bitops.h>
29 #include <linux/completion.h>
30 #include <linux/moduleparam.h>
31 #include <linux/percpu.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <linux/freezer.h>
35 #include <linux/cpu.h>
36 #include <linux/delay.h>
37 #include <linux/stat.h>
38 #include <linux/srcu.h>
39 #include <linux/slab.h>
40 #include <linux/trace_clock.h>
41 #include <asm/byteorder.h>
42 #include <linux/torture.h>
43 #include <linux/vmalloc.h>
44 #include <linux/sched/debug.h>
45 #include <linux/sched/sysctl.h>
46 #include <linux/oom.h>
47 #include <linux/tick.h>
48 #include <linux/rcupdate_trace.h>
49 
50 #include "rcu.h"
51 
52 MODULE_LICENSE("GPL");
53 MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
54 
55 /* Bits for ->extendables field, extendables param, and related definitions. */
56 #define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
57 #define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
58 #define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
59 #define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
60 #define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
61 #define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
62 #define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
63 #define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
64 #define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
65 #define RCUTORTURE_MAX_EXTEND	 \
66 	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
67 	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
68 #define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
69 					/* Must be power of two minus one. */
70 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
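
/*
 * Illustrative encoding sketch (not used directly by the code): with
 * the definitions above, a reader state of
 *
 *	(1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RCU
 *
 * describes a reader holding the flavor's read lock (index 1, for
 * example an SRCU index, in the upper bits) with bh also disabled.
 * The low-order RCUTORTURE_RDR_MASK bits select protections; the bits
 * at and above RCUTORTURE_RDR_SHIFT carry the ->readlock() return value.
 */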
71 
72 torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
73 	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
74 torture_param(int, fqs_duration, 0,
75 	      "Duration of fqs bursts (us), 0 to disable");
76 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
77 torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
78 torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
79 torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
80 torture_param(int, fwd_progress_holdoff, 60,
81 	      "Time between forward-progress tests (s)");
82 torture_param(bool, fwd_progress_need_resched, 1,
83 	      "Hide cond_resched() behind need_resched()");
84 torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
85 torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
86 torture_param(bool, gp_normal, false,
87 	     "Use normal (non-expedited) GP wait primitives");
88 torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
89 torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
90 torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
91 torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
92 torture_param(int, n_barrier_cbs, 0,
93 	     "# of callbacks/kthreads for barrier testing");
94 torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
95 torture_param(int, nreaders, -1, "Number of RCU reader threads");
96 torture_param(int, object_debug, 0,
97 	     "Enable debug-object double call_rcu() testing");
98 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
99 torture_param(int, onoff_interval, 0,
100 	     "Time between CPU hotplugs (jiffies), 0=disable");
101 torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
102 torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
103 torture_param(int, read_exit_delay, 13,
104 	      "Delay between read-then-exit episodes (s)");
105 torture_param(int, read_exit_burst, 16,
106 	      "# of read-then-exit bursts per episode, zero to disable");
107 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
108 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
109 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
110 torture_param(int, stall_cpu_holdoff, 10,
111 	     "Time to wait before starting stall (s).");
112 torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
113 torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
114 torture_param(int, stall_gp_kthread, 0,
115 	      "Grace-period kthread stall duration (s).");
116 torture_param(int, stat_interval, 60,
117 	     "Number of seconds between stats printk()s");
118 torture_param(int, stutter, 5, "Number of seconds to run/halt test");
119 torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
120 torture_param(int, test_boost_duration, 4,
121 	     "Duration of each boost test, seconds.");
122 torture_param(int, test_boost_interval, 7,
123 	     "Interval between boost tests, seconds.");
124 torture_param(bool, test_no_idle_hz, true,
125 	     "Test support for tickless idle CPUs");
126 torture_param(int, verbose, 1,
127 	     "Enable verbose debugging printk()s");
128 
129 static char *torture_type = "rcu";
130 module_param(torture_type, charp, 0444);
131 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
132 
133 static int nrealnocbers;
134 static int nrealreaders;
135 static struct task_struct *writer_task;
136 static struct task_struct **fakewriter_tasks;
137 static struct task_struct **reader_tasks;
138 static struct task_struct **nocb_tasks;
139 static struct task_struct *stats_task;
140 static struct task_struct *fqs_task;
141 static struct task_struct *boost_tasks[NR_CPUS];
142 static struct task_struct *stall_task;
143 static struct task_struct *fwd_prog_task;
144 static struct task_struct **barrier_cbs_tasks;
145 static struct task_struct *barrier_task;
146 static struct task_struct *read_exit_task;
147 
148 #define RCU_TORTURE_PIPE_LEN 10
149 
150 // Mailbox-like structure to check RCU global memory ordering.
151 struct rcu_torture_reader_check {
152 	unsigned long rtc_myloops;
153 	int rtc_chkrdr;
154 	unsigned long rtc_chkloops;
155 	int rtc_ready;
156 	struct rcu_torture_reader_check *rtc_assigner;
157 } ____cacheline_internodealigned_in_smp;
158 
159 // Update-side data structure used to check RCU readers.
160 struct rcu_torture {
161 	struct rcu_head rtort_rcu;
162 	int rtort_pipe_count;
163 	struct list_head rtort_free;
164 	int rtort_mbtest;
165 	struct rcu_torture_reader_check *rtort_chkp;
166 };
167 
168 static LIST_HEAD(rcu_torture_freelist);
169 static struct rcu_torture __rcu *rcu_torture_current;
170 static unsigned long rcu_torture_current_version;
171 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
172 static DEFINE_SPINLOCK(rcu_torture_lock);
173 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
174 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
175 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
176 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
177 static atomic_t n_rcu_torture_alloc;
178 static atomic_t n_rcu_torture_alloc_fail;
179 static atomic_t n_rcu_torture_free;
180 static atomic_t n_rcu_torture_mberror;
181 static atomic_t n_rcu_torture_mbchk_fail;
182 static atomic_t n_rcu_torture_mbchk_tries;
183 static atomic_t n_rcu_torture_error;
184 static long n_rcu_torture_barrier_error;
185 static long n_rcu_torture_boost_ktrerror;
186 static long n_rcu_torture_boost_rterror;
187 static long n_rcu_torture_boost_failure;
188 static long n_rcu_torture_boosts;
189 static atomic_long_t n_rcu_torture_timers;
190 static long n_barrier_attempts;
191 static long n_barrier_successes; /* did rcu_barrier test succeed? */
192 static unsigned long n_read_exits;
193 static struct list_head rcu_torture_removed;
194 static unsigned long shutdown_jiffies;
195 static unsigned long start_gp_seq;
196 static atomic_long_t n_nocb_offload;
197 static atomic_long_t n_nocb_deoffload;
198 
199 static int rcu_torture_writer_state;
200 #define RTWS_FIXED_DELAY	0
201 #define RTWS_DELAY		1
202 #define RTWS_REPLACE		2
203 #define RTWS_DEF_FREE		3
204 #define RTWS_EXP_SYNC		4
205 #define RTWS_COND_GET		5
206 #define RTWS_COND_SYNC		6
207 #define RTWS_POLL_GET		7
208 #define RTWS_POLL_WAIT		8
209 #define RTWS_SYNC		9
210 #define RTWS_STUTTER		10
211 #define RTWS_STOPPING		11
212 static const char * const rcu_torture_writer_state_names[] = {
213 	"RTWS_FIXED_DELAY",
214 	"RTWS_DELAY",
215 	"RTWS_REPLACE",
216 	"RTWS_DEF_FREE",
217 	"RTWS_EXP_SYNC",
218 	"RTWS_COND_GET",
219 	"RTWS_COND_SYNC",
220 	"RTWS_POLL_GET",
221 	"RTWS_POLL_WAIT",
222 	"RTWS_SYNC",
223 	"RTWS_STUTTER",
224 	"RTWS_STOPPING",
225 };
226 
227 /* Record reader segment types and duration for first failing read. */
228 struct rt_read_seg {
229 	int rt_readstate;
230 	unsigned long rt_delay_jiffies;
231 	unsigned long rt_delay_ms;
232 	unsigned long rt_delay_us;
233 	bool rt_preempted;
234 };
235 static int err_segs_recorded;
236 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
237 static int rt_read_nsegs;
238 
239 static const char *rcu_torture_writer_state_getname(void)
240 {
241 	unsigned int i = READ_ONCE(rcu_torture_writer_state);
242 
243 	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
244 		return "???";
245 	return rcu_torture_writer_state_names[i];
246 }
247 
248 #ifdef CONFIG_RCU_TRACE
249 static u64 notrace rcu_trace_clock_local(void)
250 {
251 	u64 ts = trace_clock_local();
252 
253 	(void)do_div(ts, NSEC_PER_USEC);
254 	return ts;
255 }
256 #else /* #ifdef CONFIG_RCU_TRACE */
257 static u64 notrace rcu_trace_clock_local(void)
258 {
259 	return 0ULL;
260 }
261 #endif /* #else #ifdef CONFIG_RCU_TRACE */
262 
263 /*
264  * Stop aggressive CPU-hog tests a bit before the end of the test in order
265  * to avoid interfering with test shutdown.
266  */
267 static bool shutdown_time_arrived(void)
268 {
269 	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
270 }
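
/*
 * Example (assuming, per the initialization code later in this file,
 * that shutdown_jiffies is jiffies at test start plus shutdown_secs * HZ):
 * with shutdown_secs == 120, the aggressive CPU-hog tests stand down
 * during the final 30 seconds of the run.
 */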
271 
272 static unsigned long boost_starttime;	/* jiffies of next boost test start. */
273 static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
274 					/*  and boost task create/destroy. */
275 static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
276 static bool barrier_phase;		/* Test phase. */
277 static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
278 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
279 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
280 
281 static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */
282 
283 /*
284  * Allocate an element from the rcu_tortures pool.
285  */
286 static struct rcu_torture *
287 rcu_torture_alloc(void)
288 {
289 	struct list_head *p;
290 
291 	spin_lock_bh(&rcu_torture_lock);
292 	if (list_empty(&rcu_torture_freelist)) {
293 		atomic_inc(&n_rcu_torture_alloc_fail);
294 		spin_unlock_bh(&rcu_torture_lock);
295 		return NULL;
296 	}
297 	atomic_inc(&n_rcu_torture_alloc);
298 	p = rcu_torture_freelist.next;
299 	list_del_init(p);
300 	spin_unlock_bh(&rcu_torture_lock);
301 	return container_of(p, struct rcu_torture, rtort_free);
302 }
303 
304 /*
305  * Free an element to the rcu_tortures pool.
306  */
307 static void
308 rcu_torture_free(struct rcu_torture *p)
309 {
310 	atomic_inc(&n_rcu_torture_free);
311 	spin_lock_bh(&rcu_torture_lock);
312 	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
313 	spin_unlock_bh(&rcu_torture_lock);
314 }
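
/*
 * Illustrative usage sketch of the pool API above (the writer kthread
 * below does this for real, with extra bookkeeping and grace-period
 * waits):
 *
 *	struct rcu_torture *rp = rcu_torture_alloc();
 *
 *	if (rp) {
 *		rp->rtort_pipe_count = 0;
 *		// ... publish rp, then retire it after grace periods ...
 *		rcu_torture_free(rp);
 *	}
 */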
315 
316 /*
317  * Operations vector for selecting different types of tests.
318  */
319 
320 struct rcu_torture_ops {
321 	int ttype;
322 	void (*init)(void);
323 	void (*cleanup)(void);
324 	int (*readlock)(void);
325 	void (*read_delay)(struct torture_random_state *rrsp,
326 			   struct rt_read_seg *rtrsp);
327 	void (*readunlock)(int idx);
328 	int (*readlock_held)(void);
329 	unsigned long (*get_gp_seq)(void);
330 	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
331 	void (*deferred_free)(struct rcu_torture *p);
332 	void (*sync)(void);
333 	void (*exp_sync)(void);
334 	unsigned long (*get_gp_state)(void);
335 	unsigned long (*start_gp_poll)(void);
336 	bool (*poll_gp_state)(unsigned long oldstate);
337 	void (*cond_sync)(unsigned long oldstate);
338 	call_rcu_func_t call;
339 	void (*cb_barrier)(void);
340 	void (*fqs)(void);
341 	void (*stats)(void);
342 	void (*gp_kthread_dbg)(void);
343 	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
344 	int (*stall_dur)(void);
345 	int irq_capable;
346 	int can_boost;
347 	int extendables;
348 	int slow_gps;
349 	const char *name;
350 };
351 
352 static struct rcu_torture_ops *cur_ops;
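
/*
 * Dispatch sketch, mirroring the call sites later in this file:
 * mandatory hooks are invoked directly and optional hooks are
 * NULL-checked first.
 *
 *	int idx = cur_ops->readlock();
 *	// ... read-side critical section ...
 *	cur_ops->readunlock(idx);
 *	if (cur_ops->stats)
 *		cur_ops->stats();
 */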
353 
354 /*
355  * Definitions for rcu torture testing.
356  */
357 
358 static int torture_readlock_not_held(void)
359 {
360 	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
361 }
362 
363 static int rcu_torture_read_lock(void) __acquires(RCU)
364 {
365 	rcu_read_lock();
366 	return 0;
367 }
368 
369 static void
370 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
371 {
372 	unsigned long started;
373 	unsigned long completed;
374 	const unsigned long shortdelay_us = 200;
375 	unsigned long longdelay_ms = 300;
376 	unsigned long long ts;
377 
378 	/* We want a short delay sometimes to make a reader delay the grace
379 	 * period, and we want a long delay occasionally to trigger
380 	 * force_quiescent_state. */
381 
382 	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
383 	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
384 		started = cur_ops->get_gp_seq();
385 		ts = rcu_trace_clock_local();
386 		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
387 			longdelay_ms = 5; /* Avoid triggering BH limits. */
388 		mdelay(longdelay_ms);
389 		rtrsp->rt_delay_ms = longdelay_ms;
390 		completed = cur_ops->get_gp_seq();
391 		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
392 					  started, completed);
393 	}
394 	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
395 		udelay(shortdelay_us);
396 		rtrsp->rt_delay_us = shortdelay_us;
397 	}
398 	if (!preempt_count() &&
399 	    !(torture_random(rrsp) % (nrealreaders * 500))) {
400 		torture_preempt_schedule();  /* QS only if preemptible. */
401 		rtrsp->rt_preempted = true;
402 	}
403 }
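
/*
 * Rough odds for the delays above (illustrative arithmetic assuming
 * the default longdelay_ms of 300 and, say, nrealreaders == 16): the
 * long mdelay() fires about once per 16 * 2000 * 300 = 9,600,000
 * calls, the short udelay() about once per 16 * 2 * 200 = 6,400 calls,
 * and the preemption attempt about once per 16 * 500 = 8,000 calls
 * from preemptible context.
 */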
404 
405 static void rcu_torture_read_unlock(int idx) __releases(RCU)
406 {
407 	rcu_read_unlock();
408 }
409 
410 /*
411  * Update callback in the pipe.  This should be invoked after a grace period.
412  */
413 static bool
414 rcu_torture_pipe_update_one(struct rcu_torture *rp)
415 {
416 	int i;
417 	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);
418 
419 	if (rtrcp) {
420 		WRITE_ONCE(rp->rtort_chkp, NULL);
421 		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
422 	}
423 	i = READ_ONCE(rp->rtort_pipe_count);
424 	if (i > RCU_TORTURE_PIPE_LEN)
425 		i = RCU_TORTURE_PIPE_LEN;
426 	atomic_inc(&rcu_torture_wcount[i]);
427 	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
428 	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
429 		rp->rtort_mbtest = 0;
430 		return true;
431 	}
432 	return false;
433 }
434 
435 /*
436  * Update all callbacks in the pipe.  Suitable for synchronous grace-period
437  * primitives.
438  */
439 static void
440 rcu_torture_pipe_update(struct rcu_torture *old_rp)
441 {
442 	struct rcu_torture *rp;
443 	struct rcu_torture *rp1;
444 
445 	if (old_rp)
446 		list_add(&old_rp->rtort_free, &rcu_torture_removed);
447 	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
448 		if (rcu_torture_pipe_update_one(rp)) {
449 			list_del(&rp->rtort_free);
450 			rcu_torture_free(rp);
451 		}
452 	}
453 }
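
/*
 * Pipeline sketch: ->rtort_pipe_count is bumped once at replacement
 * time and once per subsequent grace period.  A reader observing a
 * count greater than 1 thus means that a grace period ended while the
 * reader was still within its critical section, indicating a broken
 * RCU implementation.  After enough increments the element is presumed
 * safe and is returned to the free pool.
 */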
454 
455 static void
456 rcu_torture_cb(struct rcu_head *p)
457 {
458 	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
459 
460 	if (torture_must_stop_irq()) {
461 		/* Test is ending, just drop callbacks on the floor. */
462 		/* The next initialization will pick up the pieces. */
463 		return;
464 	}
465 	if (rcu_torture_pipe_update_one(rp))
466 		rcu_torture_free(rp);
467 	else
468 		cur_ops->deferred_free(rp);
469 }
470 
471 static unsigned long rcu_no_completed(void)
472 {
473 	return 0;
474 }
475 
476 static void rcu_torture_deferred_free(struct rcu_torture *p)
477 {
478 	call_rcu(&p->rtort_rcu, rcu_torture_cb);
479 }
480 
481 static void rcu_sync_torture_init(void)
482 {
483 	INIT_LIST_HEAD(&rcu_torture_removed);
484 }
485 
486 static struct rcu_torture_ops rcu_ops = {
487 	.ttype			= RCU_FLAVOR,
488 	.init			= rcu_sync_torture_init,
489 	.readlock		= rcu_torture_read_lock,
490 	.read_delay		= rcu_read_delay,
491 	.readunlock		= rcu_torture_read_unlock,
492 	.readlock_held		= torture_readlock_not_held,
493 	.get_gp_seq		= rcu_get_gp_seq,
494 	.gp_diff		= rcu_seq_diff,
495 	.deferred_free		= rcu_torture_deferred_free,
496 	.sync			= synchronize_rcu,
497 	.exp_sync		= synchronize_rcu_expedited,
498 	.get_gp_state		= get_state_synchronize_rcu,
499 	.start_gp_poll		= start_poll_synchronize_rcu,
500 	.poll_gp_state		= poll_state_synchronize_rcu,
501 	.cond_sync		= cond_synchronize_rcu,
502 	.call			= call_rcu,
503 	.cb_barrier		= rcu_barrier,
504 	.fqs			= rcu_force_quiescent_state,
505 	.stats			= NULL,
506 	.gp_kthread_dbg		= show_rcu_gp_kthreads,
507 	.check_boost_failed	= rcu_check_boost_fail,
508 	.stall_dur		= rcu_jiffies_till_stall_check,
509 	.irq_capable		= 1,
510 	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
511 	.extendables		= RCUTORTURE_MAX_EXTEND,
512 	.name			= "rcu"
513 };
514 
515 /*
516  * Don't even think about trying any of these in real life!!!
517  * The names include "busted", and they really mean it!
518  * The only purpose of these functions is to provide a buggy RCU
519  * implementation to make sure that rcutorture correctly emits
520  * buggy-RCU error messages.
521  */
522 static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
523 {
524 	/* This is a deliberate bug for testing purposes only! */
525 	rcu_torture_cb(&p->rtort_rcu);
526 }
527 
528 static void synchronize_rcu_busted(void)
529 {
530 	/* This is a deliberate bug for testing purposes only! */
531 }
532 
533 static void
534 call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
535 {
536 	/* This is a deliberate bug for testing purposes only! */
537 	func(head);
538 }
539 
540 static struct rcu_torture_ops rcu_busted_ops = {
541 	.ttype		= INVALID_RCU_FLAVOR,
542 	.init		= rcu_sync_torture_init,
543 	.readlock	= rcu_torture_read_lock,
544 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
545 	.readunlock	= rcu_torture_read_unlock,
546 	.readlock_held	= torture_readlock_not_held,
547 	.get_gp_seq	= rcu_no_completed,
548 	.deferred_free	= rcu_busted_torture_deferred_free,
549 	.sync		= synchronize_rcu_busted,
550 	.exp_sync	= synchronize_rcu_busted,
551 	.call		= call_rcu_busted,
552 	.cb_barrier	= NULL,
553 	.fqs		= NULL,
554 	.stats		= NULL,
555 	.irq_capable	= 1,
556 	.name		= "busted"
557 };
558 
559 /*
560  * Definitions for srcu torture testing.
561  */
562 
563 DEFINE_STATIC_SRCU(srcu_ctl);
564 static struct srcu_struct srcu_ctld;
565 static struct srcu_struct *srcu_ctlp = &srcu_ctl;
566 
567 static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
568 {
569 	return srcu_read_lock(srcu_ctlp);
570 }
571 
572 static void
573 srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
574 {
575 	long delay;
576 	const long uspertick = 1000000 / HZ;
577 	const long longdelay = 10;
578 
579 	/* We want there to be long-running readers, but not all the time. */
580 
581 	delay = torture_random(rrsp) %
582 		(nrealreaders * 2 * longdelay * uspertick);
583 	if (!delay && in_task()) {
584 		schedule_timeout_interruptible(longdelay);
585 		rtrsp->rt_delay_jiffies = longdelay;
586 	} else {
587 		rcu_read_delay(rrsp, rtrsp);
588 	}
589 }
590 
591 static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
592 {
593 	srcu_read_unlock(srcu_ctlp, idx);
594 }
595 
596 static int torture_srcu_read_lock_held(void)
597 {
598 	return srcu_read_lock_held(srcu_ctlp);
599 }
600 
601 static unsigned long srcu_torture_completed(void)
602 {
603 	return srcu_batches_completed(srcu_ctlp);
604 }
605 
606 static void srcu_torture_deferred_free(struct rcu_torture *rp)
607 {
608 	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
609 }
610 
611 static void srcu_torture_synchronize(void)
612 {
613 	synchronize_srcu(srcu_ctlp);
614 }
615 
616 static unsigned long srcu_torture_get_gp_state(void)
617 {
618 	return get_state_synchronize_srcu(srcu_ctlp);
619 }
620 
621 static unsigned long srcu_torture_start_gp_poll(void)
622 {
623 	return start_poll_synchronize_srcu(srcu_ctlp);
624 }
625 
626 static bool srcu_torture_poll_gp_state(unsigned long oldstate)
627 {
628 	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
629 }
630 
631 static void srcu_torture_call(struct rcu_head *head,
632 			      rcu_callback_t func)
633 {
634 	call_srcu(srcu_ctlp, head, func);
635 }
636 
637 static void srcu_torture_barrier(void)
638 {
639 	srcu_barrier(srcu_ctlp);
640 }
641 
642 static void srcu_torture_stats(void)
643 {
644 	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
645 }
646 
647 static void srcu_torture_synchronize_expedited(void)
648 {
649 	synchronize_srcu_expedited(srcu_ctlp);
650 }
651 
652 static struct rcu_torture_ops srcu_ops = {
653 	.ttype		= SRCU_FLAVOR,
654 	.init		= rcu_sync_torture_init,
655 	.readlock	= srcu_torture_read_lock,
656 	.read_delay	= srcu_read_delay,
657 	.readunlock	= srcu_torture_read_unlock,
658 	.readlock_held	= torture_srcu_read_lock_held,
659 	.get_gp_seq	= srcu_torture_completed,
660 	.deferred_free	= srcu_torture_deferred_free,
661 	.sync		= srcu_torture_synchronize,
662 	.exp_sync	= srcu_torture_synchronize_expedited,
663 	.get_gp_state	= srcu_torture_get_gp_state,
664 	.start_gp_poll	= srcu_torture_start_gp_poll,
665 	.poll_gp_state	= srcu_torture_poll_gp_state,
666 	.call		= srcu_torture_call,
667 	.cb_barrier	= srcu_torture_barrier,
668 	.stats		= srcu_torture_stats,
669 	.irq_capable	= 1,
670 	.name		= "srcu"
671 };
672 
673 static void srcu_torture_init(void)
674 {
675 	rcu_sync_torture_init();
676 	WARN_ON(init_srcu_struct(&srcu_ctld));
677 	srcu_ctlp = &srcu_ctld;
678 }
679 
680 static void srcu_torture_cleanup(void)
681 {
682 	cleanup_srcu_struct(&srcu_ctld);
683 	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
684 }
685 
686 /* As above, but dynamically allocated. */
687 static struct rcu_torture_ops srcud_ops = {
688 	.ttype		= SRCU_FLAVOR,
689 	.init		= srcu_torture_init,
690 	.cleanup	= srcu_torture_cleanup,
691 	.readlock	= srcu_torture_read_lock,
692 	.read_delay	= srcu_read_delay,
693 	.readunlock	= srcu_torture_read_unlock,
694 	.readlock_held	= torture_srcu_read_lock_held,
695 	.get_gp_seq	= srcu_torture_completed,
696 	.deferred_free	= srcu_torture_deferred_free,
697 	.sync		= srcu_torture_synchronize,
698 	.exp_sync	= srcu_torture_synchronize_expedited,
699 	.call		= srcu_torture_call,
700 	.cb_barrier	= srcu_torture_barrier,
701 	.stats		= srcu_torture_stats,
702 	.irq_capable	= 1,
703 	.name		= "srcud"
704 };
705 
706 /* As above, but broken due to inappropriate reader extension. */
707 static struct rcu_torture_ops busted_srcud_ops = {
708 	.ttype		= SRCU_FLAVOR,
709 	.init		= srcu_torture_init,
710 	.cleanup	= srcu_torture_cleanup,
711 	.readlock	= srcu_torture_read_lock,
712 	.read_delay	= rcu_read_delay,
713 	.readunlock	= srcu_torture_read_unlock,
714 	.readlock_held	= torture_srcu_read_lock_held,
715 	.get_gp_seq	= srcu_torture_completed,
716 	.deferred_free	= srcu_torture_deferred_free,
717 	.sync		= srcu_torture_synchronize,
718 	.exp_sync	= srcu_torture_synchronize_expedited,
719 	.call		= srcu_torture_call,
720 	.cb_barrier	= srcu_torture_barrier,
721 	.stats		= srcu_torture_stats,
722 	.irq_capable	= 1,
723 	.extendables	= RCUTORTURE_MAX_EXTEND,
724 	.name		= "busted_srcud"
725 };
726 
727 /*
728  * Definitions for RCU-tasks torture testing.
729  */
730 
731 static int tasks_torture_read_lock(void)
732 {
733 	return 0;
734 }
735 
736 static void tasks_torture_read_unlock(int idx)
737 {
738 }
739 
740 static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
741 {
742 	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
743 }
744 
745 static void synchronize_rcu_mult_test(void)
746 {
747 	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
748 }
749 
750 static struct rcu_torture_ops tasks_ops = {
751 	.ttype		= RCU_TASKS_FLAVOR,
752 	.init		= rcu_sync_torture_init,
753 	.readlock	= tasks_torture_read_lock,
754 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
755 	.readunlock	= tasks_torture_read_unlock,
756 	.get_gp_seq	= rcu_no_completed,
757 	.deferred_free	= rcu_tasks_torture_deferred_free,
758 	.sync		= synchronize_rcu_tasks,
759 	.exp_sync	= synchronize_rcu_mult_test,
760 	.call		= call_rcu_tasks,
761 	.cb_barrier	= rcu_barrier_tasks,
762 	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
763 	.fqs		= NULL,
764 	.stats		= NULL,
765 	.irq_capable	= 1,
766 	.slow_gps	= 1,
767 	.name		= "tasks"
768 };
769 
770 /*
771  * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
772  * This implementation does not necessarily work well with CPU hotplug.
773  */
774 
775 static void synchronize_rcu_trivial(void)
776 {
777 	int cpu;
778 
779 	for_each_online_cpu(cpu) {
780 		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
781 		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
782 	}
783 }
784 
785 static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
786 {
787 	preempt_disable();
788 	return 0;
789 }
790 
791 static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
792 {
793 	preempt_enable();
794 }
795 
796 static struct rcu_torture_ops trivial_ops = {
797 	.ttype		= RCU_TRIVIAL_FLAVOR,
798 	.init		= rcu_sync_torture_init,
799 	.readlock	= rcu_torture_read_lock_trivial,
800 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
801 	.readunlock	= rcu_torture_read_unlock_trivial,
802 	.readlock_held	= torture_readlock_not_held,
803 	.get_gp_seq	= rcu_no_completed,
804 	.sync		= synchronize_rcu_trivial,
805 	.exp_sync	= synchronize_rcu_trivial,
806 	.fqs		= NULL,
807 	.stats		= NULL,
808 	.irq_capable	= 1,
809 	.name		= "trivial"
810 };
811 
812 /*
813  * Definitions for rude RCU-tasks torture testing.
814  */
815 
816 static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
817 {
818 	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
819 }
820 
821 static struct rcu_torture_ops tasks_rude_ops = {
822 	.ttype		= RCU_TASKS_RUDE_FLAVOR,
823 	.init		= rcu_sync_torture_init,
824 	.readlock	= rcu_torture_read_lock_trivial,
825 	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
826 	.readunlock	= rcu_torture_read_unlock_trivial,
827 	.get_gp_seq	= rcu_no_completed,
828 	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
829 	.sync		= synchronize_rcu_tasks_rude,
830 	.exp_sync	= synchronize_rcu_tasks_rude,
831 	.call		= call_rcu_tasks_rude,
832 	.cb_barrier	= rcu_barrier_tasks_rude,
833 	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
834 	.fqs		= NULL,
835 	.stats		= NULL,
836 	.irq_capable	= 1,
837 	.name		= "tasks-rude"
838 };
839 
840 /*
841  * Definitions for tracing RCU-tasks torture testing.
842  */
843 
844 static int tasks_tracing_torture_read_lock(void)
845 {
846 	rcu_read_lock_trace();
847 	return 0;
848 }
849 
850 static void tasks_tracing_torture_read_unlock(int idx)
851 {
852 	rcu_read_unlock_trace();
853 }
854 
855 static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
856 {
857 	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
858 }
859 
860 static struct rcu_torture_ops tasks_tracing_ops = {
861 	.ttype		= RCU_TASKS_TRACING_FLAVOR,
862 	.init		= rcu_sync_torture_init,
863 	.readlock	= tasks_tracing_torture_read_lock,
864 	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
865 	.readunlock	= tasks_tracing_torture_read_unlock,
866 	.readlock_held	= rcu_read_lock_trace_held,
867 	.get_gp_seq	= rcu_no_completed,
868 	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
869 	.sync		= synchronize_rcu_tasks_trace,
870 	.exp_sync	= synchronize_rcu_tasks_trace,
871 	.call		= call_rcu_tasks_trace,
872 	.cb_barrier	= rcu_barrier_tasks_trace,
873 	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
874 	.fqs		= NULL,
875 	.stats		= NULL,
876 	.irq_capable	= 1,
877 	.slow_gps	= 1,
878 	.name		= "tasks-tracing"
879 };
880 
881 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
882 {
883 	if (!cur_ops->gp_diff)
884 		return new - old;
885 	return cur_ops->gp_diff(new, old);
886 }
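
/*
 * Example: flavors whose ->get_gp_seq() returns a plain free-running
 * counter leave ->gp_diff NULL and get simple subtraction above.  RCU
 * itself supplies rcu_seq_diff() because its grace-period sequence
 * numbers keep phase state in the low-order bits, so raw subtraction
 * would overstate the number of elapsed grace periods.
 */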
887 
888 /*
889  * RCU torture priority-boost testing.  Runs one real-time thread per
890  * CPU for moderate bursts, repeatedly starting grace periods and waiting
891  * for them to complete.  If a given grace period takes too long, we assume
892  * that priority inversion has occurred.
893  */
894 
895 static int old_rt_runtime = -1;
896 
897 static void rcu_torture_disable_rt_throttle(void)
898 {
899 	/*
900 	 * Disable RT throttling so that rcutorture's boost threads don't get
901 	 * throttled. Only possible if rcutorture is built-in; otherwise, the
902 	 * user should manually do this by setting the sched_rt_period_us and
903 	 * sched_rt_runtime sysctls.
904 	 */
905 	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
906 		return;
907 
908 	old_rt_runtime = sysctl_sched_rt_runtime;
909 	sysctl_sched_rt_runtime = -1;
910 }
911 
912 static void rcu_torture_enable_rt_throttle(void)
913 {
914 	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
915 		return;
916 
917 	sysctl_sched_rt_runtime = old_rt_runtime;
918 	old_rt_runtime = -1;
919 }
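
/*
 * For modular rcutorture the equivalent manual step, per the comment
 * above, is (illustrative shell, run before starting the test):
 *
 *	echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * with the saved value (typically 950000) restored afterwards.
 */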
920 
921 static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
922 {
923 	int cpu;
924 	static int dbg_done;
925 	unsigned long end = jiffies;
926 	bool gp_done;
927 	unsigned long j;
928 	static unsigned long last_persist;
929 	unsigned long lp;
930 	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;
931 
932 	if (end - *start > mininterval) {
933 		// Recheck after checking time to avoid false positives.
934 		smp_mb(); // Time check before grace-period check.
935 		if (cur_ops->poll_gp_state(gp_state))
936 			return false; // passed, though perhaps just barely
937 		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
938 			// At most one persisted message per boost test.
939 			j = jiffies;
940 			lp = READ_ONCE(last_persist);
941 			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
942 				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
943 			return false; // passed on a technicality
944 		}
945 		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
946 		n_rcu_torture_boost_failure++;
947 		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
948 			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
949 				current->rt_priority, gp_state, end - *start);
950 			cur_ops->gp_kthread_dbg();
951 			// Recheck after print to flag grace period ending during splat.
952 			gp_done = cur_ops->poll_gp_state(gp_state);
953 			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
954 				gp_done ? "ended already" : "still pending");
955 
956 		}
957 
958 		return true; // failed
959 	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
960 		*start = jiffies;
961 	}
962 
963 	return false; // passed
964 }
965 
966 static int rcu_torture_boost(void *arg)
967 {
968 	unsigned long endtime;
969 	unsigned long gp_state;
970 	unsigned long gp_state_time;
971 	unsigned long oldstarttime;
972 
973 	VERBOSE_TOROUT_STRING("rcu_torture_boost started");
974 
975 	/* Set real-time priority. */
976 	sched_set_fifo_low(current);
977 
978 	/* Each pass through the following loop does one boost-test cycle. */
979 	do {
980 		bool failed = false; // Test failed already in this test interval
981 		bool gp_initiated = false;
982 
983 		if (kthread_should_stop())
984 			goto checkwait;
985 
986 		/* Wait for the next test interval. */
987 		oldstarttime = boost_starttime;
988 		while (time_before(jiffies, oldstarttime)) {
989 			schedule_timeout_interruptible(oldstarttime - jiffies);
990 			if (stutter_wait("rcu_torture_boost"))
991 				sched_set_fifo_low(current);
992 			if (torture_must_stop())
993 				goto checkwait;
994 		}
995 
996 		// Do one boost-test interval.
997 		endtime = oldstarttime + test_boost_duration * HZ;
998 		while (time_before(jiffies, endtime)) {
999 			// Has current GP gone too long?
1000 			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1001 				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
1002 			// If we don't have a grace period in flight, start one.
1003 			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
1004 				gp_state = cur_ops->start_gp_poll();
1005 				gp_initiated = true;
1006 				gp_state_time = jiffies;
1007 			}
1008 			if (stutter_wait("rcu_torture_boost")) {
1009 				sched_set_fifo_low(current);
1010 				// If the grace period already ended,
1011 				// we don't know when that happened, so
1012 				// start over.
1013 				if (cur_ops->poll_gp_state(gp_state))
1014 					gp_initiated = false;
1015 			}
1016 			if (torture_must_stop())
1017 				goto checkwait;
1018 		}
1019 
1020 		// In case the grace period extended beyond the end of the loop.
1021 		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
1022 			rcu_torture_boost_failed(gp_state, &gp_state_time);
1023 
1024 		/*
1025 		 * Set the start time of the next test interval.
1026 		 * Yes, this is vulnerable to long delays, but such
1027 		 * delays simply cause a false negative for the next
1028 		 * interval.  Besides, we are running at RT priority,
1029 		 * so delays should be relatively rare.
1030 		 */
1031 		while (oldstarttime == boost_starttime && !kthread_should_stop()) {
1032 			if (mutex_trylock(&boost_mutex)) {
1033 				if (oldstarttime == boost_starttime) {
1034 					boost_starttime = jiffies + test_boost_interval * HZ;
1035 					n_rcu_torture_boosts++;
1036 				}
1037 				mutex_unlock(&boost_mutex);
1038 				break;
1039 			}
1040 			schedule_timeout_uninterruptible(1);
1041 		}
1042 
1043 		/* Go do the stutter. */
1044 checkwait:	if (stutter_wait("rcu_torture_boost"))
1045 			sched_set_fifo_low(current);
1046 	} while (!torture_must_stop());
1047 
1048 	/* Clean up and exit. */
1049 	while (!kthread_should_stop()) {
1050 		torture_shutdown_absorb("rcu_torture_boost");
1051 		schedule_timeout_uninterruptible(1);
1052 	}
1053 	torture_kthread_stopping("rcu_torture_boost");
1054 	return 0;
1055 }
1056 
1057 /*
1058  * RCU torture force-quiescent-state kthread.  Repeatedly induces
1059  * bursts of calls to force_quiescent_state(), increasing the probability
1060  * of occurrence of some important types of race conditions.
1061  */
1062 static int
1063 rcu_torture_fqs(void *arg)
1064 {
1065 	unsigned long fqs_resume_time;
1066 	int fqs_burst_remaining;
1067 	int oldnice = task_nice(current);
1068 
1069 	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
1070 	do {
1071 		fqs_resume_time = jiffies + fqs_stutter * HZ;
1072 		while (time_before(jiffies, fqs_resume_time) &&
1073 		       !kthread_should_stop()) {
1074 			schedule_timeout_interruptible(1);
1075 		}
1076 		fqs_burst_remaining = fqs_duration;
1077 		while (fqs_burst_remaining > 0 &&
1078 		       !kthread_should_stop()) {
1079 			cur_ops->fqs();
1080 			udelay(fqs_holdoff);
1081 			fqs_burst_remaining -= fqs_holdoff;
1082 		}
1083 		if (stutter_wait("rcu_torture_fqs"))
1084 			sched_set_normal(current, oldnice);
1085 	} while (!torture_must_stop());
1086 	torture_kthread_stopping("rcu_torture_fqs");
1087 	return 0;
1088 }
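
/*
 * Burst arithmetic for the above (example values): fqs_duration=1000
 * with fqs_holdoff=10 yields roughly 100 force_quiescent_state()
 * calls spaced 10us apart, and successive bursts are separated by
 * fqs_stutter (default 3) seconds.
 */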
1089 
1090 // Used by writers to randomly choose from the available grace-period
1091 // primitives.  The only purpose of the initialization is to size the array.
1092 static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC };
1093 static int nsynctypes;
1094 
1095 /*
1096  * Determine which grace-period primitives are available.
1097  */
1098 static void rcu_torture_write_types(void)
1099 {
1100 	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
1101 	bool gp_poll1 = gp_poll, gp_sync1 = gp_sync;
1102 
1103 	/* Initialize synctype[] array.  If none set, take default. */
1104 	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1)
1105 		gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true;
1106 	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
1107 		synctype[nsynctypes++] = RTWS_COND_GET;
1108 		pr_info("%s: Testing conditional GPs.\n", __func__);
1109 	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
1110 		pr_alert("%s: gp_cond without primitives.\n", __func__);
1111 	}
1112 	if (gp_exp1 && cur_ops->exp_sync) {
1113 		synctype[nsynctypes++] = RTWS_EXP_SYNC;
1114 		pr_info("%s: Testing expedited GPs.\n", __func__);
1115 	} else if (gp_exp && !cur_ops->exp_sync) {
1116 		pr_alert("%s: gp_exp without primitives.\n", __func__);
1117 	}
1118 	if (gp_normal1 && cur_ops->deferred_free) {
1119 		synctype[nsynctypes++] = RTWS_DEF_FREE;
1120 		pr_info("%s: Testing asynchronous GPs.\n", __func__);
1121 	} else if (gp_normal && !cur_ops->deferred_free) {
1122 		pr_alert("%s: gp_normal without primitives.\n", __func__);
1123 	}
1124 	if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
1125 		synctype[nsynctypes++] = RTWS_POLL_GET;
1126 		pr_info("%s: Testing polling GPs.\n", __func__);
1127 	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
1128 		pr_alert("%s: gp_poll without primitives.\n", __func__);
1129 	}
1130 	if (gp_sync1 && cur_ops->sync) {
1131 		synctype[nsynctypes++] = RTWS_SYNC;
1132 		pr_info("%s: Testing normal GPs.\n", __func__);
1133 	} else if (gp_sync && !cur_ops->sync) {
1134 		pr_alert("%s: gp_sync without primitives.\n", __func__);
1135 	}
1136 }
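
/*
 * Example (hypothetical boot line): rcutorture.gp_exp=1 with the other
 * gp_* parameters left false makes RTWS_EXP_SYNC the only entry in
 * synctype[], so the writers below exercise only cur_ops->exp_sync(),
 * which is handy for stressing the expedited path in isolation.
 */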
1137 
1138 /*
1139  * RCU torture writer kthread.  Repeatedly substitutes a new structure
1140  * for that pointed to by rcu_torture_current, freeing the old structure
1141  * after a series of grace periods (the "pipeline").
1142  */
1143 static int
1144 rcu_torture_writer(void *arg)
1145 {
1146 	bool boot_ended;
1147 	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
1148 	unsigned long cookie;
1149 	int expediting = 0;
1150 	unsigned long gp_snap;
1151 	int i;
1152 	int idx;
1153 	int oldnice = task_nice(current);
1154 	struct rcu_torture *rp;
1155 	struct rcu_torture *old_rp;
1156 	static DEFINE_TORTURE_RANDOM(rand);
1157 	bool stutter_waited;
1158 
1159 	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
1160 	if (!can_expedite)
1161 		pr_alert("%s" TORTURE_FLAG
1162 			 " GP expediting controlled from boot/sysfs for %s.\n",
1163 			 torture_type, cur_ops->name);
1164 	if (WARN_ONCE(nsynctypes == 0,
1165 		      "rcu_torture_writer: No update-side primitives.\n")) {
1166 		/*
1167 		 * No update-side primitives, so don't try updating.
1168 		 * The resulting test won't be testing much, hence the
1169 		 * above WARN_ONCE().
1170 		 */
1171 		rcu_torture_writer_state = RTWS_STOPPING;
1172 		torture_kthread_stopping("rcu_torture_writer");
1173 	}
1174 
1175 	do {
1176 		rcu_torture_writer_state = RTWS_FIXED_DELAY;
1177 		torture_hrtimeout_us(500, 1000, &rand);
1178 		rp = rcu_torture_alloc();
1179 		if (rp == NULL)
1180 			continue;
1181 		rp->rtort_pipe_count = 0;
1182 		rcu_torture_writer_state = RTWS_DELAY;
1183 		udelay(torture_random(&rand) & 0x3ff);
1184 		rcu_torture_writer_state = RTWS_REPLACE;
1185 		old_rp = rcu_dereference_check(rcu_torture_current,
1186 					       current == writer_task);
1187 		rp->rtort_mbtest = 1;
1188 		rcu_assign_pointer(rcu_torture_current, rp);
1189 		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1190 		if (old_rp) {
1191 			i = old_rp->rtort_pipe_count;
1192 			if (i > RCU_TORTURE_PIPE_LEN)
1193 				i = RCU_TORTURE_PIPE_LEN;
1194 			atomic_inc(&rcu_torture_wcount[i]);
1195 			WRITE_ONCE(old_rp->rtort_pipe_count,
1196 				   old_rp->rtort_pipe_count + 1);
1197 			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1198 				idx = cur_ops->readlock();
1199 				cookie = cur_ops->get_gp_state();
1200 				WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
1201 					  cur_ops->poll_gp_state(cookie),
1202 					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1203 					  __func__,
1204 					  rcu_torture_writer_state_getname(),
1205 					  rcu_torture_writer_state,
1206 					  cookie, cur_ops->get_gp_state());
1207 				cur_ops->readunlock(idx);
1208 			}
1209 			switch (synctype[torture_random(&rand) % nsynctypes]) {
1210 			case RTWS_DEF_FREE:
1211 				rcu_torture_writer_state = RTWS_DEF_FREE;
1212 				cur_ops->deferred_free(old_rp);
1213 				break;
1214 			case RTWS_EXP_SYNC:
1215 				rcu_torture_writer_state = RTWS_EXP_SYNC;
1216 				cur_ops->exp_sync();
1217 				rcu_torture_pipe_update(old_rp);
1218 				break;
1219 			case RTWS_COND_GET:
1220 				rcu_torture_writer_state = RTWS_COND_GET;
1221 				gp_snap = cur_ops->get_gp_state();
1222 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1223 				rcu_torture_writer_state = RTWS_COND_SYNC;
1224 				cur_ops->cond_sync(gp_snap);
1225 				rcu_torture_pipe_update(old_rp);
1226 				break;
1227 			case RTWS_POLL_GET:
1228 				rcu_torture_writer_state = RTWS_POLL_GET;
1229 				gp_snap = cur_ops->start_gp_poll();
1230 				rcu_torture_writer_state = RTWS_POLL_WAIT;
1231 				while (!cur_ops->poll_gp_state(gp_snap))
1232 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1233 								  &rand);
1234 				rcu_torture_pipe_update(old_rp);
1235 				break;
1236 			case RTWS_SYNC:
1237 				rcu_torture_writer_state = RTWS_SYNC;
1238 				cur_ops->sync();
1239 				rcu_torture_pipe_update(old_rp);
1240 				break;
1241 			default:
1242 				WARN_ON_ONCE(1);
1243 				break;
1244 			}
1245 		}
1246 		WRITE_ONCE(rcu_torture_current_version,
1247 			   rcu_torture_current_version + 1);
1248 		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
1249 		if (can_expedite &&
1250 		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1251 			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1252 			if (expediting >= 0)
1253 				rcu_expedite_gp();
1254 			else
1255 				rcu_unexpedite_gp();
1256 			if (++expediting > 3)
1257 				expediting = -expediting;
1258 		} else if (!can_expedite) { /* Disabled during boot, recheck. */
1259 			can_expedite = !rcu_gp_is_expedited() &&
1260 				       !rcu_gp_is_normal();
1261 		}
1262 		rcu_torture_writer_state = RTWS_STUTTER;
1263 		boot_ended = rcu_inkernel_boot_has_ended();
1264 		stutter_waited = stutter_wait("rcu_torture_writer");
1265 		if (stutter_waited &&
1266 		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
1267 		    !cur_ops->slow_gps &&
1268 		    !torture_must_stop() &&
1269 		    boot_ended)
1270 			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1271 				if (list_empty(&rcu_tortures[i].rtort_free) &&
1272 				    rcu_access_pointer(rcu_torture_current) !=
1273 				    &rcu_tortures[i]) {
1274 					rcu_ftrace_dump(DUMP_ALL);
1275 					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1276 				}
1277 		if (stutter_waited)
1278 			sched_set_normal(current, oldnice);
1279 	} while (!torture_must_stop());
1280 	rcu_torture_current = NULL;  // Let stats task know that we are done.
1281 	/* Reset expediting back to unexpedited. */
1282 	if (expediting > 0)
1283 		expediting = -expediting;
1284 	while (can_expedite && expediting++ < 0)
1285 		rcu_unexpedite_gp();
1286 	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1287 	if (!can_expedite)
1288 		pr_alert("%s" TORTURE_FLAG
1289 			 " Dynamic grace-period expediting was disabled.\n",
1290 			 torture_type);
1291 	rcu_torture_writer_state = RTWS_STOPPING;
1292 	torture_kthread_stopping("rcu_torture_writer");
1293 	return 0;
1294 }
1295 
1296 /*
1297  * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
1298  * delay between calls.
1299  */
1300 static int
1301 rcu_torture_fakewriter(void *arg)
1302 {
1303 	unsigned long gp_snap;
1304 	DEFINE_TORTURE_RANDOM(rand);
1305 
1306 	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1307 	set_user_nice(current, MAX_NICE);
1308 
1309 	do {
1310 		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
1311 		if (cur_ops->cb_barrier != NULL &&
1312 		    torture_random(&rand) % (nfakewriters * 8) == 0) {
1313 			cur_ops->cb_barrier();
1314 		} else {
1315 			switch (synctype[torture_random(&rand) % nsynctypes]) {
1316 			case RTWS_DEF_FREE:
1317 				break;
1318 			case RTWS_EXP_SYNC:
1319 				cur_ops->exp_sync();
1320 				break;
1321 			case RTWS_COND_GET:
1322 				gp_snap = cur_ops->get_gp_state();
1323 				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1324 				cur_ops->cond_sync(gp_snap);
1325 				break;
1326 			case RTWS_POLL_GET:
1327 				gp_snap = cur_ops->start_gp_poll();
1328 				while (!cur_ops->poll_gp_state(gp_snap)) {
1329 					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1330 								  &rand);
1331 				}
1332 				break;
1333 			case RTWS_SYNC:
1334 				cur_ops->sync();
1335 				break;
1336 			default:
1337 				WARN_ON_ONCE(1);
1338 				break;
1339 			}
1340 		}
1341 		stutter_wait("rcu_torture_fakewriter");
1342 	} while (!torture_must_stop());
1343 
1344 	torture_kthread_stopping("rcu_torture_fakewriter");
1345 	return 0;
1346 }
1347 
1348 static void rcu_torture_timer_cb(struct rcu_head *rhp)
1349 {
1350 	kfree(rhp);
1351 }
1352 
1353 // Set up and carry out testing of RCU's global memory ordering
1354 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1355 					struct torture_random_state *trsp)
1356 {
1357 	unsigned long loops;
1358 	int noc = torture_num_online_cpus();
1359 	int rdrchked;
1360 	int rdrchker;
1361 	struct rcu_torture_reader_check *rtrcp; // Me.
1362 	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
1363 	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
1364 	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
1365 
1366 	if (myid < 0)
1367 		return; // Don't try this from timer handlers.
1368 
1369 	// Increment my counter.
1370 	rtrcp = &rcu_torture_reader_mbchk[myid];
1371 	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1372 
1373 	// Attempt to assign someone else some checking work.
1374 	rdrchked = torture_random(trsp) % nrealreaders;
1375 	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1376 	rdrchker = torture_random(trsp) % nrealreaders;
1377 	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1378 	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1379 	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1380 	    !READ_ONCE(rtp->rtort_chkp) &&
1381 	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1382 		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1383 		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1384 		rtrcp->rtc_chkrdr = rdrchked;
1385 		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1386 		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1387 		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1388 			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1389 	}
1390 
1391 	// If assigned some completed work, do it!
1392 	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1393 	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1394 		return; // No work or work not yet ready.
1395 	rdrchked = rtrcp_assigner->rtc_chkrdr;
1396 	if (WARN_ON_ONCE(rdrchked < 0))
1397 		return;
1398 	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1399 	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1400 	atomic_inc(&n_rcu_torture_mbchk_tries);
1401 	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1402 		atomic_inc(&n_rcu_torture_mbchk_fail);
1403 	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1404 	rtrcp_assigner->rtc_ready = 0;
1405 	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1406 	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1407 }
1408 
1409 /*
1410  * Do one extension of an RCU read-side critical section using the
1411  * current reader state in readstate (set to zero for initial entry
1412  * to extended critical section), set the new state as specified by
1413  * newstate (set to zero for final exit from extended critical section),
1414  * and random-number-generator state in trsp.  If this is neither the
1415  * beginning or end of the critical section and if there was actually a
1416  * change, do a ->read_delay().
1417  */
1418 static void rcutorture_one_extend(int *readstate, int newstate,
1419 				  struct torture_random_state *trsp,
1420 				  struct rt_read_seg *rtrsp)
1421 {
1422 	unsigned long flags;
1423 	int idxnew = -1;
1424 	int idxold = *readstate;
1425 	int statesnew = ~*readstate & newstate;
1426 	int statesold = *readstate & ~newstate;
1427 
1428 	WARN_ON_ONCE(idxold < 0);
1429 	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
1430 	rtrsp->rt_readstate = newstate;
1431 
1432 	/* First, put new protection in place to avoid critical-section gap. */
1433 	if (statesnew & RCUTORTURE_RDR_BH)
1434 		local_bh_disable();
1435 	if (statesnew & RCUTORTURE_RDR_IRQ)
1436 		local_irq_disable();
1437 	if (statesnew & RCUTORTURE_RDR_PREEMPT)
1438 		preempt_disable();
1439 	if (statesnew & RCUTORTURE_RDR_RBH)
1440 		rcu_read_lock_bh();
1441 	if (statesnew & RCUTORTURE_RDR_SCHED)
1442 		rcu_read_lock_sched();
1443 	if (statesnew & RCUTORTURE_RDR_RCU)
1444 		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1445 
1446 	/* Next, remove old protection, irq first due to bh conflict. */
1447 	if (statesold & RCUTORTURE_RDR_IRQ)
1448 		local_irq_enable();
1449 	if (statesold & RCUTORTURE_RDR_BH)
1450 		local_bh_enable();
1451 	if (statesold & RCUTORTURE_RDR_PREEMPT)
1452 		preempt_enable();
1453 	if (statesold & RCUTORTURE_RDR_RBH)
1454 		rcu_read_unlock_bh();
1455 	if (statesold & RCUTORTURE_RDR_SCHED)
1456 		rcu_read_unlock_sched();
1457 	if (statesold & RCUTORTURE_RDR_RCU) {
1458 		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
1459 
1460 		if (lockit)
1461 			raw_spin_lock_irqsave(&current->pi_lock, flags);
1462 		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
1463 		if (lockit)
1464 			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1465 	}
1466 
1467 	/* Delay if neither beginning nor end and there was a change. */
1468 	if ((statesnew || statesold) && *readstate && newstate)
1469 		cur_ops->read_delay(trsp, rtrsp);
1470 
1471 	/* Update the reader state. */
1472 	if (idxnew == -1)
1473 		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
1474 	WARN_ON_ONCE(idxnew < 0);
1475 	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
1476 	*readstate = idxnew | newstate;
1477 	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
1478 	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
1479 }
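
/*
 * Transition sketch: invoking rcutorture_one_extend(&rs, RCUTORTURE_RDR_BH |
 * RCUTORTURE_RDR_RCU, ...) on a reader currently holding only
 * RCUTORTURE_RDR_RCU computes statesnew == RCUTORTURE_RDR_BH while
 * statesold contains no protection flags, so bh is disabled before
 * anything is dropped, the flavor's read lock stays held, and the
 * index bits above RCUTORTURE_RDR_SHIFT are carried over unchanged.
 */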
1480 
1481 /* Return the biggest extendables mask given current RCU and boot parameters. */
1482 static int rcutorture_extend_mask_max(void)
1483 {
1484 	int mask;
1485 
1486 	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1487 	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1488 	mask = mask | RCUTORTURE_RDR_RCU;
1489 	return mask;
1490 }
1491 
1492 /* Return a random protection state mask, but with at least one bit set. */
1493 static int
1494 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1495 {
1496 	int mask = rcutorture_extend_mask_max();
1497 	unsigned long randmask1 = torture_random(trsp) >> 8;
1498 	unsigned long randmask2 = randmask1 >> 3;
1499 
1500 	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
1501 	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
1502 	if (!(randmask1 & 0x7))
1503 		mask = mask & randmask2;
1504 	else
1505 		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1506 	/* Can't enable bh w/irq disabled. */
1507 	if ((mask & RCUTORTURE_RDR_IRQ) &&
1508 	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
1509 	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
1510 		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1511 	return mask ?: RCUTORTURE_RDR_RCU;
1512 }
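
/*
 * Probability sketch for the above: randmask1 & 0x7 is zero about one
 * time in eight, in which case randmask2 may leave several protection
 * bits set at once; otherwise at most one of the RCUTORTURE_RDR_NBITS
 * candidate bits survives the mask.  The trailing "?: RCUTORTURE_RDR_RCU"
 * guarantees that the reader always has at least one form of protection.
 */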
1513 
1514 /*
1515  * Do a randomly selected number of extensions of an existing RCU read-side
1516  * critical section.
1517  */
1518 static struct rt_read_seg *
1519 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1520 		       struct rt_read_seg *rtrsp)
1521 {
1522 	int i;
1523 	int j;
1524 	int mask = rcutorture_extend_mask_max();
1525 
1526 	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1527 	if (!((mask - 1) & mask))
1528 		return rtrsp;  /* Current RCU reader not extendable. */
1529 	/* Bias towards larger numbers of loops. */
1530 	i = (torture_random(trsp) >> 3);
1531 	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1532 	for (j = 0; j < i; j++) {
1533 		mask = rcutorture_extend_mask(*readstate, trsp);
1534 		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1535 	}
1536 	return &rtrsp[j];
1537 }
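
/*
 * Worked example of the loop-count bias above: each of the three bits
 * of (i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS is the OR of two
 * independent random bits, so each is set with probability 3/4.  The
 * resulting segment count of 1 through RCUTORTURE_RDR_MAX_LOOPS + 1 is
 * therefore skewed high: 8 occurs about 42% of the time, 1 under 2%.
 */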
1538 
1539 /*
1540  * Do one read-side critical section, returning false if there was
1541  * no data to read.  Can be invoked both from process context and
1542  * from a timer handler.
1543  */
1544 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
1545 {
1546 	unsigned long cookie;
1547 	int i;
1548 	unsigned long started;
1549 	unsigned long completed;
1550 	int newstate;
1551 	struct rcu_torture *p;
1552 	int pipe_count;
1553 	int readstate = 0;
1554 	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1555 	struct rt_read_seg *rtrsp = &rtseg[0];
1556 	struct rt_read_seg *rtrsp1;
1557 	unsigned long long ts;
1558 
1559 	WARN_ON_ONCE(!rcu_is_watching());
1560 	newstate = rcutorture_extend_mask(readstate, trsp);
1561 	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1562 	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1563 		cookie = cur_ops->get_gp_state();
1564 	started = cur_ops->get_gp_seq();
1565 	ts = rcu_trace_clock_local();
1566 	p = rcu_dereference_check(rcu_torture_current,
1567 				  !cur_ops->readlock_held || cur_ops->readlock_held());
1568 	if (p == NULL) {
1569 		/* Wait for rcu_torture_writer to get underway */
1570 		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1571 		return false;
1572 	}
1573 	if (p->rtort_mbtest == 0)
1574 		atomic_inc(&n_rcu_torture_mberror);
1575 	rcu_torture_reader_do_mbchk(myid, p, trsp);
1576 	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1577 	preempt_disable();
1578 	pipe_count = READ_ONCE(p->rtort_pipe_count);
1579 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1580 		/* Should not happen, but... */
1581 		pipe_count = RCU_TORTURE_PIPE_LEN;
1582 	}
1583 	completed = cur_ops->get_gp_seq();
1584 	if (pipe_count > 1) {
1585 		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1586 					  ts, started, completed);
1587 		rcu_ftrace_dump(DUMP_ALL);
1588 	}
1589 	__this_cpu_inc(rcu_torture_count[pipe_count]);
1590 	completed = rcutorture_seq_diff(completed, started);
1591 	if (completed > RCU_TORTURE_PIPE_LEN) {
1592 		/* Should not happen, but... */
1593 		completed = RCU_TORTURE_PIPE_LEN;
1594 	}
1595 	__this_cpu_inc(rcu_torture_batch[completed]);
1596 	preempt_enable();
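	/*
	 * This reader has been in effect since before the cookie was
	 * collected, so the grace period associated with that cookie
	 * cannot have completed while this reader was running.
	 */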
1597 	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1598 		WARN_ONCE(cur_ops->poll_gp_state(cookie),
1599 			  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
1600 			  __func__,
1601 			  rcu_torture_writer_state_getname(),
1602 			  rcu_torture_writer_state,
1603 			  cookie, cur_ops->get_gp_state());
1604 	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1605 	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
1606 	// This next splat is expected behavior if the leakpointer module
1607 	// parameter is set, especially for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
1608 	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
1609 
1610 	/* If error or close call, record the sequence of reader protections. */
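	/*
	 * The xchg() atomically claims err_segs[], so only the first
	 * close call or error has its reader segments recorded.
	 */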
1611 	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1612 		i = 0;
1613 		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1614 			err_segs[i++] = *rtrsp1;
1615 		rt_read_nsegs = i;
1616 	}
1617 
1618 	return true;
1619 }
1620 
1621 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1622 
1623 /*
1624  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
1625  * incrementing the corresponding element of the pipeline array.  The
1626  * counter in the element should never be greater than 1; otherwise, the
1627  * RCU implementation is broken.
1628  */
1629 static void rcu_torture_timer(struct timer_list *unused)
1630 {
1631 	atomic_long_inc(&n_rcu_torture_timers);
1632 	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
1633 
1634 	/* Test call_rcu() invocation from interrupt handler. */
1635 	if (cur_ops->call) {
1636 		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1637 
1638 		if (rhp)
1639 			cur_ops->call(rhp, rcu_torture_timer_cb);
1640 	}
1641 }
1642 
1643 /*
1644  * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
1645  * incrementing the corresponding element of the pipeline array.  The
1646  * counter in the element should never be greater than 1; otherwise, the
1647  * RCU implementation is broken.
1648  */
1649 static int
1650 rcu_torture_reader(void *arg)
1651 {
1652 	unsigned long lastsleep = jiffies;
1653 	long myid = (long)arg;
1654 	int mynumonline = myid;
1655 	DEFINE_TORTURE_RANDOM(rand);
1656 	struct timer_list t;
1657 
1658 	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1659 	set_user_nice(current, MAX_NICE);
1660 	if (irqreader && cur_ops->irq_capable)
1661 		timer_setup_on_stack(&t, rcu_torture_timer, 0);
1662 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
1663 	do {
1664 		if (irqreader && cur_ops->irq_capable) {
1665 			if (!timer_pending(&t))
1666 				mod_timer(&t, jiffies + 1);
1667 		}
1668 		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
1669 			schedule_timeout_interruptible(HZ);
1670 		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
1671 			torture_hrtimeout_us(500, 1000, &rand);
1672 			lastsleep = jiffies + 10;
1673 		}
1674 		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
1675 			schedule_timeout_interruptible(HZ / 5);
1676 		stutter_wait("rcu_torture_reader");
1677 	} while (!torture_must_stop());
1678 	if (irqreader && cur_ops->irq_capable) {
1679 		del_timer_sync(&t);
1680 		destroy_timer_on_stack(&t);
1681 	}
1682 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
1683 	torture_kthread_stopping("rcu_torture_reader");
1684 	return 0;
1685 }
1686 
1687 /*
1688  * Randomly Toggle CPUs' callback-offload state.  This uses hrtimers to
1689  * increase race probabilities and fuzzes the interval between toggling.
1690  */
1691 static int rcu_nocb_toggle(void *arg)
1692 {
1693 	int cpu;
1694 	int maxcpu = -1;
1695 	int oldnice = task_nice(current);
1696 	long r;
1697 	DEFINE_TORTURE_RANDOM(rand);
1698 	ktime_t toggle_delay;
1699 	unsigned long toggle_fuzz;
1700 	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
1701 
1702 	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
1703 	while (!rcu_inkernel_boot_has_ended())
1704 		schedule_timeout_interruptible(HZ / 10);
1705 	for_each_online_cpu(cpu)
1706 		maxcpu = cpu;
1707 	WARN_ON(maxcpu < 0);
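	/*
	 * Fuzz the inter-toggle delay by up to one-eighth of the interval
	 * (saturated for very large intervals), with a one-microsecond floor.
	 */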
1708 	if (toggle_interval > ULONG_MAX)
1709 		toggle_fuzz = ULONG_MAX >> 3;
1710 	else
1711 		toggle_fuzz = toggle_interval >> 3;
1712 	if (toggle_fuzz <= 0)
1713 		toggle_fuzz = NSEC_PER_USEC;
1714 	do {
1715 		r = torture_random(&rand);
1716 		cpu = (r >> 4) % (maxcpu + 1);
1717 		if (r & 0x1) {
1718 			rcu_nocb_cpu_offload(cpu);
1719 			atomic_long_inc(&n_nocb_offload);
1720 		} else {
1721 			rcu_nocb_cpu_deoffload(cpu);
1722 			atomic_long_inc(&n_nocb_deoffload);
1723 		}
1724 		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
1725 		set_current_state(TASK_INTERRUPTIBLE);
1726 		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
1727 		if (stutter_wait("rcu_nocb_toggle"))
1728 			sched_set_normal(current, oldnice);
1729 	} while (!torture_must_stop());
1730 	torture_kthread_stopping("rcu_nocb_toggle");
1731 	return 0;
1732 }
1733 
1734 /*
1735  * Print torture statistics.  Caller must ensure that there is only
1736  * one call to this function at a given time!!!  This is normally
1737  * accomplished by relying on the module system to only have one copy
1738  * of the module loaded, and then by giving the rcu_torture_stats
1739  * kthread full control (or the init/cleanup functions when rcu_torture_stats
1740  * thread is not running).
1741  */
1742 static void
1743 rcu_torture_stats_print(void)
1744 {
1745 	int cpu;
1746 	int i;
1747 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1748 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1749 	struct rcu_torture *rtcp;
1750 	static unsigned long rtcv_snap = ULONG_MAX;
1751 	static bool splatted;
1752 	struct task_struct *wtp;
1753 
1754 	for_each_possible_cpu(cpu) {
1755 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1756 			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
1757 			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
1758 		}
1759 	}
1760 	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1761 		if (pipesummary[i] != 0)
1762 			break;
1763 	}
1764 
1765 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1766 	rtcp = rcu_access_pointer(rcu_torture_current);
1767 	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1768 		rtcp,
1769 		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
1770 		rcu_torture_current_version,
1771 		list_empty(&rcu_torture_freelist),
1772 		atomic_read(&n_rcu_torture_alloc),
1773 		atomic_read(&n_rcu_torture_alloc_fail),
1774 		atomic_read(&n_rcu_torture_free));
1775 	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
1776 		atomic_read(&n_rcu_torture_mberror),
1777 		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
1778 		n_rcu_torture_barrier_error,
1779 		n_rcu_torture_boost_ktrerror,
1780 		n_rcu_torture_boost_rterror);
1781 	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1782 		n_rcu_torture_boost_failure,
1783 		n_rcu_torture_boosts,
1784 		atomic_long_read(&n_rcu_torture_timers));
1785 	torture_onoff_stats();
1786 	pr_cont("barrier: %ld/%ld:%ld ",
1787 		data_race(n_barrier_successes),
1788 		data_race(n_barrier_attempts),
1789 		data_race(n_rcu_torture_barrier_error));
1790 	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
1791 	pr_cont("nocb-toggles: %ld:%ld\n",
1792 		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
1793 
1794 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1795 	if (atomic_read(&n_rcu_torture_mberror) ||
1796 	    atomic_read(&n_rcu_torture_mbchk_fail) ||
1797 	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
1798 	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
1799 	    i > 1) {
1800 		pr_cont("%s", "!!! ");
1801 		atomic_inc(&n_rcu_torture_error);
1802 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
1803 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
1804 		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
1805 		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
1806 		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
1807 		WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
1808 		WARN_ON_ONCE(i > 1); // Too-short grace period
1809 	}
1810 	pr_cont("Reader Pipe: ");
1811 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1812 		pr_cont(" %ld", pipesummary[i]);
1813 	pr_cont("\n");
1814 
1815 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1816 	pr_cont("Reader Batch: ");
1817 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1818 		pr_cont(" %ld", batchsummary[i]);
1819 	pr_cont("\n");
1820 
1821 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1822 	pr_cont("Free-Block Circulation: ");
1823 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1824 		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
1825 	}
1826 	pr_cont("\n");
1827 
1828 	if (cur_ops->stats)
1829 		cur_ops->stats();
1830 	if (rtcv_snap == rcu_torture_current_version &&
1831 	    rcu_access_pointer(rcu_torture_current) &&
1832 	    !rcu_stall_is_suppressed()) {
1833 		int __maybe_unused flags = 0;
1834 		unsigned long __maybe_unused gp_seq = 0;
1835 
1836 		rcutorture_get_gp_data(cur_ops->ttype,
1837 				       &flags, &gp_seq);
1838 		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
1839 					&flags, &gp_seq);
1840 		wtp = READ_ONCE(writer_task);
1841 		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
1842 			 rcu_torture_writer_state_getname(),
1843 			 rcu_torture_writer_state, gp_seq, flags,
1844 			 wtp == NULL ? ~0U : wtp->__state,
1845 			 wtp == NULL ? -1 : (int)task_cpu(wtp));
1846 		if (!splatted && wtp) {
1847 			sched_show_task(wtp);
1848 			splatted = true;
1849 		}
1850 		if (cur_ops->gp_kthread_dbg)
1851 			cur_ops->gp_kthread_dbg();
1852 		rcu_ftrace_dump(DUMP_ALL);
1853 	}
1854 	rtcv_snap = rcu_torture_current_version;
1855 }
1856 
1857 /*
1858  * Periodically prints torture statistics, if periodic statistics printing
1859  * was specified via the stat_interval module parameter.
1860  */
1861 static int
1862 rcu_torture_stats(void *arg)
1863 {
1864 	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
1865 	do {
1866 		schedule_timeout_interruptible(stat_interval * HZ);
1867 		rcu_torture_stats_print();
1868 		torture_shutdown_absorb("rcu_torture_stats");
1869 	} while (!torture_must_stop());
1870 	torture_kthread_stopping("rcu_torture_stats");
1871 	return 0;
1872 }
1873 
1874 /* Test mem_dump_obj() and friends.  */
1875 static void rcu_torture_mem_dump_obj(void)
1876 {
1877 	struct rcu_head *rhp;
1878 	struct kmem_cache *kcp;
1879 	static int z;
1880 
1881 	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
1882 	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
1883 	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
1884 	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
1885 	mem_dump_obj(ZERO_SIZE_PTR);
1886 	pr_alert("mem_dump_obj(NULL):");
1887 	mem_dump_obj(NULL);
1888 	pr_alert("mem_dump_obj(%px):", &rhp);
1889 	mem_dump_obj(&rhp);
1890 	pr_alert("mem_dump_obj(%px):", rhp);
1891 	mem_dump_obj(rhp);
1892 	pr_alert("mem_dump_obj(%px):", &rhp->func);
1893 	mem_dump_obj(&rhp->func);
1894 	pr_alert("mem_dump_obj(%px):", &z);
1895 	mem_dump_obj(&z);
1896 	kmem_cache_free(kcp, rhp);
1897 	kmem_cache_destroy(kcp);
1898 	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
1899 	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
1900 	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
1901 	mem_dump_obj(rhp);
1902 	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
1903 	mem_dump_obj(&rhp->func);
1904 	kfree(rhp);
1905 	rhp = vmalloc(4096);
1906 	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
1907 	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
1908 	mem_dump_obj(rhp);
1909 	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
1910 	mem_dump_obj(&rhp->func);
1911 	vfree(rhp);
1912 }
1913 
1914 static void
1915 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
1916 {
1917 	pr_alert("%s" TORTURE_FLAG
1918 		 "--- %s: nreaders=%d nfakewriters=%d "
1919 		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1920 		 "shuffle_interval=%d stutter=%d irqreader=%d "
1921 		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1922 		 "test_boost=%d/%d test_boost_interval=%d "
1923 		 "test_boost_duration=%d shutdown_secs=%d "
1924 		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
1925 		 "stall_cpu_block=%d "
1926 		 "n_barrier_cbs=%d "
1927 		 "onoff_interval=%d onoff_holdoff=%d "
1928 		 "read_exit_delay=%d read_exit_burst=%d "
1929 		 "nocbs_nthreads=%d nocbs_toggle=%d\n",
1930 		 torture_type, tag, nrealreaders, nfakewriters,
1931 		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1932 		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1933 		 test_boost, cur_ops->can_boost,
1934 		 test_boost_interval, test_boost_duration, shutdown_secs,
1935 		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
1936 		 stall_cpu_block,
1937 		 n_barrier_cbs,
1938 		 onoff_interval, onoff_holdoff,
1939 		 read_exit_delay, read_exit_burst,
1940 		 nocbs_nthreads, nocbs_toggle);
1941 }
1942 
1943 static int rcutorture_booster_cleanup(unsigned int cpu)
1944 {
1945 	struct task_struct *t;
1946 
1947 	if (boost_tasks[cpu] == NULL)
1948 		return 0;
1949 	mutex_lock(&boost_mutex);
1950 	t = boost_tasks[cpu];
1951 	boost_tasks[cpu] = NULL;
1952 	rcu_torture_enable_rt_throttle();
1953 	mutex_unlock(&boost_mutex);
1954 
1955 	/* This must be outside of the mutex, otherwise deadlock! */
1956 	torture_stop_kthread(rcu_torture_boost, t);
1957 	return 0;
1958 }
1959 
1960 static int rcutorture_booster_init(unsigned int cpu)
1961 {
1962 	int retval;
1963 
1964 	if (boost_tasks[cpu] != NULL)
1965 		return 0;  /* Already created, nothing more to do. */
1966 
1967 	/* Don't allow time recalculation while creating a new task. */
1968 	mutex_lock(&boost_mutex);
1969 	rcu_torture_disable_rt_throttle();
1970 	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1971 	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1972 						  cpu_to_node(cpu),
1973 						  "rcu_torture_boost");
1974 	if (IS_ERR(boost_tasks[cpu])) {
1975 		retval = PTR_ERR(boost_tasks[cpu]);
1976 		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
1977 		n_rcu_torture_boost_ktrerror++;
1978 		boost_tasks[cpu] = NULL;
1979 		mutex_unlock(&boost_mutex);
1980 		return retval;
1981 	}
1982 	kthread_bind(boost_tasks[cpu], cpu);
1983 	wake_up_process(boost_tasks[cpu]);
1984 	mutex_unlock(&boost_mutex);
1985 	return 0;
1986 }
1987 
1988 /*
1989  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
1990  * induces a CPU stall for the time specified by stall_cpu.
1991  */
1992 static int rcu_torture_stall(void *args)
1993 {
1994 	int idx;
1995 	unsigned long stop_at;
1996 
1997 	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
1998 	if (stall_cpu_holdoff > 0) {
1999 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2000 		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2001 		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2002 	}
2003 	if (!kthread_should_stop() && stall_gp_kthread > 0) {
2004 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2005 		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2006 		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2007 			if (kthread_should_stop())
2008 				break;
2009 			schedule_timeout_uninterruptible(HZ);
2010 		}
2011 	}
2012 	if (!kthread_should_stop() && stall_cpu > 0) {
2013 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2014 		stop_at = ktime_get_seconds() + stall_cpu;
2015 		/* RCU CPU stall is expected behavior in the following code. */
2016 		idx = cur_ops->readlock();
2017 		if (stall_cpu_irqsoff)
2018 			local_irq_disable();
2019 		else if (!stall_cpu_block)
2020 			preempt_disable();
2021 		pr_alert("%s start on CPU %d.\n",
2022 			  __func__, raw_smp_processor_id());
2023 		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
2024 				    stop_at))
2025 			if (stall_cpu_block) {
2026 #ifdef CONFIG_PREEMPTION
2027 				preempt_schedule();
2028 #else
2029 				schedule_timeout_uninterruptible(HZ);
2030 #endif
2031 			}
2032 		if (stall_cpu_irqsoff)
2033 			local_irq_enable();
2034 		else if (!stall_cpu_block)
2035 			preempt_enable();
2036 		cur_ops->readunlock(idx);
2037 	}
2038 	pr_alert("%s end.\n", __func__);
2039 	torture_shutdown_absorb("rcu_torture_stall");
2040 	while (!kthread_should_stop())
2041 		schedule_timeout_interruptible(10 * HZ);
2042 	return 0;
2043 }
2044 
2045 /* Spawn CPU-stall kthread, if stall_cpu specified. */
2046 static int __init rcu_torture_stall_init(void)
2047 {
2048 	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2049 		return 0;
2050 	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2051 }
2052 
2053 /* State structure for forward-progress self-propagating RCU callback. */
2054 struct fwd_cb_state {
2055 	struct rcu_head rh;
2056 	int stop;
2057 };
2058 
2059 /*
2060  * Forward-progress self-propagating RCU callback function.  Because
2061  * callbacks run from softirq, this function is an implicit RCU read-side
2062  * critical section.
2063  */
2064 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2065 {
2066 	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2067 
2068 	if (READ_ONCE(fcsp->stop)) {
2069 		WRITE_ONCE(fcsp->stop, 2);
2070 		return;
2071 	}
2072 	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2073 }
2074 
2075 /* State for continuous-flood RCU callbacks. */
2076 struct rcu_fwd_cb {
2077 	struct rcu_head rh;
2078 	struct rcu_fwd_cb *rfc_next;
2079 	struct rcu_fwd *rfc_rfp;
2080 	int rfc_gps;
2081 };
2082 
2083 #define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
2084 #define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
2085 #define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
2086 #define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
2087 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
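/*
 * A worked example: 2 * (8 * HZ) / (HZ / 10) = 160 buckets whenever HZ
 * is a multiple of FWD_CBS_HIST_DIV, covering twice the maximum test
 * duration at ten buckets per second.
 */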
2088 
2089 struct rcu_launder_hist {
2090 	long n_launders;
2091 	unsigned long launder_gp_seq;
2092 };
2093 
2094 struct rcu_fwd {
2095 	spinlock_t rcu_fwd_lock;
2096 	struct rcu_fwd_cb *rcu_fwd_cb_head;
2097 	struct rcu_fwd_cb **rcu_fwd_cb_tail;
2098 	long n_launders_cb;
2099 	unsigned long rcu_fwd_startat;
2100 	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2101 	unsigned long rcu_launder_gp_seq_start;
2102 };
2103 
2104 static DEFINE_MUTEX(rcu_fwd_mutex);
2105 static struct rcu_fwd *rcu_fwds;
2106 static bool rcu_fwd_emergency_stop;
2107 
2108 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2109 {
2110 	unsigned long gps;
2111 	unsigned long gps_old;
2112 	int i;
2113 	int j;
2114 
2115 	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2116 		if (rfp->n_launders_hist[i].n_launders > 0)
2117 			break;
2118 	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
2119 		 __func__, jiffies - rfp->rcu_fwd_startat);
2120 	gps_old = rfp->rcu_launder_gp_seq_start;
2121 	for (j = 0; j <= i; j++) {
2122 		gps = rfp->n_launders_hist[j].launder_gp_seq;
2123 		pr_cont(" %ds/%d: %ld:%ld",
2124 			j + 1, FWD_CBS_HIST_DIV,
2125 			rfp->n_launders_hist[j].n_launders,
2126 			rcutorture_seq_diff(gps, gps_old));
2127 		gps_old = gps;
2128 	}
2129 	pr_cont("\n");
2130 }
2131 
2132 /* Callback function for continuous-flood RCU callbacks. */
2133 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2134 {
2135 	unsigned long flags;
2136 	int i;
2137 	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2138 	struct rcu_fwd_cb **rfcpp;
2139 	struct rcu_fwd *rfp = rfcp->rfc_rfp;
2140 
2141 	rfcp->rfc_next = NULL;
2142 	rfcp->rfc_gps++;
2143 	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2144 	rfcpp = rfp->rcu_fwd_cb_tail;
2145 	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2146 	WRITE_ONCE(*rfcpp, rfcp);
2147 	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
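	/*
	 * Elapsed time divided by jiffies-per-bucket selects the histogram
	 * slot; the following lines clamp overflow into the final bucket.
	 */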
2148 	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2149 	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2150 		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2151 	rfp->n_launders_hist[i].n_launders++;
2152 	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2153 	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2154 }
2155 
2156 // Give the scheduler a chance, even on nohz_full CPUs.
2157 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2158 {
2159 	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2160 		// Real call_rcu() floods hit userspace, so emulate that.
2161 		if (need_resched() || (iter & 0xfff))
2162 			schedule();
2163 		return;
2164 	}
2165 	// No userspace emulation: CB invocation throttles call_rcu()
2166 	cond_resched();
2167 }
2168 
2169 /*
2170  * Free all callbacks on the rcu_fwd_cb_head list, either because the
2171  * test is over or because we hit an OOM event.
2172  */
2173 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2174 {
2175 	unsigned long flags;
2176 	unsigned long freed = 0;
2177 	struct rcu_fwd_cb *rfcp;
2178 
2179 	for (;;) {
2180 		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2181 		rfcp = rfp->rcu_fwd_cb_head;
2182 		if (!rfcp) {
2183 			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2184 			break;
2185 		}
2186 		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2187 		if (!rfp->rcu_fwd_cb_head)
2188 			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2189 		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2190 		kfree(rfcp);
2191 		freed++;
2192 		rcu_torture_fwd_prog_cond_resched(freed);
2193 		if (tick_nohz_full_enabled()) {
2194 			local_irq_save(flags);
2195 			rcu_momentary_dyntick_idle();
2196 			local_irq_restore(flags);
2197 		}
2198 	}
2199 	return freed;
2200 }
2201 
2202 /* Carry out need_resched()/cond_resched() forward-progress testing. */
2203 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2204 				    int *tested, int *tested_tries)
2205 {
2206 	unsigned long cver;
2207 	unsigned long dur;
2208 	struct fwd_cb_state fcs;
2209 	unsigned long gps;
2210 	int idx;
2211 	int sd;
2212 	int sd4;
2213 	bool selfpropcb = false;
2214 	unsigned long stopat;
2215 	static DEFINE_TORTURE_RANDOM(trs);
2216 
2217 	if (!cur_ops->sync)
2218 		return; // Cannot do need_resched() forward progress testing without ->sync.
2219 	if (cur_ops->call && cur_ops->cb_barrier) {
2220 		init_rcu_head_on_stack(&fcs.rh);
2221 		selfpropcb = true;
2222 	}
2223 
2224 	/* Tight loop containing cond_resched(). */
2225 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2226 	cur_ops->sync(); /* Later readers see above write. */
2227 	if  (selfpropcb) {
2228 		WRITE_ONCE(fcs.stop, 0);
2229 		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2230 	}
2231 	cver = READ_ONCE(rcu_torture_current_version);
2232 	gps = cur_ops->get_gp_seq();
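	/*
	 * Pick a duration uniformly distributed between roughly
	 * 1/fwd_progress_div of the RCU CPU stall timeout and the full
	 * timeout, so that this loop approaches but should not itself
	 * trigger a stall warning.
	 */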
2233 	sd = cur_ops->stall_dur() + 1;
2234 	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2235 	dur = sd4 + torture_random(&trs) % (sd - sd4);
2236 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2237 	stopat = rfp->rcu_fwd_startat + dur;
2238 	while (time_before(jiffies, stopat) &&
2239 	       !shutdown_time_arrived() &&
2240 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2241 		idx = cur_ops->readlock();
2242 		udelay(10);
2243 		cur_ops->readunlock(idx);
2244 		if (!fwd_progress_need_resched || need_resched())
2245 			cond_resched();
2246 	}
2247 	(*tested_tries)++;
2248 	if (!time_before(jiffies, stopat) &&
2249 	    !shutdown_time_arrived() &&
2250 	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2251 		(*tested)++;
2252 		cver = READ_ONCE(rcu_torture_current_version) - cver;
2253 		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2254 		WARN_ON(!cver && gps < 2);
2255 		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
2256 	}
2257 	if (selfpropcb) {
2258 		WRITE_ONCE(fcs.stop, 1);
2259 		cur_ops->sync(); /* Wait for running CB to complete. */
2260 		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2261 	}
2262 
2263 	if (selfpropcb) {
2264 		WARN_ON(READ_ONCE(fcs.stop) != 2);
2265 		destroy_rcu_head_on_stack(&fcs.rh);
2266 	}
2267 	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2268 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2269 }
2270 
2271 /* Carry out call_rcu() forward-progress testing. */
2272 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2273 {
2274 	unsigned long cver;
2275 	unsigned long flags;
2276 	unsigned long gps;
2277 	int i;
2278 	long n_launders;
2279 	long n_launders_cb_snap;
2280 	long n_launders_sa;
2281 	long n_max_cbs;
2282 	long n_max_gps;
2283 	struct rcu_fwd_cb *rfcp;
2284 	struct rcu_fwd_cb *rfcpn;
2285 	unsigned long stopat;
2286 	unsigned long stoppedat;
2287 
2288 	if (READ_ONCE(rcu_fwd_emergency_stop))
2289 		return; /* Get out of the way quickly, no GP wait! */
2290 	if (!cur_ops->call)
2291 		return; /* Can't do call_rcu() fwd prog without ->call. */
2292 
2293 	/* Loop continuously posting RCU callbacks. */
2294 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2295 	cur_ops->sync(); /* Later readers see above write. */
2296 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2297 	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2298 	n_launders = 0;
2299 	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2300 	n_launders_sa = 0;
2301 	n_max_cbs = 0;
2302 	n_max_gps = 0;
2303 	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2304 		rfp->n_launders_hist[i].n_launders = 0;
2305 	cver = READ_ONCE(rcu_torture_current_version);
2306 	gps = cur_ops->get_gp_seq();
2307 	rfp->rcu_launder_gp_seq_start = gps;
2308 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2309 	while (time_before(jiffies, stopat) &&
2310 	       !shutdown_time_arrived() &&
2311 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
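		/*
		 * If the oldest callback has a successor, recycle
		 * ("launder") it by reposting it below; otherwise grow
		 * the list with a newly allocated callback.
		 */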
2312 		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2313 		rfcpn = NULL;
2314 		if (rfcp)
2315 			rfcpn = READ_ONCE(rfcp->rfc_next);
2316 		if (rfcpn) {
2317 			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2318 			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2319 				break;
2320 			rfp->rcu_fwd_cb_head = rfcpn;
2321 			n_launders++;
2322 			n_launders_sa++;
2323 		} else {
2324 			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2325 			if (WARN_ON_ONCE(!rfcp)) {
2326 				schedule_timeout_interruptible(1);
2327 				continue;
2328 			}
2329 			n_max_cbs++;
2330 			n_launders_sa = 0;
2331 			rfcp->rfc_gps = 0;
2332 			rfcp->rfc_rfp = rfp;
2333 		}
2334 		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2335 		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2336 		if (tick_nohz_full_enabled()) {
2337 			local_irq_save(flags);
2338 			rcu_momentary_dyntick_idle();
2339 			local_irq_restore(flags);
2340 		}
2341 	}
2342 	stoppedat = jiffies;
2343 	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2344 	cver = READ_ONCE(rcu_torture_current_version) - cver;
2345 	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2346 	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2347 	(void)rcu_torture_fwd_prog_cbfree(rfp);
2348 
2349 	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2350 	    !shutdown_time_arrived()) {
2351 		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2352 		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2353 			 __func__,
2354 			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2355 			 n_launders + n_max_cbs - n_launders_cb_snap,
2356 			 n_launders, n_launders_sa,
2357 			 n_max_gps, n_max_cbs, cver, gps);
2358 		rcu_torture_fwd_cb_hist(rfp);
2359 	}
2360 	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2361 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2362 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2363 }
2364 
2365 
2366 /*
2367  * OOM notifier, but this only prints diagnostic information for the
2368  * current forward-progress test.
2369  */
2370 static int rcutorture_oom_notify(struct notifier_block *self,
2371 				 unsigned long notused, void *nfreed)
2372 {
2373 	struct rcu_fwd *rfp;
2374 
2375 	mutex_lock(&rcu_fwd_mutex);
2376 	rfp = rcu_fwds;
2377 	if (!rfp) {
2378 		mutex_unlock(&rcu_fwd_mutex);
2379 		return NOTIFY_OK;
2380 	}
2381 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2382 	     __func__);
2383 	rcu_torture_fwd_cb_hist(rfp);
2384 	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
2385 	WRITE_ONCE(rcu_fwd_emergency_stop, true);
2386 	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2387 	pr_info("%s: Freed %lu RCU callbacks.\n",
2388 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2389 	rcu_barrier();
2390 	pr_info("%s: Freed %lu RCU callbacks.\n",
2391 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2392 	rcu_barrier();
2393 	pr_info("%s: Freed %lu RCU callbacks.\n",
2394 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2395 	smp_mb(); /* Frees before return to avoid redoing OOM. */
2396 	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2397 	pr_info("%s returning after OOM processing.\n", __func__);
2398 	mutex_unlock(&rcu_fwd_mutex);
2399 	return NOTIFY_OK;
2400 }
2401 
2402 static struct notifier_block rcutorture_oom_nb = {
2403 	.notifier_call = rcutorture_oom_notify
2404 };
2405 
2406 /* Carry out grace-period forward-progress testing. */
2407 static int rcu_torture_fwd_prog(void *args)
2408 {
2409 	int oldnice = task_nice(current);
2410 	struct rcu_fwd *rfp = args;
2411 	int tested = 0;
2412 	int tested_tries = 0;
2413 
2414 	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2415 	rcu_bind_current_to_nocb();
2416 	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2417 		set_user_nice(current, MAX_NICE);
2418 	do {
2419 		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2420 		WRITE_ONCE(rcu_fwd_emergency_stop, false);
2421 		if (!IS_ENABLED(CONFIG_TINY_RCU) ||
2422 		    rcu_inkernel_boot_has_ended())
2423 			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2424 		if (rcu_inkernel_boot_has_ended())
2425 			rcu_torture_fwd_prog_cr(rfp);
2426 
2427 		/* Avoid slow periods, better to test when busy. */
2428 		if (stutter_wait("rcu_torture_fwd_prog"))
2429 			sched_set_normal(current, oldnice);
2430 	} while (!torture_must_stop());
2431 	/* Short runs might not contain a valid forward-progress attempt. */
2432 	WARN_ON(!tested && tested_tries >= 5);
2433 	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2434 	torture_kthread_stopping("rcu_torture_fwd_prog");
2435 	return 0;
2436 }
2437 
2438 /* If forward-progress checking is requested and feasible, spawn the thread. */
2439 static int __init rcu_torture_fwd_prog_init(void)
2440 {
2441 	struct rcu_fwd *rfp;
2442 
2443 	if (!fwd_progress)
2444 		return 0; /* Not requested, so don't do it. */
2445 	if ((!cur_ops->sync && !cur_ops->call) ||
2446 	    !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) {
2447 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2448 		return 0;
2449 	}
2450 	if (stall_cpu > 0) {
2451 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2452 		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2453 			return -EINVAL; /* In module, can fail back to user. */
2454 		WARN_ON(1); /* Make sure rcutorture notices conflict. */
2455 		return 0;
2456 	}
2457 	if (fwd_progress_holdoff <= 0)
2458 		fwd_progress_holdoff = 1;
2459 	if (fwd_progress_div <= 0)
2460 		fwd_progress_div = 4;
2461 	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
2462 	if (!rfp)
2463 		return -ENOMEM;
2464 	spin_lock_init(&rfp->rcu_fwd_lock);
2465 	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2466 	mutex_lock(&rcu_fwd_mutex);
2467 	rcu_fwds = rfp;
2468 	mutex_unlock(&rcu_fwd_mutex);
2469 	register_oom_notifier(&rcutorture_oom_nb);
2470 	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
2471 }
2472 
2473 static void rcu_torture_fwd_prog_cleanup(void)
2474 {
2475 	struct rcu_fwd *rfp;
2476 
2477 	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
2478 	rfp = rcu_fwds;
2479 	mutex_lock(&rcu_fwd_mutex);
2480 	rcu_fwds = NULL;
2481 	mutex_unlock(&rcu_fwd_mutex);
2482 	unregister_oom_notifier(&rcutorture_oom_nb);
2483 	kfree(rfp);
2484 }
2485 
2486 /* Callback function for RCU barrier testing. */
2487 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
2488 {
2489 	atomic_inc(&barrier_cbs_invoked);
2490 }
2491 
2492 /* IPI handler to get callback posted on desired CPU, if online. */
2493 static void rcu_torture_barrier1cb(void *rcu_void)
2494 {
2495 	struct rcu_head *rhp = rcu_void;
2496 
2497 	cur_ops->call(rhp, rcu_torture_barrier_cbf);
2498 }
2499 
2500 /* kthread function to register callbacks used to test RCU barriers. */
2501 static int rcu_torture_barrier_cbs(void *arg)
2502 {
2503 	long myid = (long)arg;
2504 	bool lastphase = false;
2505 	bool newphase;
2506 	struct rcu_head rcu;
2507 
2508 	init_rcu_head_on_stack(&rcu);
2509 	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
2510 	set_user_nice(current, MAX_NICE);
2511 	do {
2512 		wait_event(barrier_cbs_wq[myid],
2513 			   (newphase =
2514 			    smp_load_acquire(&barrier_phase)) != lastphase ||
2515 			   torture_must_stop());
2516 		lastphase = newphase;
2517 		if (torture_must_stop())
2518 			break;
2519 		/*
2520 		 * The above smp_load_acquire() ensures barrier_phase load
2521 		 * is ordered before the following ->call().
2522 		 */
2523 		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
2524 					     &rcu, 1)) {
2525 			// IPI failed, so use direct call from current CPU.
2526 			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
2527 		}
2528 		if (atomic_dec_and_test(&barrier_cbs_count))
2529 			wake_up(&barrier_wq);
2530 	} while (!torture_must_stop());
2531 	if (cur_ops->cb_barrier != NULL)
2532 		cur_ops->cb_barrier();
2533 	destroy_rcu_head_on_stack(&rcu);
2534 	torture_kthread_stopping("rcu_torture_barrier_cbs");
2535 	return 0;
2536 }
2537 
2538 /* kthread function to drive and coordinate RCU barrier testing. */
2539 static int rcu_torture_barrier(void *arg)
2540 {
2541 	int i;
2542 
2543 	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
2544 	do {
2545 		atomic_set(&barrier_cbs_invoked, 0);
2546 		atomic_set(&barrier_cbs_count, n_barrier_cbs);
2547 		/* Ensure barrier_phase ordered after prior assignments. */
2548 		smp_store_release(&barrier_phase, !barrier_phase);
2549 		for (i = 0; i < n_barrier_cbs; i++)
2550 			wake_up(&barrier_cbs_wq[i]);
2551 		wait_event(barrier_wq,
2552 			   atomic_read(&barrier_cbs_count) == 0 ||
2553 			   torture_must_stop());
2554 		if (torture_must_stop())
2555 			break;
2556 		n_barrier_attempts++;
2557 		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
2558 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2559 			n_rcu_torture_barrier_error++;
2560 			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2561 			       atomic_read(&barrier_cbs_invoked),
2562 			       n_barrier_cbs);
2563 			WARN_ON(1);
2564 			// Wait manually for the remaining callbacks
2565 			i = 0;
2566 			do {
2567 				if (WARN_ON(i++ > HZ))
2568 					i = INT_MIN;
2569 				schedule_timeout_interruptible(1);
2570 				cur_ops->cb_barrier();
2571 			} while (atomic_read(&barrier_cbs_invoked) !=
2572 				 n_barrier_cbs &&
2573 				 !torture_must_stop());
2574 			smp_mb(); // Can't trust ordering if broken.
2575 			if (!torture_must_stop())
2576 				pr_err("Recovered: barrier_cbs_invoked = %d\n",
2577 				       atomic_read(&barrier_cbs_invoked));
2578 		} else {
2579 			n_barrier_successes++;
2580 		}
2581 		schedule_timeout_interruptible(HZ / 10);
2582 	} while (!torture_must_stop());
2583 	torture_kthread_stopping("rcu_torture_barrier");
2584 	return 0;
2585 }
2586 
2587 /* Initialize RCU barrier testing. */
2588 static int rcu_torture_barrier_init(void)
2589 {
2590 	int i;
2591 	int ret;
2592 
2593 	if (n_barrier_cbs <= 0)
2594 		return 0;
2595 	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
2596 		pr_alert("%s" TORTURE_FLAG
2597 			 " Call or barrier ops missing for %s,\n",
2598 			 torture_type, cur_ops->name);
2599 		pr_alert("%s" TORTURE_FLAG
2600 			 " RCU barrier testing omitted from run.\n",
2601 			 torture_type);
2602 		return 0;
2603 	}
2604 	atomic_set(&barrier_cbs_count, 0);
2605 	atomic_set(&barrier_cbs_invoked, 0);
2606 	barrier_cbs_tasks =
2607 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
2608 			GFP_KERNEL);
2609 	barrier_cbs_wq =
2610 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
2611 	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
2612 		return -ENOMEM;
2613 	for (i = 0; i < n_barrier_cbs; i++) {
2614 		init_waitqueue_head(&barrier_cbs_wq[i]);
2615 		ret = torture_create_kthread(rcu_torture_barrier_cbs,
2616 					     (void *)(long)i,
2617 					     barrier_cbs_tasks[i]);
2618 		if (ret)
2619 			return ret;
2620 	}
2621 	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
2622 }
2623 
2624 /* Clean up after RCU barrier testing. */
2625 static void rcu_torture_barrier_cleanup(void)
2626 {
2627 	int i;
2628 
2629 	torture_stop_kthread(rcu_torture_barrier, barrier_task);
2630 	if (barrier_cbs_tasks != NULL) {
2631 		for (i = 0; i < n_barrier_cbs; i++)
2632 			torture_stop_kthread(rcu_torture_barrier_cbs,
2633 					     barrier_cbs_tasks[i]);
2634 		kfree(barrier_cbs_tasks);
2635 		barrier_cbs_tasks = NULL;
2636 	}
2637 	if (barrier_cbs_wq != NULL) {
2638 		kfree(barrier_cbs_wq);
2639 		barrier_cbs_wq = NULL;
2640 	}
2641 }
2642 
2643 static bool rcu_torture_can_boost(void)
2644 {
2645 	static int boost_warn_once;
2646 	int prio;
2647 
2648 	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2649 		return false;
2650 	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
2651 		return false;
2652 
2653 	prio = rcu_get_gp_kthreads_prio();
2654 	if (!prio)
2655 		return false;
2656 
2657 	if (prio < 2) {
2658 		if (boost_warn_once == 1)
2659 			return false;
2660 
2661 		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
2662 		boost_warn_once = 1;
2663 		return false;
2664 	}
2665 
2666 	return true;
2667 }
2668 
2669 static bool read_exit_child_stop;
2670 static bool read_exit_child_stopped;
2671 static wait_queue_head_t read_exit_wq;
2672 
2673 // Child kthread which just does an rcutorture reader and exits.
2674 static int rcu_torture_read_exit_child(void *trsp_in)
2675 {
2676 	struct torture_random_state *trsp = trsp_in;
2677 
2678 	set_user_nice(current, MAX_NICE);
2679 	// Minimize time between reading and exiting.
2680 	while (!kthread_should_stop())
2681 		schedule_timeout_uninterruptible(1);
2682 	(void)rcu_torture_one_read(trsp, -1);
2683 	return 0;
2684 }
2685 
2686 // Parent kthread which creates and destroys read-exit child kthreads.
2687 static int rcu_torture_read_exit(void *unused)
2688 {
2689 	int count = 0;
2690 	bool errexit = false;
2691 	int i;
2692 	struct task_struct *tsp;
2693 	DEFINE_TORTURE_RANDOM(trs);
2694 
2695 	// Allocate and initialize.
2696 	set_user_nice(current, MAX_NICE);
2697 	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
2698 
2699 	// Each pass through this loop does one read-exit episode.
2700 	do {
2701 		if (++count > read_exit_burst) {
2702 			VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
2703 			rcu_barrier(); // Wait for task_struct free, avoid OOM.
2704 			for (i = 0; i < read_exit_delay; i++) {
2705 				schedule_timeout_uninterruptible(HZ);
2706 				if (READ_ONCE(read_exit_child_stop))
2707 					break;
2708 			}
2709 			if (!READ_ONCE(read_exit_child_stop))
2710 				VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
2711 			count = 0;
2712 		}
2713 		if (READ_ONCE(read_exit_child_stop))
2714 			break;
2715 		// Spawn child.
2716 		tsp = kthread_run(rcu_torture_read_exit_child,
2717 				     &trs, "%s",
2718 				     "rcu_torture_read_exit_child");
2719 		if (IS_ERR(tsp)) {
2720 			VERBOSE_TOROUT_ERRSTRING("out of memory");
2721 			errexit = true;
2722 			tsp = NULL;
2723 			break;
2724 		}
2725 		cond_resched();
2726 		kthread_stop(tsp);
2727 		n_read_exits++;
2728 		stutter_wait("rcu_torture_read_exit");
2729 	} while (!errexit && !READ_ONCE(read_exit_child_stop));
2730 
2731 	// Clean up and exit.
2732 	smp_store_release(&read_exit_child_stopped, true); // After reaping.
2733 	smp_mb(); // Store before wakeup.
2734 	wake_up(&read_exit_wq);
2735 	while (!torture_must_stop())
2736 		schedule_timeout_uninterruptible(1);
2737 	torture_kthread_stopping("rcu_torture_read_exit");
2738 	return 0;
2739 }
2740 
2741 static int rcu_torture_read_exit_init(void)
2742 {
2743 	if (read_exit_burst <= 0)
2744 		return -EINVAL;
2745 	init_waitqueue_head(&read_exit_wq);
2746 	read_exit_child_stop = false;
2747 	read_exit_child_stopped = false;
2748 	return torture_create_kthread(rcu_torture_read_exit, NULL,
2749 				      read_exit_task);
2750 }
2751 
2752 static void rcu_torture_read_exit_cleanup(void)
2753 {
2754 	if (!read_exit_task)
2755 		return;
2756 	WRITE_ONCE(read_exit_child_stop, true);
2757 	smp_mb(); // Above write before wait.
2758 	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
2759 	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
2760 }
2761 
2762 static enum cpuhp_state rcutor_hp;
2763 
2764 static void
2765 rcu_torture_cleanup(void)
2766 {
2767 	int firsttime;
2768 	int flags = 0;
2769 	unsigned long gp_seq = 0;
2770 	int i;
2771 
2772 	if (torture_cleanup_begin()) {
2773 		if (cur_ops->cb_barrier != NULL)
2774 			cur_ops->cb_barrier();
2775 		return;
2776 	}
2777 	if (!cur_ops) {
2778 		torture_cleanup_end();
2779 		return;
2780 	}
2781 
2782 	if (cur_ops->gp_kthread_dbg)
2783 		cur_ops->gp_kthread_dbg();
2784 	rcu_torture_read_exit_cleanup();
2785 	rcu_torture_barrier_cleanup();
2786 	rcu_torture_fwd_prog_cleanup();
2787 	torture_stop_kthread(rcu_torture_stall, stall_task);
2788 	torture_stop_kthread(rcu_torture_writer, writer_task);
2789 
2790 	if (nocb_tasks) {
2791 		for (i = 0; i < nrealnocbers; i++)
2792 			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
2793 		kfree(nocb_tasks);
2794 		nocb_tasks = NULL;
2795 	}
2796 
2797 	if (reader_tasks) {
2798 		for (i = 0; i < nrealreaders; i++)
2799 			torture_stop_kthread(rcu_torture_reader,
2800 					     reader_tasks[i]);
2801 		kfree(reader_tasks);
2802 		reader_tasks = NULL;
2803 	}
2804 	kfree(rcu_torture_reader_mbchk);
2805 	rcu_torture_reader_mbchk = NULL;
2806 
2807 	if (fakewriter_tasks) {
2808 		for (i = 0; i < nfakewriters; i++)
2809 			torture_stop_kthread(rcu_torture_fakewriter,
2810 					     fakewriter_tasks[i]);
2811 		kfree(fakewriter_tasks);
2812 		fakewriter_tasks = NULL;
2813 	}
2814 
2815 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2816 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2817 	pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
2818 		 cur_ops->name, (long)gp_seq, flags,
2819 		 rcutorture_seq_diff(gp_seq, start_gp_seq));
2820 	torture_stop_kthread(rcu_torture_stats, stats_task);
2821 	torture_stop_kthread(rcu_torture_fqs, fqs_task);
2822 	if (rcu_torture_can_boost())
2823 		cpuhp_remove_state(rcutor_hp);
2824 
2825 	/*
2826 	 * Wait for all RCU callbacks to fire, then do torture-type-specific
2827 	 * cleanup operations.
2828 	 */
2829 	if (cur_ops->cb_barrier != NULL)
2830 		cur_ops->cb_barrier();
2831 	if (cur_ops->cleanup != NULL)
2832 		cur_ops->cleanup();
2833 
2834 	rcu_torture_mem_dump_obj();
2835 
2836 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
2837 
2838 	if (err_segs_recorded) {
2839 		pr_alert("Failure/close-call rcutorture reader segments:\n");
2840 		if (rt_read_nsegs == 0)
2841 			pr_alert("\t: No segments recorded!!!\n");
2842 		firsttime = 1;
2843 		for (i = 0; i < rt_read_nsegs; i++) {
2844 			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2845 			if (err_segs[i].rt_delay_jiffies != 0) {
2846 				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2847 					err_segs[i].rt_delay_jiffies);
2848 				firsttime = 0;
2849 			}
2850 			if (err_segs[i].rt_delay_ms != 0) {
2851 				pr_cont("%s%ldms", firsttime ? "" : "+",
2852 					err_segs[i].rt_delay_ms);
2853 				firsttime = 0;
2854 			}
2855 			if (err_segs[i].rt_delay_us != 0) {
2856 				pr_cont("%s%ldus", firsttime ? "" : "+",
2857 					err_segs[i].rt_delay_us);
2858 				firsttime = 0;
2859 			}
2860 			pr_cont("%s\n",
2861 				err_segs[i].rt_preempted ? "preempted" : "");
2862 
2863 		}
2864 	}
2865 	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
2866 		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
2867 	else if (torture_onoff_failures())
2868 		rcu_torture_print_module_parms(cur_ops,
2869 					       "End of test: RCU_HOTPLUG");
2870 	else
2871 		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
2872 	torture_cleanup_end();
2873 }
2874 
2875 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2876 static void rcu_torture_leak_cb(struct rcu_head *rhp)
2877 {
2878 }
2879 
2880 static void rcu_torture_err_cb(struct rcu_head *rhp)
2881 {
2882 	/*
2883 	 * This -might- happen due to race conditions, but is unlikely.
2884 	 * The scenario that leads to this happening is that the
2885 	 * first of the pair of duplicate callbacks is queued,
2886 	 * someone else starts a grace period that includes that
2887 	 * callback, then the second of the pair must wait for the
2888 	 * next grace period.  Unlikely, but can happen.  If it
2889 	 * does happen, the debug-objects subsystem won't have splatted.
2890 	 */
2891 	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
2892 }
2893 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2894 
2895 /*
2896  * Verify that double-free causes debug-objects to complain, but only
2897  * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
2898  * cannot be carried out.
2899  */
2900 static void rcu_test_debug_objects(void)
2901 {
2902 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2903 	struct rcu_head rh1;
2904 	struct rcu_head rh2;
2905 	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2906 
2907 	init_rcu_head_on_stack(&rh1);
2908 	init_rcu_head_on_stack(&rh2);
2909 	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
2910 
2911 	/* Try to queue the rh2 pair of callbacks for the same grace period. */
2912 	preempt_disable(); /* Prevent preemption from interrupting test. */
2913 	rcu_read_lock(); /* Make it impossible to finish a grace period. */
2914 	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
2915 	local_irq_disable(); /* Make it harder to start a new grace period. */
2916 	call_rcu(&rh2, rcu_torture_leak_cb);
2917 	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2918 	if (rhp) {
2919 		call_rcu(rhp, rcu_torture_leak_cb);
2920 		call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
2921 	}
2922 	local_irq_enable();
2923 	rcu_read_unlock();
2924 	preempt_enable();
2925 
2926 	/* Wait for them all to get done so we can safely return. */
2927 	rcu_barrier();
2928 	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
2929 	destroy_rcu_head_on_stack(&rh1);
2930 	destroy_rcu_head_on_stack(&rh2);
2931 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2932 	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
2933 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2934 }
2935 
2936 static void rcutorture_sync(void)
2937 {
2938 	static unsigned long n;
2939 
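	/*
	 * Registered with torture_onoff_init() below, so this runs during
	 * CPU-hotplug operations: every 4096th call does a full
	 * synchronous grace period.
	 */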
2940 	if (cur_ops->sync && !(++n & 0xfff))
2941 		cur_ops->sync();
2942 }
2943 
2944 static int __init
2945 rcu_torture_init(void)
2946 {
2947 	long i;
2948 	int cpu;
2949 	int firsterr = 0;
2950 	int flags = 0;
2951 	unsigned long gp_seq = 0;
2952 	static struct rcu_torture_ops *torture_ops[] = {
2953 		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
2954 		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
2955 		&tasks_tracing_ops, &trivial_ops,
2956 	};
2957 
2958 	if (!torture_init_begin(torture_type, verbose))
2959 		return -EBUSY;
2960 
2961 	/* Process args and tell the world that the torturer is on the job. */
2962 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
2963 		cur_ops = torture_ops[i];
2964 		if (strcmp(torture_type, cur_ops->name) == 0)
2965 			break;
2966 	}
2967 	if (i == ARRAY_SIZE(torture_ops)) {
2968 		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2969 			 torture_type);
2970 		pr_alert("rcu-torture types:");
2971 		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
2972 			pr_cont(" %s", torture_ops[i]->name);
2973 		pr_cont("\n");
2974 		firsterr = -EINVAL;
2975 		cur_ops = NULL;
2976 		goto unwind;
2977 	}
2978 	if (cur_ops->fqs == NULL && fqs_duration != 0) {
2979 		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
2980 		fqs_duration = 0;
2981 	}
2982 	if (cur_ops->init)
2983 		cur_ops->init();
2984 
2985 	if (nreaders >= 0) {
2986 		nrealreaders = nreaders;
2987 	} else {
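		/*
		 * With negative nreaders, reserve 2 + nreaders CPUs for
		 * other uses; for example, nreaders == -1 reserves one
		 * CPU, yielding num_online_cpus() - 1 readers.
		 */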
2988 		nrealreaders = num_online_cpus() - 2 - nreaders;
2989 		if (nrealreaders <= 0)
2990 			nrealreaders = 1;
2991 	}
2992 	rcu_torture_print_module_parms(cur_ops, "Start of test");
2993 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2994 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2995 	start_gp_seq = gp_seq;
2996 	pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
2997 		 cur_ops->name, (long)gp_seq, flags);
2998 
2999 	/* Set up the freelist. */
3000 
3001 	INIT_LIST_HEAD(&rcu_torture_freelist);
3002 	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
3003 		rcu_tortures[i].rtort_mbtest = 0;
3004 		list_add_tail(&rcu_tortures[i].rtort_free,
3005 			      &rcu_torture_freelist);
3006 	}
3007 
3008 	/* Initialize the statistics so that each run gets its own numbers. */
3009 
3010 	rcu_torture_current = NULL;
3011 	rcu_torture_current_version = 0;
3012 	atomic_set(&n_rcu_torture_alloc, 0);
3013 	atomic_set(&n_rcu_torture_alloc_fail, 0);
3014 	atomic_set(&n_rcu_torture_free, 0);
3015 	atomic_set(&n_rcu_torture_mberror, 0);
3016 	atomic_set(&n_rcu_torture_mbchk_fail, 0);
3017 	atomic_set(&n_rcu_torture_mbchk_tries, 0);
3018 	atomic_set(&n_rcu_torture_error, 0);
3019 	n_rcu_torture_barrier_error = 0;
3020 	n_rcu_torture_boost_ktrerror = 0;
3021 	n_rcu_torture_boost_rterror = 0;
3022 	n_rcu_torture_boost_failure = 0;
3023 	n_rcu_torture_boosts = 0;
3024 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
3025 		atomic_set(&rcu_torture_wcount[i], 0);
3026 	for_each_possible_cpu(cpu) {
3027 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
3028 			per_cpu(rcu_torture_count, cpu)[i] = 0;
3029 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
3030 		}
3031 	}
3032 	err_segs_recorded = 0;
3033 	rt_read_nsegs = 0;
3034 
3035 	/* Start up the kthreads. */
3036 
3037 	rcu_torture_write_types();
3038 	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
3039 					  writer_task);
3040 	if (firsterr)
3041 		goto unwind;
3042 	if (nfakewriters > 0) {
3043 		fakewriter_tasks = kcalloc(nfakewriters,
3044 					   sizeof(fakewriter_tasks[0]),
3045 					   GFP_KERNEL);
3046 		if (fakewriter_tasks == NULL) {
3047 			VERBOSE_TOROUT_ERRSTRING("out of memory");
3048 			firsterr = -ENOMEM;
3049 			goto unwind;
3050 		}
3051 	}
3052 	for (i = 0; i < nfakewriters; i++) {
3053 		firsterr = torture_create_kthread(rcu_torture_fakewriter,
3054 						  NULL, fakewriter_tasks[i]);
3055 		if (firsterr)
3056 			goto unwind;
3057 	}
3058 	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
3059 			       GFP_KERNEL);
3060 	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
3061 					   GFP_KERNEL);
3062 	if (!reader_tasks || !rcu_torture_reader_mbchk) {
3063 		VERBOSE_TOROUT_ERRSTRING("out of memory");
3064 		firsterr = -ENOMEM;
3065 		goto unwind;
3066 	}
3067 	for (i = 0; i < nrealreaders; i++) {
3068 		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
3069 		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
3070 						  reader_tasks[i]);
3071 		if (firsterr)
3072 			goto unwind;
3073 	}
3074 	nrealnocbers = nocbs_nthreads;
3075 	if (WARN_ON(nrealnocbers < 0))
3076 		nrealnocbers = 0;
3077 	if (WARN_ON(nocbs_toggle < 0))
3078 		nocbs_toggle = HZ;
3079 	if (nrealnocbers > 0) {
3080 		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
3081 		if (nocb_tasks == NULL) {
3082 			VERBOSE_TOROUT_ERRSTRING("out of memory");
3083 			firsterr = -ENOMEM;
3084 			goto unwind;
3085 		}
3086 	} else {
3087 		nocb_tasks = NULL;
3088 	}
3089 	for (i = 0; i < nrealnocbers; i++) {
3090 		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
3091 		if (firsterr)
3092 			goto unwind;
3093 	}
3094 	if (stat_interval > 0) {
3095 		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
3096 						  stats_task);
3097 		if (firsterr)
3098 			goto unwind;
3099 	}
3100 	if (test_no_idle_hz && shuffle_interval > 0) {
3101 		firsterr = torture_shuffle_init(shuffle_interval * HZ);
3102 		if (firsterr)
3103 			goto unwind;
3104 	}
3105 	if (stutter < 0)
3106 		stutter = 0;
3107 	if (stutter) {
3108 		int t;
3109 
3110 		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
3111 		firsterr = torture_stutter_init(stutter * HZ, t);
3112 		if (firsterr)
3113 			goto unwind;
3114 	}
3115 	if (fqs_duration < 0)
3116 		fqs_duration = 0;
3117 	if (fqs_duration) {
3118 		/* Create the fqs thread */
3119 		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
3120 						  fqs_task);
3121 		if (firsterr)
3122 			goto unwind;
3123 	}
3124 	if (test_boost_interval < 1)
3125 		test_boost_interval = 1;
3126 	if (test_boost_duration < 2)
3127 		test_boost_duration = 2;
3128 	if (rcu_torture_can_boost()) {
3129 
3130 		boost_starttime = jiffies + test_boost_interval * HZ;
3131 
3132 		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
3133 					     rcutorture_booster_init,
3134 					     rcutorture_booster_cleanup);
3135 		if (firsterr < 0)
3136 			goto unwind;
3137 		rcutor_hp = firsterr;
3138 
3139 		// Testing RCU priority boosting requires rcutorture do
3140 		// some serious abuse.  Counter this by running ksoftirqd
3141 		// at higher priority.
3142 		if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
3143 			for_each_online_cpu(cpu) {
3144 				struct sched_param sp;
3145 				struct task_struct *t;
3146 
3147 				t = per_cpu(ksoftirqd, cpu);
3148 				WARN_ON_ONCE(!t);
3149 				sp.sched_priority = 2;
3150 				sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3151 			}
3152 		}
3153 	}
3154 	shutdown_jiffies = jiffies + shutdown_secs * HZ;
3155 	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
3156 	if (firsterr)
3157 		goto unwind;
3158 	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
3159 				      rcutorture_sync);
3160 	if (firsterr)
3161 		goto unwind;
3162 	firsterr = rcu_torture_stall_init();
3163 	if (firsterr)
3164 		goto unwind;
3165 	firsterr = rcu_torture_fwd_prog_init();
3166 	if (firsterr)
3167 		goto unwind;
3168 	firsterr = rcu_torture_barrier_init();
3169 	if (firsterr)
3170 		goto unwind;
3171 	firsterr = rcu_torture_read_exit_init();
3172 	if (firsterr)
3173 		goto unwind;
3174 	if (object_debug)
3175 		rcu_test_debug_objects();
3176 	torture_init_end();
3177 	return 0;
3178 
3179 unwind:
3180 	torture_init_end();
3181 	rcu_torture_cleanup();
3182 	if (shutdown_secs) {
3183 		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
3184 		kernel_power_off();
3185 	}
3186 	return firsterr;
3187 }
3188 
3189 module_init(rcu_torture_init);
3190 module_exit(rcu_torture_cleanup);
3191