// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
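
/*
 * Example (illustrative, derived from the definitions above): a
 * reader-state value of 0x124 decodes as SRCU index 1 in the bits at
 * and above RCUTORTURE_RDR_SHIFT plus RCUTORTURE_RDR_PREEMPT |
 * RCUTORTURE_RDR_RCU in the low-order bits, that is,
 * (1 << RCUTORTURE_RDR_SHIFT) | 0x04 | 0x20.
 */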

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	     "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0,
	     "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	     "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13,
	      "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
	      "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	     "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	     "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	     "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	     "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_POLL_GET		7
#define RTWS_POLL_WAIT		8
#define RTWS_SYNC		9
#define RTWS_STUTTER		10
#define RTWS_STOPPING		11
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_POLL_GET",
	"RTWS_POLL_WAIT",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_read_delay() delays. */

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state)(void);
	unsigned long (*start_gp_poll)(void);
	bool (*poll_gp_state)(unsigned long oldstate);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};
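
/*
 * Hooks that a given flavor does not supply may be left NULL; callers
 * are expected to check before use.  For example, rcu_busted_ops below
 * supplies neither ->cb_barrier nor ->fqs, and rcu_torture_fakewriter()
 * invokes ->cb_barrier() only when it is non-NULL.
 */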

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();  /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}
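
/*
 * Rough odds for the delays above (illustrative, assuming 16 readers):
 * with longdelay_ms = 300, the long mdelay() path is taken about once
 * per 16 * 2000 * 300 = 9.6 million calls, and the short udelay() about
 * once per 16 * 2 * 200 = 6400 calls.
 */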

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
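
/*
 * Pipeline arithmetic (illustrative): a newly queued element starts with
 * rtort_pipe_count of zero and has it incremented once per grace period,
 * so rcu_torture_pipe_update_one() reports it freeable once the count
 * reaches RCU_TORTURE_PIPE_LEN, that is, after it has survived ten
 * grace periods.
 */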

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_gp_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.gp_kthread_dbg	= show_rcu_gp_kthreads,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}
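
/*
 * Illustrative numbers for the above: uspertick is 1000000 / HZ, so
 * assuming HZ=1000 and 16 readers the modulus is 16 * 2 * 10 * 1000 =
 * 320000, making the longdelay-jiffies sleep roughly a one-in-320000
 * event per call when running in task context.
 */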

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}
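
/*
 * Rationale (illustrative): the loop above emulates a grace period for
 * this preemption-disabled flavor.  Binding to each online CPU in turn
 * forces a context switch on every CPU, and a context switch is a
 * quiescent state when readers run with preemption disabled.
 */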

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,  /* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  This is only possible when rcutorture is built in;
	 * otherwise the user should do this manually via the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true; /* failed */
	}

	return false; /* passed */
}
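
/*
 * Worked example (illustrative): with the default test_boost_duration=4
 * and assuming HZ=1000, a callback that has been pending for more than
 * 4 * HZ - HZ / 2 = 3500 jiffies counts as a priority-boost failure.
 */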

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Track whether the test has already failed in this interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test. */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (time_before(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								 jiffies);
				call_rcu_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1; in
		 * that case the boost check would never happen in the above
		 * loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period
// primitives.  The only purpose of the initialization is to size the array.
static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_poll1 = gp_poll, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}
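
/*
 * Example (illustrative): with all five gp_* module parameters left at
 * their default of false, every primitive the current flavor supplies is
 * enabled.  For the "rcu" flavor above, that yields synctype[] =
 * { RTWS_COND_GET, RTWS_EXP_SYNC, RTWS_DEF_FREE, RTWS_SYNC }; polling is
 * skipped because rcu_ops does not provide ->start_gp_poll in this
 * version.
 */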

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	int expediting = 0;
	unsigned long gp_snap;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	bool stutter_waited;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
					  cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
				WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
					  !cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended)
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else {
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				break;
			case RTWS_EXP_SYNC:
				cur_ops->exp_sync();
				break;
			case RTWS_COND_GET:
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				cur_ops->cond_sync(gp_snap);
				break;
			case RTWS_POLL_GET:
				gp_snap = cur_ops->start_gp_poll();
				while (!cur_ops->poll_gp_state(gp_snap)) {
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				break;
			case RTWS_SYNC:
				cur_ops->sync();
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

// Set up and carry out testing of RCU's global memory ordering
static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
					struct torture_random_state *trsp)
{
	unsigned long loops;
	int noc = torture_num_online_cpus();
	int rdrchked;
	int rdrchker;
	struct rcu_torture_reader_check *rtrcp; // Me.
	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.

	if (myid < 0)
		return; // Don't try this from timer handlers.

	// Increment my counter.
	rtrcp = &rcu_torture_reader_mbchk[myid];
	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);

	// Attempt to assign someone else some checking work.
	rdrchked = torture_random(trsp) % nrealreaders;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	rdrchker = torture_random(trsp) % nrealreaders;
	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
	    !READ_ONCE(rtp->rtort_chkp) &&
	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
		rtrcp->rtc_chkrdr = rdrchked;
		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
	}

	// If assigned some completed work, do it!
	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
		return; // No work or work not yet ready.
	rdrchked = rtrcp_assigner->rtc_chkrdr;
	if (WARN_ON_ONCE(rdrchked < 0))
		return;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
	atomic_inc(&n_rcu_torture_mbchk_tries);
	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
		atomic_inc(&n_rcu_torture_mbchk_fail);
	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
	rtrcp_assigner->rtc_ready = 0;
	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
}
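
/*
 * Invariant checked above (illustrative): if the checked reader had
 * executed N loops when the assignment was recorded, then after the
 * intervening grace period its counter must read at least N; observing
 * a smaller value increments n_rcu_torture_mbchk_fail, indicating a
 * violation of RCU's global memory ordering.
 */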

/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning nor the end of the critical section and if there was
 * actually a change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU) {
		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);

		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	return mask ?: RCUTORTURE_RDR_RCU;
}
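
/*
 * Mask-selection odds (illustrative): seven times out of eight the low
 * three bits of randmask1 are nonzero, so the result is a single bit
 * chosen from the RCUTORTURE_RDR_NBITS protection types; the remaining
 * one time in eight, randmask2 can select several protection bits at
 * once.
 */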

/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}
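
/*
 * Loop-count arithmetic (illustrative): i = ((i | (i >> 3)) &
 * RCUTORTURE_RDR_MAX_LOOPS) + 1 yields between 1 and 8 extensions, and
 * OR-ing two random three-bit fields biases the result toward the high
 * end of that range.
 */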
1527 
1528 /*
1529  * Do one read-side critical section, returning false if there was
1530  * no data to read.  Can be invoked both from process context and
1531  * from a timer handler.
1532  */
1533 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
1534 {
1535 	unsigned long cookie;
1536 	int i;
1537 	unsigned long started;
1538 	unsigned long completed;
1539 	int newstate;
1540 	struct rcu_torture *p;
1541 	int pipe_count;
1542 	int readstate = 0;
1543 	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1544 	struct rt_read_seg *rtrsp = &rtseg[0];
1545 	struct rt_read_seg *rtrsp1;
1546 	unsigned long long ts;
1547 
1548 	WARN_ON_ONCE(!rcu_is_watching());
1549 	newstate = rcutorture_extend_mask(readstate, trsp);
1550 	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1551 	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1552 		cookie = cur_ops->get_gp_state();
1553 	started = cur_ops->get_gp_seq();
1554 	ts = rcu_trace_clock_local();
1555 	p = rcu_dereference_check(rcu_torture_current,
1556 				  rcu_read_lock_bh_held() ||
1557 				  rcu_read_lock_sched_held() ||
1558 				  srcu_read_lock_held(srcu_ctlp) ||
1559 				  rcu_read_lock_trace_held() ||
1560 				  torturing_tasks());
1561 	if (p == NULL) {
1562 		/* Wait for rcu_torture_writer to get underway */
1563 		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1564 		return false;
1565 	}
1566 	if (p->rtort_mbtest == 0)
1567 		atomic_inc(&n_rcu_torture_mberror);
1568 	rcu_torture_reader_do_mbchk(myid, p, trsp);
1569 	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1570 	preempt_disable();
1571 	pipe_count = READ_ONCE(p->rtort_pipe_count);
1572 	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1573 		/* Should not happen, but... */
1574 		pipe_count = RCU_TORTURE_PIPE_LEN;
1575 	}
1576 	completed = cur_ops->get_gp_seq();
1577 	if (pipe_count > 1) {
1578 		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
1579 					  ts, started, completed);
1580 		rcu_ftrace_dump(DUMP_ALL);
1581 	}
1582 	__this_cpu_inc(rcu_torture_count[pipe_count]);
1583 	completed = rcutorture_seq_diff(completed, started);
1584 	if (completed > RCU_TORTURE_PIPE_LEN) {
1585 		/* Should not happen, but... */
1586 		completed = RCU_TORTURE_PIPE_LEN;
1587 	}
1588 	__this_cpu_inc(rcu_torture_batch[completed]);
1589 	preempt_enable();
1590 	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1591 		WARN_ONCE(cur_ops->poll_gp_state(cookie),
1592 			  "%s: Cookie check 3 failed %s(%d) %lu->%lu\n",
1593 			  __func__,
1594 			  rcu_torture_writer_state_getname(),
1595 			  rcu_torture_writer_state,
1596 			  cookie, cur_ops->get_gp_state());
1597 	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1598 	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
1599 	// This next splat is expected behavior if leakpointer, especially
1600 	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
1601 	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
1602 
1603 	/* If error or close call, record the sequence of reader protections. */
1604 	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
1605 		i = 0;
1606 		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
1607 			err_segs[i++] = *rtrsp1;
1608 		rt_read_nsegs = i;
1609 	}
1610 
1611 	return true;
1612 }
1613 
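/*
 * Minimal sketch of the polled grace-period check performed above,
 * assuming the flavor under test supplies the optional get_gp_state,
 * poll_gp_state, and sync operations.  The sketch_* name is
 * hypothetical and nothing invokes this function.
 */
static void __maybe_unused sketch_poll_gp_cookie(void)
{
	unsigned long cookie;

	if (!cur_ops->get_gp_state || !cur_ops->poll_gp_state || !cur_ops->sync)
		return;
	cookie = cur_ops->get_gp_state();	/* Snapshot grace-period state. */
	cur_ops->sync();			/* A full grace period elapses... */
	WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie)); /* ...so polling must succeed. */
}
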
1614 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
1615 
1616 /*
1617  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
1618  * incrementing the corresponding element of the pipeline array.  The
1619  * counter in the element should never be greater than 1, otherwise, the
1620  * RCU implementation is broken.
1621  */
1622 static void rcu_torture_timer(struct timer_list *unused)
1623 {
1624 	atomic_long_inc(&n_rcu_torture_timers);
1625 	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
1626 
1627 	/* Test call_rcu() invocation from interrupt handler. */
1628 	if (cur_ops->call) {
1629 		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
1630 
1631 		if (rhp)
1632 			cur_ops->call(rhp, rcu_torture_timer_cb);
1633 	}
1634 }
1635 
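/*
 * Sketch of the fire-and-forget pattern used by the timer handler above
 * (hypothetical names; rcutorture itself uses rcu_torture_timer_cb()):
 * allocate an rcu_head with GFP_NOWAIT because this may run in atomic
 * context, post it, and free it from the callback.
 */
static void sketch_free_rhp_cb(struct rcu_head *rhp)
{
	kfree(rhp);	/* Callbacks run from softirq, so no blocking here. */
}

static void __maybe_unused sketch_post_cb_from_atomic(void)
{
	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

	if (rhp)	/* Atomic allocation can fail; simply skip this round. */
		call_rcu(rhp, sketch_free_rhp_cb);
}
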
1636 /*
1637  * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
1638  * incrementing the corresponding element of the pipeline array.  The
1639  * counter in the element should never be greater than 1, otherwise, the
1640  * RCU implementation is broken.
1641  */
1642 static int
1643 rcu_torture_reader(void *arg)
1644 {
1645 	unsigned long lastsleep = jiffies;
1646 	long myid = (long)arg;
1647 	int mynumonline = myid;
1648 	DEFINE_TORTURE_RANDOM(rand);
1649 	struct timer_list t;
1650 
1651 	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
1652 	set_user_nice(current, MAX_NICE);
1653 	if (irqreader && cur_ops->irq_capable)
1654 		timer_setup_on_stack(&t, rcu_torture_timer, 0);
1655 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
1656 	do {
1657 		if (irqreader && cur_ops->irq_capable) {
1658 			if (!timer_pending(&t))
1659 				mod_timer(&t, jiffies + 1);
1660 		}
1661 		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
1662 			schedule_timeout_interruptible(HZ);
1663 		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
1664 			torture_hrtimeout_us(500, 1000, &rand);
1665 			lastsleep = jiffies + 10;
1666 		}
1667 		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
1668 			schedule_timeout_interruptible(HZ / 5);
1669 		stutter_wait("rcu_torture_reader");
1670 	} while (!torture_must_stop());
1671 	if (irqreader && cur_ops->irq_capable) {
1672 		del_timer_sync(&t);
1673 		destroy_timer_on_stack(&t);
1674 	}
1675 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
1676 	torture_kthread_stopping("rcu_torture_reader");
1677 	return 0;
1678 }
1679 
1680 /*
1681  * Randomly toggle CPUs' callback-offload state.  This uses hrtimers to
1682  * increase race probabilities and fuzzes the interval between toggling.
1683  */
1684 static int rcu_nocb_toggle(void *arg)
1685 {
1686 	int cpu;
1687 	int maxcpu = -1;
1688 	int oldnice = task_nice(current);
1689 	long r;
1690 	DEFINE_TORTURE_RANDOM(rand);
1691 	ktime_t toggle_delay;
1692 	unsigned long toggle_fuzz;
1693 	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
1694 
1695 	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
1696 	while (!rcu_inkernel_boot_has_ended())
1697 		schedule_timeout_interruptible(HZ / 10);
1698 	for_each_online_cpu(cpu)
1699 		maxcpu = cpu;
1700 	WARN_ON(maxcpu < 0);
1701 	if (toggle_interval > ULONG_MAX)
1702 		toggle_fuzz = ULONG_MAX >> 3;
1703 	else
1704 		toggle_fuzz = toggle_interval >> 3;
1705 	if (toggle_fuzz <= 0)
1706 		toggle_fuzz = NSEC_PER_USEC;
1707 	do {
1708 		r = torture_random(&rand);
1709 		cpu = (r >> 4) % (maxcpu + 1);
1710 		if (r & 0x1) {
1711 			rcu_nocb_cpu_offload(cpu);
1712 			atomic_long_inc(&n_nocb_offload);
1713 		} else {
1714 			rcu_nocb_cpu_deoffload(cpu);
1715 			atomic_long_inc(&n_nocb_deoffload);
1716 		}
1717 		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
1718 		set_current_state(TASK_INTERRUPTIBLE);
1719 		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
1720 		if (stutter_wait("rcu_nocb_toggle"))
1721 			sched_set_normal(current, oldnice);
1722 	} while (!torture_must_stop());
1723 	torture_kthread_stopping("rcu_nocb_toggle");
1724 	return 0;
1725 }
1726 
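/*
 * Sketch of the fuzzed sleep computed in the loop above (hypothetical
 * helper, never called): sleep for a base interval plus bounded random
 * jitter, using an hrtimer so that sub-jiffy intervals still vary.
 * Assumes fuzz is nonzero, as the checks above guarantee.
 */
static void __maybe_unused sketch_fuzzed_sleep(struct torture_random_state *trsp,
					       ktime_t base, unsigned long fuzz)
{
	ktime_t delay = torture_random(trsp) % fuzz + base;

	set_current_state(TASK_INTERRUPTIBLE);
	schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
}
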
1727 /*
1728  * Print torture statistics.  Caller must ensure that there is only
1729  * one call to this function at a given time!!!  This is normally
1730  * accomplished by relying on the module system to only have one copy
1731  * of the module loaded, and then by giving the rcu_torture_stats
1732  * kthread full control (or the init/cleanup functions when rcu_torture_stats
1733  * thread is not running).
1734  */
1735 static void
1736 rcu_torture_stats_print(void)
1737 {
1738 	int cpu;
1739 	int i;
1740 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1741 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1742 	struct rcu_torture *rtcp;
1743 	static unsigned long rtcv_snap = ULONG_MAX;
1744 	static bool splatted;
1745 	struct task_struct *wtp;
1746 
1747 	for_each_possible_cpu(cpu) {
1748 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1749 			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
1750 			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
1751 		}
1752 	}
1753 	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1754 		if (pipesummary[i] != 0)
1755 			break;
1756 	}
1757 
1758 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1759 	rtcp = rcu_access_pointer(rcu_torture_current);
1760 	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1761 		rtcp,
1762 		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
1763 		rcu_torture_current_version,
1764 		list_empty(&rcu_torture_freelist),
1765 		atomic_read(&n_rcu_torture_alloc),
1766 		atomic_read(&n_rcu_torture_alloc_fail),
1767 		atomic_read(&n_rcu_torture_free));
1768 	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
1769 		atomic_read(&n_rcu_torture_mberror),
1770 		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
1771 		n_rcu_torture_barrier_error,
1772 		n_rcu_torture_boost_ktrerror,
1773 		n_rcu_torture_boost_rterror);
1774 	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
1775 		n_rcu_torture_boost_failure,
1776 		n_rcu_torture_boosts,
1777 		atomic_long_read(&n_rcu_torture_timers));
1778 	torture_onoff_stats();
1779 	pr_cont("barrier: %ld/%ld:%ld ",
1780 		data_race(n_barrier_successes),
1781 		data_race(n_barrier_attempts),
1782 		data_race(n_rcu_torture_barrier_error));
1783 	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
1784 	pr_cont("nocb-toggles: %ld:%ld\n",
1785 		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
1786 
1787 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1788 	if (atomic_read(&n_rcu_torture_mberror) ||
1789 	    atomic_read(&n_rcu_torture_mbchk_fail) ||
1790 	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
1791 	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
1792 	    i > 1) {
1793 		pr_cont("%s", "!!! ");
1794 		atomic_inc(&n_rcu_torture_error);
1795 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
1796 		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
1797 		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
1798 		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
1799 		WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
1800 		WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
1801 		WARN_ON_ONCE(i > 1); // Too-short grace period
1802 	}
1803 	pr_cont("Reader Pipe: ");
1804 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1805 		pr_cont(" %ld", pipesummary[i]);
1806 	pr_cont("\n");
1807 
1808 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1809 	pr_cont("Reader Batch: ");
1810 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1811 		pr_cont(" %ld", batchsummary[i]);
1812 	pr_cont("\n");
1813 
1814 	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1815 	pr_cont("Free-Block Circulation: ");
1816 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1817 		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
1818 	}
1819 	pr_cont("\n");
1820 
1821 	if (cur_ops->stats)
1822 		cur_ops->stats();
1823 	if (rtcv_snap == rcu_torture_current_version &&
1824 	    rcu_access_pointer(rcu_torture_current) &&
1825 	    !rcu_stall_is_suppressed()) {
1826 		int __maybe_unused flags = 0;
1827 		unsigned long __maybe_unused gp_seq = 0;
1828 
1829 		rcutorture_get_gp_data(cur_ops->ttype,
1830 				       &flags, &gp_seq);
1831 		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
1832 					&flags, &gp_seq);
1833 		wtp = READ_ONCE(writer_task);
1834 		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
1835 			 rcu_torture_writer_state_getname(),
1836 			 rcu_torture_writer_state, gp_seq, flags,
1837 			 wtp == NULL ? ~0UL : wtp->state,
1838 			 wtp == NULL ? -1 : (int)task_cpu(wtp));
1839 		if (!splatted && wtp) {
1840 			sched_show_task(wtp);
1841 			splatted = true;
1842 		}
1843 		if (cur_ops->gp_kthread_dbg)
1844 			cur_ops->gp_kthread_dbg();
1845 		rcu_ftrace_dump(DUMP_ALL);
1846 	}
1847 	rtcv_snap = rcu_torture_current_version;
1848 }
1849 
1850 /*
1851  * Periodically prints torture statistics, if periodic statistics printing
1852  * was specified via the stat_interval module parameter.
1853  */
1854 static int
1855 rcu_torture_stats(void *arg)
1856 {
1857 	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
1858 	do {
1859 		schedule_timeout_interruptible(stat_interval * HZ);
1860 		rcu_torture_stats_print();
1861 		torture_shutdown_absorb("rcu_torture_stats");
1862 	} while (!torture_must_stop());
1863 	torture_kthread_stopping("rcu_torture_stats");
1864 	return 0;
1865 }
1866 
1867 static void
1868 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
1869 {
1870 	pr_alert("%s" TORTURE_FLAG
1871 		 "--- %s: nreaders=%d nfakewriters=%d "
1872 		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1873 		 "shuffle_interval=%d stutter=%d irqreader=%d "
1874 		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1875 		 "test_boost=%d/%d test_boost_interval=%d "
1876 		 "test_boost_duration=%d shutdown_secs=%d "
1877 		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
1878 		 "stall_cpu_block=%d "
1879 		 "n_barrier_cbs=%d "
1880 		 "onoff_interval=%d onoff_holdoff=%d "
1881 		 "read_exit_delay=%d read_exit_burst=%d "
1882 		 "nocbs_nthreads=%d nocbs_toggle=%d\n",
1883 		 torture_type, tag, nrealreaders, nfakewriters,
1884 		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1885 		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1886 		 test_boost, cur_ops->can_boost,
1887 		 test_boost_interval, test_boost_duration, shutdown_secs,
1888 		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
1889 		 stall_cpu_block,
1890 		 n_barrier_cbs,
1891 		 onoff_interval, onoff_holdoff,
1892 		 read_exit_delay, read_exit_burst,
1893 		 nocbs_nthreads, nocbs_toggle);
1894 }
1895 
1896 static int rcutorture_booster_cleanup(unsigned int cpu)
1897 {
1898 	struct task_struct *t;
1899 
1900 	if (boost_tasks[cpu] == NULL)
1901 		return 0;
1902 	mutex_lock(&boost_mutex);
1903 	t = boost_tasks[cpu];
1904 	boost_tasks[cpu] = NULL;
1905 	rcu_torture_enable_rt_throttle();
1906 	mutex_unlock(&boost_mutex);
1907 
1908 	/* This must be outside of the mutex, otherwise deadlock! */
1909 	torture_stop_kthread(rcu_torture_boost, t);
1910 	return 0;
1911 }
1912 
1913 static int rcutorture_booster_init(unsigned int cpu)
1914 {
1915 	int retval;
1916 
1917 	if (boost_tasks[cpu] != NULL)
1918 		return 0;  /* Already created, nothing more to do. */
1919 
1920 	/* Don't allow time recalculation while creating a new task. */
1921 	mutex_lock(&boost_mutex);
1922 	rcu_torture_disable_rt_throttle();
1923 	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
1924 	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1925 						  cpu_to_node(cpu),
1926 						  "rcu_torture_boost");
1927 	if (IS_ERR(boost_tasks[cpu])) {
1928 		retval = PTR_ERR(boost_tasks[cpu]);
1929 		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
1930 		n_rcu_torture_boost_ktrerror++;
1931 		boost_tasks[cpu] = NULL;
1932 		mutex_unlock(&boost_mutex);
1933 		return retval;
1934 	}
1935 	kthread_bind(boost_tasks[cpu], cpu);
1936 	wake_up_process(boost_tasks[cpu]);
1937 	mutex_unlock(&boost_mutex);
1938 	return 0;
1939 }
1940 
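/*
 * Sketch of the create/bind/wake sequence used above (hypothetical
 * helper and name format): kthread_create_on_node() leaves the new
 * task stopped, which is what makes it safe to bind the task to a
 * CPU before its first wakeup.
 */
static int __maybe_unused sketch_spawn_bound_kthread(int (*fn)(void *), int cpu)
{
	struct task_struct *t;

	t = kthread_create_on_node(fn, NULL, cpu_to_node(cpu), "sketch/%d", cpu);
	if (IS_ERR(t))
		return PTR_ERR(t);
	kthread_bind(t, cpu);	/* Must happen before the first wakeup. */
	wake_up_process(t);
	return 0;
}
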
1941 /*
1942  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
1943  * induces a CPU stall for the time specified by stall_cpu.
1944  */
1945 static int rcu_torture_stall(void *args)
1946 {
1947 	int idx;
1948 	unsigned long stop_at;
1949 
1950 	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
1951 	if (stall_cpu_holdoff > 0) {
1952 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
1953 		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
1954 		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
1955 	}
1956 	if (!kthread_should_stop() && stall_gp_kthread > 0) {
1957 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
1958 		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
1959 		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
1960 			if (kthread_should_stop())
1961 				break;
1962 			schedule_timeout_uninterruptible(HZ);
1963 		}
1964 	}
1965 	if (!kthread_should_stop() && stall_cpu > 0) {
1966 		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
1967 		stop_at = ktime_get_seconds() + stall_cpu;
1968 		/* RCU CPU stall is expected behavior in following code. */
1969 		idx = cur_ops->readlock();
1970 		if (stall_cpu_irqsoff)
1971 			local_irq_disable();
1972 		else if (!stall_cpu_block)
1973 			preempt_disable();
1974 		pr_alert("rcu_torture_stall start on CPU %d.\n",
1975 			 raw_smp_processor_id());
1976 		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1977 				    stop_at))
1978 			if (stall_cpu_block)
1979 				schedule_timeout_uninterruptible(HZ);
1980 		if (stall_cpu_irqsoff)
1981 			local_irq_enable();
1982 		else if (!stall_cpu_block)
1983 			preempt_enable();
1984 		cur_ops->readunlock(idx);
1985 	}
1986 	pr_alert("rcu_torture_stall end.\n");
1987 	torture_shutdown_absorb("rcu_torture_stall");
1988 	while (!kthread_should_stop())
1989 		schedule_timeout_interruptible(10 * HZ);
1990 	return 0;
1991 }
1992 
1993 /* Spawn CPU-stall kthread, if stall_cpu specified. */
1994 static int __init rcu_torture_stall_init(void)
1995 {
1996 	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
1997 		return 0;
1998 	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
1999 }
2000 
2001 /* State structure for forward-progress self-propagating RCU callback. */
2002 struct fwd_cb_state {
2003 	struct rcu_head rh;
2004 	int stop;
2005 };
2006 
2007 /*
2008  * Forward-progress self-propagating RCU callback function.  Because
2009  * callbacks run from softirq, this function is an implicit RCU read-side
2010  * critical section.
2011  */
2012 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2013 {
2014 	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2015 
2016 	if (READ_ONCE(fcsp->stop)) {
2017 		WRITE_ONCE(fcsp->stop, 2);
2018 		return;
2019 	}
2020 	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2021 }
2022 
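/*
 * Minimal sketch of the start/stop protocol for the self-propagating
 * callback above, mirroring its use in rcu_torture_fwd_prog_nr():
 * ->stop == 1 requests shutdown and ->stop == 2 acknowledges it.
 * Hypothetical helper; assumes cur_ops supplies ->call, ->sync, and
 * ->cb_barrier.
 */
static void __maybe_unused sketch_selfprop_start_stop(struct fwd_cb_state *fcsp)
{
	WRITE_ONCE(fcsp->stop, 0);
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb); /* Start the chain. */
	/* ... allow the callback to repost itself for a while ... */
	WRITE_ONCE(fcsp->stop, 1);	/* Request shutdown. */
	cur_ops->sync();		/* Wait for the running callback. */
	cur_ops->cb_barrier();		/* Wait for the final queued one. */
	WARN_ON(READ_ONCE(fcsp->stop) != 2); /* Shutdown was acknowledged. */
}
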
2023 /* State for continuous-flood RCU callbacks. */
2024 struct rcu_fwd_cb {
2025 	struct rcu_head rh;
2026 	struct rcu_fwd_cb *rfc_next;
2027 	struct rcu_fwd *rfc_rfp;
2028 	int rfc_gps;
2029 };
2030 
2031 #define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
2032 #define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
2033 #define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
2034 #define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
2035 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
2036 
2037 struct rcu_launder_hist {
2038 	long n_launders;
2039 	unsigned long launder_gp_seq;
2040 };
2041 
2042 struct rcu_fwd {
2043 	spinlock_t rcu_fwd_lock;
2044 	struct rcu_fwd_cb *rcu_fwd_cb_head;
2045 	struct rcu_fwd_cb **rcu_fwd_cb_tail;
2046 	long n_launders_cb;
2047 	unsigned long rcu_fwd_startat;
2048 	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2049 	unsigned long rcu_launder_gp_seq_start;
2050 };
2051 
2052 static DEFINE_MUTEX(rcu_fwd_mutex);
2053 static struct rcu_fwd *rcu_fwds;
2054 static bool rcu_fwd_emergency_stop;
2055 
2056 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2057 {
2058 	unsigned long gps;
2059 	unsigned long gps_old;
2060 	int i;
2061 	int j;
2062 
2063 	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2064 		if (rfp->n_launders_hist[i].n_launders > 0)
2065 			break;
2066 	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
2067 		 __func__, jiffies - rfp->rcu_fwd_startat);
2068 	gps_old = rfp->rcu_launder_gp_seq_start;
2069 	for (j = 0; j <= i; j++) {
2070 		gps = rfp->n_launders_hist[j].launder_gp_seq;
2071 		pr_cont(" %ds/%d: %ld:%ld",
2072 			j + 1, FWD_CBS_HIST_DIV,
2073 			rfp->n_launders_hist[j].n_launders,
2074 			rcutorture_seq_diff(gps, gps_old));
2075 		gps_old = gps;
2076 	}
2077 	pr_cont("\n");
2078 }
2079 
2080 /* Callback function for continuous-flood RCU callbacks. */
2081 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2082 {
2083 	unsigned long flags;
2084 	int i;
2085 	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2086 	struct rcu_fwd_cb **rfcpp;
2087 	struct rcu_fwd *rfp = rfcp->rfc_rfp;
2088 
2089 	rfcp->rfc_next = NULL;
2090 	rfcp->rfc_gps++;
2091 	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2092 	rfcpp = rfp->rcu_fwd_cb_tail;
2093 	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2094 	WRITE_ONCE(*rfcpp, rfcp);
2095 	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2096 	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2097 	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2098 		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2099 	rfp->n_launders_hist[i].n_launders++;
2100 	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2101 	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2102 }
2103 
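/*
 * Sketch of the tail-pointer queue invariant relied on above
 * (hypothetical helper, never called): when the queue is empty,
 * ->rcu_fwd_cb_tail points at ->rcu_fwd_cb_head itself, so enqueuing
 * needs no empty-queue special case.  Caller must hold ->rcu_fwd_lock.
 */
static void __maybe_unused sketch_fwd_enqueue_locked(struct rcu_fwd *rfp,
						     struct rcu_fwd_cb *rfcp)
{
	rfcp->rfc_next = NULL;
	*rfp->rcu_fwd_cb_tail = rfcp;		/* Link after the current tail. */
	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;	/* Advance the tail pointer. */
}
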
2104 // Give the scheduler a chance, even on nohz_full CPUs.
2105 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2106 {
2107 	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2108 		// Real call_rcu() floods hit userspace, so emulate that.
2109 		if (need_resched() || (iter & 0xfff))
2110 			schedule();
2111 		return;
2112 	}
2113 	// No userspace emulation: CB invocation throttles call_rcu()
2114 	cond_resched();
2115 }
2116 
2117 /*
2118  * Free all callbacks on the rcu_fwd_cb_head list, either because the
2119  * test is over or because we hit an OOM event.
2120  */
2121 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2122 {
2123 	unsigned long flags;
2124 	unsigned long freed = 0;
2125 	struct rcu_fwd_cb *rfcp;
2126 
2127 	for (;;) {
2128 		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2129 		rfcp = rfp->rcu_fwd_cb_head;
2130 		if (!rfcp) {
2131 			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2132 			break;
2133 		}
2134 		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2135 		if (!rfp->rcu_fwd_cb_head)
2136 			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2137 		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2138 		kfree(rfcp);
2139 		freed++;
2140 		rcu_torture_fwd_prog_cond_resched(freed);
2141 		if (tick_nohz_full_enabled()) {
2142 			local_irq_save(flags);
2143 			rcu_momentary_dyntick_idle();
2144 			local_irq_restore(flags);
2145 		}
2146 	}
2147 	return freed;
2148 }
2149 
2150 /* Carry out need_resched()/cond_resched() forward-progress testing. */
2151 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2152 				    int *tested, int *tested_tries)
2153 {
2154 	unsigned long cver;
2155 	unsigned long dur;
2156 	struct fwd_cb_state fcs;
2157 	unsigned long gps;
2158 	int idx;
2159 	int sd;
2160 	int sd4;
2161 	bool selfpropcb = false;
2162 	unsigned long stopat;
2163 	static DEFINE_TORTURE_RANDOM(trs);
2164 
2165 	if (!cur_ops->sync)
2166 		return; // Cannot do need_resched() forward progress testing without ->sync.
2167 	if (cur_ops->call && cur_ops->cb_barrier) {
2168 		init_rcu_head_on_stack(&fcs.rh);
2169 		selfpropcb = true;
2170 	}
2171 
2172 	/* Tight loop containing cond_resched(). */
2173 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2174 	cur_ops->sync(); /* Later readers see above write. */
2175 	if (selfpropcb) {
2176 		WRITE_ONCE(fcs.stop, 0);
2177 		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2178 	}
2179 	cver = READ_ONCE(rcu_torture_current_version);
2180 	gps = cur_ops->get_gp_seq();
2181 	sd = cur_ops->stall_dur() + 1;
2182 	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2183 	dur = sd4 + torture_random(&trs) % (sd - sd4);
2184 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2185 	stopat = rfp->rcu_fwd_startat + dur;
2186 	while (time_before(jiffies, stopat) &&
2187 	       !shutdown_time_arrived() &&
2188 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2189 		idx = cur_ops->readlock();
2190 		udelay(10);
2191 		cur_ops->readunlock(idx);
2192 		if (!fwd_progress_need_resched || need_resched())
2193 			cond_resched();
2194 	}
2195 	(*tested_tries)++;
2196 	if (!time_before(jiffies, stopat) &&
2197 	    !shutdown_time_arrived() &&
2198 	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2199 		(*tested)++;
2200 		cver = READ_ONCE(rcu_torture_current_version) - cver;
2201 		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2202 		WARN_ON(!cver && gps < 2);
2203 		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
2204 	}
2205 	if (selfpropcb) {
2206 		WRITE_ONCE(fcs.stop, 1);
2207 		cur_ops->sync(); /* Wait for running CB to complete. */
2208 		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2209 	}
2210 
2211 	if (selfpropcb) {
2212 		WARN_ON(READ_ONCE(fcs.stop) != 2);
2213 		destroy_rcu_head_on_stack(&fcs.rh);
2214 	}
2215 	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2216 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2217 }
2218 
2219 /* Carry out call_rcu() forward-progress testing. */
2220 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2221 {
2222 	unsigned long cver;
2223 	unsigned long flags;
2224 	unsigned long gps;
2225 	int i;
2226 	long n_launders;
2227 	long n_launders_cb_snap;
2228 	long n_launders_sa;
2229 	long n_max_cbs;
2230 	long n_max_gps;
2231 	struct rcu_fwd_cb *rfcp;
2232 	struct rcu_fwd_cb *rfcpn;
2233 	unsigned long stopat;
2234 	unsigned long stoppedat;
2235 
2236 	if (READ_ONCE(rcu_fwd_emergency_stop))
2237 		return; /* Get out of the way quickly, no GP wait! */
2238 	if (!cur_ops->call)
2239 		return; /* Can't do call_rcu() fwd prog without ->call. */
2240 
2241 	/* Loop continuously posting RCU callbacks. */
2242 	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
2243 	cur_ops->sync(); /* Later readers see above write. */
2244 	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2245 	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2246 	n_launders = 0;
2247 	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2248 	n_launders_sa = 0;
2249 	n_max_cbs = 0;
2250 	n_max_gps = 0;
2251 	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2252 		rfp->n_launders_hist[i].n_launders = 0;
2253 	cver = READ_ONCE(rcu_torture_current_version);
2254 	gps = cur_ops->get_gp_seq();
2255 	rfp->rcu_launder_gp_seq_start = gps;
2256 	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2257 	while (time_before(jiffies, stopat) &&
2258 	       !shutdown_time_arrived() &&
2259 	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2260 		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2261 		rfcpn = NULL;
2262 		if (rfcp)
2263 			rfcpn = READ_ONCE(rfcp->rfc_next);
2264 		if (rfcpn) {
2265 			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2266 			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2267 				break;
2268 			rfp->rcu_fwd_cb_head = rfcpn;
2269 			n_launders++;
2270 			n_launders_sa++;
2271 		} else {
2272 			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2273 			if (WARN_ON_ONCE(!rfcp)) {
2274 				schedule_timeout_interruptible(1);
2275 				continue;
2276 			}
2277 			n_max_cbs++;
2278 			n_launders_sa = 0;
2279 			rfcp->rfc_gps = 0;
2280 			rfcp->rfc_rfp = rfp;
2281 		}
2282 		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2283 		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2284 		if (tick_nohz_full_enabled()) {
2285 			local_irq_save(flags);
2286 			rcu_momentary_dyntick_idle();
2287 			local_irq_restore(flags);
2288 		}
2289 	}
2290 	stoppedat = jiffies;
2291 	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2292 	cver = READ_ONCE(rcu_torture_current_version) - cver;
2293 	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2294 	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2295 	(void)rcu_torture_fwd_prog_cbfree(rfp);
2296 
2297 	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2298 	    !shutdown_time_arrived()) {
2299 		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
2300 		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
2301 			 __func__,
2302 			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2303 			 n_launders + n_max_cbs - n_launders_cb_snap,
2304 			 n_launders, n_launders_sa,
2305 			 n_max_gps, n_max_cbs, cver, gps);
2306 		rcu_torture_fwd_cb_hist(rfp);
2307 	}
2308 	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2309 	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2310 	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
2311 }
2312 
2313 
2314 /*
2315  * OOM notifier, but this only prints diagnostic information for the
2316  * current forward-progress test.
2317  */
2318 static int rcutorture_oom_notify(struct notifier_block *self,
2319 				 unsigned long notused, void *nfreed)
2320 {
2321 	struct rcu_fwd *rfp;
2322 
2323 	mutex_lock(&rcu_fwd_mutex);
2324 	rfp = rcu_fwds;
2325 	if (!rfp) {
2326 		mutex_unlock(&rcu_fwd_mutex);
2327 		return NOTIFY_OK;
2328 	}
2329 	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2330 	     __func__);
2331 	rcu_torture_fwd_cb_hist(rfp);
2332 	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
2333 	WRITE_ONCE(rcu_fwd_emergency_stop, true);
2334 	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2335 	pr_info("%s: Freed %lu RCU callbacks.\n",
2336 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2337 	rcu_barrier();
2338 	pr_info("%s: Freed %lu RCU callbacks.\n",
2339 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2340 	rcu_barrier();
2341 	pr_info("%s: Freed %lu RCU callbacks.\n",
2342 		__func__, rcu_torture_fwd_prog_cbfree(rfp));
2343 	smp_mb(); /* Frees before return to avoid redoing OOM. */
2344 	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2345 	pr_info("%s returning after OOM processing.\n", __func__);
2346 	mutex_unlock(&rcu_fwd_mutex);
2347 	return NOTIFY_OK;
2348 }
2349 
2350 static struct notifier_block rcutorture_oom_nb = {
2351 	.notifier_call = rcutorture_oom_notify
2352 };
2353 
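/*
 * Minimal sketch of the OOM-notifier contract assumed above
 * (hypothetical callback, never registered): free what you can and
 * add the count to *nfreed so the OOM killer can tell whether any
 * progress was made.  Registration pairs register_oom_notifier() at
 * init time with unregister_oom_notifier() at cleanup, as done for
 * rcutorture_oom_nb in rcu_torture_fwd_prog_init() below.
 */
static int sketch_oom_notify(struct notifier_block *self,
			     unsigned long unused, void *nfreed)
{
	unsigned long freed = 0;

	/* Drop whatever caches this subsystem can spare, counting them. */
	*(unsigned long *)nfreed += freed;
	return NOTIFY_OK;
}

static struct notifier_block __maybe_unused sketch_oom_nb = {
	.notifier_call = sketch_oom_notify,
};
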
2354 /* Carry out grace-period forward-progress testing. */
2355 static int rcu_torture_fwd_prog(void *args)
2356 {
2357 	int oldnice = task_nice(current);
2358 	struct rcu_fwd *rfp = args;
2359 	int tested = 0;
2360 	int tested_tries = 0;
2361 
2362 	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2363 	rcu_bind_current_to_nocb();
2364 	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2365 		set_user_nice(current, MAX_NICE);
2366 	do {
2367 		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2368 		WRITE_ONCE(rcu_fwd_emergency_stop, false);
2369 		if (!IS_ENABLED(CONFIG_TINY_RCU) ||
2370 		    rcu_inkernel_boot_has_ended())
2371 			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2372 		if (rcu_inkernel_boot_has_ended())
2373 			rcu_torture_fwd_prog_cr(rfp);
2374 
2375 		/* Avoid slow periods, better to test when busy. */
2376 		if (stutter_wait("rcu_torture_fwd_prog"))
2377 			sched_set_normal(current, oldnice);
2378 	} while (!torture_must_stop());
2379 	/* Short runs might not contain a valid forward-progress attempt. */
2380 	WARN_ON(!tested && tested_tries >= 5);
2381 	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2382 	torture_kthread_stopping("rcu_torture_fwd_prog");
2383 	return 0;
2384 }
2385 
2386 /* If forward-progress checking is requested and feasible, spawn the thread. */
2387 static int __init rcu_torture_fwd_prog_init(void)
2388 {
2389 	struct rcu_fwd *rfp;
2390 
2391 	if (!fwd_progress)
2392 		return 0; /* Not requested, so don't do it. */
2393 	if ((!cur_ops->sync && !cur_ops->call) ||
2394 	    !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) {
2395 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2396 		return 0;
2397 	}
2398 	if (stall_cpu > 0) {
2399 		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2400 		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2401 			return -EINVAL; /* In module, can fail back to user. */
2402 		WARN_ON(1); /* Make sure rcutorture notices conflict. */
2403 		return 0;
2404 	}
2405 	if (fwd_progress_holdoff <= 0)
2406 		fwd_progress_holdoff = 1;
2407 	if (fwd_progress_div <= 0)
2408 		fwd_progress_div = 4;
2409 	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
2410 	if (!rfp)
2411 		return -ENOMEM;
2412 	spin_lock_init(&rfp->rcu_fwd_lock);
2413 	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2414 	mutex_lock(&rcu_fwd_mutex);
2415 	rcu_fwds = rfp;
2416 	mutex_unlock(&rcu_fwd_mutex);
2417 	register_oom_notifier(&rcutorture_oom_nb);
2418 	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
2419 }
2420 
2421 static void rcu_torture_fwd_prog_cleanup(void)
2422 {
2423 	struct rcu_fwd *rfp;
2424 
2425 	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
2426 	rfp = rcu_fwds;
2427 	mutex_lock(&rcu_fwd_mutex);
2428 	rcu_fwds = NULL;
2429 	mutex_unlock(&rcu_fwd_mutex);
2430 	unregister_oom_notifier(&rcutorture_oom_nb);
2431 	kfree(rfp);
2432 }
2433 
2434 /* Callback function for RCU barrier testing. */
2435 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
2436 {
2437 	atomic_inc(&barrier_cbs_invoked);
2438 }
2439 
2440 /* IPI handler to get callback posted on desired CPU, if online. */
2441 static void rcu_torture_barrier1cb(void *rcu_void)
2442 {
2443 	struct rcu_head *rhp = rcu_void;
2444 
2445 	cur_ops->call(rhp, rcu_torture_barrier_cbf);
2446 }
2447 
2448 /* kthread function to register callbacks used to test RCU barriers. */
2449 static int rcu_torture_barrier_cbs(void *arg)
2450 {
2451 	long myid = (long)arg;
2452 	bool lastphase = false;
2453 	bool newphase;
2454 	struct rcu_head rcu;
2455 
2456 	init_rcu_head_on_stack(&rcu);
2457 	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
2458 	set_user_nice(current, MAX_NICE);
2459 	do {
2460 		wait_event(barrier_cbs_wq[myid],
2461 			   (newphase =
2462 			    smp_load_acquire(&barrier_phase)) != lastphase ||
2463 			   torture_must_stop());
2464 		lastphase = newphase;
2465 		if (torture_must_stop())
2466 			break;
2467 		/*
2468 		 * The above smp_load_acquire() ensures barrier_phase load
2469 		 * is ordered before the following ->call().
2470 		 */
2471 		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
2472 					     &rcu, 1)) {
2473 			// IPI failed, so use direct call from current CPU.
2474 			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
2475 		}
2476 		if (atomic_dec_and_test(&barrier_cbs_count))
2477 			wake_up(&barrier_wq);
2478 	} while (!torture_must_stop());
2479 	if (cur_ops->cb_barrier != NULL)
2480 		cur_ops->cb_barrier();
2481 	destroy_rcu_head_on_stack(&rcu);
2482 	torture_kthread_stopping("rcu_torture_barrier_cbs");
2483 	return 0;
2484 }
2485 
2486 /* kthread function to drive and coordinate RCU barrier testing. */
2487 static int rcu_torture_barrier(void *arg)
2488 {
2489 	int i;
2490 
2491 	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
2492 	do {
2493 		atomic_set(&barrier_cbs_invoked, 0);
2494 		atomic_set(&barrier_cbs_count, n_barrier_cbs);
2495 		/* Ensure barrier_phase ordered after prior assignments. */
2496 		smp_store_release(&barrier_phase, !barrier_phase);
2497 		for (i = 0; i < n_barrier_cbs; i++)
2498 			wake_up(&barrier_cbs_wq[i]);
2499 		wait_event(barrier_wq,
2500 			   atomic_read(&barrier_cbs_count) == 0 ||
2501 			   torture_must_stop());
2502 		if (torture_must_stop())
2503 			break;
2504 		n_barrier_attempts++;
2505 		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
2506 		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
2507 			n_rcu_torture_barrier_error++;
2508 			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
2509 			       atomic_read(&barrier_cbs_invoked),
2510 			       n_barrier_cbs);
2511 			WARN_ON(1);
2512 			// Wait manually for the remaining callbacks
2513 			i = 0;
2514 			do {
2515 				if (WARN_ON(i++ > HZ))
2516 					i = INT_MIN;
2517 				schedule_timeout_interruptible(1);
2518 				cur_ops->cb_barrier();
2519 			} while (atomic_read(&barrier_cbs_invoked) !=
2520 				 n_barrier_cbs &&
2521 				 !torture_must_stop());
2522 			smp_mb(); // Can't trust ordering if broken.
2523 			if (!torture_must_stop())
2524 				pr_err("Recovered: barrier_cbs_invoked = %d\n",
2525 				       atomic_read(&barrier_cbs_invoked));
2526 		} else {
2527 			n_barrier_successes++;
2528 		}
2529 		schedule_timeout_interruptible(HZ / 10);
2530 	} while (!torture_must_stop());
2531 	torture_kthread_stopping("rcu_torture_barrier");
2532 	return 0;
2533 }
2534 
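/*
 * Sketch of the guarantee that the barrier test exercises, in its most
 * common production form (hypothetical type and helpers): once
 * rcu_barrier() returns, every previously queued callback has been
 * invoked, so callback-reachable memory and code may be torn down.
 */
struct sketch_item {
	struct rcu_head rh;
	int payload;
};

static void sketch_item_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct sketch_item, rh));
}

static void __maybe_unused sketch_barrier_before_teardown(struct sketch_item *p)
{
	call_rcu(&p->rh, sketch_item_free_cb);	/* Deferred free. */
	rcu_barrier();	/* All previously queued callbacks have now run. */
	/* Safe to free remaining state and unload the callback's code. */
}
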
2535 /* Initialize RCU barrier testing. */
2536 static int rcu_torture_barrier_init(void)
2537 {
2538 	int i;
2539 	int ret;
2540 
2541 	if (n_barrier_cbs <= 0)
2542 		return 0;
2543 	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
2544 		pr_alert("%s" TORTURE_FLAG
2545 			 " Call or barrier ops missing for %s,\n",
2546 			 torture_type, cur_ops->name);
2547 		pr_alert("%s" TORTURE_FLAG
2548 			 " RCU barrier testing omitted from run.\n",
2549 			 torture_type);
2550 		return 0;
2551 	}
2552 	atomic_set(&barrier_cbs_count, 0);
2553 	atomic_set(&barrier_cbs_invoked, 0);
2554 	barrier_cbs_tasks =
2555 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
2556 			GFP_KERNEL);
2557 	barrier_cbs_wq =
2558 		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
2559 	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
2560 		return -ENOMEM;
2561 	for (i = 0; i < n_barrier_cbs; i++) {
2562 		init_waitqueue_head(&barrier_cbs_wq[i]);
2563 		ret = torture_create_kthread(rcu_torture_barrier_cbs,
2564 					     (void *)(long)i,
2565 					     barrier_cbs_tasks[i]);
2566 		if (ret)
2567 			return ret;
2568 	}
2569 	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
2570 }
2571 
2572 /* Clean up after RCU barrier testing. */
2573 static void rcu_torture_barrier_cleanup(void)
2574 {
2575 	int i;
2576 
2577 	torture_stop_kthread(rcu_torture_barrier, barrier_task);
2578 	if (barrier_cbs_tasks != NULL) {
2579 		for (i = 0; i < n_barrier_cbs; i++)
2580 			torture_stop_kthread(rcu_torture_barrier_cbs,
2581 					     barrier_cbs_tasks[i]);
2582 		kfree(barrier_cbs_tasks);
2583 		barrier_cbs_tasks = NULL;
2584 	}
2585 	if (barrier_cbs_wq != NULL) {
2586 		kfree(barrier_cbs_wq);
2587 		barrier_cbs_wq = NULL;
2588 	}
2589 }
2590 
2591 static bool rcu_torture_can_boost(void)
2592 {
2593 	static int boost_warn_once;
2594 	int prio;
2595 
2596 	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
2597 		return false;
2598 
2599 	prio = rcu_get_gp_kthreads_prio();
2600 	if (!prio)
2601 		return false;
2602 
2603 	if (prio < 2) {
2604 		if (boost_warn_once == 1)
2605 			return false;
2606 
2607 		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
2608 		boost_warn_once = 1;
2609 		return false;
2610 	}
2611 
2612 	return true;
2613 }
2614 
2615 static bool read_exit_child_stop;
2616 static bool read_exit_child_stopped;
2617 static wait_queue_head_t read_exit_wq;
2618 
2619 // Child kthread which just does an rcutorture reader and exits.
2620 static int rcu_torture_read_exit_child(void *trsp_in)
2621 {
2622 	struct torture_random_state *trsp = trsp_in;
2623 
2624 	set_user_nice(current, MAX_NICE);
2625 	// Minimize time between reading and exiting.
2626 	while (!kthread_should_stop())
2627 		schedule_timeout_uninterruptible(1);
2628 	(void)rcu_torture_one_read(trsp, -1);
2629 	return 0;
2630 }
2631 
2632 // Parent kthread which creates and destroys read-exit child kthreads.
2633 static int rcu_torture_read_exit(void *unused)
2634 {
2635 	int count = 0;
2636 	bool errexit = false;
2637 	int i;
2638 	struct task_struct *tsp;
2639 	DEFINE_TORTURE_RANDOM(trs);
2640 
2641 	// Allocate and initialize.
2642 	set_user_nice(current, MAX_NICE);
2643 	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");
2644 
2645 	// Each pass through this loop does one read-exit episode.
2646 	do {
2647 		if (++count > read_exit_burst) {
2648 			VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
2649 			rcu_barrier(); // Wait for task_struct free, avoid OOM.
2650 			for (i = 0; i < read_exit_delay; i++) {
2651 				schedule_timeout_uninterruptible(HZ);
2652 				if (READ_ONCE(read_exit_child_stop))
2653 					break;
2654 			}
2655 			if (!READ_ONCE(read_exit_child_stop))
2656 				VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
2657 			count = 0;
2658 		}
2659 		if (READ_ONCE(read_exit_child_stop))
2660 			break;
2661 		// Spawn child.
2662 		tsp = kthread_run(rcu_torture_read_exit_child,
2663 				     &trs, "%s",
2664 				     "rcu_torture_read_exit_child");
2665 		if (IS_ERR(tsp)) {
2666 			VERBOSE_TOROUT_ERRSTRING("out of memory");
2667 			errexit = true;
2668 			tsp = NULL;
2669 			break;
2670 		}
2671 		cond_resched();
2672 		kthread_stop(tsp);
2673 		n_read_exits++;
2674 		stutter_wait("rcu_torture_read_exit");
2675 	} while (!errexit && !READ_ONCE(read_exit_child_stop));
2676 
2677 	// Clean up and exit.
2678 	smp_store_release(&read_exit_child_stopped, true); // After reaping.
2679 	smp_mb(); // Store before wakeup.
2680 	wake_up(&read_exit_wq);
2681 	while (!torture_must_stop())
2682 		schedule_timeout_uninterruptible(1);
2683 	torture_kthread_stopping("rcu_torture_read_exit");
2684 	return 0;
2685 }
2686 
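/*
 * Sketch of the release/acquire handshake used by the read-exit code
 * (hypothetical flag and helpers): the release store orders all prior
 * cleanup before the flag update, and the paired acquire load orders
 * the waiter's later accesses after it, as wait_event() below relies on.
 */
static bool sketch_done_flag;

static void __maybe_unused sketch_signal_done(void)
{
	/* All cleanup writes above here... */
	smp_store_release(&sketch_done_flag, true); /* ...are ordered first. */
}

static bool __maybe_unused sketch_is_done(void)
{
	return smp_load_acquire(&sketch_done_flag); /* Pairs with the release. */
}
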
2687 static int rcu_torture_read_exit_init(void)
2688 {
2689 	if (read_exit_burst <= 0)
2690 		return -EINVAL;
2691 	init_waitqueue_head(&read_exit_wq);
2692 	read_exit_child_stop = false;
2693 	read_exit_child_stopped = false;
2694 	return torture_create_kthread(rcu_torture_read_exit, NULL,
2695 				      read_exit_task);
2696 }
2697 
2698 static void rcu_torture_read_exit_cleanup(void)
2699 {
2700 	if (!read_exit_task)
2701 		return;
2702 	WRITE_ONCE(read_exit_child_stop, true);
2703 	smp_mb(); // Above write before wait.
2704 	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
2705 	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
2706 }
2707 
2708 static enum cpuhp_state rcutor_hp;
2709 
2710 static void
2711 rcu_torture_cleanup(void)
2712 {
2713 	int firsttime;
2714 	int flags = 0;
2715 	unsigned long gp_seq = 0;
2716 	int i;
2717 
2718 	if (torture_cleanup_begin()) {
2719 		if (cur_ops->cb_barrier != NULL)
2720 			cur_ops->cb_barrier();
2721 		return;
2722 	}
2723 	if (!cur_ops) {
2724 		torture_cleanup_end();
2725 		return;
2726 	}
2727 
2728 	if (cur_ops->gp_kthread_dbg)
2729 		cur_ops->gp_kthread_dbg();
2730 	rcu_torture_read_exit_cleanup();
2731 	rcu_torture_barrier_cleanup();
2732 	rcu_torture_fwd_prog_cleanup();
2733 	torture_stop_kthread(rcu_torture_stall, stall_task);
2734 	torture_stop_kthread(rcu_torture_writer, writer_task);
2735 
2736 	if (nocb_tasks) {
2737 		for (i = 0; i < nrealnocbers; i++)
2738 			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
2739 		kfree(nocb_tasks);
2740 		nocb_tasks = NULL;
2741 	}
2742 
2743 	if (reader_tasks) {
2744 		for (i = 0; i < nrealreaders; i++)
2745 			torture_stop_kthread(rcu_torture_reader,
2746 					     reader_tasks[i]);
2747 		kfree(reader_tasks);
2748 		reader_tasks = NULL;
2749 	}
2750 	kfree(rcu_torture_reader_mbchk);
2751 	rcu_torture_reader_mbchk = NULL;
2752 
2753 	if (fakewriter_tasks) {
2754 		for (i = 0; i < nfakewriters; i++)
2755 			torture_stop_kthread(rcu_torture_fakewriter,
2756 					     fakewriter_tasks[i]);
2757 		kfree(fakewriter_tasks);
2758 		fakewriter_tasks = NULL;
2759 	}
2760 
2761 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2762 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2763 	pr_alert("%s:  End-test grace-period state: g%ld f%#x total-gps=%ld\n",
2764 		 cur_ops->name, (long)gp_seq, flags,
2765 		 rcutorture_seq_diff(gp_seq, start_gp_seq));
2766 	torture_stop_kthread(rcu_torture_stats, stats_task);
2767 	torture_stop_kthread(rcu_torture_fqs, fqs_task);
2768 	if (rcu_torture_can_boost())
2769 		cpuhp_remove_state(rcutor_hp);
2770 
2771 	/*
2772 	 * Wait for all RCU callbacks to fire, then do torture-type-specific
2773 	 * cleanup operations.
2774 	 */
2775 	if (cur_ops->cb_barrier != NULL)
2776 		cur_ops->cb_barrier();
2777 	if (cur_ops->cleanup != NULL)
2778 		cur_ops->cleanup();
2779 
2780 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
2781 
2782 	if (err_segs_recorded) {
2783 		pr_alert("Failure/close-call rcutorture reader segments:\n");
2784 		if (rt_read_nsegs == 0)
2785 			pr_alert("\t: No segments recorded!!!\n");
2786 		firsttime = 1;
2787 		for (i = 0; i < rt_read_nsegs; i++) {
2788 			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
2789 			if (err_segs[i].rt_delay_jiffies != 0) {
2790 				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
2791 					err_segs[i].rt_delay_jiffies);
2792 				firsttime = 0;
2793 			}
2794 			if (err_segs[i].rt_delay_ms != 0) {
2795 				pr_cont("%s%ldms", firsttime ? "" : "+",
2796 					err_segs[i].rt_delay_ms);
2797 				firsttime = 0;
2798 			}
2799 			if (err_segs[i].rt_delay_us != 0) {
2800 				pr_cont("%s%ldus", firsttime ? "" : "+",
2801 					err_segs[i].rt_delay_us);
2802 				firsttime = 0;
2803 			}
2804 			pr_cont("%s\n",
2805 				err_segs[i].rt_preempted ? "preempted" : "");
2806 
2807 		}
2808 	}
2809 	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
2810 		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
2811 	else if (torture_onoff_failures())
2812 		rcu_torture_print_module_parms(cur_ops,
2813 					       "End of test: RCU_HOTPLUG");
2814 	else
2815 		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
2816 	torture_cleanup_end();
2817 }
2818 
2819 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2820 static void rcu_torture_leak_cb(struct rcu_head *rhp)
2821 {
2822 }
2823 
2824 static void rcu_torture_err_cb(struct rcu_head *rhp)
2825 {
2826 	/*
2827 	 * This -might- happen due to race conditions, but is unlikely.
2828 	 * The scenario that leads to this happening is that the
2829 	 * first of the pair of duplicate callbacks is queued,
2830 	 * someone else starts a grace period that includes that
2831 	 * callback, then the second of the pair must wait for the
2832 	 * next grace period.  Unlikely, but can happen.  If it
2833 	 * does happen, the debug-objects subsystem won't have splatted.
2834 	 */
2835 	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
2836 }
2837 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2838 
2839 /*
2840  * Verify that double-free causes debug-objects to complain, but only
2841  * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
2842  * cannot be carried out.
2843  */
2844 static void rcu_test_debug_objects(void)
2845 {
2846 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
2847 	struct rcu_head rh1;
2848 	struct rcu_head rh2;
2849 	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2850 
2851 	init_rcu_head_on_stack(&rh1);
2852 	init_rcu_head_on_stack(&rh2);
2853 	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);
2854 
2855 	/* Try to queue the rh2 pair of callbacks for the same grace period. */
2856 	preempt_disable(); /* Prevent preemption from interrupting test. */
2857 	rcu_read_lock(); /* Make it impossible to finish a grace period. */
2858 	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
2859 	local_irq_disable(); /* Make it harder to start a new grace period. */
2860 	call_rcu(&rh2, rcu_torture_leak_cb);
2861 	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
2862 	if (rhp) {
2863 		call_rcu(rhp, rcu_torture_leak_cb);
2864 		call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
2865 	}
2866 	local_irq_enable();
2867 	rcu_read_unlock();
2868 	preempt_enable();
2869 
2870 	/* Wait for them all to get done so we can safely return. */
2871 	rcu_barrier();
2872 	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
2873 	destroy_rcu_head_on_stack(&rh1);
2874 	destroy_rcu_head_on_stack(&rh2);
2875 #else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2876 	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
2877 #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
2878 }
2879 
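/*
 * Sketch of the on-stack rcu_head discipline followed above
 * (hypothetical helper, never called): announce the head to
 * debug-objects before use, wait for the callback to run, and only
 * then destroy the head and let the stack frame go away.
 */
static void sketch_stack_noop_cb(struct rcu_head *rhp)
{
}

static void __maybe_unused sketch_on_stack_rcu_head(void)
{
	struct rcu_head rh;

	init_rcu_head_on_stack(&rh);
	call_rcu(&rh, sketch_stack_noop_cb);
	rcu_barrier();	/* The callback must finish before &rh vanishes. */
	destroy_rcu_head_on_stack(&rh);
}
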
2880 static void rcutorture_sync(void)
2881 {
2882 	static unsigned long n;
2883 
2884 	if (cur_ops->sync && !(++n & 0xfff))
2885 		cur_ops->sync();
2886 }
2887 
2888 static int __init
2889 rcu_torture_init(void)
2890 {
2891 	long i;
2892 	int cpu;
2893 	int firsterr = 0;
2894 	int flags = 0;
2895 	unsigned long gp_seq = 0;
2896 	static struct rcu_torture_ops *torture_ops[] = {
2897 		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
2898 		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
2899 		&tasks_tracing_ops, &trivial_ops,
2900 	};
2901 
2902 	if (!torture_init_begin(torture_type, verbose))
2903 		return -EBUSY;
2904 
2905 	/* Process args and tell the world that the torturer is on the job. */
2906 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
2907 		cur_ops = torture_ops[i];
2908 		if (strcmp(torture_type, cur_ops->name) == 0)
2909 			break;
2910 	}
2911 	if (i == ARRAY_SIZE(torture_ops)) {
2912 		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
2913 			 torture_type);
2914 		pr_alert("rcu-torture types:");
2915 		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
2916 			pr_cont(" %s", torture_ops[i]->name);
2917 		pr_cont("\n");
2918 		firsterr = -EINVAL;
2919 		cur_ops = NULL;
2920 		goto unwind;
2921 	}
2922 	if (cur_ops->fqs == NULL && fqs_duration != 0) {
2923 		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
2924 		fqs_duration = 0;
2925 	}
2926 	if (cur_ops->init)
2927 		cur_ops->init();
2928 
2929 	if (nreaders >= 0) {
2930 		nrealreaders = nreaders;
2931 	} else {
2932 		nrealreaders = num_online_cpus() - 2 - nreaders;
2933 		if (nrealreaders <= 0)
2934 			nrealreaders = 1;
2935 	}
2936 	rcu_torture_print_module_parms(cur_ops, "Start of test");
2937 	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
2938 	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
2939 	start_gp_seq = gp_seq;
2940 	pr_alert("%s:  Start-test grace-period state: g%ld f%#x\n",
2941 		 cur_ops->name, (long)gp_seq, flags);
2942 
2943 	/* Set up the freelist. */
2944 
2945 	INIT_LIST_HEAD(&rcu_torture_freelist);
2946 	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
2947 		rcu_tortures[i].rtort_mbtest = 0;
2948 		list_add_tail(&rcu_tortures[i].rtort_free,
2949 			      &rcu_torture_freelist);
2950 	}
2951 
2952 	/* Initialize the statistics so that each run gets its own numbers. */
2953 
2954 	rcu_torture_current = NULL;
2955 	rcu_torture_current_version = 0;
2956 	atomic_set(&n_rcu_torture_alloc, 0);
2957 	atomic_set(&n_rcu_torture_alloc_fail, 0);
2958 	atomic_set(&n_rcu_torture_free, 0);
2959 	atomic_set(&n_rcu_torture_mberror, 0);
2960 	atomic_set(&n_rcu_torture_mbchk_fail, 0);
2961 	atomic_set(&n_rcu_torture_mbchk_tries, 0);
2962 	atomic_set(&n_rcu_torture_error, 0);
2963 	n_rcu_torture_barrier_error = 0;
2964 	n_rcu_torture_boost_ktrerror = 0;
2965 	n_rcu_torture_boost_rterror = 0;
2966 	n_rcu_torture_boost_failure = 0;
2967 	n_rcu_torture_boosts = 0;
2968 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2969 		atomic_set(&rcu_torture_wcount[i], 0);
2970 	for_each_possible_cpu(cpu) {
2971 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2972 			per_cpu(rcu_torture_count, cpu)[i] = 0;
2973 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
2974 		}
2975 	}
2976 	err_segs_recorded = 0;
2977 	rt_read_nsegs = 0;
2978 
2979 	/* Start up the kthreads. */
2980 
2981 	rcu_torture_write_types();
2982 	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
2983 					  writer_task);
2984 	if (firsterr)
2985 		goto unwind;
2986 	if (nfakewriters > 0) {
2987 		fakewriter_tasks = kcalloc(nfakewriters,
2988 					   sizeof(fakewriter_tasks[0]),
2989 					   GFP_KERNEL);
2990 		if (fakewriter_tasks == NULL) {
2991 			VERBOSE_TOROUT_ERRSTRING("out of memory");
2992 			firsterr = -ENOMEM;
2993 			goto unwind;
2994 		}
2995 	}
2996 	for (i = 0; i < nfakewriters; i++) {
2997 		firsterr = torture_create_kthread(rcu_torture_fakewriter,
2998 						  NULL, fakewriter_tasks[i]);
2999 		if (firsterr)
3000 			goto unwind;
3001 	}
3002 	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
3003 			       GFP_KERNEL);
3004 	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
3005 					   GFP_KERNEL);
3006 	if (!reader_tasks || !rcu_torture_reader_mbchk) {
3007 		VERBOSE_TOROUT_ERRSTRING("out of memory");
3008 		firsterr = -ENOMEM;
3009 		goto unwind;
3010 	}
3011 	for (i = 0; i < nrealreaders; i++) {
3012 		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
3013 		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
3014 						  reader_tasks[i]);
3015 		if (firsterr)
3016 			goto unwind;
3017 	}
3018 	nrealnocbers = nocbs_nthreads;
3019 	if (WARN_ON(nrealnocbers < 0))
3020 		nrealnocbers = 0;
3021 	if (WARN_ON(nocbs_toggle < 0))
3022 		nocbs_toggle = HZ;
3023 	if (nrealnocbers > 0) {
3024 		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
3025 		if (nocb_tasks == NULL) {
3026 			VERBOSE_TOROUT_ERRSTRING("out of memory");
3027 			firsterr = -ENOMEM;
3028 			goto unwind;
3029 		}
3030 	} else {
3031 		nocb_tasks = NULL;
3032 	}
3033 	for (i = 0; i < nrealnocbers; i++) {
3034 		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
3035 		if (firsterr)
3036 			goto unwind;
3037 	}
3038 	if (stat_interval > 0) {
3039 		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
3040 						  stats_task);
3041 		if (firsterr)
3042 			goto unwind;
3043 	}
3044 	if (test_no_idle_hz && shuffle_interval > 0) {
3045 		firsterr = torture_shuffle_init(shuffle_interval * HZ);
3046 		if (firsterr)
3047 			goto unwind;
3048 	}
3049 	if (stutter < 0)
3050 		stutter = 0;
3051 	if (stutter) {
3052 		int t;
3053 
3054 		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
3055 		firsterr = torture_stutter_init(stutter * HZ, t);
3056 		if (firsterr)
3057 			goto unwind;
3058 	}
3059 	if (fqs_duration < 0)
3060 		fqs_duration = 0;
3061 	if (fqs_duration) {
3062 		/* Create the fqs thread */
3063 		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
3064 						  fqs_task);
3065 		if (firsterr)
3066 			goto unwind;
3067 	}
3068 	if (test_boost_interval < 1)
3069 		test_boost_interval = 1;
3070 	if (test_boost_duration < 2)
3071 		test_boost_duration = 2;
3072 	if (rcu_torture_can_boost()) {
3073 
3074 		boost_starttime = jiffies + test_boost_interval * HZ;
3075 
3076 		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
3077 					     rcutorture_booster_init,
3078 					     rcutorture_booster_cleanup);
3079 		if (firsterr < 0)
3080 			goto unwind;
3081 		rcutor_hp = firsterr;
3082 	}
3083 	shutdown_jiffies = jiffies + shutdown_secs * HZ;
3084 	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
3085 	if (firsterr)
3086 		goto unwind;
3087 	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
3088 				      rcutorture_sync);
3089 	if (firsterr)
3090 		goto unwind;
3091 	firsterr = rcu_torture_stall_init();
3092 	if (firsterr)
3093 		goto unwind;
3094 	firsterr = rcu_torture_fwd_prog_init();
3095 	if (firsterr)
3096 		goto unwind;
3097 	firsterr = rcu_torture_barrier_init();
3098 	if (firsterr)
3099 		goto unwind;
3100 	firsterr = rcu_torture_read_exit_init();
3101 	if (firsterr)
3102 		goto unwind;
3103 	if (object_debug)
3104 		rcu_test_debug_objects();
3105 	torture_init_end();
3106 	return 0;
3107 
3108 unwind:
3109 	torture_init_end();
3110 	rcu_torture_cleanup();
3111 	if (shutdown_secs) {
3112 		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
3113 		kernel_power_off();
3114 	}
3115 	return firsterr;
3116 }
3117 
3118 module_init(rcu_torture_init);
3119 module_exit(rcu_torture_cleanup);
3120