/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	  Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");

MODULE_ALIAS("rcutorture");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutorture."
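
/*
 * With the "rcutorture." prefix defined above, the module parameters
 * declared below are set on the kernel boot line for a built-in test,
 * or at modprobe time for a modular one.  For example (illustrative
 * values only):
 *
 *	rcutorture.torture_type=rcu_bh rcutorture.nreaders=8
 *
 * or:
 *
 *	modprobe rcutorture torture_type=srcu stat_interval=15
 */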

torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	     "Use normal (non-expedited) GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	     "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	     "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	     "Time to wait before starting stall (s).");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	     "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	     "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	     "Test support for tickless idle CPUs");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
		      rcu_torture_count) = { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
		      rcu_torture_batch) = { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes;
static struct list_head rcu_torture_removed;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
module_param(rcutorture_runnable, int, 0444);
MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();
	unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
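
/*
 * Sketch of an element's trip through the pool above (illustration
 * only, the real sequence lives in rcu_torture_writer() and
 * rcu_torture_cb() below):
 *
 *	rp = rcu_torture_alloc();		/+ claim a free element +/
 *	rp->rtort_pipe_count = 0;
 *	rcu_assign_pointer(rcu_torture_current, rp); /+ publish to readers +/
 *	cur_ops->deferred_free(old_rp);		/+ retire the old element;
 *						   its callback eventually
 *						   calls rcu_torture_free() +/
 */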

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(char *page);
	int irq_capable;
	int can_boost;
	const char *name;
};
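
/*
 * A new RCU flavor is plugged in by filling out one of these structures
 * and adding it to the torture_ops[] array in rcu_torture_init(), which
 * matches the torture_type module parameter against each ->name.  Only
 * the operations a flavor actually has need be supplied; optional hooks
 * such as ->fqs and ->stats may be left NULL.  A hypothetical minimal
 * flavor (names invented for illustration) might look like:
 *
 *	static struct rcu_torture_ops foo_ops = {
 *		.readlock	= foo_torture_read_lock,
 *		.readunlock	= foo_torture_read_unlock,
 *		.sync		= synchronize_foo,
 *		.name		= "foo"
 *	};
 */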

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct torture_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}
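
/*
 * Rough odds for the delays above, taking nrealreaders=16 as an
 * illustrative value: a 50-millisecond delay about once per
 * 16*2000*50 = 1,600,000 calls, a 200-microsecond delay about once
 * per 16*2*200 = 6,400 calls, and (under CONFIG_PREEMPT) a preemption
 * attempt about once per 16*20000 = 320,000 calls.  Scaling by
 * nrealreaders keeps the aggregate reader-induced delay roughly
 * constant as the reader count grows.
 */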

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else {
		cur_ops->deferred_free(rp);
	}
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.call		= call_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct torture_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static void srcu_torture_call(struct rcu_head *head,
			      void (*func)(struct rcu_head *head))
{
	call_srcu(&srcu_ctl, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(&srcu_ctl);
}

static void srcu_torture_stats(char *page)
{
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	page += sprintf(page, "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		page += sprintf(page, " %d(%lu,%lu)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	sprintf(page, "\n");
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.call		= call_rcu_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */
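
/*
 * Failure criterion used below: with the default test_boost_duration=4,
 * a callback posted by rcu_torture_boost() must be invoked within
 * 4*HZ - HZ/2 jiffies (3.5 seconds at the defaults); otherwise
 * n_rcu_torture_boost_failure is incremented on the assumption that
 * the RT-priority spinning starved the grace period.
 */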

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
	rbip->inflight = 0;
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!rbi.inflight) {
				smp_mb(); /* RCU core before ->inflight = 1. */
				rbi.inflight = 1;
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			cond_resched();
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	VERBOSE_TOROUT_STRING("rcu_torture_boost task stopping");
	torture_shutdown_absorb("rcu_torture_boost");
	while (!kthread_should_stop() || rbi.inflight)
		schedule_timeout_uninterruptible(1);
	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
	destroy_rcu_head_on_stack(&rbi.rcu);
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_fqs task stopping");
	torture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
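
/*
 * Back-of-the-envelope for the burst loop above: each burst issues
 * roughly fqs_duration/fqs_holdoff calls to cur_ops->fqs(), one every
 * fqs_holdoff microseconds, and bursts recur every fqs_stutter seconds.
 * For example (illustrative values), fqs_duration=100 with fqs_holdoff=5
 * gives about 20 force-quiescent-state calls per burst.
 */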

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool exp;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(torture_random(&rand) & 0x3ff);
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			if (gp_normal == gp_exp)
				exp = !!(torture_random(&rand) & 0x80);
			else
				exp = gp_exp;
			if (!exp) {
				cur_ops->deferred_free(old_rp);
			} else {
				cur_ops->exp_sync();
				list_add(&old_rp->rtort_free,
					 &rcu_torture_removed);
				list_for_each_entry_safe(rp, rp1,
							 &rcu_torture_removed,
							 rtort_free) {
					i = rp->rtort_pipe_count;
					if (i > RCU_TORTURE_PIPE_LEN)
						i = RCU_TORTURE_PIPE_LEN;
					atomic_inc(&rcu_torture_wcount[i]);
					if (++rp->rtort_pipe_count >=
					    RCU_TORTURE_PIPE_LEN) {
						rp->rtort_mbtest = 0;
						list_del(&rp->rtort_free);
						rcu_torture_free(rp);
					}
				}
			}
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		stutter_wait("rcu_torture_writer");
	} while (!torture_must_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_writer task stopping");
	torture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
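
/*
 * The "pipeline" above in a nutshell: once an element is replaced,
 * its ->rtort_pipe_count is incremented once per grace period (by
 * rcu_torture_cb(), which re-posts itself via ->deferred_free()),
 * and the element is returned to the pool when the count reaches
 * RCU_TORTURE_PIPE_LEN.  A reader that observes a count greater than
 * 1 has therefore watched a grace period complete during its read-side
 * critical section, which the readers below record in the
 * rcu_torture_count[] histogram and flag as an error.
 */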

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand) % 10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (torture_random(&rand) & 0x80)
				cur_ops->sync();
			else
				cur_ops->exp_sync();
		} else if (gp_normal) {
			cur_ops->sync();
		} else {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task stopping");
	torture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
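
/*
 * Note on the grace-period selection above: when gp_normal and gp_exp
 * agree (both false by default, or both set), the fake writers choose
 * randomly between cur_ops->sync() and cur_ops->exp_sync(), exercising
 * normal and expedited grace periods in the same run; setting exactly
 * one of the two parameters forces that primitive exclusively.  The
 * real writer kthread above applies the same rule when deciding between
 * ->deferred_free() and ->exp_sync().
 */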

static void rcutorture_trace_dump(void)
{
	static atomic_t beenhere = ATOMIC_INIT(0);

	if (atomic_read(&beenhere))
		return;
	if (atomic_xchg(&beenhere, 1) != 0)
		return;
	ftrace_dump(DUMP_ALL);
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	int completed_end;
	static DEFINE_TORTURE_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;
	unsigned long long ts;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed_end = cur_ops->completed();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
					  completed, completed_end);
		rcutorture_trace_dump();
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = completed_end - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}
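
/*
 * Why the rand_lock above, when rcu_torture_reader() below needs no
 * such lock: each reader kthread owns its torture_random_state, but
 * the timer handler's state is static, and timers posted by different
 * reader kthreads can fire concurrently on different CPUs.
 */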

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int completed_end;
	int idx;
	DEFINE_TORTURE_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;
	unsigned long long ts;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		ts = rcu_trace_clock_local();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		completed_end = cur_ops->completed();
		if (pipe_count > 1) {
			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
						  ts, completed, completed_end);
			rcutorture_trace_dump();
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = completed_end - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_reader task stopping");
	torture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static void
rcu_torture_printk(char *page)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page,
		       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free));
	page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_boost_ktrerror,
		       n_rcu_torture_boost_rterror);
	page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
		       n_rcu_torture_boost_failure,
		       n_rcu_torture_boosts,
		       n_rcu_torture_timers);
	page = torture_onoff_stats(page);
	page += sprintf(page, "barrier: %ld/%ld:%ld",
		       n_barrier_successes,
		       n_barrier_attempts,
		       n_rcu_torture_barrier_error);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		page += sprintf(page, "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	page += sprintf(page, "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		page += sprintf(page, " %ld", pipesummary[i]);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page, "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		page += sprintf(page, " %ld", batchsummary[i]);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page, "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		page += sprintf(page, " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	page += sprintf(page, "\n");
	if (cur_ops->stats)
		cur_ops->stats(page);
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int size = nr_cpu_ids * 200 + 8192;
	char *buf;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("rcu-torture: Out of memory, need: %d", size);
		return;
	}
	rcu_torture_printk(buf);
	pr_alert("%s", buf);
	kfree(buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_stats task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static void rcutorture_booster_cleanup(int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return;
	mutex_lock(&boost_mutex);
	VERBOSE_TOROUT_STRING("Stopping rcu_torture_boost task");
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	kthread_stop(t);
	boost_tasks[cpu] = NULL;
}

static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		pr_alert("rcu_torture_stall start.\n");
		rcu_read_lock();
		preempt_disable();
		while (ULONG_CMP_LT(get_seconds(), stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}
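
/*
 * Example invocation (illustrative values): booting with
 *
 *	rcutorture.stall_cpu=30 rcutorture.stall_cpu_holdoff=60
 *
 * spins the above kthread in an RCU read-side critical section with
 * preemption disabled for 30 seconds, starting one minute after boot.
 * A healthy RCU implementation is expected to respond with RCU CPU
 * stall warnings on the console.
 */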

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	int ret;

	if (stall_cpu <= 0)
		return 0;
	stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall");
	if (IS_ERR(stall_task)) {
		ret = PTR_ERR(stall_task);
		stall_task = NULL;
		return ret;
	}
	torture_shuffle_task_register(stall_task);
	return 0;
}

/* Clean up after the CPU-stall kthread, if one was spawned. */
static void rcu_torture_stall_cleanup(void)
{
	if (stall_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping rcu_torture_stall_task.");
	kthread_stop(stall_task);
	stall_task = NULL;
}
/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, 19);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    ACCESS_ONCE(barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		smp_mb(); /* ensure barrier_phase load before ->call(). */
		if (torture_must_stop())
			break;
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task stopping");
	torture_shutdown_absorb("rcu_torture_barrier_cbs");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(1);
	cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	return 0;
}

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		smp_mb(); /* Ensure barrier_phase after prior assignments. */
		barrier_phase = !barrier_phase;
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			WARN_ON_ONCE(1);
		}
		n_barrier_successes++;
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_barrier task stopping");
	torture_shutdown_absorb("rcu_torture_barrier");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(1);
	return 0;
}
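
/*
 * Barrier-test protocol implemented by the two kthread functions above,
 * once per attempt:
 *
 *	1. rcu_torture_barrier() flips barrier_phase and wakes all
 *	   n_barrier_cbs instances of rcu_torture_barrier_cbs().
 *	2. Each instance posts one callback via cur_ops->call() and
 *	   decrements barrier_cbs_count.
 *	3. When barrier_cbs_count hits zero, the coordinator invokes
 *	   cur_ops->cb_barrier() (e.g., rcu_barrier()).
 *	4. If barrier_cbs_invoked != n_barrier_cbs afterward, some
 *	   callback escaped the barrier, and the failure is recorded
 *	   in n_rcu_torture_barrier_error.
 */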

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs == 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
			GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs,
						   (void *)(long)i,
						   "rcu_torture_barrier_cbs");
		if (IS_ERR(barrier_cbs_tasks[i])) {
			ret = PTR_ERR(barrier_cbs_tasks[i]);
			VERBOSE_TOROUT_ERRSTRING("Failed to create rcu_torture_barrier_cbs");
			barrier_cbs_tasks[i] = NULL;
			return ret;
		}
		torture_shuffle_task_register(barrier_cbs_tasks[i]);
	}
	barrier_task = kthread_run(rcu_torture_barrier, NULL,
				   "rcu_torture_barrier");
	if (IS_ERR(barrier_task)) {
		ret = PTR_ERR(barrier_task);
		VERBOSE_TOROUT_ERRSTRING("Failed to create rcu_torture_barrier");
		barrier_task = NULL;
		return ret;	/* Propagate the error rather than masking it. */
	}
	torture_shuffle_task_register(barrier_task);
	return 0;
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	if (barrier_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_barrier task");
		kthread_stop(barrier_task);
		barrier_task = NULL;
	}
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++) {
			if (barrier_cbs_tasks[i] != NULL) {
				VERBOSE_TOROUT_STRING("Stopping rcu_torture_barrier_cbs task");
				kthread_stop(barrier_cbs_tasks[i]);
				barrier_cbs_tasks[i] = NULL;
			}
		}
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};

static void
rcu_torture_cleanup(void)
{
	int i;

	rcutorture_record_test_transition();
	if (torture_cleanup()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}

	rcu_torture_barrier_cleanup();
	rcu_torture_stall_cleanup();
	torture_stutter_cleanup();

	if (writer_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_TOROUT_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_TOROUT_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}
	torture_shutdown_cleanup();

	/* Wait for all RCU callbacks to fire.  */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("rcutorture: duplicated callback was invoked.\n");
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	int retval;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
	};

	torture_init_begin(torture_type, verbose, &rcutorture_runnable);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		torture_init_end();
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_TOROUT_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_create(rcu_torture_writer, NULL,
				     "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_TOROUT_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	torture_shuffle_task_register(writer_task);
	wake_up_process(writer_task);
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_TOROUT_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_TOROUT_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
		torture_shuffle_task_register(fakewriter_tasks[i]);
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_TOROUT_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_TOROUT_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
		torture_shuffle_task_register(reader_tasks[i]);
	}
	if (stat_interval > 0) {
		VERBOSE_TOROUT_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					"rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_TOROUT_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
		torture_shuffle_task_register(stats_task);
	}
	if (test_no_idle_hz) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		firsterr = torture_stutter_init(stutter * HZ);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_TOROUT_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
		torture_shuffle_task_register(fqs_task);
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {

		boost_starttime = jiffies + test_boost_interval * HZ;
		register_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i) {
			if (cpu_is_offline(i))
				continue;  /* Heuristic: CPU can go offline. */
			retval = rcutorture_booster_init(i);
			if (retval < 0) {
				firsterr = retval;
				goto unwind;
			}
		}
	}
	i = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (i != 0) {
		firsterr = i;
		goto unwind;
	}
	i = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
	if (i != 0) {
		firsterr = i;
		goto unwind;
	}
	i = rcu_torture_stall_init();
	if (i != 0) {
		firsterr = i;
		goto unwind;
	}
	retval = rcu_torture_barrier_init();
	if (retval != 0) {
		firsterr = retval;
		goto unwind;
	}
	if (object_debug)
		rcu_test_debug_objects();
	rcutorture_record_test_transition();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);