// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <joel@joelfernandes.org>

#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"

#define SCALE_FLAG "-ref-scale: "

#define SCALEOUT(s, x...) \
	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

#define VERBOSE_SCALEOUT(s, x...) \
	do { \
		if (verbose) \
			pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
	} while (0)

static atomic_t verbose_batch_ctr;

#define VERBOSE_SCALEOUT_BATCH(s, x...)							\
do {											\
	if (verbose &&									\
	    (verbose_batched <= 0 ||							\
	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) {		\
		schedule_timeout_uninterruptible(1);					\
		pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x);			\
	}										\
} while (0)

#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of test (rcu, srcu, rcu-trace, rcu-tasks, refcnt, rwlock, rwsem, lock, lock-irq, acqrel, clock).");

torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of loops per experiment; all readers execute operations concurrently.
torture_param(long, loops, 10000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of runs.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");
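
// For example, a module-based comparison of SRCU readers with a
// 1500-nanosecond read-side delay might use something like the
// following (illustrative values only):
//
//	modprobe refscale scale_type=srcu nreaders=8 nruns=10 readdelay=1500
//
// When built in, the same parameters can instead be supplied on the
// kernel command line, for example, refscale.scale_type=srcu.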

#ifdef MODULE
# define REFSCALE_SHUTDOWN 0
#else
# define REFSCALE_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");

struct reader_task {
	struct task_struct *task;
	int start_reader;
	wait_queue_head_t wq;
	u64 last_duration_ns;
};

static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Used to wait for all threads to start.
static atomic_t n_init;
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;

// Track which experiment is currently running.
static int exp_idx;

// Operations vector for selecting different types of tests.
struct ref_scale_ops {
	void (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	void (*delaysection)(const int nloops, const int udl, const int ndl);
	const char *name;
};
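
// Each mechanism under test supplies one of these structures.  The
// init() and cleanup() hooks are optional.  readsection() runs
// nloops + 1 empty read-side critical sections (the loops count down
// to and include zero), and delaysection() does the same with a
// udl-microsecond plus ndl-nanosecond delay inside each section.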

static struct ref_scale_ops *cur_ops;

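// Delay for udl microseconds followed by ndl nanoseconds.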
static void un_delay(const int udl, const int ndl)
{
	if (udl)
		udelay(udl);
	if (ndl)
		ndelay(ndl);
}

static void ref_rcu_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}

static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		un_delay(udl, ndl);
		rcu_read_unlock();
	}
}

static void rcu_sync_scale_init(void)
{
}

static struct ref_scale_ops rcu_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= ref_rcu_read_section,
	.delaysection	= ref_rcu_delay_section,
	.name		= "rcu"
};

// Definitions for SRCU ref scale testing.
DEFINE_STATIC_SRCU(srcu_refctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_scale;

static void srcu_ref_scale_read_section(const int nloops)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		un_delay(udl, ndl);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}

static struct ref_scale_ops srcu_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= srcu_ref_scale_read_section,
	.delaysection	= srcu_ref_scale_delay_section,
	.name		= "srcu"
};

// Definitions for RCU Tasks ref scale testing: Empty read markers.
// These definitions also work for RCU Rude readers.
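// Neither flavor has explicit read-side markers, so the read-side
// functions below are intentionally empty loops.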
static void rcu_tasks_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--)
		continue;
}

static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--)
		un_delay(udl, ndl);
}

static struct ref_scale_ops rcu_tasks_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= rcu_tasks_ref_scale_read_section,
	.delaysection	= rcu_tasks_ref_scale_delay_section,
	.name		= "rcu-tasks"
};

// Definitions for RCU Tasks Trace ref scale testing.
static void rcu_trace_ref_scale_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		rcu_read_unlock_trace();
	}
}

static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		un_delay(udl, ndl);
		rcu_read_unlock_trace();
	}
}

static struct ref_scale_ops rcu_trace_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= rcu_trace_ref_scale_read_section,
	.delaysection	= rcu_trace_ref_scale_delay_section,
	.name		= "rcu-trace"
};

// Definitions for reference count
static atomic_t refcnt;

static void ref_refcnt_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		atomic_dec(&refcnt);
	}
}

static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		un_delay(udl, ndl);
		atomic_dec(&refcnt);
	}
}

static struct ref_scale_ops refcnt_ops = {
	.init		= rcu_sync_scale_init,
	.readsection	= ref_refcnt_section,
	.delaysection	= ref_refcnt_delay_section,
	.name		= "refcnt"
};

// Definitions for rwlock
static rwlock_t test_rwlock;

static void ref_rwlock_init(void)
{
	rwlock_init(&test_rwlock);
}

static void ref_rwlock_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		read_unlock(&test_rwlock);
	}
}

static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		un_delay(udl, ndl);
		read_unlock(&test_rwlock);
	}
}

static struct ref_scale_ops rwlock_ops = {
	.init		= ref_rwlock_init,
	.readsection	= ref_rwlock_section,
	.delaysection	= ref_rwlock_delay_section,
	.name		= "rwlock"
};

// Definitions for rwsem
static struct rw_semaphore test_rwsem;

static void ref_rwsem_init(void)
{
	init_rwsem(&test_rwsem);
}

static void ref_rwsem_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		up_read(&test_rwsem);
	}
}

static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		un_delay(udl, ndl);
		up_read(&test_rwsem);
	}
}

static struct ref_scale_ops rwsem_ops = {
	.init		= ref_rwsem_init,
	.readsection	= ref_rwsem_section,
	.delaysection	= ref_rwsem_delay_section,
	.name		= "rwsem"
};

// Definitions for global spinlock
static DEFINE_SPINLOCK(test_lock);

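// Preemption is disabled across the whole measurement loop (not just
// within each critical section) so that preemption-induced delays
// between iterations are not charged to the locking primitives.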
static void ref_lock_section(const int nloops)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock(&test_lock);
		spin_unlock(&test_lock);
	}
	preempt_enable();
}

static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock(&test_lock);
		un_delay(udl, ndl);
		spin_unlock(&test_lock);
	}
	preempt_enable();
}

static struct ref_scale_ops lock_ops = {
	.readsection	= ref_lock_section,
	.delaysection	= ref_lock_delay_section,
	.name		= "lock"
};

// Definitions for global irq-save spinlock

static void ref_lock_irq_section(const int nloops)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock_irqsave(&test_lock, flags);
		spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}

static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long flags;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		spin_lock_irqsave(&test_lock, flags);
		un_delay(udl, ndl);
		spin_unlock_irqrestore(&test_lock, flags);
	}
	preempt_enable();
}

static struct ref_scale_ops lock_irq_ops = {
	.readsection	= ref_lock_irq_section,
	.delaysection	= ref_lock_irq_delay_section,
	.name		= "lock-irq"
};

// Definitions for acquire-release.
static DEFINE_PER_CPU(unsigned long, test_acqrel);

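// Each reader does a load-acquire from and a store-release to its own
// CPU's counter, measuring the cost of a bare acquire/release pair.
// Because the counter is per-CPU, there is no cache-line bouncing
// between readers.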
static void ref_acqrel_section(const int nloops)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}

static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
{
	unsigned long x;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x = smp_load_acquire(this_cpu_ptr(&test_acqrel));
		un_delay(udl, ndl);
		smp_store_release(this_cpu_ptr(&test_acqrel), x + 1);
	}
	preempt_enable();
}

static struct ref_scale_ops acqrel_ops = {
	.readsection	= ref_acqrel_section,
	.delaysection	= ref_acqrel_delay_section,
	.name		= "acqrel"
};

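// Definitions for clock-read testing.  The accumulated clock readings
// are published through this volatile variable so that the compiler
// cannot optimize the accumulation away.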
static volatile u64 stopopts;

static void ref_clock_section(const int nloops)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--)
		x += ktime_get_real_fast_ns();
	preempt_enable();
	stopopts = x;
}

static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
{
	u64 x = 0;
	int i;

	preempt_disable();
	for (i = nloops; i >= 0; i--) {
		x += ktime_get_real_fast_ns();
		un_delay(udl, ndl);
	}
	preempt_enable();
	stopopts = x;
}

static struct ref_scale_ops clock_ops = {
	.readsection	= ref_clock_section,
	.delaysection	= ref_clock_delay_section,
	.name		= "clock"
};

static void rcu_scale_one_reader(void)
{
	if (readdelay <= 0)
		cur_ops->readsection(loops);
	else
		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}
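
// Note that readdelay is split into microsecond and nanosecond parts,
// so that, for example, readdelay=1500 invokes delaysection() with
// udl=1 and ndl=500, giving a 1.5-microsecond delay per section.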

// Reader kthread.  Repeatedly does empty read-side critical
// sections, minimizing update-side interference.
static int
ref_scale_reader(void *arg)
{
	unsigned long flags;
	long me = (long)arg;
	struct reader_task *rt = &(reader_tasks[me]);
	u64 start;
	s64 duration;

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: task started", me);
	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids)));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_init);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);
repeat:
	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: waiting to start next experiment on cpu %d", me, raw_smp_processor_id());

	// Wait for signal that this reader can start.
	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
			   torture_must_stop());

	if (torture_must_stop())
		goto end;

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(raw_smp_processor_id() != me % nr_cpu_ids);

	WRITE_ONCE(rt->start_reader, 0);
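	// Sense barrier: readers spin here until the last one has checked
	// in, aligning the start of their measurement intervals.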
	if (atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started))
			cpu_relax();

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d started", me, exp_idx);

	// To reduce noise, do an initial cache-warming invocation, check
	// in, and then keep warming until everyone has checked in.
	rcu_scale_one_reader();
	if (atomic_dec_return(&n_warmedup))
		while (atomic_read_acquire(&n_warmedup))
			rcu_scale_one_reader();
	// Keep interrupts disabled across the measurement.  This also has
	// the effect of preventing entries into rcu_read_unlock()'s slow path.
	local_irq_save(flags);
	start = ktime_get_mono_fast_ns();

	rcu_scale_one_reader();

	duration = ktime_get_mono_fast_ns() - start;
	local_irq_restore(flags);

	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;
	// To reduce runtime-skew noise, do maintain-load invocations until
	// everyone is done.
	if (atomic_dec_return(&n_cooleddown))
		while (atomic_read_acquire(&n_cooleddown))
			rcu_scale_one_reader();

	if (atomic_dec_and_test(&nreaders_exp))
		wake_up(&main_wq);

	VERBOSE_SCALEOUT_BATCH("ref_scale_reader %ld: experiment %d ended, (readers remaining=%d)",
				me, exp_idx, atomic_read(&nreaders_exp));

	if (!torture_must_stop())
		goto repeat;
end:
	torture_kthread_stopping("ref_scale_reader");
	return 0;
}

static void reset_readers(void)
{
	int i;
	struct reader_task *rt;

	for (i = 0; i < nreaders; i++) {
		rt = &(reader_tasks[i]);

		rt->last_duration_ns = 0;
	}
}

// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
	int i;
	struct reader_task *rt;
	char buf1[64];
	char *buf;
	u64 sum = 0;

	buf = kmalloc(800 + 64, GFP_KERNEL);
	if (!buf)
		return 0;
	buf[0] = 0;
	sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
		exp_idx);

	for (i = 0; i < n && !torture_must_stop(); i++) {
		rt = &(reader_tasks[i]);
		sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);

		if (i % 5 == 0)
			strcat(buf, "\n");
		if (strlen(buf) >= 800) {
			pr_alert("%s", buf);
			buf[0] = 0;
		}
		strcat(buf, buf1);

		sum += rt->last_duration_ns;
	}
	pr_alert("%s\n", buf);

	kfree(buf);
	return sum;
}
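
// The resulting console output might look like this, with a line break
// every five readers (illustrative values only):
//
//	Experiment #1 (Format: <THREAD-NUM>:<Total loop time in ns>)
//	0: 9234108	1: 9345876	2: 9300118	3: 9312873	4: 9299413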

// main_func() is the test orchestrator: for each experiment it wakes up
// all the readers, waits for them to finish, and collects their measured
// durations.  Every experiment runs all nreaders readers concurrently,
// and after nruns experiments the per-loop averages are printed.
static int main_func(void *arg)
{
	int exp, r;
	char buf1[64];
	char *buf;
	u64 *result_avg;

	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	VERBOSE_SCALEOUT("main_func task started");
	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
	buf = kzalloc(800 + 64, GFP_KERNEL);
	if (!result_avg || !buf) {
		SCALEOUT_ERRSTRING("out of memory");
		goto oom_exit;
	}
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	// Wait for all threads to start.
	atomic_inc(&n_init);
	while (atomic_read(&n_init) < nreaders + 1)
		schedule_timeout_uninterruptible(1);

	// Start all readers for each experiment.
	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
		if (torture_must_stop())
			goto end;

		reset_readers();
		atomic_set(&nreaders_exp, nreaders);
		atomic_set(&n_started, nreaders);
		atomic_set(&n_warmedup, nreaders);
		atomic_set(&n_cooleddown, nreaders);

		exp_idx = exp;

		for (r = 0; r < nreaders; r++) {
			smp_store_release(&reader_tasks[r].start_reader, 1);
			wake_up(&reader_tasks[r].wq);
		}

		VERBOSE_SCALEOUT("main_func: experiment started, waiting for %d readers",
				nreaders);

		wait_event(main_wq,
			   !atomic_read(&nreaders_exp) || torture_must_stop());

		VERBOSE_SCALEOUT("main_func: experiment ended");

		if (torture_must_stop())
			goto end;

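		// Average number of nanoseconds per loop per reader, scaled
		// by 1000 to retain three decimal places of precision; the
		// value is decoded by div_u64_rem() in the loop below.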
		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
	}

	// Print the average of all experiments
	SCALEOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

	pr_alert("Runs\tTime(ns)\n");
	for (exp = 0; exp < nruns; exp++) {
		u64 avg;
		u32 rem;

		avg = div_u64_rem(result_avg[exp], 1000, &rem);
		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
		strcat(buf, buf1);
		if (strlen(buf) >= 800) {
			pr_alert("%s", buf);
			buf[0] = 0;
		}
	}

	pr_alert("%s", buf);

oom_exit:
	// This will shut down everything, including us.
	if (shutdown) {
		shutdown_start = 1;
		wake_up(&shutdown_wq);
	}

	// Wait for torture to stop us.
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);

end:
	torture_kthread_stopping("main_func");
	kfree(result_avg);
	kfree(buf);
	return 0;
}

static void
ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s:  verbose=%d shutdown=%d holdoff=%d loops=%ld nreaders=%d nruns=%d readdelay=%d\n", scale_type, tag,
		 verbose, shutdown, holdoff, loops, nreaders, nruns, readdelay);
}

static void
ref_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nreaders; i++)
			torture_stop_kthread("ref_scale_reader",
					     reader_tasks[i].task);
	}
	kfree(reader_tasks);

	torture_stop_kthread("main_task", main_task);
	kfree(main_task);

	// Do scale-type-specific cleanup operations.
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

// Shutdown kthread.  Just waits to be awakened, then shuts down the system.
static int
ref_scale_shutdown(void *arg)
{
	wait_event(shutdown_wq, shutdown_start);

	smp_mb(); // Wake before output.
	ref_scale_cleanup();
	kernel_power_off();

	return -EINVAL;
}

static int __init
ref_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct ref_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops, &refcnt_ops, &rwlock_ops,
		&rwsem_ops, &lock_ops, &lock_irq_ops, &acqrel_ops, &clock_ops,
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	ref_scale_print_module_parms(cur_ops, "Start of test");

	// Shutdown task
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(ref_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	// Reader tasks (default to ~75% of online CPUs).
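	// ((x >> 1) + (x >> 2) is 3x/4, rounded down.)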
	if (nreaders < 0)
		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2);
	if (WARN_ONCE(loops <= 0, "%s: loops = %ld, adjusted to 1\n", __func__, loops))
		loops = 1;
	if (WARN_ONCE(nreaders <= 0, "%s: nreaders = %d, adjusted to 1\n", __func__, nreaders))
		nreaders = 1;
	if (WARN_ONCE(nruns <= 0, "%s: nruns = %d, adjusted to 1\n", __func__, nruns))
		nruns = 1;
	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (!reader_tasks) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCALEOUT("Starting %d reader threads", nreaders);

	for (i = 0; i < nreaders; i++) {
		firsterr = torture_create_kthread(ref_scale_reader, (void *)i,
						  reader_tasks[i].task);
		if (torture_init_error(firsterr))
			goto unwind;

		init_waitqueue_head(&(reader_tasks[i].wq));
	}

	// Main Task
	init_waitqueue_head(&main_wq);
	firsterr = torture_create_kthread(main_func, NULL, main_task);
	if (torture_init_error(firsterr))
		goto unwind;

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	ref_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(ref_scale_init);
module_exit(ref_scale_cleanup);