xref: /openbmc/linux/kernel/locking/locktorture.c (revision e7bae9bb)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
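
/*
 * Example invocation (hypothetical command line; assumes this file is
 * built as the "locktorture" module):
 *
 *	modprobe locktorture torture_type=rwsem_lock \
 *		nwriters_stress=4 nreaders_stress=8 stat_interval=30
 *
 * torture_type must match one of the ops names registered below.
 */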

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(void)
{
	  /* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "lock_busted"
};
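
/*
 * Because the "lock" above never actually excludes anybody, selecting
 * torture_type=lock_busted should quickly trip the duplicate-acquisition
 * checks in lock_torture_writer() (given more than one writer).  This is
 * a convenient way to verify that the test harness itself can detect
 * locking failures.
 */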

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
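	/*
	 * Stashing the irq flags in the shared ops structure is safe
	 * only because the spinlock is still held when writeunlock
	 * restores them, so at most one task touches ->flags at a time.
	 */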
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock       = torture_rwlock_read_lock,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock       = torture_rwlock_read_lock_irq,
	.read_delay     = torture_rwlock_read_delay,
	.readunlock     = torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
static DEFINE_WD_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

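	/*
	 * Attempt to acquire all three mutexes under a single acquire
	 * context.  An -EDEADLK return tells us to back off: release
	 * everything already held, take the contended mutex with
	 * ww_mutex_lock_slow(), move it to the front of the list, and
	 * then retry the remaining mutexes in the new order.
	 */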
	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations.  When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then be restored to its original priority, and so forth.
		 *
		 * When @trsp is NULL, we want to force-reset the task's
		 * priority in order to stop the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost     = torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock       = torture_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

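/*
 * The ops->init hook returns void, so there is no way to report an
 * allocation failure back to lock_torture_init(); hence the BUG_ON()
 * in torture_percpu_rwsem_init() treats percpu_init_rwsem() failure
 * as fatal.
 */
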
static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock       = torture_percpu_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
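		/*
		 * lock_is_write_held is set and cleared only while the
		 * write lock is held, so finding it already set here means
		 * that another writer is inside its critical section, that
		 * is, the lock under test failed to provide exclusion.
		 */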
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = true;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = false;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
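		/*
		 * Readers may legitimately overlap with one another, so
		 * only a concurrently held write lock counts as a failure
		 * of the primitive under test.
		 */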
		lock_is_read_held = true;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = false;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
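	/*
	 * Flag a suspicious imbalance ("???") when the luckiest task
	 * acquired the lock more than twice as often as the unluckiest
	 * one, unless CPU hotplug was running and can explain the skew.
	 */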
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

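	/*
	 * The buffer size is a rough upper bound: about 200 bytes per
	 * stress task plus generous slack, rather than an exact worst
	 * case.
	 */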
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module. As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};
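
	/*
	 * Any new lock flavor needs both a lock_torture_ops structure
	 * above and an entry in this table; otherwise its torture_type
	 * string will be rejected by the matching loop below.
	 */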

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 && nreaders_stress == 0) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute the threads evenly between
			 * readers and writers.  We still run the same total
			 * number of threads as in the writer-only case.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			lock_is_read_held = false;
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (firsterr)
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers a
	 * slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even to let the user choose
	 * the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);
1046