xref: /openbmc/linux/kernel/locking/locktorture.c (revision 5d248bb3)
15a4eb3cbSPaul E. McKenney // SPDX-License-Identifier: GPL-2.0+
20af3fe1eSPaul E. McKenney /*
30af3fe1eSPaul E. McKenney  * Module-based torture test facility for locking
40af3fe1eSPaul E. McKenney  *
50af3fe1eSPaul E. McKenney  * Copyright (C) IBM Corporation, 2014
60af3fe1eSPaul E. McKenney  *
75a4eb3cbSPaul E. McKenney  * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
8095777c4SDavidlohr Bueso  *          Davidlohr Bueso <dave@stgolabs.net>
90af3fe1eSPaul E. McKenney  *	Based on kernel/rcu/torture.c.
100af3fe1eSPaul E. McKenney  */
1160500037SPaul E. McKenney 
1260500037SPaul E. McKenney #define pr_fmt(fmt) fmt
1360500037SPaul E. McKenney 
140af3fe1eSPaul E. McKenney #include <linux/kernel.h>
150af3fe1eSPaul E. McKenney #include <linux/module.h>
160af3fe1eSPaul E. McKenney #include <linux/kthread.h>
17095777c4SDavidlohr Bueso #include <linux/sched/rt.h>
180af3fe1eSPaul E. McKenney #include <linux/spinlock.h>
1942ddc75dSDavidlohr Bueso #include <linux/mutex.h>
20c98fed9fSDavidlohr Bueso #include <linux/rwsem.h>
210af3fe1eSPaul E. McKenney #include <linux/smp.h>
220af3fe1eSPaul E. McKenney #include <linux/interrupt.h>
230af3fe1eSPaul E. McKenney #include <linux/sched.h>
24ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h>
25037741a6SIngo Molnar #include <linux/rtmutex.h>
260af3fe1eSPaul E. McKenney #include <linux/atomic.h>
270af3fe1eSPaul E. McKenney #include <linux/moduleparam.h>
280af3fe1eSPaul E. McKenney #include <linux/delay.h>
290af3fe1eSPaul E. McKenney #include <linux/slab.h>
300af3fe1eSPaul E. McKenney #include <linux/torture.h>
316b74fa0aSPaul E. McKenney #include <linux/reboot.h>
320af3fe1eSPaul E. McKenney 
330af3fe1eSPaul E. McKenney MODULE_LICENSE("GPL");
345a4eb3cbSPaul E. McKenney MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");
350af3fe1eSPaul E. McKenney 
36f8619c30SPaul E. McKenney torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
37f8619c30SPaul E. McKenney torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
38f8619c30SPaul E. McKenney torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
390af3fe1eSPaul E. McKenney torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
40f8619c30SPaul E. McKenney torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
41f8619c30SPaul E. McKenney torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
420af3fe1eSPaul E. McKenney torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
43f8619c30SPaul E. McKenney torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
440af3fe1eSPaul E. McKenney torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
45e01f3a1aSJoel Fernandes (Google) torture_param(int, rt_boost, 2,
46e01f3a1aSJoel Fernandes (Google) 		   "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
47c24501b2SJoel Fernandes (Google) torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
48*5d248bb3SDietmar Eggemann torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
49f8619c30SPaul E. McKenney torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
50b6334320SJohn Stultz torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
51b6334320SJohn Stultz /* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
52b6334320SJohn Stultz #define MAX_NESTED_LOCKS 8
530af3fe1eSPaul E. McKenney 
545d65cf6aSZqiang static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
550af3fe1eSPaul E. McKenney module_param(torture_type, charp, 0444);
560af3fe1eSPaul E. McKenney MODULE_PARM_DESC(torture_type,
5742ddc75dSDavidlohr Bueso 		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
580af3fe1eSPaul E. McKenney 
590af3fe1eSPaul E. McKenney static struct task_struct *stats_task;
600af3fe1eSPaul E. McKenney static struct task_struct **writer_tasks;
614f6332c1SDavidlohr Bueso static struct task_struct **reader_tasks;
620af3fe1eSPaul E. McKenney 
630af3fe1eSPaul E. McKenney static bool lock_is_write_held;
64af5f6e27SPaul E. McKenney static atomic_t lock_is_read_held;
653480d677SPaul E. McKenney static unsigned long last_lock_release;
660af3fe1eSPaul E. McKenney 
/* Per-kthread lock statistics: failed trylock attempts and successful acquisitions. */
671e6757a9SDavidlohr Bueso struct lock_stress_stats {
681e6757a9SDavidlohr Bueso 	long n_lock_fail;
691e6757a9SDavidlohr Bueso 	long n_lock_acquired;
700af3fe1eSPaul E. McKenney };
710af3fe1eSPaul E. McKenney 
720af3fe1eSPaul E. McKenney /* Forward reference. */
730af3fe1eSPaul E. McKenney static void lock_torture_cleanup(void);
740af3fe1eSPaul E. McKenney 
750af3fe1eSPaul E. McKenney /*
760af3fe1eSPaul E. McKenney  * Operations vector for selecting different types of tests.
770af3fe1eSPaul E. McKenney  */
/*
 * One ops vector per lock flavor under test.  Read-side hooks (readlock,
 * read_delay, readunlock) are NULL for exclusive-only primitives; nested_lock/
 * nested_unlock are only provided by flavors supporting the nested_locks
 * module parameter.  @tid is the torture-thread index, @lockset a bitmask of
 * nested locks to take.
 */
780af3fe1eSPaul E. McKenney struct lock_torture_ops {
790af3fe1eSPaul E. McKenney 	void (*init)(void);
800d720287SHou Tao 	void (*exit)(void);
81b6334320SJohn Stultz 	int (*nested_lock)(int tid, u32 lockset);
82aa3a5f31SWaiman Long 	int (*writelock)(int tid);
830af3fe1eSPaul E. McKenney 	void (*write_delay)(struct torture_random_state *trsp);
84095777c4SDavidlohr Bueso 	void (*task_boost)(struct torture_random_state *trsp);
85aa3a5f31SWaiman Long 	void (*writeunlock)(int tid);
86b6334320SJohn Stultz 	void (*nested_unlock)(int tid, u32 lockset);
87aa3a5f31SWaiman Long 	int (*readlock)(int tid);
884f6332c1SDavidlohr Bueso 	void (*read_delay)(struct torture_random_state *trsp);
89aa3a5f31SWaiman Long 	void (*readunlock)(int tid);
90095777c4SDavidlohr Bueso 
91095777c4SDavidlohr Bueso 	unsigned long flags; /* for irq spinlocks */
920af3fe1eSPaul E. McKenney 	const char *name;
930af3fe1eSPaul E. McKenney };
940af3fe1eSPaul E. McKenney 
/*
 * Global test context: actual thread counts, the selected ops vector, and
 * per-thread statistics arrays.  Filled in by the module init path.
 */
95630952c2SDavidlohr Bueso struct lock_torture_cxt {
96630952c2SDavidlohr Bueso 	int nrealwriters_stress;
97630952c2SDavidlohr Bueso 	int nrealreaders_stress;
98630952c2SDavidlohr Bueso 	bool debug_lock;
990d720287SHou Tao 	bool init_called;
100630952c2SDavidlohr Bueso 	atomic_t n_lock_torture_errors;
101630952c2SDavidlohr Bueso 	struct lock_torture_ops *cur_ops;
102630952c2SDavidlohr Bueso 	struct lock_stress_stats *lwsa; /* writer statistics */
103630952c2SDavidlohr Bueso 	struct lock_stress_stats *lrsa; /* reader statistics */
104630952c2SDavidlohr Bueso };
1050d720287SHou Tao static struct lock_torture_cxt cxt = { 0, 0, false, false,
106630952c2SDavidlohr Bueso 				       ATOMIC_INIT(0),
107630952c2SDavidlohr Bueso 				       NULL, NULL};
1080af3fe1eSPaul E. McKenney /*
1090af3fe1eSPaul E. McKenney  * Definitions for lock torture testing.
1100af3fe1eSPaul E. McKenney  */
1110af3fe1eSPaul E. McKenney 
/* Deliberately broken "lock": reports success without acquiring anything. */
torture_lock_busted_write_lock(int tid __maybe_unused)112aa3a5f31SWaiman Long static int torture_lock_busted_write_lock(int tid __maybe_unused)
113e086481bSPaul E. McKenney {
114e086481bSPaul E. McKenney 	return 0;  /* BUGGY, do not use in real life!!! */
115e086481bSPaul E. McKenney }
116e086481bSPaul E. McKenney 
/*
 * Critical-section delay for the busted lock: rarely holds for a long
 * mdelay() (period scaled by long_hold and writer count) and rarely
 * offers a preemption point.  long_hold == 0 disables the long delay.
 */
torture_lock_busted_write_delay(struct torture_random_state * trsp)117e086481bSPaul E. McKenney static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
118e086481bSPaul E. McKenney {
119f8619c30SPaul E. McKenney 	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
120e086481bSPaul E. McKenney 
121e086481bSPaul E. McKenney 	/* We want a long delay occasionally to force massive contention.  */
122e086481bSPaul E. McKenney 	if (!(torture_random(trsp) %
12361d49d2fSPaul E. McKenney 	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
12461d49d2fSPaul E. McKenney 		mdelay(longdelay_ms);
125630952c2SDavidlohr Bueso 	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
126cc1321c9SPaul E. McKenney 		torture_preempt_schedule();  /* Allow test to be preempted. */
127e086481bSPaul E. McKenney }
128e086481bSPaul E. McKenney 
/* Matching no-op "unlock" for the deliberately broken lock. */
torture_lock_busted_write_unlock(int tid __maybe_unused)129aa3a5f31SWaiman Long static void torture_lock_busted_write_unlock(int tid __maybe_unused)
130e086481bSPaul E. McKenney {
131e086481bSPaul E. McKenney 	  /* BUGGY, do not use in real life!!! */
132e086481bSPaul E. McKenney }
133e086481bSPaul E. McKenney 
/*
 * Randomly toggle the current task between SCHED_FIFO and SCHED_NORMAL.
 * A non-RT task is boosted roughly once every nrealwriters * rt_boost_factor
 * operations; an already-boosted task is de-boosted at half that rate, so it
 * stays boosted for a while before being restored.  A NULL @trsp forces the
 * de-boost path unconditionally (used on kthread shutdown).
 */
__torture_rt_boost(struct torture_random_state * trsp)134e01f3a1aSJoel Fernandes (Google) static void __torture_rt_boost(struct torture_random_state *trsp)
135095777c4SDavidlohr Bueso {
136c24501b2SJoel Fernandes (Google) 	const unsigned int factor = rt_boost_factor;
137e01f3a1aSJoel Fernandes (Google) 
138e01f3a1aSJoel Fernandes (Google) 	if (!rt_task(current)) {
139e01f3a1aSJoel Fernandes (Google) 		/*
140c24501b2SJoel Fernandes (Google) 		 * Boost priority once every rt_boost_factor operations. When
141c24501b2SJoel Fernandes (Google) 		 * the task tries to take the lock, the rtmutex will account
142e01f3a1aSJoel Fernandes (Google) 		 * for the new priority, and do any corresponding pi-dance.
143e01f3a1aSJoel Fernandes (Google) 		 */
144e01f3a1aSJoel Fernandes (Google) 		if (trsp && !(torture_random(trsp) %
145e01f3a1aSJoel Fernandes (Google) 			      (cxt.nrealwriters_stress * factor))) {
146e01f3a1aSJoel Fernandes (Google) 			sched_set_fifo(current);
147e01f3a1aSJoel Fernandes (Google) 		} else /* common case, do nothing */
148e01f3a1aSJoel Fernandes (Google) 			return;
149e01f3a1aSJoel Fernandes (Google) 	} else {
150e01f3a1aSJoel Fernandes (Google) 		/*
151c24501b2SJoel Fernandes (Google) 		 * The task will remain boosted for another 10 * rt_boost_factor
152c24501b2SJoel Fernandes (Google) 		 * operations, then restored back to its original prio, and so
153c24501b2SJoel Fernandes (Google) 		 * forth.
154e01f3a1aSJoel Fernandes (Google) 		 *
155e01f3a1aSJoel Fernandes (Google) 		 * When @trsp is nil, we want to force-reset the task for
156e01f3a1aSJoel Fernandes (Google) 		 * stopping the kthread.
157e01f3a1aSJoel Fernandes (Google) 		 */
158e01f3a1aSJoel Fernandes (Google) 		if (!trsp || !(torture_random(trsp) %
159e01f3a1aSJoel Fernandes (Google) 			       (cxt.nrealwriters_stress * factor * 2))) {
160e01f3a1aSJoel Fernandes (Google) 			sched_set_normal(current, 0);
161e01f3a1aSJoel Fernandes (Google) 		} else /* common case, do nothing */
162e01f3a1aSJoel Fernandes (Google) 			return;
163e01f3a1aSJoel Fernandes (Google) 	}
164e01f3a1aSJoel Fernandes (Google) }
165e01f3a1aSJoel Fernandes (Google) 
/*
 * task_boost hook for non-rtmutex lock types: only active when the rt_boost
 * module parameter is 2 ("boost for all lock types").
 */
torture_rt_boost(struct torture_random_state * trsp)166e01f3a1aSJoel Fernandes (Google) static void torture_rt_boost(struct torture_random_state *trsp)
167e01f3a1aSJoel Fernandes (Google) {
168e01f3a1aSJoel Fernandes (Google) 	if (rt_boost != 2)
169e01f3a1aSJoel Fernandes (Google) 		return;
170e01f3a1aSJoel Fernandes (Google) 
171e01f3a1aSJoel Fernandes (Google) 	__torture_rt_boost(trsp);
172095777c4SDavidlohr Bueso }
173095777c4SDavidlohr Bueso 
/* Ops for the deliberately broken lock: sanity-checks the test's failure detection. */
174e086481bSPaul E. McKenney static struct lock_torture_ops lock_busted_ops = {
175e086481bSPaul E. McKenney 	.writelock	= torture_lock_busted_write_lock,
176e086481bSPaul E. McKenney 	.write_delay	= torture_lock_busted_write_delay,
177e01f3a1aSJoel Fernandes (Google) 	.task_boost     = torture_rt_boost,
178e086481bSPaul E. McKenney 	.writeunlock	= torture_lock_busted_write_unlock,
1794f6332c1SDavidlohr Bueso 	.readlock       = NULL,
1804f6332c1SDavidlohr Bueso 	.read_delay     = NULL,
1814f6332c1SDavidlohr Bueso 	.readunlock     = NULL,
182e086481bSPaul E. McKenney 	.name		= "lock_busted"
183e086481bSPaul E. McKenney };
184e086481bSPaul E. McKenney 
1850af3fe1eSPaul E. McKenney static DEFINE_SPINLOCK(torture_spinlock);
1860af3fe1eSPaul E. McKenney 
/* Write-side acquire for "spin_lock": takes the global torture spinlock. */
torture_spin_lock_write_lock(int tid __maybe_unused)187aa3a5f31SWaiman Long static int torture_spin_lock_write_lock(int tid __maybe_unused)
188aa3a5f31SWaiman Long __acquires(torture_spinlock)
1890af3fe1eSPaul E. McKenney {
1900af3fe1eSPaul E. McKenney 	spin_lock(&torture_spinlock);
1910af3fe1eSPaul E. McKenney 	return 0;
1920af3fe1eSPaul E. McKenney }
1930af3fe1eSPaul E. McKenney 
/*
 * Hold-time delay while the spinlock is held: mostly short udelay()s to mimic
 * real critical sections, a rare long mdelay() (logged with its duration in
 * jiffies) to force contention, and a rare preemption point.
 */
torture_spin_lock_write_delay(struct torture_random_state * trsp)1940af3fe1eSPaul E. McKenney static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
1950af3fe1eSPaul E. McKenney {
1960af3fe1eSPaul E. McKenney 	const unsigned long shortdelay_us = 2;
197f8619c30SPaul E. McKenney 	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
198f8619c30SPaul E. McKenney 	unsigned long j;
1990af3fe1eSPaul E. McKenney 
2000af3fe1eSPaul E. McKenney 	/* We want a short delay mostly to emulate likely code, and
2010af3fe1eSPaul E. McKenney 	 * we want a long delay occasionally to force massive contention.
2020af3fe1eSPaul E. McKenney 	 */
203f8619c30SPaul E. McKenney 	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * longdelay_ms))) {
204f8619c30SPaul E. McKenney 		j = jiffies;
20561d49d2fSPaul E. McKenney 		mdelay(longdelay_ms);
206f8619c30SPaul E. McKenney 		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
207f8619c30SPaul E. McKenney 	}
208f8619c30SPaul E. McKenney 	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
2090af3fe1eSPaul E. McKenney 		udelay(shortdelay_us);
210630952c2SDavidlohr Bueso 	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
211cc1321c9SPaul E. McKenney 		torture_preempt_schedule();  /* Allow test to be preempted. */
2120af3fe1eSPaul E. McKenney }
2130af3fe1eSPaul E. McKenney 
/* Write-side release for "spin_lock". */
torture_spin_lock_write_unlock(int tid __maybe_unused)214aa3a5f31SWaiman Long static void torture_spin_lock_write_unlock(int tid __maybe_unused)
215aa3a5f31SWaiman Long __releases(torture_spinlock)
2160af3fe1eSPaul E. McKenney {
2170af3fe1eSPaul E. McKenney 	spin_unlock(&torture_spinlock);
2180af3fe1eSPaul E. McKenney }
2190af3fe1eSPaul E. McKenney 
/* Ops for plain spin_lock(); exclusive-only, so no read-side hooks. */
2200af3fe1eSPaul E. McKenney static struct lock_torture_ops spin_lock_ops = {
2210af3fe1eSPaul E. McKenney 	.writelock	= torture_spin_lock_write_lock,
2220af3fe1eSPaul E. McKenney 	.write_delay	= torture_spin_lock_write_delay,
223e01f3a1aSJoel Fernandes (Google) 	.task_boost     = torture_rt_boost,
2240af3fe1eSPaul E. McKenney 	.writeunlock	= torture_spin_lock_write_unlock,
2254f6332c1SDavidlohr Bueso 	.readlock       = NULL,
2264f6332c1SDavidlohr Bueso 	.read_delay     = NULL,
2274f6332c1SDavidlohr Bueso 	.readunlock     = NULL,
2280af3fe1eSPaul E. McKenney 	.name		= "spin_lock"
2290af3fe1eSPaul E. McKenney };
2300af3fe1eSPaul E. McKenney 
/*
 * Write-side acquire for "spin_lock_irq": irqsave variant.  The saved IRQ
 * flags are stashed in the shared cxt.cur_ops->flags; this appears safe only
 * because the field is written while the lock is held — NOTE(review): confirm
 * no other path touches ->flags without the lock.
 */
torture_spin_lock_write_lock_irq(int tid __maybe_unused)231aa3a5f31SWaiman Long static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
232219f800fSDavidlohr Bueso __acquires(torture_spinlock)
2330af3fe1eSPaul E. McKenney {
2340af3fe1eSPaul E. McKenney 	unsigned long flags;
2350af3fe1eSPaul E. McKenney 
2360af3fe1eSPaul E. McKenney 	spin_lock_irqsave(&torture_spinlock, flags);
237630952c2SDavidlohr Bueso 	cxt.cur_ops->flags = flags;
2380af3fe1eSPaul E. McKenney 	return 0;
2390af3fe1eSPaul E. McKenney }
2400af3fe1eSPaul E. McKenney 
/* Release for "spin_lock_irq", restoring the IRQ flags saved at lock time. */
torture_lock_spin_write_unlock_irq(int tid __maybe_unused)241aa3a5f31SWaiman Long static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
2420af3fe1eSPaul E. McKenney __releases(torture_spinlock)
2430af3fe1eSPaul E. McKenney {
244630952c2SDavidlohr Bueso 	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
2450af3fe1eSPaul E. McKenney }
2460af3fe1eSPaul E. McKenney 
/* Ops for spin_lock_irqsave()/spin_unlock_irqrestore(). */
2470af3fe1eSPaul E. McKenney static struct lock_torture_ops spin_lock_irq_ops = {
2480af3fe1eSPaul E. McKenney 	.writelock	= torture_spin_lock_write_lock_irq,
2490af3fe1eSPaul E. McKenney 	.write_delay	= torture_spin_lock_write_delay,
250e01f3a1aSJoel Fernandes (Google) 	.task_boost     = torture_rt_boost,
2510af3fe1eSPaul E. McKenney 	.writeunlock	= torture_lock_spin_write_unlock_irq,
2524f6332c1SDavidlohr Bueso 	.readlock       = NULL,
2534f6332c1SDavidlohr Bueso 	.read_delay     = NULL,
2544f6332c1SDavidlohr Bueso 	.readunlock     = NULL,
2550af3fe1eSPaul E. McKenney 	.name		= "spin_lock_irq"
2560af3fe1eSPaul E. McKenney };
2570af3fe1eSPaul E. McKenney 
2585d65cf6aSZqiang static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);
2595d65cf6aSZqiang 
/* Write-side acquire for "raw_spin_lock" (default type under PREEMPT_RT). */
torture_raw_spin_lock_write_lock(int tid __maybe_unused)2605d65cf6aSZqiang static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
2615d65cf6aSZqiang __acquires(torture_raw_spinlock)
2625d65cf6aSZqiang {
2635d65cf6aSZqiang 	raw_spin_lock(&torture_raw_spinlock);
2645d65cf6aSZqiang 	return 0;
2655d65cf6aSZqiang }
2665d65cf6aSZqiang 
/* Write-side release for "raw_spin_lock". */
torture_raw_spin_lock_write_unlock(int tid __maybe_unused)2675d65cf6aSZqiang static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
2685d65cf6aSZqiang __releases(torture_raw_spinlock)
2695d65cf6aSZqiang {
2705d65cf6aSZqiang 	raw_spin_unlock(&torture_raw_spinlock);
2715d65cf6aSZqiang }
2725d65cf6aSZqiang 
/* Ops for raw_spin_lock(); reuses the spin_lock write-delay function. */
2735d65cf6aSZqiang static struct lock_torture_ops raw_spin_lock_ops = {
2745d65cf6aSZqiang 	.writelock	= torture_raw_spin_lock_write_lock,
2755d65cf6aSZqiang 	.write_delay	= torture_spin_lock_write_delay,
2765d65cf6aSZqiang 	.task_boost	= torture_rt_boost,
2775d65cf6aSZqiang 	.writeunlock	= torture_raw_spin_lock_write_unlock,
2785d65cf6aSZqiang 	.readlock	= NULL,
2795d65cf6aSZqiang 	.read_delay	= NULL,
2805d65cf6aSZqiang 	.readunlock	= NULL,
2815d65cf6aSZqiang 	.name		= "raw_spin_lock"
2825d65cf6aSZqiang };
2835d65cf6aSZqiang 
/* irqsave acquire for "raw_spin_lock_irq"; IRQ flags stashed in cxt.cur_ops->flags. */
torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)2845d65cf6aSZqiang static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
2855d65cf6aSZqiang __acquires(torture_raw_spinlock)
2865d65cf6aSZqiang {
2875d65cf6aSZqiang 	unsigned long flags;
2885d65cf6aSZqiang 
2895d65cf6aSZqiang 	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
2905d65cf6aSZqiang 	cxt.cur_ops->flags = flags;
2915d65cf6aSZqiang 	return 0;
2925d65cf6aSZqiang }
2935d65cf6aSZqiang 
/* Release for "raw_spin_lock_irq", restoring the saved IRQ flags. */
torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)2945d65cf6aSZqiang static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
2955d65cf6aSZqiang __releases(torture_raw_spinlock)
2965d65cf6aSZqiang {
2975d65cf6aSZqiang 	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
2985d65cf6aSZqiang }
2995d65cf6aSZqiang 
/* Ops for raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore(). */
3005d65cf6aSZqiang static struct lock_torture_ops raw_spin_lock_irq_ops = {
3015d65cf6aSZqiang 	.writelock	= torture_raw_spin_lock_write_lock_irq,
3025d65cf6aSZqiang 	.write_delay	= torture_spin_lock_write_delay,
3035d65cf6aSZqiang 	.task_boost	= torture_rt_boost,
3045d65cf6aSZqiang 	.writeunlock	= torture_raw_spin_lock_write_unlock_irq,
3055d65cf6aSZqiang 	.readlock	= NULL,
3065d65cf6aSZqiang 	.read_delay	= NULL,
3075d65cf6aSZqiang 	.readunlock	= NULL,
3085d65cf6aSZqiang 	.name		= "raw_spin_lock_irq"
3095d65cf6aSZqiang };
3105d65cf6aSZqiang 
311e34191faSDavidlohr Bueso static DEFINE_RWLOCK(torture_rwlock);
312e34191faSDavidlohr Bueso 
/* Write-side acquire for "rw_lock". */
torture_rwlock_write_lock(int tid __maybe_unused)313aa3a5f31SWaiman Long static int torture_rwlock_write_lock(int tid __maybe_unused)
314aa3a5f31SWaiman Long __acquires(torture_rwlock)
315e34191faSDavidlohr Bueso {
316e34191faSDavidlohr Bueso 	write_lock(&torture_rwlock);
317e34191faSDavidlohr Bueso 	return 0;
318e34191faSDavidlohr Bueso }
319e34191faSDavidlohr Bueso 
/*
 * Write-hold delay for rwlock flavors: usually a 2us udelay, occasionally a
 * long mdelay (controlled by long_hold) to force reader/writer contention.
 * No preemption point here — the write lock is held with preemption off.
 */
torture_rwlock_write_delay(struct torture_random_state * trsp)320e34191faSDavidlohr Bueso static void torture_rwlock_write_delay(struct torture_random_state *trsp)
321e34191faSDavidlohr Bueso {
322e34191faSDavidlohr Bueso 	const unsigned long shortdelay_us = 2;
323f8619c30SPaul E. McKenney 	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
324e34191faSDavidlohr Bueso 
325e34191faSDavidlohr Bueso 	/* We want a short delay mostly to emulate likely code, and
326e34191faSDavidlohr Bueso 	 * we want a long delay occasionally to force massive contention.
327e34191faSDavidlohr Bueso 	 */
328e34191faSDavidlohr Bueso 	if (!(torture_random(trsp) %
329e34191faSDavidlohr Bueso 	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
330e34191faSDavidlohr Bueso 		mdelay(longdelay_ms);
331e34191faSDavidlohr Bueso 	else
332e34191faSDavidlohr Bueso 		udelay(shortdelay_us);
333e34191faSDavidlohr Bueso }
334e34191faSDavidlohr Bueso 
/* Write-side release for "rw_lock". */
torture_rwlock_write_unlock(int tid __maybe_unused)335aa3a5f31SWaiman Long static void torture_rwlock_write_unlock(int tid __maybe_unused)
336aa3a5f31SWaiman Long __releases(torture_rwlock)
337e34191faSDavidlohr Bueso {
338e34191faSDavidlohr Bueso 	write_unlock(&torture_rwlock);
339e34191faSDavidlohr Bueso }
340e34191faSDavidlohr Bueso 
/* Read-side acquire for "rw_lock". */
torture_rwlock_read_lock(int tid __maybe_unused)341aa3a5f31SWaiman Long static int torture_rwlock_read_lock(int tid __maybe_unused)
342aa3a5f31SWaiman Long __acquires(torture_rwlock)
343e34191faSDavidlohr Bueso {
344e34191faSDavidlohr Bueso 	read_lock(&torture_rwlock);
345e34191faSDavidlohr Bueso 	return 0;
346e34191faSDavidlohr Bueso }
347e34191faSDavidlohr Bueso 
/*
 * Read-hold delay: 10us udelay in the common case, with a rare fixed 100ms
 * long hold scaled by the reader count (independent of long_hold).
 */
torture_rwlock_read_delay(struct torture_random_state * trsp)348e34191faSDavidlohr Bueso static void torture_rwlock_read_delay(struct torture_random_state *trsp)
349e34191faSDavidlohr Bueso {
350e34191faSDavidlohr Bueso 	const unsigned long shortdelay_us = 10;
351e34191faSDavidlohr Bueso 	const unsigned long longdelay_ms = 100;
352e34191faSDavidlohr Bueso 
353e34191faSDavidlohr Bueso 	/* We want a short delay mostly to emulate likely code, and
354e34191faSDavidlohr Bueso 	 * we want a long delay occasionally to force massive contention.
355e34191faSDavidlohr Bueso 	 */
356e34191faSDavidlohr Bueso 	if (!(torture_random(trsp) %
357e34191faSDavidlohr Bueso 	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
358e34191faSDavidlohr Bueso 		mdelay(longdelay_ms);
359e34191faSDavidlohr Bueso 	else
360e34191faSDavidlohr Bueso 		udelay(shortdelay_us);
361e34191faSDavidlohr Bueso }
362e34191faSDavidlohr Bueso 
/* Read-side release for "rw_lock". */
torture_rwlock_read_unlock(int tid __maybe_unused)363aa3a5f31SWaiman Long static void torture_rwlock_read_unlock(int tid __maybe_unused)
364aa3a5f31SWaiman Long __releases(torture_rwlock)
365e34191faSDavidlohr Bueso {
366e34191faSDavidlohr Bueso 	read_unlock(&torture_rwlock);
367e34191faSDavidlohr Bueso }
368e34191faSDavidlohr Bueso 
/* Ops for reader/writer spinlocks; provides both read- and write-side hooks. */
369e34191faSDavidlohr Bueso static struct lock_torture_ops rw_lock_ops = {
370e34191faSDavidlohr Bueso 	.writelock	= torture_rwlock_write_lock,
371e34191faSDavidlohr Bueso 	.write_delay	= torture_rwlock_write_delay,
372e01f3a1aSJoel Fernandes (Google) 	.task_boost     = torture_rt_boost,
373e34191faSDavidlohr Bueso 	.writeunlock	= torture_rwlock_write_unlock,
374e34191faSDavidlohr Bueso 	.readlock       = torture_rwlock_read_lock,
375e34191faSDavidlohr Bueso 	.read_delay     = torture_rwlock_read_delay,
376e34191faSDavidlohr Bueso 	.readunlock     = torture_rwlock_read_unlock,
377e34191faSDavidlohr Bueso 	.name		= "rw_lock"
378e34191faSDavidlohr Bueso };
379e34191faSDavidlohr Bueso 
/* irqsave write acquire for "rw_lock_irq"; IRQ flags stashed in cxt.cur_ops->flags. */
torture_rwlock_write_lock_irq(int tid __maybe_unused)380aa3a5f31SWaiman Long static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
381aa3a5f31SWaiman Long __acquires(torture_rwlock)
382e34191faSDavidlohr Bueso {
383e34191faSDavidlohr Bueso 	unsigned long flags;
384e34191faSDavidlohr Bueso 
385e34191faSDavidlohr Bueso 	write_lock_irqsave(&torture_rwlock, flags);
386e34191faSDavidlohr Bueso 	cxt.cur_ops->flags = flags;
387e34191faSDavidlohr Bueso 	return 0;
388e34191faSDavidlohr Bueso }
389e34191faSDavidlohr Bueso 
/* Write release for "rw_lock_irq", restoring the saved IRQ flags. */
torture_rwlock_write_unlock_irq(int tid __maybe_unused)390aa3a5f31SWaiman Long static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
391e34191faSDavidlohr Bueso __releases(torture_rwlock)
392e34191faSDavidlohr Bueso {
393e34191faSDavidlohr Bueso 	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
394e34191faSDavidlohr Bueso }
395e34191faSDavidlohr Bueso 
/*
 * irqsave read acquire for "rw_lock_irq".  NOTE(review): multiple readers can
 * hold the lock concurrently, yet all share the single cxt.cur_ops->flags
 * slot — verify the last-writer-wins behavior is intentional here.
 */
torture_rwlock_read_lock_irq(int tid __maybe_unused)396aa3a5f31SWaiman Long static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
397aa3a5f31SWaiman Long __acquires(torture_rwlock)
398e34191faSDavidlohr Bueso {
399e34191faSDavidlohr Bueso 	unsigned long flags;
400e34191faSDavidlohr Bueso 
401e34191faSDavidlohr Bueso 	read_lock_irqsave(&torture_rwlock, flags);
402e34191faSDavidlohr Bueso 	cxt.cur_ops->flags = flags;
403e34191faSDavidlohr Bueso 	return 0;
404e34191faSDavidlohr Bueso }
405e34191faSDavidlohr Bueso 
/* Read release for "rw_lock_irq", restoring the most recently saved IRQ flags. */
torture_rwlock_read_unlock_irq(int tid __maybe_unused)406aa3a5f31SWaiman Long static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
407e34191faSDavidlohr Bueso __releases(torture_rwlock)
408e34191faSDavidlohr Bueso {
409f548d99eSAlexey Kodanev 	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
410e34191faSDavidlohr Bueso }
411e34191faSDavidlohr Bueso 
/* Ops for rwlock with IRQ save/restore on both read and write sides. */
412e34191faSDavidlohr Bueso static struct lock_torture_ops rw_lock_irq_ops = {
413e34191faSDavidlohr Bueso 	.writelock	= torture_rwlock_write_lock_irq,
414e34191faSDavidlohr Bueso 	.write_delay	= torture_rwlock_write_delay,
415e01f3a1aSJoel Fernandes (Google) 	.task_boost     = torture_rt_boost,
416e34191faSDavidlohr Bueso 	.writeunlock	= torture_rwlock_write_unlock_irq,
417e34191faSDavidlohr Bueso 	.readlock       = torture_rwlock_read_lock_irq,
418e34191faSDavidlohr Bueso 	.read_delay     = torture_rwlock_read_delay,
419e34191faSDavidlohr Bueso 	.readunlock     = torture_rwlock_read_unlock_irq,
420e34191faSDavidlohr Bueso 	.name		= "rw_lock_irq"
421e34191faSDavidlohr Bueso };
422e34191faSDavidlohr Bueso 
42342ddc75dSDavidlohr Bueso static DEFINE_MUTEX(torture_mutex);
4243e5aeaf5SJohn Stultz static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
4253e5aeaf5SJohn Stultz static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];
4263e5aeaf5SJohn Stultz 
/*
 * Give each nested mutex its own lockdep class key so that acquiring them
 * in ascending order does not trigger false lockdep nesting reports.
 */
torture_mutex_init(void)4273e5aeaf5SJohn Stultz static void torture_mutex_init(void)
4283e5aeaf5SJohn Stultz {
4293e5aeaf5SJohn Stultz 	int i;
4303e5aeaf5SJohn Stultz 
4313e5aeaf5SJohn Stultz 	for (i = 0; i < MAX_NESTED_LOCKS; i++)
4323e5aeaf5SJohn Stultz 		__mutex_init(&torture_nested_mutexes[i], __func__,
4333e5aeaf5SJohn Stultz 			     &nested_mutex_keys[i]);
4343e5aeaf5SJohn Stultz }
4353e5aeaf5SJohn Stultz 
/*
 * Acquire the subset of nested mutexes selected by the @lockset bitmask,
 * always in ascending index order to keep the locking hierarchy consistent.
 */
torture_mutex_nested_lock(int tid __maybe_unused,u32 lockset)4363e5aeaf5SJohn Stultz static int torture_mutex_nested_lock(int tid __maybe_unused,
4373e5aeaf5SJohn Stultz 				     u32 lockset)
4383e5aeaf5SJohn Stultz {
4393e5aeaf5SJohn Stultz 	int i;
4403e5aeaf5SJohn Stultz 
4413e5aeaf5SJohn Stultz 	for (i = 0; i < nested_locks; i++)
4423e5aeaf5SJohn Stultz 		if (lockset & (1 << i))
4433e5aeaf5SJohn Stultz 			mutex_lock(&torture_nested_mutexes[i]);
4443e5aeaf5SJohn Stultz 	return 0;
4453e5aeaf5SJohn Stultz }
44642ddc75dSDavidlohr Bueso 
/* Write-side acquire for "mutex_lock". */
torture_mutex_lock(int tid __maybe_unused)447aa3a5f31SWaiman Long static int torture_mutex_lock(int tid __maybe_unused)
448aa3a5f31SWaiman Long __acquires(torture_mutex)
44942ddc75dSDavidlohr Bueso {
45042ddc75dSDavidlohr Bueso 	mutex_lock(&torture_mutex);
45142ddc75dSDavidlohr Bueso 	return 0;
45242ddc75dSDavidlohr Bueso }
45342ddc75dSDavidlohr Bueso 
/*
 * Hold-time delay for the mutex: a rare extra-long (5x long_hold) mdelay to
 * force contention, plus a rare voluntary preemption point (legal here since
 * mutexes are held with preemption enabled).
 */
torture_mutex_delay(struct torture_random_state * trsp)45442ddc75dSDavidlohr Bueso static void torture_mutex_delay(struct torture_random_state *trsp)
45542ddc75dSDavidlohr Bueso {
456f8619c30SPaul E. McKenney 	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;
45742ddc75dSDavidlohr Bueso 
45842ddc75dSDavidlohr Bueso 	/* We want a long delay occasionally to force massive contention.  */
45942ddc75dSDavidlohr Bueso 	if (!(torture_random(trsp) %
460630952c2SDavidlohr Bueso 	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
46142ddc75dSDavidlohr Bueso 		mdelay(longdelay_ms * 5);
462630952c2SDavidlohr Bueso 	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
463cc1321c9SPaul E. McKenney 		torture_preempt_schedule();  /* Allow test to be preempted. */
46442ddc75dSDavidlohr Bueso }
46542ddc75dSDavidlohr Bueso 
/* Write-side release for "mutex_lock". */
torture_mutex_unlock(int tid __maybe_unused)466aa3a5f31SWaiman Long static void torture_mutex_unlock(int tid __maybe_unused)
467aa3a5f31SWaiman Long __releases(torture_mutex)
46842ddc75dSDavidlohr Bueso {
46942ddc75dSDavidlohr Bueso 	mutex_unlock(&torture_mutex);
47042ddc75dSDavidlohr Bueso }
47142ddc75dSDavidlohr Bueso 
/* Release the nested mutexes from @lockset in reverse (descending) order. */
torture_mutex_nested_unlock(int tid __maybe_unused,u32 lockset)4723e5aeaf5SJohn Stultz static void torture_mutex_nested_unlock(int tid __maybe_unused,
4733e5aeaf5SJohn Stultz 					u32 lockset)
4743e5aeaf5SJohn Stultz {
4753e5aeaf5SJohn Stultz 	int i;
4763e5aeaf5SJohn Stultz 
4773e5aeaf5SJohn Stultz 	for (i = nested_locks - 1; i >= 0; i--)
4783e5aeaf5SJohn Stultz 		if (lockset & (1 << i))
4793e5aeaf5SJohn Stultz 			mutex_unlock(&torture_nested_mutexes[i]);
4803e5aeaf5SJohn Stultz }
4813e5aeaf5SJohn Stultz 
/* Ops for mutex_lock(); supports the nested_locks parameter. */
48242ddc75dSDavidlohr Bueso static struct lock_torture_ops mutex_lock_ops = {
4833e5aeaf5SJohn Stultz 	.init		= torture_mutex_init,
4843e5aeaf5SJohn Stultz 	.nested_lock	= torture_mutex_nested_lock,
48542ddc75dSDavidlohr Bueso 	.writelock	= torture_mutex_lock,
48642ddc75dSDavidlohr Bueso 	.write_delay	= torture_mutex_delay,
487e01f3a1aSJoel Fernandes (Google) 	.task_boost     = torture_rt_boost,
48842ddc75dSDavidlohr Bueso 	.writeunlock	= torture_mutex_unlock,
4893e5aeaf5SJohn Stultz 	.nested_unlock	= torture_mutex_nested_unlock,
4904f6332c1SDavidlohr Bueso 	.readlock       = NULL,
4914f6332c1SDavidlohr Bueso 	.read_delay     = NULL,
4924f6332c1SDavidlohr Bueso 	.readunlock     = NULL,
49342ddc75dSDavidlohr Bueso 	.name		= "mutex_lock"
49542ddc75dSDavidlohr Bueso 
4960186a6cbSChris Wilson #include <linux/ww_mutex.h>
4972ea55bbbSWaiman Long /*
4982ea55bbbSWaiman Long  * The torture ww_mutexes should belong to the same lock class as
4992ea55bbbSWaiman Long  * torture_ww_class to avoid lockdep problem. The ww_mutex_init()
5002ea55bbbSWaiman Long  * function is called for initialization to ensure that.
5012ea55bbbSWaiman Long  */
50208295b3bSThomas Hellstrom static DEFINE_WD_CLASS(torture_ww_class);
5032ea55bbbSWaiman Long static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
5048c52cca0SWaiman Long static struct ww_acquire_ctx *ww_acquire_ctxs;
5052ea55bbbSWaiman Long 
/*
 * Initialize the three ww_mutexes in torture_ww_class (same lockdep class,
 * see comment above) and allocate one ww_acquire_ctx per writer thread.
 * On allocation failure only a message is logged; torture_ww_mutex_lock()
 * would then index a NULL array — the caller path is expected to bail first.
 */
torture_ww_mutex_init(void)5062ea55bbbSWaiman Long static void torture_ww_mutex_init(void)
5072ea55bbbSWaiman Long {
5082ea55bbbSWaiman Long 	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
5092ea55bbbSWaiman Long 	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
5102ea55bbbSWaiman Long 	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);
5118c52cca0SWaiman Long 
5128c52cca0SWaiman Long 	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
5138c52cca0SWaiman Long 					sizeof(*ww_acquire_ctxs),
5148c52cca0SWaiman Long 					GFP_KERNEL);
5158c52cca0SWaiman Long 	if (!ww_acquire_ctxs)
5168c52cca0SWaiman Long 		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
5172ea55bbbSWaiman Long }
5180186a6cbSChris Wilson 
/* Free the per-writer acquire-context array (kfree(NULL) is a no-op). */
torture_ww_mutex_exit(void)5198c52cca0SWaiman Long static void torture_ww_mutex_exit(void)
5208c52cca0SWaiman Long {
5218c52cca0SWaiman Long 	kfree(ww_acquire_ctxs);
5228c52cca0SWaiman Long }
5238c52cca0SWaiman Long 
/*
 * Acquire all three ww_mutexes using the wound/wait protocol with @tid's
 * private acquire context.  On -EDEADLK, every lock taken so far is dropped,
 * the contended lock is re-taken with ww_mutex_lock_slow() (which blocks
 * until it succeeds), and that lock is moved to the front of the list so the
 * remaining locks are retried after it — the standard ww_mutex backoff dance.
 * Any error other than -EDEADLK is returned with no locks held.
 */
torture_ww_mutex_lock(int tid)5248c52cca0SWaiman Long static int torture_ww_mutex_lock(int tid)
5250186a6cbSChris Wilson __acquires(torture_ww_mutex_0)
5260186a6cbSChris Wilson __acquires(torture_ww_mutex_1)
5270186a6cbSChris Wilson __acquires(torture_ww_mutex_2)
5280186a6cbSChris Wilson {
5290186a6cbSChris Wilson 	LIST_HEAD(list);
5300186a6cbSChris Wilson 	struct reorder_lock {
5310186a6cbSChris Wilson 		struct list_head link;
5320186a6cbSChris Wilson 		struct ww_mutex *lock;
5330186a6cbSChris Wilson 	} locks[3], *ll, *ln;
5348c52cca0SWaiman Long 	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];
5350186a6cbSChris Wilson 
5360186a6cbSChris Wilson 	locks[0].lock = &torture_ww_mutex_0;
5370186a6cbSChris Wilson 	list_add(&locks[0].link, &list);
5380186a6cbSChris Wilson 
5390186a6cbSChris Wilson 	locks[1].lock = &torture_ww_mutex_1;
5400186a6cbSChris Wilson 	list_add(&locks[1].link, &list);
5410186a6cbSChris Wilson 
5420186a6cbSChris Wilson 	locks[2].lock = &torture_ww_mutex_2;
5430186a6cbSChris Wilson 	list_add(&locks[2].link, &list);
5440186a6cbSChris Wilson 
5458c52cca0SWaiman Long 	ww_acquire_init(ctx, &torture_ww_class);
5460186a6cbSChris Wilson 
5470186a6cbSChris Wilson 	list_for_each_entry(ll, &list, link) {
5480186a6cbSChris Wilson 		int err;
5490186a6cbSChris Wilson 
5508c52cca0SWaiman Long 		err = ww_mutex_lock(ll->lock, ctx);
5510186a6cbSChris Wilson 		if (!err)
5520186a6cbSChris Wilson 			continue;
5530186a6cbSChris Wilson 
5540186a6cbSChris Wilson 		ln = ll;
5550186a6cbSChris Wilson 		list_for_each_entry_continue_reverse(ln, &list, link)
5560186a6cbSChris Wilson 			ww_mutex_unlock(ln->lock);
5570186a6cbSChris Wilson 
5580186a6cbSChris Wilson 		if (err != -EDEADLK)
5590186a6cbSChris Wilson 			return err;
5600186a6cbSChris Wilson 
5618c52cca0SWaiman Long 		ww_mutex_lock_slow(ll->lock, ctx);
5620186a6cbSChris Wilson 		list_move(&ll->link, &list);
5630186a6cbSChris Wilson 	}
5640186a6cbSChris Wilson 
5650186a6cbSChris Wilson 	return 0;
5660186a6cbSChris Wilson }
5670186a6cbSChris Wilson 
/*
 * Release all three ww_mutexes and finish this writer's acquire context.
 * (ww_mutexes may be released in any order, so 0/1/2 order is fine.)
 */
static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}
5800186a6cbSChris Wilson 
/* Operations table for the "ww_mutex_lock" torture type (writers only). */
static struct lock_torture_ops ww_mutex_lock_ops = {
	.init		= torture_ww_mutex_init,
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,	/* reuses plain-mutex delay */
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock       = NULL,			/* no reader side */
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "ww_mutex_lock"
};
5930186a6cbSChris Wilson 
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
/* Optional chain of nested rtmutexes, enabled via the nested_locks parameter. */
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
/*
 * One lockdep class key per nested rtmutex -- presumably so lockdep
 * treats each nesting level as a distinct class; confirm.
 */
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

/* Initialize every nested rtmutex with its own lockdep class key. */
static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}
607ae4823e4SJohn Stultz 
/*
 * Acquire, in ascending index order, each nested rtmutex whose bit is
 * set in @lockset.  Only the low nested_locks bits are considered.
 * Always returns 0.
 */
static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int idx = 0;
	u32 bits = lockset;

	while (idx < nested_locks) {
		if (bits & 0x1)
			rt_mutex_lock(&torture_nested_rtmutexes[idx]);
		bits >>= 1;
		idx++;
	}
	return 0;
}
618095777c4SDavidlohr Bueso 
/* Writer-side acquisition of the central torture rtmutex; always succeeds. */
static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}
625095777c4SDavidlohr Bueso 
/*
 * Delay while holding torture_rtmutex.  long_hold=0 selects ULONG_MAX,
 * which (after the unsigned multiply in the modulus) presumably makes
 * the long-hold branch essentially never fire -- confirm intent.
 */
static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}
644095777c4SDavidlohr Bueso 
/* Writer-side release of the central torture rtmutex. */
static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}
650095777c4SDavidlohr Bueso 
torture_rt_boost_rtmutex(struct torture_random_state * trsp)651e01f3a1aSJoel Fernandes (Google) static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
652e01f3a1aSJoel Fernandes (Google) {
653e01f3a1aSJoel Fernandes (Google) 	if (!rt_boost)
654e01f3a1aSJoel Fernandes (Google) 		return;
655e01f3a1aSJoel Fernandes (Google) 
656e01f3a1aSJoel Fernandes (Google) 	__torture_rt_boost(trsp);
657e01f3a1aSJoel Fernandes (Google) }
658e01f3a1aSJoel Fernandes (Google) 
/*
 * Release, in descending index order (the reverse of acquisition),
 * each nested rtmutex whose bit is set in @lockset.
 */
static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int idx = nested_locks;

	while (--idx >= 0) {
		if (lockset & (1 << idx))
			rt_mutex_unlock(&torture_nested_rtmutexes[idx]);
	}
}
668ae4823e4SJohn Stultz 
/* Operations table for the "rtmutex_lock" torture type (writers only). */
static struct lock_torture_ops rtmutex_lock_ops = {
	.init		= torture_rtmutex_init,
	.nested_lock	= torture_rtmutex_nested_lock,
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost     = torture_rt_boost_rtmutex,
	.writeunlock	= torture_rtmutex_unlock,
	.nested_unlock	= torture_rtmutex_nested_unlock,
	.readlock       = NULL,			/* no reader side */
	.read_delay     = NULL,
	.readunlock     = NULL,
	.name		= "rtmutex_lock"
};
#endif
683095777c4SDavidlohr Bueso 
static DECLARE_RWSEM(torture_rwsem);

/* Writer-side acquisition of the torture rwsem; always succeeds. */
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}
6914a3b427fSDavidlohr Bueso 
/*
 * Delay while write-holding the rwsem.  long_hold=0 selects ULONG_MAX,
 * which presumably makes the long-hold branch essentially never fire
 * (the modulus becomes huge after the unsigned multiply) -- confirm.
 */
static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = long_hold ? long_hold : ULONG_MAX;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}
7034a3b427fSDavidlohr Bueso 
/* Writer-side release of the torture rwsem. */
static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}
7094a3b427fSDavidlohr Bueso 
/* Reader-side acquisition of the torture rwsem; always succeeds. */
static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}
7164a3b427fSDavidlohr Bueso 
/*
 * Delay while read-holding the rwsem.  Unlike the write delay, every
 * reader holds for at least ~50ms; a rare draw doubles that to force
 * heavy read-side contention.
 */
static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}
7304a3b427fSDavidlohr Bueso 
/* Reader-side release of the torture rwsem. */
static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}
7364a3b427fSDavidlohr Bueso 
/* Operations table for the "rwsem_lock" torture type (readers and writers). */
static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_rwsem_up_write,
	.readlock       = torture_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_rwsem_up_read,
	.name		= "rwsem_lock"
};
7474a3b427fSDavidlohr Bueso 
#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

/* Initialize the percpu rwsem; allocation failure here is fatal by design. */
static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}
755617783ddSPaul E. McKenney 
/* Release the percpu rwsem's per-CPU state. */
static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}
7600d720287SHou Tao 
/* Writer-side acquisition of the percpu rwsem; always succeeds. */
static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}
767617783ddSPaul E. McKenney 
/* Writer-side release of the percpu rwsem. */
static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}
773617783ddSPaul E. McKenney 
/* Reader-side acquisition of the percpu rwsem; always succeeds. */
static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}
780617783ddSPaul E. McKenney 
/* Reader-side release of the percpu rwsem. */
static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}
786617783ddSPaul E. McKenney 
/*
 * Operations table for the "percpu_rwsem_lock" torture type.  Reuses the
 * plain-rwsem delay callbacks; only lock/unlock differ.
 */
static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock       = torture_percpu_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
	.readunlock     = torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};
799617783ddSPaul E. McKenney 
8000af3fe1eSPaul E. McKenney /*
8010af3fe1eSPaul E. McKenney  * Lock torture writer kthread.  Repeatedly acquires and releases
8020af3fe1eSPaul E. McKenney  * the lock, checking for duplicate acquisitions.
8030af3fe1eSPaul E. McKenney  */
lock_torture_writer(void * arg)8040af3fe1eSPaul E. McKenney static int lock_torture_writer(void *arg)
8050af3fe1eSPaul E. McKenney {
8061e6757a9SDavidlohr Bueso 	struct lock_stress_stats *lwsp = arg;
807aa3a5f31SWaiman Long 	int tid = lwsp - cxt.lwsa;
808c0e1472dSPaul E. McKenney 	DEFINE_TORTURE_RANDOM(rand);
809b6334320SJohn Stultz 	u32 lockset_mask;
81045bcf0bdSJohn Stultz 	bool skip_main_lock;
8110af3fe1eSPaul E. McKenney 
8120af3fe1eSPaul E. McKenney 	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
813*5d248bb3SDietmar Eggemann 	if (!rt_task(current))
8148698a745SDongsheng Yang 		set_user_nice(current, MAX_NICE);
8150af3fe1eSPaul E. McKenney 
8160af3fe1eSPaul E. McKenney 	do {
817da601c63SPaul E. McKenney 		if ((torture_random(&rand) & 0xfffff) == 0)
8180af3fe1eSPaul E. McKenney 			schedule_timeout_uninterruptible(1);
819a1229491SDavidlohr Bueso 
820b6334320SJohn Stultz 		lockset_mask = torture_random(&rand);
82145bcf0bdSJohn Stultz 		/*
82245bcf0bdSJohn Stultz 		 * When using nested_locks, we want to occasionally
82345bcf0bdSJohn Stultz 		 * skip the main lock so we can avoid always serializing
82445bcf0bdSJohn Stultz 		 * the lock chains on that central lock. By skipping the
82545bcf0bdSJohn Stultz 		 * main lock occasionally, we can create different
82645bcf0bdSJohn Stultz 		 * contention patterns (allowing for multiple disjoint
82745bcf0bdSJohn Stultz 		 * blocked trees)
82845bcf0bdSJohn Stultz 		 */
82945bcf0bdSJohn Stultz 		skip_main_lock = (nested_locks &&
83045bcf0bdSJohn Stultz 				 !(torture_random(&rand) % 100));
83145bcf0bdSJohn Stultz 
832095777c4SDavidlohr Bueso 		cxt.cur_ops->task_boost(&rand);
833b6334320SJohn Stultz 		if (cxt.cur_ops->nested_lock)
834b6334320SJohn Stultz 			cxt.cur_ops->nested_lock(tid, lockset_mask);
83545bcf0bdSJohn Stultz 
83645bcf0bdSJohn Stultz 		if (!skip_main_lock) {
837aa3a5f31SWaiman Long 			cxt.cur_ops->writelock(tid);
8380af3fe1eSPaul E. McKenney 			if (WARN_ON_ONCE(lock_is_write_held))
8391e6757a9SDavidlohr Bueso 				lwsp->n_lock_fail++;
840d02c6b52SZou Wei 			lock_is_write_held = true;
841af5f6e27SPaul E. McKenney 			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
842a1229491SDavidlohr Bueso 				lwsp->n_lock_fail++; /* rare, but... */
843a1229491SDavidlohr Bueso 
8441e6757a9SDavidlohr Bueso 			lwsp->n_lock_acquired++;
84545bcf0bdSJohn Stultz 		}
84645bcf0bdSJohn Stultz 		if (!skip_main_lock) {
847f8619c30SPaul E. McKenney 			cxt.cur_ops->write_delay(&rand);
848d02c6b52SZou Wei 			lock_is_write_held = false;
8493480d677SPaul E. McKenney 			WRITE_ONCE(last_lock_release, jiffies);
850aa3a5f31SWaiman Long 			cxt.cur_ops->writeunlock(tid);
85145bcf0bdSJohn Stultz 		}
852b6334320SJohn Stultz 		if (cxt.cur_ops->nested_unlock)
853b6334320SJohn Stultz 			cxt.cur_ops->nested_unlock(tid, lockset_mask);
854a1229491SDavidlohr Bueso 
8550af3fe1eSPaul E. McKenney 		stutter_wait("lock_torture_writer");
8560af3fe1eSPaul E. McKenney 	} while (!torture_must_stop());
857095777c4SDavidlohr Bueso 
858095777c4SDavidlohr Bueso 	cxt.cur_ops->task_boost(NULL); /* reset prio */
8590af3fe1eSPaul E. McKenney 	torture_kthread_stopping("lock_torture_writer");
8600af3fe1eSPaul E. McKenney 	return 0;
8610af3fe1eSPaul E. McKenney }
8620af3fe1eSPaul E. McKenney 
/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 *
 * @arg points at this reader's lock_stress_stats slot within cxt.lrsa;
 * the reader tid is recovered from the slot's offset.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		/* Rarely sleep a jiffy to vary thread phasing. */
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		/* A concurrent write holder means mutual exclusion failed. */
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}
8954f6332c1SDavidlohr Bueso 
/*
 * Create an lock-torture-statistics message in the specified buffer.
 * Summarizes per-thread acquisition counts (total/max/min) for either
 * the writers (@write true, cxt.lwsa) or readers (@write false, cxt.lrsa)
 * and bumps the global error count if any thread recorded a failure.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	/* data_race(): counters are updated locklessly by the stress threads. */
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			/* Large max/min skew is suspect only without CPU hotplug. */
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
9280af3fe1eSPaul E. McKenney 
9290af3fe1eSPaul E. McKenney /*
9300af3fe1eSPaul E. McKenney  * Print torture statistics.  Caller must ensure that there is only one
9310af3fe1eSPaul E. McKenney  * call to this function at a given time!!!  This is normally accomplished
9320af3fe1eSPaul E. McKenney  * by relying on the module system to only have one copy of the module
9330af3fe1eSPaul E. McKenney  * loaded, and then by giving the lock_torture_stats kthread full control
9340af3fe1eSPaul E. McKenney  * (or the init/cleanup functions when lock_torture_stats thread is not
9350af3fe1eSPaul E. McKenney  * running).
9360af3fe1eSPaul E. McKenney  */
lock_torture_stats_print(void)9370af3fe1eSPaul E. McKenney static void lock_torture_stats_print(void)
9380af3fe1eSPaul E. McKenney {
939630952c2SDavidlohr Bueso 	int size = cxt.nrealwriters_stress * 200 + 8192;
9400af3fe1eSPaul E. McKenney 	char *buf;
9410af3fe1eSPaul E. McKenney 
942630952c2SDavidlohr Bueso 	if (cxt.cur_ops->readlock)
943630952c2SDavidlohr Bueso 		size += cxt.nrealreaders_stress * 200 + 8192;
9444f6332c1SDavidlohr Bueso 
9450af3fe1eSPaul E. McKenney 	buf = kmalloc(size, GFP_KERNEL);
9460af3fe1eSPaul E. McKenney 	if (!buf) {
9470af3fe1eSPaul E. McKenney 		pr_err("lock_torture_stats_print: Out of memory, need: %d",
9480af3fe1eSPaul E. McKenney 		       size);
9490af3fe1eSPaul E. McKenney 		return;
9500af3fe1eSPaul E. McKenney 	}
9514f6332c1SDavidlohr Bueso 
952630952c2SDavidlohr Bueso 	__torture_print_stats(buf, cxt.lwsa, true);
9530af3fe1eSPaul E. McKenney 	pr_alert("%s", buf);
9540af3fe1eSPaul E. McKenney 	kfree(buf);
9554f6332c1SDavidlohr Bueso 
956630952c2SDavidlohr Bueso 	if (cxt.cur_ops->readlock) {
9574f6332c1SDavidlohr Bueso 		buf = kmalloc(size, GFP_KERNEL);
9584f6332c1SDavidlohr Bueso 		if (!buf) {
9594f6332c1SDavidlohr Bueso 			pr_err("lock_torture_stats_print: Out of memory, need: %d",
9604f6332c1SDavidlohr Bueso 			       size);
9614f6332c1SDavidlohr Bueso 			return;
9624f6332c1SDavidlohr Bueso 		}
9634f6332c1SDavidlohr Bueso 
964630952c2SDavidlohr Bueso 		__torture_print_stats(buf, cxt.lrsa, false);
9654f6332c1SDavidlohr Bueso 		pr_alert("%s", buf);
9664f6332c1SDavidlohr Bueso 		kfree(buf);
9674f6332c1SDavidlohr Bueso 	}
9680af3fe1eSPaul E. McKenney }
9690af3fe1eSPaul E. McKenney 
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		/* Sleep stat_interval seconds between reports. */
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}
9880af3fe1eSPaul E. McKenney 
/*
 * Dump the torture-test configuration to the console, prefixed with @tag
 * (e.g. "Start of test" or an end-of-test verdict).
 * NOTE(review): @cur_ops is currently unused in the body (cxt fields are
 * read instead) -- presumably kept for signature symmetry; confirm.
 */
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d nested_locks=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress,
		 nested_locks, stat_interval, verbose, shuffle_interval,
		 stutter, shutdown_secs, onoff_interval, onoff_holdoff);
}
10000af3fe1eSPaul E. McKenney 
/*
 * Tear down the torture test: stop all writer/reader/stats kthreads,
 * print the final statistics and verdict, free per-thread stats arrays,
 * and invoke the torture type's exit() hook if its init() ran.
 */
static void lock_torture_cleanup(void)
{
	int i;

	/* Serialize against a concurrent cleanup/init. */
	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However cxt->cur_ops.init() may have been invoked, so beside
	 * perform the underlying torture-specific cleanups, cur_ops.exit()
	 * will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	/* NULL the pointers so a re-init cannot see stale stats arrays. */
	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}
10590af3fe1eSPaul E. McKenney 
lock_torture_init(void)10600af3fe1eSPaul E. McKenney static int __init lock_torture_init(void)
10610af3fe1eSPaul E. McKenney {
10624f6332c1SDavidlohr Bueso 	int i, j;
10630af3fe1eSPaul E. McKenney 	int firsterr = 0;
10640af3fe1eSPaul E. McKenney 	static struct lock_torture_ops *torture_ops[] = {
1065e34191faSDavidlohr Bueso 		&lock_busted_ops,
1066e34191faSDavidlohr Bueso 		&spin_lock_ops, &spin_lock_irq_ops,
10675d65cf6aSZqiang 		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
1068e34191faSDavidlohr Bueso 		&rw_lock_ops, &rw_lock_irq_ops,
1069e34191faSDavidlohr Bueso 		&mutex_lock_ops,
10700186a6cbSChris Wilson 		&ww_mutex_lock_ops,
1071095777c4SDavidlohr Bueso #ifdef CONFIG_RT_MUTEXES
1072095777c4SDavidlohr Bueso 		&rtmutex_lock_ops,
1073095777c4SDavidlohr Bueso #endif
1074e34191faSDavidlohr Bueso 		&rwsem_lock_ops,
1075617783ddSPaul E. McKenney 		&percpu_rwsem_lock_ops,
10760af3fe1eSPaul E. McKenney 	};
10770af3fe1eSPaul E. McKenney 
1078a2f2577dSPaul E. McKenney 	if (!torture_init_begin(torture_type, verbose))
10795228084eSPaul E. McKenney 		return -EBUSY;
10800af3fe1eSPaul E. McKenney 
10810af3fe1eSPaul E. McKenney 	/* Process args and tell the world that the torturer is on the job. */
10820af3fe1eSPaul E. McKenney 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
1083630952c2SDavidlohr Bueso 		cxt.cur_ops = torture_ops[i];
1084630952c2SDavidlohr Bueso 		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
10850af3fe1eSPaul E. McKenney 			break;
10860af3fe1eSPaul E. McKenney 	}
10870af3fe1eSPaul E. McKenney 	if (i == ARRAY_SIZE(torture_ops)) {
10880af3fe1eSPaul E. McKenney 		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
10890af3fe1eSPaul E. McKenney 			 torture_type);
10900af3fe1eSPaul E. McKenney 		pr_alert("lock-torture types:");
10910af3fe1eSPaul E. McKenney 		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
10920af3fe1eSPaul E. McKenney 			pr_alert(" %s", torture_ops[i]->name);
10930af3fe1eSPaul E. McKenney 		pr_alert("\n");
1094a36a9961SPaul E. McKenney 		firsterr = -EINVAL;
1095a36a9961SPaul E. McKenney 		goto unwind;
10960af3fe1eSPaul E. McKenney 	}
10972ce77d16SDavidlohr Bueso 
1098e5ace37dSHou Tao 	if (nwriters_stress == 0 &&
1099e5ace37dSHou Tao 	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
11002ce77d16SDavidlohr Bueso 		pr_alert("lock-torture: must run at least one locking thread\n");
11012ce77d16SDavidlohr Bueso 		firsterr = -EINVAL;
11022ce77d16SDavidlohr Bueso 		goto unwind;
11032ce77d16SDavidlohr Bueso 	}
11042ce77d16SDavidlohr Bueso 
11050af3fe1eSPaul E. McKenney 	if (nwriters_stress >= 0)
1106630952c2SDavidlohr Bueso 		cxt.nrealwriters_stress = nwriters_stress;
11070af3fe1eSPaul E. McKenney 	else
1108630952c2SDavidlohr Bueso 		cxt.nrealwriters_stress = 2 * num_online_cpus();
1109f095bfc0SDavidlohr Bueso 
11108c52cca0SWaiman Long 	if (cxt.cur_ops->init) {
11118c52cca0SWaiman Long 		cxt.cur_ops->init();
11128c52cca0SWaiman Long 		cxt.init_called = true;
11138c52cca0SWaiman Long 	}
11148c52cca0SWaiman Long 
1115f095bfc0SDavidlohr Bueso #ifdef CONFIG_DEBUG_MUTEXES
1116c5d3c8caSChuhong Yuan 	if (str_has_prefix(torture_type, "mutex"))
1117630952c2SDavidlohr Bueso 		cxt.debug_lock = true;
1118f095bfc0SDavidlohr Bueso #endif
1119095777c4SDavidlohr Bueso #ifdef CONFIG_DEBUG_RT_MUTEXES
1120c5d3c8caSChuhong Yuan 	if (str_has_prefix(torture_type, "rtmutex"))
1121095777c4SDavidlohr Bueso 		cxt.debug_lock = true;
1122095777c4SDavidlohr Bueso #endif
1123f095bfc0SDavidlohr Bueso #ifdef CONFIG_DEBUG_SPINLOCK
1124c5d3c8caSChuhong Yuan 	if ((str_has_prefix(torture_type, "spin")) ||
1125c5d3c8caSChuhong Yuan 	    (str_has_prefix(torture_type, "rw_lock")))
1126630952c2SDavidlohr Bueso 		cxt.debug_lock = true;
1127f095bfc0SDavidlohr Bueso #endif
11280af3fe1eSPaul E. McKenney 
11290af3fe1eSPaul E. McKenney 	/* Initialize the statistics so that each run gets its own numbers. */
11302ce77d16SDavidlohr Bueso 	if (nwriters_stress) {
1131d02c6b52SZou Wei 		lock_is_write_held = false;
11326da2ec56SKees Cook 		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
11336da2ec56SKees Cook 					 sizeof(*cxt.lwsa),
11346da2ec56SKees Cook 					 GFP_KERNEL);
1135630952c2SDavidlohr Bueso 		if (cxt.lwsa == NULL) {
1136630952c2SDavidlohr Bueso 			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
11370af3fe1eSPaul E. McKenney 			firsterr = -ENOMEM;
11380af3fe1eSPaul E. McKenney 			goto unwind;
11390af3fe1eSPaul E. McKenney 		}
11402ce77d16SDavidlohr Bueso 
1141630952c2SDavidlohr Bueso 		for (i = 0; i < cxt.nrealwriters_stress; i++) {
1142630952c2SDavidlohr Bueso 			cxt.lwsa[i].n_lock_fail = 0;
1143630952c2SDavidlohr Bueso 			cxt.lwsa[i].n_lock_acquired = 0;
11440af3fe1eSPaul E. McKenney 		}
11452ce77d16SDavidlohr Bueso 	}
11460af3fe1eSPaul E. McKenney 
1147630952c2SDavidlohr Bueso 	if (cxt.cur_ops->readlock) {
11484f6332c1SDavidlohr Bueso 		if (nreaders_stress >= 0)
1149630952c2SDavidlohr Bueso 			cxt.nrealreaders_stress = nreaders_stress;
11504f6332c1SDavidlohr Bueso 		else {
11514f6332c1SDavidlohr Bueso 			/*
11524f6332c1SDavidlohr Bueso 			 * By default distribute evenly the number of
11534f6332c1SDavidlohr Bueso 			 * readers and writers. We still run the same number
11544f6332c1SDavidlohr Bueso 			 * of threads as the writer-only locks default.
11554f6332c1SDavidlohr Bueso 			 */
11564f6332c1SDavidlohr Bueso 			if (nwriters_stress < 0) /* user doesn't care */
1157630952c2SDavidlohr Bueso 				cxt.nrealwriters_stress = num_online_cpus();
1158630952c2SDavidlohr Bueso 			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
11594f6332c1SDavidlohr Bueso 		}
11600af3fe1eSPaul E. McKenney 
11612ce77d16SDavidlohr Bueso 		if (nreaders_stress) {
11626da2ec56SKees Cook 			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
11636da2ec56SKees Cook 						 sizeof(*cxt.lrsa),
11646da2ec56SKees Cook 						 GFP_KERNEL);
1165630952c2SDavidlohr Bueso 			if (cxt.lrsa == NULL) {
1166630952c2SDavidlohr Bueso 				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
11674f6332c1SDavidlohr Bueso 				firsterr = -ENOMEM;
1168630952c2SDavidlohr Bueso 				kfree(cxt.lwsa);
1169c1c33b92SDavidlohr Bueso 				cxt.lwsa = NULL;
11704f6332c1SDavidlohr Bueso 				goto unwind;
11714f6332c1SDavidlohr Bueso 			}
11724f6332c1SDavidlohr Bueso 
1173630952c2SDavidlohr Bueso 			for (i = 0; i < cxt.nrealreaders_stress; i++) {
1174630952c2SDavidlohr Bueso 				cxt.lrsa[i].n_lock_fail = 0;
1175630952c2SDavidlohr Bueso 				cxt.lrsa[i].n_lock_acquired = 0;
11764f6332c1SDavidlohr Bueso 			}
11774f6332c1SDavidlohr Bueso 		}
11782ce77d16SDavidlohr Bueso 	}
1179c1c33b92SDavidlohr Bueso 
1180630952c2SDavidlohr Bueso 	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
11814f6332c1SDavidlohr Bueso 
11824f6332c1SDavidlohr Bueso 	/* Prepare torture context. */
11830af3fe1eSPaul E. McKenney 	if (onoff_interval > 0) {
11840af3fe1eSPaul E. McKenney 		firsterr = torture_onoff_init(onoff_holdoff * HZ,
11853a6cb58fSPaul E. McKenney 					      onoff_interval * HZ, NULL);
1186b3b3cc61SPaul E. McKenney 		if (torture_init_error(firsterr))
11870af3fe1eSPaul E. McKenney 			goto unwind;
11880af3fe1eSPaul E. McKenney 	}
11890af3fe1eSPaul E. McKenney 	if (shuffle_interval > 0) {
11900af3fe1eSPaul E. McKenney 		firsterr = torture_shuffle_init(shuffle_interval);
1191b3b3cc61SPaul E. McKenney 		if (torture_init_error(firsterr))
11920af3fe1eSPaul E. McKenney 			goto unwind;
11930af3fe1eSPaul E. McKenney 	}
11940af3fe1eSPaul E. McKenney 	if (shutdown_secs > 0) {
11950af3fe1eSPaul E. McKenney 		firsterr = torture_shutdown_init(shutdown_secs,
11960af3fe1eSPaul E. McKenney 						 lock_torture_cleanup);
1197b3b3cc61SPaul E. McKenney 		if (torture_init_error(firsterr))
11980af3fe1eSPaul E. McKenney 			goto unwind;
11990af3fe1eSPaul E. McKenney 	}
12000af3fe1eSPaul E. McKenney 	if (stutter > 0) {
1201ff3bf92dSPaul E. McKenney 		firsterr = torture_stutter_init(stutter, stutter);
1202b3b3cc61SPaul E. McKenney 		if (torture_init_error(firsterr))
12030af3fe1eSPaul E. McKenney 			goto unwind;
12040af3fe1eSPaul E. McKenney 	}
12050af3fe1eSPaul E. McKenney 
12062ce77d16SDavidlohr Bueso 	if (nwriters_stress) {
12076396bb22SKees Cook 		writer_tasks = kcalloc(cxt.nrealwriters_stress,
12086396bb22SKees Cook 				       sizeof(writer_tasks[0]),
12090af3fe1eSPaul E. McKenney 				       GFP_KERNEL);
12100af3fe1eSPaul E. McKenney 		if (writer_tasks == NULL) {
121181faa4f6SLi Zhijian 			TOROUT_ERRSTRING("writer_tasks: Out of memory");
12120af3fe1eSPaul E. McKenney 			firsterr = -ENOMEM;
12130af3fe1eSPaul E. McKenney 			goto unwind;
12140af3fe1eSPaul E. McKenney 		}
12152ce77d16SDavidlohr Bueso 	}
12164f6332c1SDavidlohr Bueso 
1217b6334320SJohn Stultz 	/* cap nested_locks to MAX_NESTED_LOCKS */
1218b6334320SJohn Stultz 	if (nested_locks > MAX_NESTED_LOCKS)
1219b6334320SJohn Stultz 		nested_locks = MAX_NESTED_LOCKS;
1220b6334320SJohn Stultz 
1221630952c2SDavidlohr Bueso 	if (cxt.cur_ops->readlock) {
12226396bb22SKees Cook 		reader_tasks = kcalloc(cxt.nrealreaders_stress,
12236396bb22SKees Cook 				       sizeof(reader_tasks[0]),
12244f6332c1SDavidlohr Bueso 				       GFP_KERNEL);
12254f6332c1SDavidlohr Bueso 		if (reader_tasks == NULL) {
122681faa4f6SLi Zhijian 			TOROUT_ERRSTRING("reader_tasks: Out of memory");
1227f4dbba59SYang Shi 			kfree(writer_tasks);
1228f4dbba59SYang Shi 			writer_tasks = NULL;
12294f6332c1SDavidlohr Bueso 			firsterr = -ENOMEM;
12304f6332c1SDavidlohr Bueso 			goto unwind;
12314f6332c1SDavidlohr Bueso 		}
12324f6332c1SDavidlohr Bueso 	}
12334f6332c1SDavidlohr Bueso 
12344f6332c1SDavidlohr Bueso 	/*
12354f6332c1SDavidlohr Bueso 	 * Create the kthreads and start torturing (oh, those poor little locks).
12364f6332c1SDavidlohr Bueso 	 *
12374f6332c1SDavidlohr Bueso 	 * TODO: Note that we interleave writers with readers, giving writers a
12384f6332c1SDavidlohr Bueso 	 * slight advantage, by creating its kthread first. This can be modified
12394f6332c1SDavidlohr Bueso 	 * for very specific needs, or even let the user choose the policy, if
12404f6332c1SDavidlohr Bueso 	 * ever wanted.
12414f6332c1SDavidlohr Bueso 	 */
1242630952c2SDavidlohr Bueso 	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
1243630952c2SDavidlohr Bueso 		    j < cxt.nrealreaders_stress; i++, j++) {
1244630952c2SDavidlohr Bueso 		if (i >= cxt.nrealwriters_stress)
12454f6332c1SDavidlohr Bueso 			goto create_reader;
12464f6332c1SDavidlohr Bueso 
12474f6332c1SDavidlohr Bueso 		/* Create writer. */
1248*5d248bb3SDietmar Eggemann 		firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
1249*5d248bb3SDietmar Eggemann 						     writer_tasks[i],
1250*5d248bb3SDietmar Eggemann 						     writer_fifo ? sched_set_fifo : NULL);
1251b3b3cc61SPaul E. McKenney 		if (torture_init_error(firsterr))
12520af3fe1eSPaul E. McKenney 			goto unwind;
12534f6332c1SDavidlohr Bueso 
12544f6332c1SDavidlohr Bueso 	create_reader:
1255630952c2SDavidlohr Bueso 		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
12564f6332c1SDavidlohr Bueso 			continue;
12574f6332c1SDavidlohr Bueso 		/* Create reader. */
1258630952c2SDavidlohr Bueso 		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
12594f6332c1SDavidlohr Bueso 						  reader_tasks[j]);
1260b3b3cc61SPaul E. McKenney 		if (torture_init_error(firsterr))
12614f6332c1SDavidlohr Bueso 			goto unwind;
12620af3fe1eSPaul E. McKenney 	}
12630af3fe1eSPaul E. McKenney 	if (stat_interval > 0) {
12640af3fe1eSPaul E. McKenney 		firsterr = torture_create_kthread(lock_torture_stats, NULL,
12650af3fe1eSPaul E. McKenney 						  stats_task);
1266b3b3cc61SPaul E. McKenney 		if (torture_init_error(firsterr))
12670af3fe1eSPaul E. McKenney 			goto unwind;
12680af3fe1eSPaul E. McKenney 	}
12690af3fe1eSPaul E. McKenney 	torture_init_end();
12700af3fe1eSPaul E. McKenney 	return 0;
12710af3fe1eSPaul E. McKenney 
12720af3fe1eSPaul E. McKenney unwind:
12730af3fe1eSPaul E. McKenney 	torture_init_end();
12740af3fe1eSPaul E. McKenney 	lock_torture_cleanup();
12756b74fa0aSPaul E. McKenney 	if (shutdown_secs) {
12766b74fa0aSPaul E. McKenney 		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
12776b74fa0aSPaul E. McKenney 		kernel_power_off();
12786b74fa0aSPaul E. McKenney 	}
12790af3fe1eSPaul E. McKenney 	return firsterr;
12800af3fe1eSPaul E. McKenney }
12810af3fe1eSPaul E. McKenney 
/*
 * Module entry/exit points: lock_torture_init() spawns the writer/reader/stats
 * kthreads and arms the torture framework; lock_torture_cleanup() (also
 * registered as the shutdown handler via torture_shutdown_init()) stops the
 * kthreads and tears everything down on module unload or error unwind.
 */
12820af3fe1eSPaul E. McKenney module_init(lock_torture_init);
12830af3fe1eSPaul E. McKenney module_exit(lock_torture_cleanup);
1284