xref: /openbmc/linux/include/linux/lockdep.h (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int sync:1;
	unsigned int references:11;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}
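
/*
 * Usage sketch (illustrative only; "struct foo" and foo_class_key are
 * hypothetical, not part of this header): embed a lockdep_map in a
 * custom primitive and initialize it once, before first use:
 *
 *	struct foo {
 *		arch_spinlock_t		raw;
 *		struct lockdep_map	dep_map;
 *	};
 *
 *	static struct lock_class_key	foo_class_key;
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		lockdep_init_map(&f->dep_map, "foo", &foo_class_key, 0);
 *	}
 */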

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks, so that the validator would otherwise
 * get the scope of dependencies wrong: they are either too broad (they
 * need a class-split) or too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\
			      (lock)->dep_map.wait_type_inner,		\
			      (lock)->dep_map.wait_type_outer,		\
			      (lock)->dep_map.lock_type)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
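
/*
 * Example (a hedged sketch; bdev_lock_key is a made-up name): give a
 * subset of objects their own class, so the validator does not conflate
 * their locking rules with those of every other object in the class:
 *
 *	static struct lock_class_key bdev_lock_key;
 *
 *	lockdep_set_class(&inode->i_rwsem, &bdev_lock_key);
 */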

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
		      int read, int check, struct lockdep_map *nest_lock,
		      unsigned long ip);
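
/*
 * Annotation sketch for a home-grown primitive (hypothetical foo_lock(),
 * continuing the "struct foo" example above): report an exclusive,
 * fully-checked acquire before taking the real lock, and report the
 * release before dropping it:
 *
 *	static void foo_lock(struct foo *f)
 *	{
 *		lock_acquire(&f->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		arch_spin_lock(&f->raw);
 *	}
 *
 *	static void foo_unlock(struct foo *f)
 *	{
 *		lock_release(&f->dep_map, _RET_IP_);
 *		arch_spin_unlock(&f->raw);
 *	}
 */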

/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN	-1
#define LOCK_STATE_NOT_HELD	0
#define LOCK_STATE_HELD		1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

#define lock_set_novalidate_class(l, n, i) \
	lock_set_class(l, n, &__lockdep_no_validate__, 0, i)

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert(cond)		\
	do { WARN_ON(debug_locks && !(cond)); } while (0)

#define lockdep_assert_once(cond)	\
	do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)

#define lockdep_assert_held(l)		\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_not_held(l)	\
	lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)

#define lockdep_assert_held_write(l)	\
	lockdep_assert(lockdep_is_held_type(l, 0))

#define lockdep_assert_held_read(l)	\
	lockdep_assert(lockdep_is_held_type(l, 1))

#define lockdep_assert_held_once(l)		\
	lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)

#define lockdep_assert_none_held_once()		\
	lockdep_assert_once(!current->lockdep_depth)
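
/*
 * Typical use (sketch; foo_update() and the dev->lock member are
 * hypothetical): document and enforce a function's locking contract
 * at its head:
 *
 *	static void foo_update(struct foo_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		...
 *	}
 */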

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))

/*
 * Must use lock_map_acquire_try() with override maps to avoid
 * lockdep thinking they participate in the block chain.
 */
#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map _name = {			\
		.name = #_name "-wait-type-override",	\
		.wait_type_inner = _wait_type,		\
		.lock_type = LD_LOCK_WAIT_OVERRIDE, }
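
/*
 * Intended pattern (a minimal sketch; pm_wait_map is a hypothetical
 * name): acquire the override map with lock_map_acquire_try() so it
 * creates no dependencies of its own and only overrides the wait-type
 * checks for locks taken while it is held:
 *
 *	DEFINE_WAIT_OVERRIDE_MAP(pm_wait_map, LD_WAIT_SLEEP);
 *
 *	lock_map_acquire_try(&pm_wait_map);
 *	...	(locks taken here are checked against LD_WAIT_SLEEP)
 *	lock_map_release(&pm_wait_map);
 */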

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, key, s, i)	do { (void)(key); } while (0)
# define lock_set_novalidate_class(l, n, i)	do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result is not well defined and the caller
 * should rather #ifdef the call site themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allowing users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert(c)			do { } while (0)
#define lockdep_assert_once(c)			do { } while (0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_not_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
	struct lockdep_map __maybe_unused _name = {}

#endif /* !CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);

#define lock_set_cmp_fn(lock, ...)	lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
#else
#define lock_set_cmp_fn(lock, ...)	do { } while (0)
#endif

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
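
/*
 * Example (a sketch; the names are hypothetical): a file-scope map used
 * to annotate a code region rather than a data structure:
 *
 *	static struct lock_class_key flush_key;
 *	static struct lockdep_map flush_map =
 *		STATIC_LOCKDEP_MAP_INIT("flush", &flush_key);
 */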

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
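
/*
 * Sketch of how a primitive typically wires this up (hypothetical
 * foo_trylock()/__foo_lock_slowpath() helpers for the "struct foo"
 * example above): the trylock probe detects contention, so the
 * lock_contended() and lock_acquired() events bracket only the
 * actual wait:
 *
 *	void foo_lock(struct foo *f)
 *	{
 *		mutex_acquire(&f->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(f, foo_trylock, __foo_lock_slowpath);
 *	}
 */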

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
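
/*
 * Example (sketch; a and b are two instances of the same hypothetical
 * structure): taking two locks of the same class in a stable, documented
 * order, without triggering a false self-deadlock report:
 *
 *	mutex_lock(&a->lock);
 *	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 */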

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_try(l)			lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
#define lock_map_sync(l)			lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)
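
/*
 * Sketch (work_map is a hypothetical name): modelling a "wait for work
 * to finish" dependency, in the style of the workqueue annotations, by
 * acquiring a bare lockdep_map both around execution of the work and
 * around the wait in the flush path:
 *
 *	lock_map_acquire(&work_map);	(in the worker, around the work)
 *	lock_map_release(&work_map);
 *
 *	lock_map_acquire(&work_map);	(in flush, before waiting)
 *	lock_map_release(&work_map);
 *
 * Any lock held while flushing that is also taken by the work itself
 * then shows up as an inversion.
 */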

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
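
/*
 * Sketch (hypothetical helper; foo_alloc_buffer() takes dev->mutex): a
 * function that only takes a lock on its slow path can still declare
 * the dependency unconditionally, so lockdep sees it even when only
 * the fast path is ever exercised:
 *
 *	static void *foo_get_buffer(struct foo_dev *dev)
 *	{
 *		might_lock(&dev->mutex);
 *
 *		return READ_ONCE(dev->buf) ?: foo_alloc_buffer(dev);
 *	}
 */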

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_no_hardirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
					   !this_cpu_read(hardirqs_enabled))); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)
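
/*
 * Sketch (hypothetical per-CPU counter): a helper touching per-CPU
 * state can document and enforce its context requirement:
 *
 *	static void foo_stat_inc(void)
 *	{
 *		lockdep_assert_preemption_disabled();
 *		__this_cpu_inc(foo_stat_count);
 *	}
 */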

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled			&&		\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
# define lockdep_assert_no_hardirq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */