/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
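
/*
 * Illustrative sketch only; the helper below is hypothetical and not part
 * of this header's API. In the per-state enum layout above, bit 0 of a
 * usage bit selects the _READ variant and bit 1 the direction (USED_IN
 * vs. ENABLED), so the masks decompose a usage bit like this (LOCK_USED
 * stands outside this per-state scheme):
 */
static inline int lock_usage_is_read(enum lock_usage_bit bit)
{
	/* non-zero for the _READ variants, e.g. LOCK_ENABLED_HARDIRQ_READ */
	return bit & LOCK_USAGE_READ_MASK;
}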

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
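
/*
 * A usage bit maps to its LOCKF_ counterpart by a plain shift, mirroring
 * the __LOCKF() definition above. A minimal sketch (hypothetical helper,
 * not part of this header's API):
 */
static inline unsigned long lock_flag_of(enum lock_usage_bit bit)
{
	return 1UL << bit;	/* e.g. LOCKF_USED == 1UL << LOCK_USED */
}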

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE
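
/*
 * Illustrative sketch, not part of this file: the aggregate masks above
 * let a checker test a class's accumulated usage_mask in one go, e.g.
 * "was this lock ever acquired in any irq context?". The helper name
 * below is hypothetical:
 */
static inline int class_ever_used_in_irq(struct lock_class *class)
{
	return !!(class->usage_mask &
		  (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ));
}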

/*
 * CONFIG_LOCKDEP_SMALL is defined on sparc, where .text, .data and .bss
 * must fit within the kernel's 32MB limit. With CONFIG_LOCKDEP enabled we
 * could exceed that limit and break boot, so reduce the static allocations
 * for lockdep's data structures until everything fits.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by adding
 * each to-be-taken lock to every currently held lock's dependency list
 * (if it's not there yet), and we check them for lock order conflicts
 * and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
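
/*
 * E.g. with MAX_LOCKDEP_CHAINS_BITS == 16 this works out to
 * 1UL << 16 == 65536 chains and 65536 * 5 == 327680 chain_hlocks
 * entries, i.e. a budget of roughly five held locks per dependency
 * chain on average.
 */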

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
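
/*
 * The usage string holds one character per {USED_IN,ENABLED} x {,_READ}
 * combination plus a terminating '\0', hence 1 + LOCK_USAGE_STATES/2
 * above. Lockdep reports render it as e.g. "{+.+.}", where (per the
 * lockdep design documentation) '-' means acquired in that irq context,
 * '+' acquired with that irq enabled, '?' both and '.' neither.
 */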

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We keep them per-CPU, since they are accessed in the fast path and we
 * want to avoid too much cache-line bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_redundant_checks;
	int	nr_redundant;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr)

#define debug_atomic_inc(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_dec(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
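
/*
 * Usage sketch (illustrative only): a fast-path site bumps a counter with
 * irqs disabled, and a reporting path sums it over all possible CPUs:
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *	...
 *	seq_printf(m, "%llu\n", debug_atomic_read(chain_lookup_hits));
 */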

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif