/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/locking/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USED_READ,
	LOCK_USAGE_STATES
};
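
/*
 * Illustrative expansion (not compiled): with the two states currently
 * listed in lockdep_states.h (HARDIRQ and SOFTIRQ), the enum above
 * expands to:
 *
 *	LOCK_USED_IN_HARDIRQ		= 0,
 *	LOCK_USED_IN_HARDIRQ_READ	= 1,
 *	LOCK_ENABLED_HARDIRQ		= 2,
 *	LOCK_ENABLED_HARDIRQ_READ	= 3,
 *	LOCK_USED_IN_SOFTIRQ		= 4,
 *	LOCK_USED_IN_SOFTIRQ_READ	= 5,
 *	LOCK_ENABLED_SOFTIRQ		= 6,
 *	LOCK_ENABLED_SOFTIRQ_READ	= 7,
 *	LOCK_USED			= 8,
 *	LOCK_USED_READ			= 9,
 *	LOCK_USAGE_STATES		= 10
 */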

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
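
/*
 * A sketch of the encoding (the shift by 2 below is implied by the enum
 * layout rather than spelled out as a named constant): bit 0 selects the
 * _READ variant, bit 1 distinguishes ENABLED from USED_IN, and the
 * remaining bits index the state. E.g., using the expansion above:
 *
 *	bit = LOCK_ENABLED_SOFTIRQ_READ             == 7
 *	bit & LOCK_USAGE_READ_MASK                  -> 1 (a _READ usage)
 *	bit & LOCK_USAGE_DIR_MASK                   -> 2 (ENABLED, not USED_IN)
 *	(bit & LOCK_USAGE_STATE_MASK) >> 2          -> 1 (state 1 == SOFTIRQ)
 */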
29 
30 /*
31  * Usage-state bitmasks:
32  */
33 #define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),
34 
35 enum {
36 #define LOCKDEP_STATE(__STATE)						\
37 	__LOCKF(USED_IN_##__STATE)					\
38 	__LOCKF(USED_IN_##__STATE##_READ)				\
39 	__LOCKF(ENABLED_##__STATE)					\
40 	__LOCKF(ENABLED_##__STATE##_READ)
41 #include "lockdep_states.h"
42 #undef LOCKDEP_STATE
43 	__LOCKF(USED)
44 	__LOCKF(USED_READ)
45 };
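
/*
 * Again purely for illustration: each LOCKF_* constant is the matching
 * LOCK_* usage bit turned into a mask, e.g.:
 *
 *	LOCKF_USED_IN_HARDIRQ	= 0x001		(1 << LOCK_USED_IN_HARDIRQ)
 *	LOCKF_ENABLED_HARDIRQ	= 0x004		(1 << LOCK_ENABLED_HARDIRQ)
 *	LOCKF_USED_IN_SOFTIRQ	= 0x010		(1 << LOCK_USED_IN_SOFTIRQ)
 *	LOCKF_USED		= 0x100		(1 << LOCK_USED)
 *	LOCKF_USED_READ		= 0x200		(1 << LOCK_USED_READ)
 */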

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE
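
/*
 * With the current two states, the stacked includes above evaluate to,
 * for instance:
 *
 *	LOCKF_ENABLED_IRQ = LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ | 0;
 *
 * i.e. "this class was ever held with hardirqs or softirqs enabled";
 * LOCKF_USED_IN_IRQ likewise means "ever acquired from hardirq or
 * softirq context", and the _READ variants track read acquisitions.
 */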

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within its 32MB size limit for the kernel. With
 * CONFIG_LOCKDEP enabled we could exceed this limit and cause system
 * boot-up problems. So, reduce the static allocations for the lockdep
 * related structures so that everything fits within the current limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to all currently held locks' own
 * dependency tables (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#define STACK_TRACE_HASH_SIZE	8192
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#define STACK_TRACE_HASH_SIZE	16384
#endif

/*
 * Bit definitions for lock_chain.irq_context
 */
#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
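
/*
 * The factor of 5 above is a sizing heuristic: the chain-hlocks pool
 * budgets for an average of about five held locks per lock chain. If
 * the pool is ever exhausted, lockdep disables itself and reports
 * "MAX_LOCKDEP_CHAIN_HLOCKS too low!".
 */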

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
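
/*
 * get_usage_chars() renders class->usage_mask as a short string (two
 * characters per state: write then read usage), as printed in
 * /proc/lockdep and in lockdep splats. The legend, roughly (see
 * Documentation/locking/lockdep-design.rst for the authoritative one):
 *
 *	'.'  acquired while irqs disabled and not in irq context
 *	'-'  acquired in irq context
 *	'+'  acquired with irqs enabled
 *	'?'  acquired in irq context with irqs enabled
 */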

extern const char *__get_key_name(const struct lockdep_subclass_key *key,
				  char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_zapped_classes;
extern unsigned long nr_zapped_lock_chains;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int nr_free_chain_hlocks;
extern unsigned int nr_lost_chain_hlocks;
extern unsigned int nr_large_chain_blocks;

extern unsigned int max_lockdep_depth;
extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#ifdef CONFIG_TRACE_IRQFLAGS
u64 lockdep_stack_trace_count(void);
u64 lockdep_stack_hash_count(void);
#endif
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-cpu as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	unsigned long  chain_lookup_hits;
	unsigned int   chain_lookup_misses;
	unsigned long  hardirqs_on_events;
	unsigned long  hardirqs_off_events;
	unsigned long  redundant_hardirqs_on;
	unsigned long  redundant_hardirqs_off;
	unsigned long  softirqs_on_events;
	unsigned long  softirqs_off_events;
	unsigned long  redundant_softirqs_on;
	unsigned long  redundant_softirqs_off;
	int            nr_unused_locks;
	unsigned int   nr_redundant_checks;
	unsigned int   nr_redundant;
	unsigned int   nr_cyclic_checks;
	unsigned int   nr_find_usage_forwards_checks;
	unsigned int   nr_find_usage_backwards_checks;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr)

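/*
 * The counters are updated with non-atomic per-cpu ops; that is only
 * safe because updaters run with IRQs disabled (hence the
 * WARN_ON_ONCE() below). Readers just sum the per-cpu values and may
 * observe slightly stale totals.
 */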
#define debug_atomic_inc(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_dec(ptr)			do {		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
} while (0)

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
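
/*
 * Typical use, a sketch along the lines of the lockdep /proc code:
 *
 *	seq_printf(m, " chain lookup hits: %11llu\n",
 *		   debug_atomic_read(chain_lookup_hits));
 *
 * The per-cpu summation is not atomic versus concurrent updates; the
 * result is a statistical snapshot, not a synchronized read.
 */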

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif