/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE |
static const unsigned long LOCKF_ENABLED_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE |
static const unsigned long LOCKF_USED_IN_IRQ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_ENABLED_##__STATE##_READ |
static const unsigned long LOCKF_ENABLED_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKDEP_STATE(__STATE)	LOCKF_USED_IN_##__STATE##_READ |
static const unsigned long LOCKF_USED_IN_IRQ_READ =
#include "lockdep_states.h"
	0;
#undef LOCKDEP_STATE

#define LOCKF_ENABLED_IRQ_ALL (LOCKF_ENABLED_IRQ | LOCKF_ENABLED_IRQ_READ)
#define LOCKF_USED_IN_IRQ_ALL (LOCKF_USED_IN_IRQ | LOCKF_USED_IN_IRQ_READ)

#define LOCKF_IRQ (LOCKF_ENABLED_IRQ | LOCKF_USED_IN_IRQ)
#define LOCKF_IRQ_READ (LOCKF_ENABLED_IRQ_READ | LOCKF_USED_IN_IRQ_READ)
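/*
 * For illustration, and assuming lockdep_states.h lists the states
 * HARDIRQ and SOFTIRQ (see that header for the authoritative list),
 * the machinery above expands to:
 *
 *	enum lock_usage_bit {
 *		LOCK_USED_IN_HARDIRQ = 0,
 *		LOCK_USED_IN_HARDIRQ_READ = 1,
 *		LOCK_ENABLED_HARDIRQ = 2,
 *		LOCK_ENABLED_HARDIRQ_READ = 3,
 *		LOCK_USED_IN_SOFTIRQ = 4,
 *		LOCK_USED_IN_SOFTIRQ_READ = 5,
 *		LOCK_ENABLED_SOFTIRQ = 6,
 *		LOCK_ENABLED_SOFTIRQ_READ = 7,
 *		LOCK_USED = 8,
 *		LOCK_USAGE_STATES = 9
 *	};
 *
 * Within each state's group of four, bit 0 is the READ distinction
 * (LOCK_USAGE_READ_MASK) and bit 1 is the USED_IN vs. ENABLED
 * direction (LOCK_USAGE_DIR_MASK). Each LOCKF_* flag is 1 << its
 * LOCK_* counterpart, so e.g. LOCKF_ENABLED_IRQ evaluates to
 * LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ == (1 << 2) | (1 << 6).
 */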
/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit in the required 32MB limit for the kernel.
 * With CONFIG_LOCKDEP we could go over this limit and cause system
 * boot-up problems. So, reduce the static allocations for
 * lockdep-related structures so that everything fits within the
 * current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to every currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
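/*
 * For scale: with the default MAX_LOCKDEP_CHAINS_BITS of 16 this gives
 * 1UL << 16 == 65536 lock chains and 65536 * 5 == 327680 chain-hlock
 * slots, i.e. room for an average of five held locks per recorded
 * chain; the CONFIG_LOCKDEP_SMALL variant halves the chain table.
 */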
extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We keep them per-CPU as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_redundant_checks;
	int	nr_redundant;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif
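/*
 * Usage sketch (illustrative only; the real call sites live in
 * lockdep.c and lockdep_proc.c):
 *
 *	debug_atomic_inc(nr_cyclic_checks);
 *
 *	seq_printf(m, "cyclic checks: %llu\n",
 *		   debug_atomic_read(nr_cyclic_checks));
 *
 * The increments assert irqs_disabled() because the plain (non-atomic)
 * per-CPU ops are only safe against preemption and interrupts, and
 * debug_atomic_read() merely sums the per-CPU counters, so the total
 * is a statistics-quality snapshot rather than an atomic sum.
 */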