/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(),
	 * since that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
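/*
 * Example (illustrative sketch): lockdep_copy_map() lets a caller keep a
 * stable on-stack copy of a dep_map while the object embedding the
 * original map may be freed from under it. The pattern below is modeled
 * on process_one_work(); the "item" object and its callback are made up
 * for illustration:
 *
 *	struct lockdep_map map;
 *
 *	lockdep_copy_map(&map, &item->lockdep_map);
 *	lock_map_acquire(&map);
 *	item->func(item);		// may free 'item'
 *	lock_map_release(&map);		// safe: uses the on-stack copy
 */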
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in the lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * that every unique dependency path that can occur in the
	 * system maps, as far as possible, to a unique hash value -
	 * hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */
#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)
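/*
 * Example (illustrative): lockdep_off()/lockdep_on() hide a region from
 * the validator; because the recursion counter is adjusted by
 * LOCKDEP_OFF rather than set, the pair nests correctly:
 *
 *	lockdep_off();
 *	do_unmodelable_locking();	// hypothetical; invisible to lockdep
 *	lockdep_on();
 *
 * Prefer proper annotations (classes, subclasses, nest_lock) over
 * switching the validator off.
 */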
extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
		       struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)						\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,			\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
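/*
 * Example (illustrative): two instances of the same lock type normally
 * share one class, so nesting them looks like a recursive deadlock. A
 * separate key splits the class; the names below are hypothetical:
 *
 *	static struct lock_class_key parent_class_key;
 *
 *	spin_lock_init(&parent->lock);
 *	lockdep_set_class(&parent->lock, &parent_class_key);
 *
 * Alternatively, keep one class and annotate the nesting depth at
 * acquire time with a subclass:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */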
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_write(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_none_held_once()	do {				\
		WARN_ON_ONCE(debug_locks && current->lockdep_depth);	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
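/*
 * Example (illustrative): lockdep_assert_held() makes a function's
 * locking contract explicit and checkable; 'foo' is a made-up type:
 *
 *	static void foo_update_stats(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);	// caller must hold f->lock
 *		f->nr_updates++;
 *	}
 *
 * With lockdep enabled this WARNs when the current task does not hold
 * f->lock; without lockdep it only evaluates its argument.
 */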
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined; callers should
 * wrap such calls in #ifdef CONFIG_LOCKDEP themselves.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

/*
 * Dummy forward declarations, allow users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)
#define lockdep_assert_none_held_once()		do { } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
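/*
 * Example (illustrative): a statically initialized map can annotate a
 * construct that is not backed by a real lock, using the map's own
 * address as the key (as e.g. printk's console_owner_dep_map does);
 * 'sample_map' is a made-up name:
 *
 *	static struct lockdep_map sample_map =
 *		STATIC_LOCKDEP_MAP_INIT("sample_map", &sample_map);
 *
 *	lock_map_acquire(&sample_map);
 *	// ... section tracked as if a lock were held ...
 *	lock_map_release(&sample_map);
 */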
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * With lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
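/*
 * Example (illustrative): taking two locks of the same class in a fixed
 * parent -> child order; the types are hypothetical:
 *
 *	mutex_lock(&parent->mutex);		// subclass 0
 *	mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->mutex);
 *	mutex_unlock(&parent->mutex);
 *
 * Without the _nested annotation the second acquire would be reported
 * as a possible recursive deadlock, since both locks share a class.
 */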
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
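/*
 * Example (illustrative): a function that only rarely takes a lock can
 * still expose the would-be dependency on every call, so coverage does
 * not depend on hitting the rare path; 'foo' is a made-up type:
 *
 *	void foo_maybe_flush(struct foo *f)
 *	{
 *		might_lock(&f->lock);
 *		if (atomic_read(&f->dirty)) {
 *			mutex_lock(&f->lock);
 *			foo_flush(f);
 *			mutex_unlock(&f->lock);
 *		}
 *	}
 */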
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
#define lockdep_assert_in_softirq()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled			&&		\
		     (!in_softirq() || in_irq() || in_nmi()));		\
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config), \
			  "Not in threaded context on PREEMPT_RT as expected\n"); \
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */