/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>

struct task_struct;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 *     that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;
	u16				distance;
	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base	    : 24;
	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;				/* 32 bits */
	unsigned int pin_count;
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);

/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)
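
/*
 * Illustrative sketch only (not part of the API): what the split counter
 * implies for a reader of the field. lockdep_off()/lockdep_on() below
 * add/subtract LOCKDEP_OFF, so the bits above LOCKDEP_RECURSION_BITS count
 * "off" nesting while the low bits track recursion into lockdep itself:
 *
 *	unsigned int rec = current->lockdep_recursion;
 *
 *	if (rec & LOCKDEP_RECURSION_MASK)	// recursing inside lockdep
 *		...;
 *	if (rec & ~LOCKDEP_RECURSION_MASK)	// lockdep_off() is in effect
 *		...;
 */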
/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */

#define lockdep_off()					\
do {							\
	current->lockdep_recursion += LOCKDEP_OFF;	\
} while (0)

#define lockdep_on()					\
do {							\
	current->lockdep_recursion -= LOCKDEP_OFF;	\
} while (0)

extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, short inner, short outer);

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, short inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
				    struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,		\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)

/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);
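
/*
 * Illustrative sketch only (my_raw_lock, my_arch_lock and my_arch_unlock are
 * hypothetical): how a locking primitive typically forwards its events to the
 * hooks above, picking "read" and "check" as documented in the lock_acquire()
 * comment:
 *
 *	void my_lock(struct my_raw_lock *l)
 *	{
 *		// exclusive write-acquire, full validation, not a trylock
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		my_arch_lock(l);
 *	}
 *
 *	void my_unlock(struct my_raw_lock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		my_arch_unlock(l);
 *	}
 */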
/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {					\
		WARN_ON(debug_locks && !lockdep_is_held(l));		\
	} while (0)

#define lockdep_assert_held_write(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
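
/*
 * Illustrative sketch only (my_data and my_update are hypothetical): the
 * assertion helpers above let internal functions document and verify their
 * locking requirements instead of relying on comments alone:
 *
 *	static void my_update(struct my_data *d)
 *	{
 *		lockdep_assert_held(&d->lock);	// caller must hold d->lock
 *		d->counter++;
 *	}
 */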
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */

enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
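
/*
 * Illustrative sketch only (my_mutex_trylock and __my_mutex_lock_slowpath are
 * hypothetical): LOCK_CONTENDED() above wraps a primitive's slowpath so that,
 * with CONFIG_LOCK_STAT, the contention point and the eventual acquisition
 * are recorded; the fast trylock path avoids the bookkeeping entirely:
 *
 *	void my_mutex_lock(struct my_mutex *m)
 *	{
 *		// lockdep annotation first (see lock_acquire() above), then:
 *		LOCK_CONTENDED(m, my_mutex_trylock, __my_mutex_lock_slowpath);
 *	}
 */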
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,	\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
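
/*
 * Illustrative sketch only (my_dev and my_prepare are hypothetical):
 * might_lock() lets a function that only takes a lock on some paths still
 * teach lockdep about the dependency on every call, so inversions are caught
 * even when the conditional branch is rarely exercised:
 *
 *	static void my_prepare(struct my_dev *dev, bool sync)
 *	{
 *		might_lock(&dev->lock);		// may take dev->lock below
 *		if (sync) {
 *			mutex_lock(&dev->lock);
 *			...
 *			mutex_unlock(&dev->lock);
 *		}
 *	}
 */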

DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)

#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
#endif

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */