// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 * class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *lockdep_selftest_task_struct;

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
		/*
		 * The lockdep graph lock isn't locked while we expect it to
		 * be, we're confused now, bye!
114 */ 115 return DEBUG_LOCKS_WARN_ON(1); 116 } 117 118 current->lockdep_recursion--; 119 arch_spin_unlock(&lockdep_lock); 120 return 0; 121 } 122 123 /* 124 * Turn lock debugging off and return with 0 if it was off already, 125 * and also release the graph lock: 126 */ 127 static inline int debug_locks_off_graph_unlock(void) 128 { 129 int ret = debug_locks_off(); 130 131 arch_spin_unlock(&lockdep_lock); 132 133 return ret; 134 } 135 136 unsigned long nr_list_entries; 137 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; 138 static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES); 139 140 /* 141 * All data structures here are protected by the global debug_lock. 142 * 143 * nr_lock_classes is the number of elements of lock_classes[] that is 144 * in use. 145 */ 146 #define KEYHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) 147 #define KEYHASH_SIZE (1UL << KEYHASH_BITS) 148 static struct hlist_head lock_keys_hash[KEYHASH_SIZE]; 149 unsigned long nr_lock_classes; 150 #ifndef CONFIG_DEBUG_LOCKDEP 151 static 152 #endif 153 struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; 154 155 static inline struct lock_class *hlock_class(struct held_lock *hlock) 156 { 157 if (!hlock->class_idx) { 158 /* 159 * Someone passed in garbage, we give up. 160 */ 161 DEBUG_LOCKS_WARN_ON(1); 162 return NULL; 163 } 164 return lock_classes + hlock->class_idx - 1; 165 } 166 167 #ifdef CONFIG_LOCK_STAT 168 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats); 169 170 static inline u64 lockstat_clock(void) 171 { 172 return local_clock(); 173 } 174 175 static int lock_point(unsigned long points[], unsigned long ip) 176 { 177 int i; 178 179 for (i = 0; i < LOCKSTAT_POINTS; i++) { 180 if (points[i] == 0) { 181 points[i] = ip; 182 break; 183 } 184 if (points[i] == ip) 185 break; 186 } 187 188 return i; 189 } 190 191 static void lock_time_inc(struct lock_time *lt, u64 time) 192 { 193 if (time > lt->max) 194 lt->max = time; 195 196 if (time < lt->min || !lt->nr) 197 lt->min = time; 198 199 lt->total += time; 200 lt->nr++; 201 } 202 203 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst) 204 { 205 if (!src->nr) 206 return; 207 208 if (src->max > dst->max) 209 dst->max = src->max; 210 211 if (src->min < dst->min || !dst->nr) 212 dst->min = src->min; 213 214 dst->total += src->total; 215 dst->nr += src->nr; 216 } 217 218 struct lock_class_stats lock_stats(struct lock_class *class) 219 { 220 struct lock_class_stats stats; 221 int cpu, i; 222 223 memset(&stats, 0, sizeof(struct lock_class_stats)); 224 for_each_possible_cpu(cpu) { 225 struct lock_class_stats *pcs = 226 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; 227 228 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++) 229 stats.contention_point[i] += pcs->contention_point[i]; 230 231 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++) 232 stats.contending_point[i] += pcs->contending_point[i]; 233 234 lock_time_add(&pcs->read_waittime, &stats.read_waittime); 235 lock_time_add(&pcs->write_waittime, &stats.write_waittime); 236 237 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime); 238 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime); 239 240 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++) 241 stats.bounces[i] += pcs->bounces[i]; 242 } 243 244 return stats; 245 } 246 247 void clear_lock_stats(struct lock_class *class) 248 { 249 int cpu; 250 251 for_each_possible_cpu(cpu) { 252 struct lock_class_stats *cpu_stats = 253 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; 254 255 
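		/* Reset this CPU's copy of the statistics for this class. */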
memset(cpu_stats, 0, sizeof(struct lock_class_stats)); 256 } 257 memset(class->contention_point, 0, sizeof(class->contention_point)); 258 memset(class->contending_point, 0, sizeof(class->contending_point)); 259 } 260 261 static struct lock_class_stats *get_lock_stats(struct lock_class *class) 262 { 263 return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes]; 264 } 265 266 static void lock_release_holdtime(struct held_lock *hlock) 267 { 268 struct lock_class_stats *stats; 269 u64 holdtime; 270 271 if (!lock_stat) 272 return; 273 274 holdtime = lockstat_clock() - hlock->holdtime_stamp; 275 276 stats = get_lock_stats(hlock_class(hlock)); 277 if (hlock->read) 278 lock_time_inc(&stats->read_holdtime, holdtime); 279 else 280 lock_time_inc(&stats->write_holdtime, holdtime); 281 } 282 #else 283 static inline void lock_release_holdtime(struct held_lock *hlock) 284 { 285 } 286 #endif 287 288 /* 289 * We keep a global list of all lock classes. The list is only accessed with 290 * the lockdep spinlock lock held. free_lock_classes is a list with free 291 * elements. These elements are linked together by the lock_entry member in 292 * struct lock_class. 293 */ 294 LIST_HEAD(all_lock_classes); 295 static LIST_HEAD(free_lock_classes); 296 297 /** 298 * struct pending_free - information about data structures about to be freed 299 * @zapped: Head of a list with struct lock_class elements. 300 * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements 301 * are about to be freed. 302 */ 303 struct pending_free { 304 struct list_head zapped; 305 DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS); 306 }; 307 308 /** 309 * struct delayed_free - data structures used for delayed freeing 310 * 311 * A data structure for delayed freeing of data structures that may be 312 * accessed by RCU readers at the time these were freed. 313 * 314 * @rcu_head: Used to schedule an RCU callback for freeing data structures. 315 * @index: Index of @pf to which freed data structures are added. 316 * @scheduled: Whether or not an RCU callback has been scheduled. 317 * @pf: Array with information about data structures about to be freed. 318 */ 319 static struct delayed_free { 320 struct rcu_head rcu_head; 321 int index; 322 int scheduled; 323 struct pending_free pf[2]; 324 } delayed_free; 325 326 /* 327 * The lockdep classes are in a hash-table as well, for fast lookup: 328 */ 329 #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) 330 #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS) 331 #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS) 332 #define classhashentry(key) (classhash_table + __classhashfn((key))) 333 334 static struct hlist_head classhash_table[CLASSHASH_SIZE]; 335 336 /* 337 * We put the lock dependency chains into a hash-table as well, to cache 338 * their existence: 339 */ 340 #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1) 341 #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS) 342 #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS) 343 #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain))) 344 345 static struct hlist_head chainhash_table[CHAINHASH_SIZE]; 346 347 /* 348 * The hash key of the lock dependency chains is a hash itself too: 349 * it's a hash of all locks taken up to that lock, including that lock. 350 * It's a 64-bit hash, because it's important for the keys to be 351 * unique. 
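 *
 * The key is built incrementally: starting from 0, the class id of each
 * newly taken lock is folded in via iterate_chain_key() below, so the same
 * sequence of lock classes always yields the same 64-bit key and can be
 * found again in chainhash_table.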
352 */ 353 static inline u64 iterate_chain_key(u64 key, u32 idx) 354 { 355 u32 k0 = key, k1 = key >> 32; 356 357 __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */ 358 359 return k0 | (u64)k1 << 32; 360 } 361 362 void lockdep_off(void) 363 { 364 current->lockdep_recursion++; 365 } 366 EXPORT_SYMBOL(lockdep_off); 367 368 void lockdep_on(void) 369 { 370 current->lockdep_recursion--; 371 } 372 EXPORT_SYMBOL(lockdep_on); 373 374 void lockdep_set_selftest_task(struct task_struct *task) 375 { 376 lockdep_selftest_task_struct = task; 377 } 378 379 /* 380 * Debugging switches: 381 */ 382 383 #define VERBOSE 0 384 #define VERY_VERBOSE 0 385 386 #if VERBOSE 387 # define HARDIRQ_VERBOSE 1 388 # define SOFTIRQ_VERBOSE 1 389 #else 390 # define HARDIRQ_VERBOSE 0 391 # define SOFTIRQ_VERBOSE 0 392 #endif 393 394 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE 395 /* 396 * Quick filtering for interesting events: 397 */ 398 static int class_filter(struct lock_class *class) 399 { 400 #if 0 401 /* Example */ 402 if (class->name_version == 1 && 403 !strcmp(class->name, "lockname")) 404 return 1; 405 if (class->name_version == 1 && 406 !strcmp(class->name, "&struct->lockfield")) 407 return 1; 408 #endif 409 /* Filter everything else. 1 would be to allow everything else */ 410 return 0; 411 } 412 #endif 413 414 static int verbose(struct lock_class *class) 415 { 416 #if VERBOSE 417 return class_filter(class); 418 #endif 419 return 0; 420 } 421 422 /* 423 * Stack-trace: tightly packed array of stack backtrace 424 * addresses. Protected by the graph_lock. 425 */ 426 unsigned long nr_stack_trace_entries; 427 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; 428 429 static void print_lockdep_off(const char *bug_msg) 430 { 431 printk(KERN_DEBUG "%s\n", bug_msg); 432 printk(KERN_DEBUG "turning off the locking correctness validator.\n"); 433 #ifdef CONFIG_LOCK_STAT 434 printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n"); 435 #endif 436 } 437 438 static int save_trace(struct lock_trace *trace) 439 { 440 unsigned long *entries = stack_trace + nr_stack_trace_entries; 441 unsigned int max_entries; 442 443 trace->offset = nr_stack_trace_entries; 444 max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; 445 trace->nr_entries = stack_trace_save(entries, max_entries, 3); 446 nr_stack_trace_entries += trace->nr_entries; 447 448 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) { 449 if (!debug_locks_off_graph_unlock()) 450 return 0; 451 452 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!"); 453 dump_stack(); 454 455 return 0; 456 } 457 458 return 1; 459 } 460 461 unsigned int nr_hardirq_chains; 462 unsigned int nr_softirq_chains; 463 unsigned int nr_process_chains; 464 unsigned int max_lockdep_depth; 465 466 #ifdef CONFIG_DEBUG_LOCKDEP 467 /* 468 * Various lockdep statistics: 469 */ 470 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); 471 #endif 472 473 /* 474 * Locking printouts: 475 */ 476 477 #define __USAGE(__STATE) \ 478 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \ 479 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \ 480 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\ 481 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R", 482 483 static const char *usage_str[] = 484 { 485 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE) 486 #include "lockdep_states.h" 487 #undef LOCKDEP_STATE 488 [LOCK_USED] = "INITIAL USE", 489 }; 490 491 const char * __get_key_name(struct lockdep_subclass_key *key, 
char *str) 492 { 493 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str); 494 } 495 496 static inline unsigned long lock_flag(enum lock_usage_bit bit) 497 { 498 return 1UL << bit; 499 } 500 501 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit) 502 { 503 char c = '.'; 504 505 if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) 506 c = '+'; 507 if (class->usage_mask & lock_flag(bit)) { 508 c = '-'; 509 if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) 510 c = '?'; 511 } 512 513 return c; 514 } 515 516 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) 517 { 518 int i = 0; 519 520 #define LOCKDEP_STATE(__STATE) \ 521 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \ 522 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ); 523 #include "lockdep_states.h" 524 #undef LOCKDEP_STATE 525 526 usage[i] = '\0'; 527 } 528 529 static void __print_lock_name(struct lock_class *class) 530 { 531 char str[KSYM_NAME_LEN]; 532 const char *name; 533 534 name = class->name; 535 if (!name) { 536 name = __get_key_name(class->key, str); 537 printk(KERN_CONT "%s", name); 538 } else { 539 printk(KERN_CONT "%s", name); 540 if (class->name_version > 1) 541 printk(KERN_CONT "#%d", class->name_version); 542 if (class->subclass) 543 printk(KERN_CONT "/%d", class->subclass); 544 } 545 } 546 547 static void print_lock_name(struct lock_class *class) 548 { 549 char usage[LOCK_USAGE_CHARS]; 550 551 get_usage_chars(class, usage); 552 553 printk(KERN_CONT " ("); 554 __print_lock_name(class); 555 printk(KERN_CONT "){%s}", usage); 556 } 557 558 static void print_lockdep_cache(struct lockdep_map *lock) 559 { 560 const char *name; 561 char str[KSYM_NAME_LEN]; 562 563 name = lock->name; 564 if (!name) 565 name = __get_key_name(lock->key->subkeys, str); 566 567 printk(KERN_CONT "%s", name); 568 } 569 570 static void print_lock(struct held_lock *hlock) 571 { 572 /* 573 * We can be called locklessly through debug_show_all_locks() so be 574 * extra careful, the hlock might have been released and cleared. 575 */ 576 unsigned int class_idx = hlock->class_idx; 577 578 /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */ 579 barrier(); 580 581 if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) { 582 printk(KERN_CONT "<RELEASED>\n"); 583 return; 584 } 585 586 printk(KERN_CONT "%p", hlock->instance); 587 print_lock_name(lock_classes + class_idx - 1); 588 printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip); 589 } 590 591 static void lockdep_print_held_locks(struct task_struct *p) 592 { 593 int i, depth = READ_ONCE(p->lockdep_depth); 594 595 if (!depth) 596 printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p)); 597 else 598 printk("%d lock%s held by %s/%d:\n", depth, 599 depth > 1 ? "s" : "", p->comm, task_pid_nr(p)); 600 /* 601 * It's not reliable to print a task's held locks if it's not sleeping 602 * and it's not the current task. 
603 */ 604 if (p->state == TASK_RUNNING && p != current) 605 return; 606 for (i = 0; i < depth; i++) { 607 printk(" #%d: ", i); 608 print_lock(p->held_locks + i); 609 } 610 } 611 612 static void print_kernel_ident(void) 613 { 614 printk("%s %.*s %s\n", init_utsname()->release, 615 (int)strcspn(init_utsname()->version, " "), 616 init_utsname()->version, 617 print_tainted()); 618 } 619 620 static int very_verbose(struct lock_class *class) 621 { 622 #if VERY_VERBOSE 623 return class_filter(class); 624 #endif 625 return 0; 626 } 627 628 /* 629 * Is this the address of a static object: 630 */ 631 #ifdef __KERNEL__ 632 static int static_obj(const void *obj) 633 { 634 unsigned long start = (unsigned long) &_stext, 635 end = (unsigned long) &_end, 636 addr = (unsigned long) obj; 637 638 if (arch_is_kernel_initmem_freed(addr)) 639 return 0; 640 641 /* 642 * static variable? 643 */ 644 if ((addr >= start) && (addr < end)) 645 return 1; 646 647 if (arch_is_kernel_data(addr)) 648 return 1; 649 650 /* 651 * in-kernel percpu var? 652 */ 653 if (is_kernel_percpu_address(addr)) 654 return 1; 655 656 /* 657 * module static or percpu var? 658 */ 659 return is_module_address(addr) || is_module_percpu_address(addr); 660 } 661 #endif 662 663 /* 664 * To make lock name printouts unique, we calculate a unique 665 * class->name_version generation counter. The caller must hold the graph 666 * lock. 667 */ 668 static int count_matching_names(struct lock_class *new_class) 669 { 670 struct lock_class *class; 671 int count = 0; 672 673 if (!new_class->name) 674 return 0; 675 676 list_for_each_entry(class, &all_lock_classes, lock_entry) { 677 if (new_class->key - new_class->subclass == class->key) 678 return class->name_version; 679 if (class->name && !strcmp(class->name, new_class->name)) 680 count = max(count, class->name_version); 681 } 682 683 return count + 1; 684 } 685 686 static inline struct lock_class * 687 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass) 688 { 689 struct lockdep_subclass_key *key; 690 struct hlist_head *hash_head; 691 struct lock_class *class; 692 693 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { 694 debug_locks_off(); 695 printk(KERN_ERR 696 "BUG: looking up invalid subclass: %u\n", subclass); 697 printk(KERN_ERR 698 "turning off the locking correctness validator.\n"); 699 dump_stack(); 700 return NULL; 701 } 702 703 /* 704 * If it is not initialised then it has never been locked, 705 * so it won't be present in the hash table. 706 */ 707 if (unlikely(!lock->key)) 708 return NULL; 709 710 /* 711 * NOTE: the class-key must be unique. For dynamic locks, a static 712 * lock_class_key variable is passed in through the mutex_init() 713 * (or spin_lock_init()) call - which acts as the key. For static 714 * locks we use the lock object itself as the key. 715 */ 716 BUILD_BUG_ON(sizeof(struct lock_class_key) > 717 sizeof(struct lockdep_map)); 718 719 key = lock->key->subkeys + subclass; 720 721 hash_head = classhashentry(key); 722 723 /* 724 * We do an RCU walk of the hash, see lockdep_free_key_range(). 725 */ 726 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 727 return NULL; 728 729 hlist_for_each_entry_rcu(class, hash_head, hash_entry) { 730 if (class->key == key) { 731 /* 732 * Huh! same key, different name? Did someone trample 733 * on some memory? We're most confused. 
734 */ 735 WARN_ON_ONCE(class->name != lock->name); 736 return class; 737 } 738 } 739 740 return NULL; 741 } 742 743 /* 744 * Static locks do not have their class-keys yet - for them the key is 745 * the lock object itself. If the lock is in the per cpu area, the 746 * canonical address of the lock (per cpu offset removed) is used. 747 */ 748 static bool assign_lock_key(struct lockdep_map *lock) 749 { 750 unsigned long can_addr, addr = (unsigned long)lock; 751 752 #ifdef __KERNEL__ 753 /* 754 * lockdep_free_key_range() assumes that struct lock_class_key 755 * objects do not overlap. Since we use the address of lock 756 * objects as class key for static objects, check whether the 757 * size of lock_class_key objects does not exceed the size of 758 * the smallest lock object. 759 */ 760 BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t)); 761 #endif 762 763 if (__is_kernel_percpu_address(addr, &can_addr)) 764 lock->key = (void *)can_addr; 765 else if (__is_module_percpu_address(addr, &can_addr)) 766 lock->key = (void *)can_addr; 767 else if (static_obj(lock)) 768 lock->key = (void *)lock; 769 else { 770 /* Debug-check: all keys must be persistent! */ 771 debug_locks_off(); 772 pr_err("INFO: trying to register non-static key.\n"); 773 pr_err("the code is fine but needs lockdep annotation.\n"); 774 pr_err("turning off the locking correctness validator.\n"); 775 dump_stack(); 776 return false; 777 } 778 779 return true; 780 } 781 782 #ifdef CONFIG_DEBUG_LOCKDEP 783 784 /* Check whether element @e occurs in list @h */ 785 static bool in_list(struct list_head *e, struct list_head *h) 786 { 787 struct list_head *f; 788 789 list_for_each(f, h) { 790 if (e == f) 791 return true; 792 } 793 794 return false; 795 } 796 797 /* 798 * Check whether entry @e occurs in any of the locks_after or locks_before 799 * lists. 800 */ 801 static bool in_any_class_list(struct list_head *e) 802 { 803 struct lock_class *class; 804 int i; 805 806 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { 807 class = &lock_classes[i]; 808 if (in_list(e, &class->locks_after) || 809 in_list(e, &class->locks_before)) 810 return true; 811 } 812 return false; 813 } 814 815 static bool class_lock_list_valid(struct lock_class *c, struct list_head *h) 816 { 817 struct lock_list *e; 818 819 list_for_each_entry(e, h, entry) { 820 if (e->links_to != c) { 821 printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s", 822 c->name ? : "(?)", 823 (unsigned long)(e - list_entries), 824 e->links_to && e->links_to->name ? 825 e->links_to->name : "(?)", 826 e->class && e->class->name ? e->class->name : 827 "(?)"); 828 return false; 829 } 830 } 831 return true; 832 } 833 834 #ifdef CONFIG_PROVE_LOCKING 835 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; 836 #endif 837 838 static bool check_lock_chain_key(struct lock_chain *chain) 839 { 840 #ifdef CONFIG_PROVE_LOCKING 841 u64 chain_key = 0; 842 int i; 843 844 for (i = chain->base; i < chain->base + chain->depth; i++) 845 chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1); 846 /* 847 * The 'unsigned long long' casts avoid that a compiler warning 848 * is reported when building tools/lib/lockdep. 
849 */ 850 if (chain->chain_key != chain_key) { 851 printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n", 852 (unsigned long long)(chain - lock_chains), 853 (unsigned long long)chain->chain_key, 854 (unsigned long long)chain_key); 855 return false; 856 } 857 #endif 858 return true; 859 } 860 861 static bool in_any_zapped_class_list(struct lock_class *class) 862 { 863 struct pending_free *pf; 864 int i; 865 866 for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) { 867 if (in_list(&class->lock_entry, &pf->zapped)) 868 return true; 869 } 870 871 return false; 872 } 873 874 static bool __check_data_structures(void) 875 { 876 struct lock_class *class; 877 struct lock_chain *chain; 878 struct hlist_head *head; 879 struct lock_list *e; 880 int i; 881 882 /* Check whether all classes occur in a lock list. */ 883 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { 884 class = &lock_classes[i]; 885 if (!in_list(&class->lock_entry, &all_lock_classes) && 886 !in_list(&class->lock_entry, &free_lock_classes) && 887 !in_any_zapped_class_list(class)) { 888 printk(KERN_INFO "class %px/%s is not in any class list\n", 889 class, class->name ? : "(?)"); 890 return false; 891 } 892 } 893 894 /* Check whether all classes have valid lock lists. */ 895 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { 896 class = &lock_classes[i]; 897 if (!class_lock_list_valid(class, &class->locks_before)) 898 return false; 899 if (!class_lock_list_valid(class, &class->locks_after)) 900 return false; 901 } 902 903 /* Check the chain_key of all lock chains. */ 904 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) { 905 head = chainhash_table + i; 906 hlist_for_each_entry_rcu(chain, head, entry) { 907 if (!check_lock_chain_key(chain)) 908 return false; 909 } 910 } 911 912 /* 913 * Check whether all list entries that are in use occur in a class 914 * lock list. 915 */ 916 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { 917 e = list_entries + i; 918 if (!in_any_class_list(&e->entry)) { 919 printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n", 920 (unsigned int)(e - list_entries), 921 e->class->name ? : "(?)", 922 e->links_to->name ? : "(?)"); 923 return false; 924 } 925 } 926 927 /* 928 * Check whether all list entries that are not in use do not occur in 929 * a class lock list. 930 */ 931 for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { 932 e = list_entries + i; 933 if (in_any_class_list(&e->entry)) { 934 printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n", 935 (unsigned int)(e - list_entries), 936 e->class && e->class->name ? e->class->name : 937 "(?)", 938 e->links_to && e->links_to->name ? 939 e->links_to->name : "(?)"); 940 return false; 941 } 942 } 943 944 return true; 945 } 946 947 int check_consistency = 0; 948 module_param(check_consistency, int, 0644); 949 950 static void check_data_structures(void) 951 { 952 static bool once = false; 953 954 if (check_consistency && !once) { 955 if (!__check_data_structures()) { 956 once = true; 957 WARN_ON(once); 958 } 959 } 960 } 961 962 #else /* CONFIG_DEBUG_LOCKDEP */ 963 964 static inline void check_data_structures(void) { } 965 966 #endif /* CONFIG_DEBUG_LOCKDEP */ 967 968 /* 969 * Initialize the lock_classes[] array elements, the free_lock_classes list 970 * and also the delayed_free structure. 
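 *
 * Calling this repeatedly is cheap and safe: two static flags make it
 * idempotent, and the RCU head of delayed_free is only initialized once
 * the system has reached SYSTEM_SCHEDULING (see below).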
971 */ 972 static void init_data_structures_once(void) 973 { 974 static bool ds_initialized, rcu_head_initialized; 975 int i; 976 977 if (likely(rcu_head_initialized)) 978 return; 979 980 if (system_state >= SYSTEM_SCHEDULING) { 981 init_rcu_head(&delayed_free.rcu_head); 982 rcu_head_initialized = true; 983 } 984 985 if (ds_initialized) 986 return; 987 988 ds_initialized = true; 989 990 INIT_LIST_HEAD(&delayed_free.pf[0].zapped); 991 INIT_LIST_HEAD(&delayed_free.pf[1].zapped); 992 993 for (i = 0; i < ARRAY_SIZE(lock_classes); i++) { 994 list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes); 995 INIT_LIST_HEAD(&lock_classes[i].locks_after); 996 INIT_LIST_HEAD(&lock_classes[i].locks_before); 997 } 998 } 999 1000 static inline struct hlist_head *keyhashentry(const struct lock_class_key *key) 1001 { 1002 unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS); 1003 1004 return lock_keys_hash + hash; 1005 } 1006 1007 /* Register a dynamically allocated key. */ 1008 void lockdep_register_key(struct lock_class_key *key) 1009 { 1010 struct hlist_head *hash_head; 1011 struct lock_class_key *k; 1012 unsigned long flags; 1013 1014 if (WARN_ON_ONCE(static_obj(key))) 1015 return; 1016 hash_head = keyhashentry(key); 1017 1018 raw_local_irq_save(flags); 1019 if (!graph_lock()) 1020 goto restore_irqs; 1021 hlist_for_each_entry_rcu(k, hash_head, hash_entry) { 1022 if (WARN_ON_ONCE(k == key)) 1023 goto out_unlock; 1024 } 1025 hlist_add_head_rcu(&key->hash_entry, hash_head); 1026 out_unlock: 1027 graph_unlock(); 1028 restore_irqs: 1029 raw_local_irq_restore(flags); 1030 } 1031 EXPORT_SYMBOL_GPL(lockdep_register_key); 1032 1033 /* Check whether a key has been registered as a dynamic key. */ 1034 static bool is_dynamic_key(const struct lock_class_key *key) 1035 { 1036 struct hlist_head *hash_head; 1037 struct lock_class_key *k; 1038 bool found = false; 1039 1040 if (WARN_ON_ONCE(static_obj(key))) 1041 return false; 1042 1043 /* 1044 * If lock debugging is disabled lock_keys_hash[] may contain 1045 * pointers to memory that has already been freed. Avoid triggering 1046 * a use-after-free in that case by returning early. 1047 */ 1048 if (!debug_locks) 1049 return true; 1050 1051 hash_head = keyhashentry(key); 1052 1053 rcu_read_lock(); 1054 hlist_for_each_entry_rcu(k, hash_head, hash_entry) { 1055 if (k == key) { 1056 found = true; 1057 break; 1058 } 1059 } 1060 rcu_read_unlock(); 1061 1062 return found; 1063 } 1064 1065 /* 1066 * Register a lock's class in the hash-table, if the class is not present 1067 * yet. Otherwise we look it up. We cache the result in the lock object 1068 * itself, so actual lookup of the hash should be once per lock object. 
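 *
 * The slow path below runs with IRQs disabled and under the graph lock: it
 * re-walks the hash to close races with other CPUs, then takes a free
 * element from free_lock_classes, adds it to classhash_table with
 * hlist_add_head_rcu() and moves it onto all_lock_classes.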
1069 */ 1070 static struct lock_class * 1071 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) 1072 { 1073 struct lockdep_subclass_key *key; 1074 struct hlist_head *hash_head; 1075 struct lock_class *class; 1076 1077 DEBUG_LOCKS_WARN_ON(!irqs_disabled()); 1078 1079 class = look_up_lock_class(lock, subclass); 1080 if (likely(class)) 1081 goto out_set_class_cache; 1082 1083 if (!lock->key) { 1084 if (!assign_lock_key(lock)) 1085 return NULL; 1086 } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) { 1087 return NULL; 1088 } 1089 1090 key = lock->key->subkeys + subclass; 1091 hash_head = classhashentry(key); 1092 1093 if (!graph_lock()) { 1094 return NULL; 1095 } 1096 /* 1097 * We have to do the hash-walk again, to avoid races 1098 * with another CPU: 1099 */ 1100 hlist_for_each_entry_rcu(class, hash_head, hash_entry) { 1101 if (class->key == key) 1102 goto out_unlock_set; 1103 } 1104 1105 init_data_structures_once(); 1106 1107 /* Allocate a new lock class and add it to the hash. */ 1108 class = list_first_entry_or_null(&free_lock_classes, typeof(*class), 1109 lock_entry); 1110 if (!class) { 1111 if (!debug_locks_off_graph_unlock()) { 1112 return NULL; 1113 } 1114 1115 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); 1116 dump_stack(); 1117 return NULL; 1118 } 1119 nr_lock_classes++; 1120 debug_atomic_inc(nr_unused_locks); 1121 class->key = key; 1122 class->name = lock->name; 1123 class->subclass = subclass; 1124 WARN_ON_ONCE(!list_empty(&class->locks_before)); 1125 WARN_ON_ONCE(!list_empty(&class->locks_after)); 1126 class->name_version = count_matching_names(class); 1127 /* 1128 * We use RCU's safe list-add method to make 1129 * parallel walking of the hash-list safe: 1130 */ 1131 hlist_add_head_rcu(&class->hash_entry, hash_head); 1132 /* 1133 * Remove the class from the free list and add it to the global list 1134 * of classes. 1135 */ 1136 list_move_tail(&class->lock_entry, &all_lock_classes); 1137 1138 if (verbose(class)) { 1139 graph_unlock(); 1140 1141 printk("\nnew class %px: %s", class->key, class->name); 1142 if (class->name_version > 1) 1143 printk(KERN_CONT "#%d", class->name_version); 1144 printk(KERN_CONT "\n"); 1145 dump_stack(); 1146 1147 if (!graph_lock()) { 1148 return NULL; 1149 } 1150 } 1151 out_unlock_set: 1152 graph_unlock(); 1153 1154 out_set_class_cache: 1155 if (!subclass || force) 1156 lock->class_cache[0] = class; 1157 else if (subclass < NR_LOCKDEP_CACHING_CLASSES) 1158 lock->class_cache[subclass] = class; 1159 1160 /* 1161 * Hash collision, did we smoke some? We found a class with a matching 1162 * hash but the subclass -- which is hashed in -- didn't match. 1163 */ 1164 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) 1165 return NULL; 1166 1167 return class; 1168 } 1169 1170 #ifdef CONFIG_PROVE_LOCKING 1171 /* 1172 * Allocate a lockdep entry. 
(assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	int idx = find_first_zero_bit(list_entries_in_use,
				      ARRAY_SIZE(list_entries));

	if (idx >= ARRAY_SIZE(list_entries)) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
		dump_stack();
		return NULL;
	}
	nr_list_entries++;
	__set_bit(idx, list_entries_in_use);
	return list_entries + idx;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *this,
			    struct lock_class *links_to, struct list_head *head,
			    unsigned long ip, int distance,
			    struct lock_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->links_to = links_to;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Both allocation and removal are done under the graph lock; but
	 * iteration is under RCU-sched; see look_up_lock_class() and
	 * lockdep_free_key_range().
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * To keep the modulo (wrap-around) operation cheap, the queue size is a power of 2.
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and its helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to a
 * previously held lock, if there is a circular dependency between them.
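 *
 * A note on the queue discipline implemented below: front and rear wrap
 * around through CQ_MASK, the queue is empty when front == rear and full
 * when advancing rear would make it catch up with front again, so at most
 * MAX_CIRCULAR_QUEUE_SIZE - 1 elements can be queued at any time.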
1235 */ 1236 struct circular_queue { 1237 unsigned long element[MAX_CIRCULAR_QUEUE_SIZE]; 1238 unsigned int front, rear; 1239 }; 1240 1241 static struct circular_queue lock_cq; 1242 1243 unsigned int max_bfs_queue_depth; 1244 1245 static unsigned int lockdep_dependency_gen_id; 1246 1247 static inline void __cq_init(struct circular_queue *cq) 1248 { 1249 cq->front = cq->rear = 0; 1250 lockdep_dependency_gen_id++; 1251 } 1252 1253 static inline int __cq_empty(struct circular_queue *cq) 1254 { 1255 return (cq->front == cq->rear); 1256 } 1257 1258 static inline int __cq_full(struct circular_queue *cq) 1259 { 1260 return ((cq->rear + 1) & CQ_MASK) == cq->front; 1261 } 1262 1263 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem) 1264 { 1265 if (__cq_full(cq)) 1266 return -1; 1267 1268 cq->element[cq->rear] = elem; 1269 cq->rear = (cq->rear + 1) & CQ_MASK; 1270 return 0; 1271 } 1272 1273 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem) 1274 { 1275 if (__cq_empty(cq)) 1276 return -1; 1277 1278 *elem = cq->element[cq->front]; 1279 cq->front = (cq->front + 1) & CQ_MASK; 1280 return 0; 1281 } 1282 1283 static inline unsigned int __cq_get_elem_count(struct circular_queue *cq) 1284 { 1285 return (cq->rear - cq->front) & CQ_MASK; 1286 } 1287 1288 static inline void mark_lock_accessed(struct lock_list *lock, 1289 struct lock_list *parent) 1290 { 1291 unsigned long nr; 1292 1293 nr = lock - list_entries; 1294 WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */ 1295 lock->parent = parent; 1296 lock->class->dep_gen_id = lockdep_dependency_gen_id; 1297 } 1298 1299 static inline unsigned long lock_accessed(struct lock_list *lock) 1300 { 1301 unsigned long nr; 1302 1303 nr = lock - list_entries; 1304 WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */ 1305 return lock->class->dep_gen_id == lockdep_dependency_gen_id; 1306 } 1307 1308 static inline struct lock_list *get_lock_parent(struct lock_list *child) 1309 { 1310 return child->parent; 1311 } 1312 1313 static inline int get_lock_depth(struct lock_list *child) 1314 { 1315 int depth = 0; 1316 struct lock_list *parent; 1317 1318 while ((parent = get_lock_parent(child))) { 1319 child = parent; 1320 depth++; 1321 } 1322 return depth; 1323 } 1324 1325 static int __bfs(struct lock_list *source_entry, 1326 void *data, 1327 int (*match)(struct lock_list *entry, void *data), 1328 struct lock_list **target_entry, 1329 int forward) 1330 { 1331 struct lock_list *entry; 1332 struct list_head *head; 1333 struct circular_queue *cq = &lock_cq; 1334 int ret = 1; 1335 1336 if (match(source_entry, data)) { 1337 *target_entry = source_entry; 1338 ret = 0; 1339 goto exit; 1340 } 1341 1342 if (forward) 1343 head = &source_entry->class->locks_after; 1344 else 1345 head = &source_entry->class->locks_before; 1346 1347 if (list_empty(head)) 1348 goto exit; 1349 1350 __cq_init(cq); 1351 __cq_enqueue(cq, (unsigned long)source_entry); 1352 1353 while (!__cq_empty(cq)) { 1354 struct lock_list *lock; 1355 1356 __cq_dequeue(cq, (unsigned long *)&lock); 1357 1358 if (!lock->class) { 1359 ret = -2; 1360 goto exit; 1361 } 1362 1363 if (forward) 1364 head = &lock->class->locks_after; 1365 else 1366 head = &lock->class->locks_before; 1367 1368 DEBUG_LOCKS_WARN_ON(!irqs_disabled()); 1369 1370 list_for_each_entry_rcu(entry, head, entry) { 1371 if (!lock_accessed(entry)) { 1372 unsigned int cq_depth; 1373 mark_lock_accessed(entry, lock); 1374 if (match(entry, data)) { 1375 *target_entry = entry; 1376 
ret = 0; 1377 goto exit; 1378 } 1379 1380 if (__cq_enqueue(cq, (unsigned long)entry)) { 1381 ret = -1; 1382 goto exit; 1383 } 1384 cq_depth = __cq_get_elem_count(cq); 1385 if (max_bfs_queue_depth < cq_depth) 1386 max_bfs_queue_depth = cq_depth; 1387 } 1388 } 1389 } 1390 exit: 1391 return ret; 1392 } 1393 1394 static inline int __bfs_forwards(struct lock_list *src_entry, 1395 void *data, 1396 int (*match)(struct lock_list *entry, void *data), 1397 struct lock_list **target_entry) 1398 { 1399 return __bfs(src_entry, data, match, target_entry, 1); 1400 1401 } 1402 1403 static inline int __bfs_backwards(struct lock_list *src_entry, 1404 void *data, 1405 int (*match)(struct lock_list *entry, void *data), 1406 struct lock_list **target_entry) 1407 { 1408 return __bfs(src_entry, data, match, target_entry, 0); 1409 1410 } 1411 1412 /* 1413 * Recursive, forwards-direction lock-dependency checking, used for 1414 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe 1415 * checking. 1416 */ 1417 1418 static void print_lock_trace(struct lock_trace *trace, unsigned int spaces) 1419 { 1420 unsigned long *entries = stack_trace + trace->offset; 1421 1422 stack_trace_print(entries, trace->nr_entries, spaces); 1423 } 1424 1425 /* 1426 * Print a dependency chain entry (this is only done when a deadlock 1427 * has been detected): 1428 */ 1429 static noinline int 1430 print_circular_bug_entry(struct lock_list *target, int depth) 1431 { 1432 if (debug_locks_silent) 1433 return 0; 1434 printk("\n-> #%u", depth); 1435 print_lock_name(target->class); 1436 printk(KERN_CONT ":\n"); 1437 print_lock_trace(&target->trace, 6); 1438 return 0; 1439 } 1440 1441 static void 1442 print_circular_lock_scenario(struct held_lock *src, 1443 struct held_lock *tgt, 1444 struct lock_list *prt) 1445 { 1446 struct lock_class *source = hlock_class(src); 1447 struct lock_class *target = hlock_class(tgt); 1448 struct lock_class *parent = prt->class; 1449 1450 /* 1451 * A direct locking problem where unsafe_class lock is taken 1452 * directly by safe_class lock, then all we need to show 1453 * is the deadlock scenario, as it is obvious that the 1454 * unsafe lock is taken under the safe lock. 1455 * 1456 * But if there is a chain instead, where the safe lock takes 1457 * an intermediate lock (middle_class) where this lock is 1458 * not the same as the safe lock, then the lock chain is 1459 * used to describe the problem. Otherwise we would need 1460 * to show a different CPU case for each link in the chain 1461 * from the safe_class lock to the unsafe_class lock. 
1462 */ 1463 if (parent != source) { 1464 printk("Chain exists of:\n "); 1465 __print_lock_name(source); 1466 printk(KERN_CONT " --> "); 1467 __print_lock_name(parent); 1468 printk(KERN_CONT " --> "); 1469 __print_lock_name(target); 1470 printk(KERN_CONT "\n\n"); 1471 } 1472 1473 printk(" Possible unsafe locking scenario:\n\n"); 1474 printk(" CPU0 CPU1\n"); 1475 printk(" ---- ----\n"); 1476 printk(" lock("); 1477 __print_lock_name(target); 1478 printk(KERN_CONT ");\n"); 1479 printk(" lock("); 1480 __print_lock_name(parent); 1481 printk(KERN_CONT ");\n"); 1482 printk(" lock("); 1483 __print_lock_name(target); 1484 printk(KERN_CONT ");\n"); 1485 printk(" lock("); 1486 __print_lock_name(source); 1487 printk(KERN_CONT ");\n"); 1488 printk("\n *** DEADLOCK ***\n\n"); 1489 } 1490 1491 /* 1492 * When a circular dependency is detected, print the 1493 * header first: 1494 */ 1495 static noinline int 1496 print_circular_bug_header(struct lock_list *entry, unsigned int depth, 1497 struct held_lock *check_src, 1498 struct held_lock *check_tgt) 1499 { 1500 struct task_struct *curr = current; 1501 1502 if (debug_locks_silent) 1503 return 0; 1504 1505 pr_warn("\n"); 1506 pr_warn("======================================================\n"); 1507 pr_warn("WARNING: possible circular locking dependency detected\n"); 1508 print_kernel_ident(); 1509 pr_warn("------------------------------------------------------\n"); 1510 pr_warn("%s/%d is trying to acquire lock:\n", 1511 curr->comm, task_pid_nr(curr)); 1512 print_lock(check_src); 1513 1514 pr_warn("\nbut task is already holding lock:\n"); 1515 1516 print_lock(check_tgt); 1517 pr_warn("\nwhich lock already depends on the new lock.\n\n"); 1518 pr_warn("\nthe existing dependency chain (in reverse order) is:\n"); 1519 1520 print_circular_bug_entry(entry, depth); 1521 1522 return 0; 1523 } 1524 1525 static inline int class_equal(struct lock_list *entry, void *data) 1526 { 1527 return entry->class == data; 1528 } 1529 1530 static noinline int print_circular_bug(struct lock_list *this, 1531 struct lock_list *target, 1532 struct held_lock *check_src, 1533 struct held_lock *check_tgt) 1534 { 1535 struct task_struct *curr = current; 1536 struct lock_list *parent; 1537 struct lock_list *first_parent; 1538 int depth; 1539 1540 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1541 return 0; 1542 1543 if (!save_trace(&this->trace)) 1544 return 0; 1545 1546 depth = get_lock_depth(target); 1547 1548 print_circular_bug_header(target, depth, check_src, check_tgt); 1549 1550 parent = get_lock_parent(target); 1551 first_parent = parent; 1552 1553 while (parent) { 1554 print_circular_bug_entry(parent, --depth); 1555 parent = get_lock_parent(parent); 1556 } 1557 1558 printk("\nother info that might help us debug this:\n\n"); 1559 print_circular_lock_scenario(check_src, check_tgt, 1560 first_parent); 1561 1562 lockdep_print_held_locks(curr); 1563 1564 printk("\nstack backtrace:\n"); 1565 dump_stack(); 1566 1567 return 0; 1568 } 1569 1570 static noinline int print_bfs_bug(int ret) 1571 { 1572 if (!debug_locks_off_graph_unlock()) 1573 return 0; 1574 1575 /* 1576 * Breadth-first-search failed, graph got corrupted? 
1577 */ 1578 WARN(1, "lockdep bfs error:%d\n", ret); 1579 1580 return 0; 1581 } 1582 1583 static int noop_count(struct lock_list *entry, void *data) 1584 { 1585 (*(unsigned long *)data)++; 1586 return 0; 1587 } 1588 1589 static unsigned long __lockdep_count_forward_deps(struct lock_list *this) 1590 { 1591 unsigned long count = 0; 1592 struct lock_list *uninitialized_var(target_entry); 1593 1594 __bfs_forwards(this, (void *)&count, noop_count, &target_entry); 1595 1596 return count; 1597 } 1598 unsigned long lockdep_count_forward_deps(struct lock_class *class) 1599 { 1600 unsigned long ret, flags; 1601 struct lock_list this; 1602 1603 this.parent = NULL; 1604 this.class = class; 1605 1606 raw_local_irq_save(flags); 1607 arch_spin_lock(&lockdep_lock); 1608 ret = __lockdep_count_forward_deps(&this); 1609 arch_spin_unlock(&lockdep_lock); 1610 raw_local_irq_restore(flags); 1611 1612 return ret; 1613 } 1614 1615 static unsigned long __lockdep_count_backward_deps(struct lock_list *this) 1616 { 1617 unsigned long count = 0; 1618 struct lock_list *uninitialized_var(target_entry); 1619 1620 __bfs_backwards(this, (void *)&count, noop_count, &target_entry); 1621 1622 return count; 1623 } 1624 1625 unsigned long lockdep_count_backward_deps(struct lock_class *class) 1626 { 1627 unsigned long ret, flags; 1628 struct lock_list this; 1629 1630 this.parent = NULL; 1631 this.class = class; 1632 1633 raw_local_irq_save(flags); 1634 arch_spin_lock(&lockdep_lock); 1635 ret = __lockdep_count_backward_deps(&this); 1636 arch_spin_unlock(&lockdep_lock); 1637 raw_local_irq_restore(flags); 1638 1639 return ret; 1640 } 1641 1642 /* 1643 * Prove that the dependency graph starting at <entry> can not 1644 * lead to <target>. Print an error and return 0 if it does. 1645 */ 1646 static noinline int 1647 check_noncircular(struct lock_list *root, struct lock_class *target, 1648 struct lock_list **target_entry) 1649 { 1650 int result; 1651 1652 debug_atomic_inc(nr_cyclic_checks); 1653 1654 result = __bfs_forwards(root, target, class_equal, target_entry); 1655 1656 return result; 1657 } 1658 1659 static noinline int 1660 check_redundant(struct lock_list *root, struct lock_class *target, 1661 struct lock_list **target_entry) 1662 { 1663 int result; 1664 1665 debug_atomic_inc(nr_redundant_checks); 1666 1667 result = __bfs_forwards(root, target, class_equal, target_entry); 1668 1669 return result; 1670 } 1671 1672 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 1673 1674 static inline int usage_accumulate(struct lock_list *entry, void *mask) 1675 { 1676 *(unsigned long *)mask |= entry->class->usage_mask; 1677 1678 return 0; 1679 } 1680 1681 /* 1682 * Forwards and backwards subgraph searching, for the purposes of 1683 * proving that two subgraphs can be connected by a new dependency 1684 * without creating any illegal irq-safe -> irq-unsafe lock dependency. 1685 */ 1686 1687 static inline int usage_match(struct lock_list *entry, void *mask) 1688 { 1689 return entry->class->usage_mask & *(unsigned long *)mask; 1690 } 1691 1692 /* 1693 * Find a node in the forwards-direction dependency sub-graph starting 1694 * at @root->class that matches @bit. 1695 * 1696 * Return 0 if such a node exists in the subgraph, and put that node 1697 * into *@target_entry. 1698 * 1699 * Return 1 otherwise and keep *@target_entry unchanged. 1700 * Return <0 on error. 
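 * ("@bit" refers to any of the usage bits contained in the @usage_mask
 * argument; the comparison is done by usage_match() against each visited
 * class's usage_mask.)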
1701 */ 1702 static int 1703 find_usage_forwards(struct lock_list *root, unsigned long usage_mask, 1704 struct lock_list **target_entry) 1705 { 1706 int result; 1707 1708 debug_atomic_inc(nr_find_usage_forwards_checks); 1709 1710 result = __bfs_forwards(root, &usage_mask, usage_match, target_entry); 1711 1712 return result; 1713 } 1714 1715 /* 1716 * Find a node in the backwards-direction dependency sub-graph starting 1717 * at @root->class that matches @bit. 1718 * 1719 * Return 0 if such a node exists in the subgraph, and put that node 1720 * into *@target_entry. 1721 * 1722 * Return 1 otherwise and keep *@target_entry unchanged. 1723 * Return <0 on error. 1724 */ 1725 static int 1726 find_usage_backwards(struct lock_list *root, unsigned long usage_mask, 1727 struct lock_list **target_entry) 1728 { 1729 int result; 1730 1731 debug_atomic_inc(nr_find_usage_backwards_checks); 1732 1733 result = __bfs_backwards(root, &usage_mask, usage_match, target_entry); 1734 1735 return result; 1736 } 1737 1738 static void print_lock_class_header(struct lock_class *class, int depth) 1739 { 1740 int bit; 1741 1742 printk("%*s->", depth, ""); 1743 print_lock_name(class); 1744 #ifdef CONFIG_DEBUG_LOCKDEP 1745 printk(KERN_CONT " ops: %lu", debug_class_ops_read(class)); 1746 #endif 1747 printk(KERN_CONT " {\n"); 1748 1749 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { 1750 if (class->usage_mask & (1 << bit)) { 1751 int len = depth; 1752 1753 len += printk("%*s %s", depth, "", usage_str[bit]); 1754 len += printk(KERN_CONT " at:\n"); 1755 print_lock_trace(class->usage_traces + bit, len); 1756 } 1757 } 1758 printk("%*s }\n", depth, ""); 1759 1760 printk("%*s ... key at: [<%px>] %pS\n", 1761 depth, "", class->key, class->key); 1762 } 1763 1764 /* 1765 * printk the shortest lock dependencies from @start to @end in reverse order: 1766 */ 1767 static void __used 1768 print_shortest_lock_dependencies(struct lock_list *leaf, 1769 struct lock_list *root) 1770 { 1771 struct lock_list *entry = leaf; 1772 int depth; 1773 1774 /*compute depth from generated tree by BFS*/ 1775 depth = get_lock_depth(leaf); 1776 1777 do { 1778 print_lock_class_header(entry->class, depth); 1779 printk("%*s ... acquired at:\n", depth, ""); 1780 print_lock_trace(&entry->trace, 2); 1781 printk("\n"); 1782 1783 if (depth == 0 && (entry != root)) { 1784 printk("lockdep:%s bad path found in chain graph\n", __func__); 1785 break; 1786 } 1787 1788 entry = get_lock_parent(entry); 1789 depth--; 1790 } while (entry && (depth >= 0)); 1791 1792 return; 1793 } 1794 1795 static void 1796 print_irq_lock_scenario(struct lock_list *safe_entry, 1797 struct lock_list *unsafe_entry, 1798 struct lock_class *prev_class, 1799 struct lock_class *next_class) 1800 { 1801 struct lock_class *safe_class = safe_entry->class; 1802 struct lock_class *unsafe_class = unsafe_entry->class; 1803 struct lock_class *middle_class = prev_class; 1804 1805 if (middle_class == safe_class) 1806 middle_class = next_class; 1807 1808 /* 1809 * A direct locking problem where unsafe_class lock is taken 1810 * directly by safe_class lock, then all we need to show 1811 * is the deadlock scenario, as it is obvious that the 1812 * unsafe lock is taken under the safe lock. 1813 * 1814 * But if there is a chain instead, where the safe lock takes 1815 * an intermediate lock (middle_class) where this lock is 1816 * not the same as the safe lock, then the lock chain is 1817 * used to describe the problem. 
Otherwise we would need 1818 * to show a different CPU case for each link in the chain 1819 * from the safe_class lock to the unsafe_class lock. 1820 */ 1821 if (middle_class != unsafe_class) { 1822 printk("Chain exists of:\n "); 1823 __print_lock_name(safe_class); 1824 printk(KERN_CONT " --> "); 1825 __print_lock_name(middle_class); 1826 printk(KERN_CONT " --> "); 1827 __print_lock_name(unsafe_class); 1828 printk(KERN_CONT "\n\n"); 1829 } 1830 1831 printk(" Possible interrupt unsafe locking scenario:\n\n"); 1832 printk(" CPU0 CPU1\n"); 1833 printk(" ---- ----\n"); 1834 printk(" lock("); 1835 __print_lock_name(unsafe_class); 1836 printk(KERN_CONT ");\n"); 1837 printk(" local_irq_disable();\n"); 1838 printk(" lock("); 1839 __print_lock_name(safe_class); 1840 printk(KERN_CONT ");\n"); 1841 printk(" lock("); 1842 __print_lock_name(middle_class); 1843 printk(KERN_CONT ");\n"); 1844 printk(" <Interrupt>\n"); 1845 printk(" lock("); 1846 __print_lock_name(safe_class); 1847 printk(KERN_CONT ");\n"); 1848 printk("\n *** DEADLOCK ***\n\n"); 1849 } 1850 1851 static int 1852 print_bad_irq_dependency(struct task_struct *curr, 1853 struct lock_list *prev_root, 1854 struct lock_list *next_root, 1855 struct lock_list *backwards_entry, 1856 struct lock_list *forwards_entry, 1857 struct held_lock *prev, 1858 struct held_lock *next, 1859 enum lock_usage_bit bit1, 1860 enum lock_usage_bit bit2, 1861 const char *irqclass) 1862 { 1863 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1864 return 0; 1865 1866 pr_warn("\n"); 1867 pr_warn("=====================================================\n"); 1868 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n", 1869 irqclass, irqclass); 1870 print_kernel_ident(); 1871 pr_warn("-----------------------------------------------------\n"); 1872 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", 1873 curr->comm, task_pid_nr(curr), 1874 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, 1875 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, 1876 curr->hardirqs_enabled, 1877 curr->softirqs_enabled); 1878 print_lock(next); 1879 1880 pr_warn("\nand this task is already holding:\n"); 1881 print_lock(prev); 1882 pr_warn("which would create a new lock dependency:\n"); 1883 print_lock_name(hlock_class(prev)); 1884 pr_cont(" ->"); 1885 print_lock_name(hlock_class(next)); 1886 pr_cont("\n"); 1887 1888 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n", 1889 irqclass); 1890 print_lock_name(backwards_entry->class); 1891 pr_warn("\n... which became %s-irq-safe at:\n", irqclass); 1892 1893 print_lock_trace(backwards_entry->class->usage_traces + bit1, 1); 1894 1895 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass); 1896 print_lock_name(forwards_entry->class); 1897 pr_warn("\n... 
which became %s-irq-unsafe at:\n", irqclass); 1898 pr_warn("..."); 1899 1900 print_lock_trace(forwards_entry->class->usage_traces + bit2, 1); 1901 1902 pr_warn("\nother info that might help us debug this:\n\n"); 1903 print_irq_lock_scenario(backwards_entry, forwards_entry, 1904 hlock_class(prev), hlock_class(next)); 1905 1906 lockdep_print_held_locks(curr); 1907 1908 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass); 1909 if (!save_trace(&prev_root->trace)) 1910 return 0; 1911 print_shortest_lock_dependencies(backwards_entry, prev_root); 1912 1913 pr_warn("\nthe dependencies between the lock to be acquired"); 1914 pr_warn(" and %s-irq-unsafe lock:\n", irqclass); 1915 if (!save_trace(&next_root->trace)) 1916 return 0; 1917 print_shortest_lock_dependencies(forwards_entry, next_root); 1918 1919 pr_warn("\nstack backtrace:\n"); 1920 dump_stack(); 1921 1922 return 0; 1923 } 1924 1925 static const char *state_names[] = { 1926 #define LOCKDEP_STATE(__STATE) \ 1927 __stringify(__STATE), 1928 #include "lockdep_states.h" 1929 #undef LOCKDEP_STATE 1930 }; 1931 1932 static const char *state_rnames[] = { 1933 #define LOCKDEP_STATE(__STATE) \ 1934 __stringify(__STATE)"-READ", 1935 #include "lockdep_states.h" 1936 #undef LOCKDEP_STATE 1937 }; 1938 1939 static inline const char *state_name(enum lock_usage_bit bit) 1940 { 1941 if (bit & LOCK_USAGE_READ_MASK) 1942 return state_rnames[bit >> LOCK_USAGE_DIR_MASK]; 1943 else 1944 return state_names[bit >> LOCK_USAGE_DIR_MASK]; 1945 } 1946 1947 /* 1948 * The bit number is encoded like: 1949 * 1950 * bit0: 0 exclusive, 1 read lock 1951 * bit1: 0 used in irq, 1 irq enabled 1952 * bit2-n: state 1953 */ 1954 static int exclusive_bit(int new_bit) 1955 { 1956 int state = new_bit & LOCK_USAGE_STATE_MASK; 1957 int dir = new_bit & LOCK_USAGE_DIR_MASK; 1958 1959 /* 1960 * keep state, bit flip the direction and strip read. 1961 */ 1962 return state | (dir ^ LOCK_USAGE_DIR_MASK); 1963 } 1964 1965 /* 1966 * Observe that when given a bitmask where each bitnr is encoded as above, a 1967 * right shift of the mask transforms the individual bitnrs as -1 and 1968 * conversely, a left shift transforms into +1 for the individual bitnrs. 1969 * 1970 * So for all bits whose number have LOCK_ENABLED_* set (bitnr1 == 1), we can 1971 * create the mask with those bit numbers using LOCK_USED_IN_* (bitnr1 == 0) 1972 * instead by subtracting the bit number by 2, or shifting the mask right by 2. 1973 * 1974 * Similarly, bitnr1 == 0 becomes bitnr1 == 1 by adding 2, or shifting left 2. 1975 * 1976 * So split the mask (note that LOCKF_ENABLED_IRQ_ALL|LOCKF_USED_IN_IRQ_ALL is 1977 * all bits set) and recompose with bitnr1 flipped. 1978 */ 1979 static unsigned long invert_dir_mask(unsigned long mask) 1980 { 1981 unsigned long excl = 0; 1982 1983 /* Invert dir */ 1984 excl |= (mask & LOCKF_ENABLED_IRQ_ALL) >> LOCK_USAGE_DIR_MASK; 1985 excl |= (mask & LOCKF_USED_IN_IRQ_ALL) << LOCK_USAGE_DIR_MASK; 1986 1987 return excl; 1988 } 1989 1990 /* 1991 * As above, we clear bitnr0 (LOCK_*_READ off) with bitmask ops. First, for all 1992 * bits with bitnr0 set (LOCK_*_READ), add those with bitnr0 cleared (LOCK_*). 1993 * And then mask out all bitnr0. 
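 *
 * For example, if @mask contains only LOCK_USED_IN_HARDIRQ_READ, then
 * invert_dir_mask() turns it into LOCK_ENABLED_HARDIRQ_READ, and stripping
 * the read bit leaves LOCK_ENABLED_HARDIRQ: the usage that must not appear
 * in the forward subgraph if the new dependency is to be safe.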
1994 */ 1995 static unsigned long exclusive_mask(unsigned long mask) 1996 { 1997 unsigned long excl = invert_dir_mask(mask); 1998 1999 /* Strip read */ 2000 excl |= (excl & LOCKF_IRQ_READ) >> LOCK_USAGE_READ_MASK; 2001 excl &= ~LOCKF_IRQ_READ; 2002 2003 return excl; 2004 } 2005 2006 /* 2007 * Retrieve the _possible_ original mask to which @mask is 2008 * exclusive. Ie: this is the opposite of exclusive_mask(). 2009 * Note that 2 possible original bits can match an exclusive 2010 * bit: one has LOCK_USAGE_READ_MASK set, the other has it 2011 * cleared. So both are returned for each exclusive bit. 2012 */ 2013 static unsigned long original_mask(unsigned long mask) 2014 { 2015 unsigned long excl = invert_dir_mask(mask); 2016 2017 /* Include read in existing usages */ 2018 excl |= (excl & LOCKF_IRQ) << LOCK_USAGE_READ_MASK; 2019 2020 return excl; 2021 } 2022 2023 /* 2024 * Find the first pair of bit match between an original 2025 * usage mask and an exclusive usage mask. 2026 */ 2027 static int find_exclusive_match(unsigned long mask, 2028 unsigned long excl_mask, 2029 enum lock_usage_bit *bitp, 2030 enum lock_usage_bit *excl_bitp) 2031 { 2032 int bit, excl; 2033 2034 for_each_set_bit(bit, &mask, LOCK_USED) { 2035 excl = exclusive_bit(bit); 2036 if (excl_mask & lock_flag(excl)) { 2037 *bitp = bit; 2038 *excl_bitp = excl; 2039 return 0; 2040 } 2041 } 2042 return -1; 2043 } 2044 2045 /* 2046 * Prove that the new dependency does not connect a hardirq-safe(-read) 2047 * lock with a hardirq-unsafe lock - to achieve this we search 2048 * the backwards-subgraph starting at <prev>, and the 2049 * forwards-subgraph starting at <next>: 2050 */ 2051 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, 2052 struct held_lock *next) 2053 { 2054 unsigned long usage_mask = 0, forward_mask, backward_mask; 2055 enum lock_usage_bit forward_bit = 0, backward_bit = 0; 2056 struct lock_list *uninitialized_var(target_entry1); 2057 struct lock_list *uninitialized_var(target_entry); 2058 struct lock_list this, that; 2059 int ret; 2060 2061 /* 2062 * Step 1: gather all hard/soft IRQs usages backward in an 2063 * accumulated usage mask. 2064 */ 2065 this.parent = NULL; 2066 this.class = hlock_class(prev); 2067 2068 ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, NULL); 2069 if (ret < 0) 2070 return print_bfs_bug(ret); 2071 2072 usage_mask &= LOCKF_USED_IN_IRQ_ALL; 2073 if (!usage_mask) 2074 return 1; 2075 2076 /* 2077 * Step 2: find exclusive uses forward that match the previous 2078 * backward accumulated mask. 2079 */ 2080 forward_mask = exclusive_mask(usage_mask); 2081 2082 that.parent = NULL; 2083 that.class = hlock_class(next); 2084 2085 ret = find_usage_forwards(&that, forward_mask, &target_entry1); 2086 if (ret < 0) 2087 return print_bfs_bug(ret); 2088 if (ret == 1) 2089 return ret; 2090 2091 /* 2092 * Step 3: we found a bad match! Now retrieve a lock from the backward 2093 * list whose usage mask matches the exclusive usage mask from the 2094 * lock found on the forward list. 2095 */ 2096 backward_mask = original_mask(target_entry1->class->usage_mask); 2097 2098 ret = find_usage_backwards(&this, backward_mask, &target_entry); 2099 if (ret < 0) 2100 return print_bfs_bug(ret); 2101 if (DEBUG_LOCKS_WARN_ON(ret == 1)) 2102 return 1; 2103 2104 /* 2105 * Step 4: narrow down to a pair of incompatible usage bits 2106 * and report it. 
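 * (find_exclusive_match() hands back, through the two bit pointers, the first
 * bit of the backward class's usage mask whose exclusive counterpart is set
 * in the forward class's usage mask; that pair names the offending usages.)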
2107 */ 2108 ret = find_exclusive_match(target_entry->class->usage_mask, 2109 target_entry1->class->usage_mask, 2110 &backward_bit, &forward_bit); 2111 if (DEBUG_LOCKS_WARN_ON(ret == -1)) 2112 return 1; 2113 2114 return print_bad_irq_dependency(curr, &this, &that, 2115 target_entry, target_entry1, 2116 prev, next, 2117 backward_bit, forward_bit, 2118 state_name(backward_bit)); 2119 } 2120 2121 static void inc_chains(void) 2122 { 2123 if (current->hardirq_context) 2124 nr_hardirq_chains++; 2125 else { 2126 if (current->softirq_context) 2127 nr_softirq_chains++; 2128 else 2129 nr_process_chains++; 2130 } 2131 } 2132 2133 #else 2134 2135 static inline int check_irq_usage(struct task_struct *curr, 2136 struct held_lock *prev, struct held_lock *next) 2137 { 2138 return 1; 2139 } 2140 2141 static inline void inc_chains(void) 2142 { 2143 nr_process_chains++; 2144 } 2145 2146 #endif 2147 2148 static void 2149 print_deadlock_scenario(struct held_lock *nxt, 2150 struct held_lock *prv) 2151 { 2152 struct lock_class *next = hlock_class(nxt); 2153 struct lock_class *prev = hlock_class(prv); 2154 2155 printk(" Possible unsafe locking scenario:\n\n"); 2156 printk(" CPU0\n"); 2157 printk(" ----\n"); 2158 printk(" lock("); 2159 __print_lock_name(prev); 2160 printk(KERN_CONT ");\n"); 2161 printk(" lock("); 2162 __print_lock_name(next); 2163 printk(KERN_CONT ");\n"); 2164 printk("\n *** DEADLOCK ***\n\n"); 2165 printk(" May be due to missing lock nesting notation\n\n"); 2166 } 2167 2168 static int 2169 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, 2170 struct held_lock *next) 2171 { 2172 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2173 return 0; 2174 2175 pr_warn("\n"); 2176 pr_warn("============================================\n"); 2177 pr_warn("WARNING: possible recursive locking detected\n"); 2178 print_kernel_ident(); 2179 pr_warn("--------------------------------------------\n"); 2180 pr_warn("%s/%d is trying to acquire lock:\n", 2181 curr->comm, task_pid_nr(curr)); 2182 print_lock(next); 2183 pr_warn("\nbut task is already holding lock:\n"); 2184 print_lock(prev); 2185 2186 pr_warn("\nother info that might help us debug this:\n"); 2187 print_deadlock_scenario(next, prev); 2188 lockdep_print_held_locks(curr); 2189 2190 pr_warn("\nstack backtrace:\n"); 2191 dump_stack(); 2192 2193 return 0; 2194 } 2195 2196 /* 2197 * Check whether we are holding such a class already. 2198 * 2199 * (Note that this has to be done separately, because the graph cannot 2200 * detect such classes of deadlocks.) 2201 * 2202 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read 2203 */ 2204 static int 2205 check_deadlock(struct task_struct *curr, struct held_lock *next, 2206 struct lockdep_map *next_instance, int read) 2207 { 2208 struct held_lock *prev; 2209 struct held_lock *nest = NULL; 2210 int i; 2211 2212 for (i = 0; i < curr->lockdep_depth; i++) { 2213 prev = curr->held_locks + i; 2214 2215 if (prev->instance == next->nest_lock) 2216 nest = prev; 2217 2218 if (hlock_class(prev) != hlock_class(next)) 2219 continue; 2220 2221 /* 2222 * Allow read-after-read recursion of the same 2223 * lock class (i.e. read_lock(lock)+read_lock(lock)): 2224 */ 2225 if ((read == 2) && prev->read) 2226 return 2; 2227 2228 /* 2229 * We're holding the nest_lock, which serializes this lock's 2230 * nesting behaviour. 
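 *
 * A typical user looks roughly like this (sketch only, with
 * hypothetical 'parent'/'child' structures):
 *
 *	mutex_lock(&parent->mutex);
 *	list_for_each_entry(child, &parent->children, node)
 *		mutex_lock_nest_lock(&child->mutex, &parent->mutex);
 *
 * All the child mutexes share one class, but the parent mutex passed
 * in as nest_lock serializes the whole group, so lockdep treats the
 * repeated acquisition as safe.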
2231 */ 2232 if (nest) 2233 return 2; 2234 2235 return print_deadlock_bug(curr, prev, next); 2236 } 2237 return 1; 2238 } 2239 2240 /* 2241 * There was a chain-cache miss, and we are about to add a new dependency 2242 * to a previous lock. We recursively validate the following rules: 2243 * 2244 * - would the adding of the <prev> -> <next> dependency create a 2245 * circular dependency in the graph? [== circular deadlock] 2246 * 2247 * - does the new prev->next dependency connect any hardirq-safe lock 2248 * (in the full backwards-subgraph starting at <prev>) with any 2249 * hardirq-unsafe lock (in the full forwards-subgraph starting at 2250 * <next>)? [== illegal lock inversion with hardirq contexts] 2251 * 2252 * - does the new prev->next dependency connect any softirq-safe lock 2253 * (in the full backwards-subgraph starting at <prev>) with any 2254 * softirq-unsafe lock (in the full forwards-subgraph starting at 2255 * <next>)? [== illegal lock inversion with softirq contexts] 2256 * 2257 * any of these scenarios could lead to a deadlock. 2258 * 2259 * Then if all the validations pass, we add the forwards and backwards 2260 * dependency. 2261 */ 2262 static int 2263 check_prev_add(struct task_struct *curr, struct held_lock *prev, 2264 struct held_lock *next, int distance, struct lock_trace *trace) 2265 { 2266 struct lock_list *uninitialized_var(target_entry); 2267 struct lock_list *entry; 2268 struct lock_list this; 2269 int ret; 2270 2271 if (!hlock_class(prev)->key || !hlock_class(next)->key) { 2272 /* 2273 * The warning statements below may trigger a use-after-free 2274 * of the class name. It is better to trigger a use-after free 2275 * and to have the class name most of the time instead of not 2276 * having the class name available. 2277 */ 2278 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key, 2279 "Detected use-after-free of lock class %px/%s\n", 2280 hlock_class(prev), 2281 hlock_class(prev)->name); 2282 WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key, 2283 "Detected use-after-free of lock class %px/%s\n", 2284 hlock_class(next), 2285 hlock_class(next)->name); 2286 return 2; 2287 } 2288 2289 /* 2290 * Prove that the new <prev> -> <next> dependency would not 2291 * create a circular dependency in the graph. (We do this by 2292 * forward-recursing into the graph starting at <next>, and 2293 * checking whether we can reach <prev>.) 2294 * 2295 * We are using global variables to control the recursion, to 2296 * keep the stackframe size of the recursive functions low: 2297 */ 2298 this.class = hlock_class(next); 2299 this.parent = NULL; 2300 ret = check_noncircular(&this, hlock_class(prev), &target_entry); 2301 if (unlikely(!ret)) { 2302 if (!trace->nr_entries) { 2303 /* 2304 * If save_trace fails here, the printing might 2305 * trigger a WARN but because of the !nr_entries it 2306 * should not do bad things. 2307 */ 2308 save_trace(trace); 2309 } 2310 return print_circular_bug(&this, target_entry, next, prev); 2311 } 2312 else if (unlikely(ret < 0)) 2313 return print_bfs_bug(ret); 2314 2315 if (!check_irq_usage(curr, prev, next)) 2316 return 0; 2317 2318 /* 2319 * For recursive read-locks we do all the dependency checks, 2320 * but we dont store read-triggered dependencies (only 2321 * write-triggered dependencies). This ensures that only the 2322 * write-side dependencies matter, and that if for example a 2323 * write-lock never takes any other locks, then the reads are 2324 * equivalent to a NOP. 
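 *
 * (Here read == 2 means a recursive reader such as read_lock();
 * non-recursive readers like down_read() pass read == 1 and do get
 * their dependencies recorded.)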
2325 */ 2326 if (next->read == 2 || prev->read == 2) 2327 return 1; 2328 /* 2329 * Is the <prev> -> <next> dependency already present? 2330 * 2331 * (this may occur even though this is a new chain: consider 2332 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 2333 * chains - the second one will be new, but L1 already has 2334 * L2 added to its dependency list, due to the first chain.) 2335 */ 2336 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { 2337 if (entry->class == hlock_class(next)) { 2338 if (distance == 1) 2339 entry->distance = 1; 2340 return 1; 2341 } 2342 } 2343 2344 /* 2345 * Is the <prev> -> <next> link redundant? 2346 */ 2347 this.class = hlock_class(prev); 2348 this.parent = NULL; 2349 ret = check_redundant(&this, hlock_class(next), &target_entry); 2350 if (!ret) { 2351 debug_atomic_inc(nr_redundant); 2352 return 2; 2353 } 2354 if (ret < 0) 2355 return print_bfs_bug(ret); 2356 2357 2358 if (!trace->nr_entries && !save_trace(trace)) 2359 return 0; 2360 2361 /* 2362 * Ok, all validations passed, add the new lock 2363 * to the previous lock's dependency list: 2364 */ 2365 ret = add_lock_to_list(hlock_class(next), hlock_class(prev), 2366 &hlock_class(prev)->locks_after, 2367 next->acquire_ip, distance, trace); 2368 2369 if (!ret) 2370 return 0; 2371 2372 ret = add_lock_to_list(hlock_class(prev), hlock_class(next), 2373 &hlock_class(next)->locks_before, 2374 next->acquire_ip, distance, trace); 2375 if (!ret) 2376 return 0; 2377 2378 return 2; 2379 } 2380 2381 /* 2382 * Add the dependency to all directly-previous locks that are 'relevant'. 2383 * The ones that are relevant are (in increasing distance from curr): 2384 * all consecutive trylock entries and the final non-trylock entry - or 2385 * the end of this context's lock-chain - whichever comes first. 2386 */ 2387 static int 2388 check_prevs_add(struct task_struct *curr, struct held_lock *next) 2389 { 2390 struct lock_trace trace = { .nr_entries = 0 }; 2391 int depth = curr->lockdep_depth; 2392 struct held_lock *hlock; 2393 2394 /* 2395 * Debugging checks. 2396 * 2397 * Depth must not be zero for a non-head lock: 2398 */ 2399 if (!depth) 2400 goto out_bug; 2401 /* 2402 * At least two relevant locks must exist for this 2403 * to be a head: 2404 */ 2405 if (curr->held_locks[depth].irq_context != 2406 curr->held_locks[depth-1].irq_context) 2407 goto out_bug; 2408 2409 for (;;) { 2410 int distance = curr->lockdep_depth - depth + 1; 2411 hlock = curr->held_locks + depth - 1; 2412 2413 /* 2414 * Only non-recursive-read entries get new dependencies 2415 * added: 2416 */ 2417 if (hlock->read != 2 && hlock->check) { 2418 int ret = check_prev_add(curr, hlock, next, distance, 2419 &trace); 2420 if (!ret) 2421 return 0; 2422 2423 /* 2424 * Stop after the first non-trylock entry, 2425 * as non-trylock entries have added their 2426 * own direct dependencies already, so this 2427 * lock is connected to them indirectly: 2428 */ 2429 if (!hlock->trylock) 2430 break; 2431 } 2432 2433 depth--; 2434 /* 2435 * End of lock-stack? 2436 */ 2437 if (!depth) 2438 break; 2439 /* 2440 * Stop the search if we cross into another context: 2441 */ 2442 if (curr->held_locks[depth].irq_context != 2443 curr->held_locks[depth-1].irq_context) 2444 break; 2445 } 2446 return 1; 2447 out_bug: 2448 if (!debug_locks_off_graph_unlock()) 2449 return 0; 2450 2451 /* 2452 * Clearly we all shouldn't be here, but since we made it we 2453 * can reliable say we messed up our state. 
See the above two 2454 * gotos for reasons why we could possibly end up here. 2455 */ 2456 WARN_ON(1); 2457 2458 return 0; 2459 } 2460 2461 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; 2462 static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS); 2463 int nr_chain_hlocks; 2464 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; 2465 2466 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) 2467 { 2468 return lock_classes + chain_hlocks[chain->base + i]; 2469 } 2470 2471 /* 2472 * Returns the index of the first held_lock of the current chain 2473 */ 2474 static inline int get_first_held_lock(struct task_struct *curr, 2475 struct held_lock *hlock) 2476 { 2477 int i; 2478 struct held_lock *hlock_curr; 2479 2480 for (i = curr->lockdep_depth - 1; i >= 0; i--) { 2481 hlock_curr = curr->held_locks + i; 2482 if (hlock_curr->irq_context != hlock->irq_context) 2483 break; 2484 2485 } 2486 2487 return ++i; 2488 } 2489 2490 #ifdef CONFIG_DEBUG_LOCKDEP 2491 /* 2492 * Returns the next chain_key iteration 2493 */ 2494 static u64 print_chain_key_iteration(int class_idx, u64 chain_key) 2495 { 2496 u64 new_chain_key = iterate_chain_key(chain_key, class_idx); 2497 2498 printk(" class_idx:%d -> chain_key:%016Lx", 2499 class_idx, 2500 (unsigned long long)new_chain_key); 2501 return new_chain_key; 2502 } 2503 2504 static void 2505 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next) 2506 { 2507 struct held_lock *hlock; 2508 u64 chain_key = 0; 2509 int depth = curr->lockdep_depth; 2510 int i; 2511 2512 printk("depth: %u\n", depth + 1); 2513 for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) { 2514 hlock = curr->held_locks + i; 2515 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key); 2516 2517 print_lock(hlock); 2518 } 2519 2520 print_chain_key_iteration(hlock_next->class_idx, chain_key); 2521 print_lock(hlock_next); 2522 } 2523 2524 static void print_chain_keys_chain(struct lock_chain *chain) 2525 { 2526 int i; 2527 u64 chain_key = 0; 2528 int class_id; 2529 2530 printk("depth: %u\n", chain->depth); 2531 for (i = 0; i < chain->depth; i++) { 2532 class_id = chain_hlocks[chain->base + i]; 2533 chain_key = print_chain_key_iteration(class_id + 1, chain_key); 2534 2535 print_lock_name(lock_classes + class_id); 2536 printk("\n"); 2537 } 2538 } 2539 2540 static void print_collision(struct task_struct *curr, 2541 struct held_lock *hlock_next, 2542 struct lock_chain *chain) 2543 { 2544 pr_warn("\n"); 2545 pr_warn("============================\n"); 2546 pr_warn("WARNING: chain_key collision\n"); 2547 print_kernel_ident(); 2548 pr_warn("----------------------------\n"); 2549 pr_warn("%s/%d: ", current->comm, task_pid_nr(current)); 2550 pr_warn("Hash chain already cached but the contents don't match!\n"); 2551 2552 pr_warn("Held locks:"); 2553 print_chain_keys_held_locks(curr, hlock_next); 2554 2555 pr_warn("Locks in cached chain:"); 2556 print_chain_keys_chain(chain); 2557 2558 pr_warn("\nstack backtrace:\n"); 2559 dump_stack(); 2560 } 2561 #endif 2562 2563 /* 2564 * Checks whether the chain and the current held locks are consistent 2565 * in depth and also in content. If they are not it most likely means 2566 * that there was a collision during the calculation of the chain_key. 
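 * (Two different sequences of class indices can hash to the same
 * 64-bit chain_key; this check catches such a collision instead of
 * silently trusting the cached chain.)
 *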
2567 * Returns: 0 not passed, 1 passed 2568 */ 2569 static int check_no_collision(struct task_struct *curr, 2570 struct held_lock *hlock, 2571 struct lock_chain *chain) 2572 { 2573 #ifdef CONFIG_DEBUG_LOCKDEP 2574 int i, j, id; 2575 2576 i = get_first_held_lock(curr, hlock); 2577 2578 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) { 2579 print_collision(curr, hlock, chain); 2580 return 0; 2581 } 2582 2583 for (j = 0; j < chain->depth - 1; j++, i++) { 2584 id = curr->held_locks[i].class_idx - 1; 2585 2586 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) { 2587 print_collision(curr, hlock, chain); 2588 return 0; 2589 } 2590 } 2591 #endif 2592 return 1; 2593 } 2594 2595 /* 2596 * Given an index that is >= -1, return the index of the next lock chain. 2597 * Return -2 if there is no next lock chain. 2598 */ 2599 long lockdep_next_lockchain(long i) 2600 { 2601 i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1); 2602 return i < ARRAY_SIZE(lock_chains) ? i : -2; 2603 } 2604 2605 unsigned long lock_chain_count(void) 2606 { 2607 return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains)); 2608 } 2609 2610 /* Must be called with the graph lock held. */ 2611 static struct lock_chain *alloc_lock_chain(void) 2612 { 2613 int idx = find_first_zero_bit(lock_chains_in_use, 2614 ARRAY_SIZE(lock_chains)); 2615 2616 if (unlikely(idx >= ARRAY_SIZE(lock_chains))) 2617 return NULL; 2618 __set_bit(idx, lock_chains_in_use); 2619 return lock_chains + idx; 2620 } 2621 2622 /* 2623 * Adds a dependency chain into chain hashtable. And must be called with 2624 * graph_lock held. 2625 * 2626 * Return 0 if fail, and graph_lock is released. 2627 * Return 1 if succeed, with graph_lock held. 2628 */ 2629 static inline int add_chain_cache(struct task_struct *curr, 2630 struct held_lock *hlock, 2631 u64 chain_key) 2632 { 2633 struct lock_class *class = hlock_class(hlock); 2634 struct hlist_head *hash_head = chainhashentry(chain_key); 2635 struct lock_chain *chain; 2636 int i, j; 2637 2638 /* 2639 * The caller must hold the graph lock, ensure we've got IRQs 2640 * disabled to make this an IRQ-safe lock.. for recursion reasons 2641 * lockdep won't complain about its own locking errors. 
2642 */ 2643 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2644 return 0; 2645 2646 chain = alloc_lock_chain(); 2647 if (!chain) { 2648 if (!debug_locks_off_graph_unlock()) 2649 return 0; 2650 2651 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); 2652 dump_stack(); 2653 return 0; 2654 } 2655 chain->chain_key = chain_key; 2656 chain->irq_context = hlock->irq_context; 2657 i = get_first_held_lock(curr, hlock); 2658 chain->depth = curr->lockdep_depth + 1 - i; 2659 2660 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks)); 2661 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); 2662 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes)); 2663 2664 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { 2665 chain->base = nr_chain_hlocks; 2666 for (j = 0; j < chain->depth - 1; j++, i++) { 2667 int lock_id = curr->held_locks[i].class_idx - 1; 2668 chain_hlocks[chain->base + j] = lock_id; 2669 } 2670 chain_hlocks[chain->base + j] = class - lock_classes; 2671 nr_chain_hlocks += chain->depth; 2672 } else { 2673 if (!debug_locks_off_graph_unlock()) 2674 return 0; 2675 2676 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); 2677 dump_stack(); 2678 return 0; 2679 } 2680 2681 hlist_add_head_rcu(&chain->entry, hash_head); 2682 debug_atomic_inc(chain_lookup_misses); 2683 inc_chains(); 2684 2685 return 1; 2686 } 2687 2688 /* 2689 * Look up a dependency chain. Must be called with either the graph lock or 2690 * the RCU read lock held. 2691 */ 2692 static inline struct lock_chain *lookup_chain_cache(u64 chain_key) 2693 { 2694 struct hlist_head *hash_head = chainhashentry(chain_key); 2695 struct lock_chain *chain; 2696 2697 hlist_for_each_entry_rcu(chain, hash_head, entry) { 2698 if (READ_ONCE(chain->chain_key) == chain_key) { 2699 debug_atomic_inc(chain_lookup_hits); 2700 return chain; 2701 } 2702 } 2703 return NULL; 2704 } 2705 2706 /* 2707 * If the key is not present yet in dependency chain cache then 2708 * add it and return 1 - in this case the new dependency chain is 2709 * validated. If the key is already hashed, return 0. 2710 * (On return with 1 graph_lock is held.) 
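 * (A 0 return also covers the common "chain already cached" fast path,
 * in which case no further dependency checks are needed and the graph
 * lock is not taken.)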
2711 */ 2712 static inline int lookup_chain_cache_add(struct task_struct *curr, 2713 struct held_lock *hlock, 2714 u64 chain_key) 2715 { 2716 struct lock_class *class = hlock_class(hlock); 2717 struct lock_chain *chain = lookup_chain_cache(chain_key); 2718 2719 if (chain) { 2720 cache_hit: 2721 if (!check_no_collision(curr, hlock, chain)) 2722 return 0; 2723 2724 if (very_verbose(class)) { 2725 printk("\nhash chain already cached, key: " 2726 "%016Lx tail class: [%px] %s\n", 2727 (unsigned long long)chain_key, 2728 class->key, class->name); 2729 } 2730 2731 return 0; 2732 } 2733 2734 if (very_verbose(class)) { 2735 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n", 2736 (unsigned long long)chain_key, class->key, class->name); 2737 } 2738 2739 if (!graph_lock()) 2740 return 0; 2741 2742 /* 2743 * We have to walk the chain again locked - to avoid duplicates: 2744 */ 2745 chain = lookup_chain_cache(chain_key); 2746 if (chain) { 2747 graph_unlock(); 2748 goto cache_hit; 2749 } 2750 2751 if (!add_chain_cache(curr, hlock, chain_key)) 2752 return 0; 2753 2754 return 1; 2755 } 2756 2757 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, 2758 struct held_lock *hlock, int chain_head, u64 chain_key) 2759 { 2760 /* 2761 * Trylock needs to maintain the stack of held locks, but it 2762 * does not add new dependencies, because trylock can be done 2763 * in any order. 2764 * 2765 * We look up the chain_key and do the O(N^2) check and update of 2766 * the dependencies only if this is a new dependency chain. 2767 * (If lookup_chain_cache_add() return with 1 it acquires 2768 * graph_lock for us) 2769 */ 2770 if (!hlock->trylock && hlock->check && 2771 lookup_chain_cache_add(curr, hlock, chain_key)) { 2772 /* 2773 * Check whether last held lock: 2774 * 2775 * - is irq-safe, if this lock is irq-unsafe 2776 * - is softirq-safe, if this lock is hardirq-unsafe 2777 * 2778 * And check whether the new lock's dependency graph 2779 * could lead back to the previous lock. 2780 * 2781 * any of these scenarios could lead to a deadlock. 
If 2782 * All validations 2783 */ 2784 int ret = check_deadlock(curr, hlock, lock, hlock->read); 2785 2786 if (!ret) 2787 return 0; 2788 /* 2789 * Mark recursive read, as we jump over it when 2790 * building dependencies (just like we jump over 2791 * trylock entries): 2792 */ 2793 if (ret == 2) 2794 hlock->read = 2; 2795 /* 2796 * Add dependency only if this lock is not the head 2797 * of the chain, and if it's not a secondary read-lock: 2798 */ 2799 if (!chain_head && ret != 2) { 2800 if (!check_prevs_add(curr, hlock)) 2801 return 0; 2802 } 2803 2804 graph_unlock(); 2805 } else { 2806 /* after lookup_chain_cache_add(): */ 2807 if (unlikely(!debug_locks)) 2808 return 0; 2809 } 2810 2811 return 1; 2812 } 2813 #else 2814 static inline int validate_chain(struct task_struct *curr, 2815 struct lockdep_map *lock, struct held_lock *hlock, 2816 int chain_head, u64 chain_key) 2817 { 2818 return 1; 2819 } 2820 2821 static void print_lock_trace(struct lock_trace *trace, unsigned int spaces) 2822 { 2823 } 2824 #endif 2825 2826 /* 2827 * We are building curr_chain_key incrementally, so double-check 2828 * it from scratch, to make sure that it's done correctly: 2829 */ 2830 static void check_chain_key(struct task_struct *curr) 2831 { 2832 #ifdef CONFIG_DEBUG_LOCKDEP 2833 struct held_lock *hlock, *prev_hlock = NULL; 2834 unsigned int i; 2835 u64 chain_key = 0; 2836 2837 for (i = 0; i < curr->lockdep_depth; i++) { 2838 hlock = curr->held_locks + i; 2839 if (chain_key != hlock->prev_chain_key) { 2840 debug_locks_off(); 2841 /* 2842 * We got mighty confused, our chain keys don't match 2843 * with what we expect, someone trample on our task state? 2844 */ 2845 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", 2846 curr->lockdep_depth, i, 2847 (unsigned long long)chain_key, 2848 (unsigned long long)hlock->prev_chain_key); 2849 return; 2850 } 2851 /* 2852 * Whoops ran out of static storage again? 2853 */ 2854 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS)) 2855 return; 2856 2857 if (prev_hlock && (prev_hlock->irq_context != 2858 hlock->irq_context)) 2859 chain_key = 0; 2860 chain_key = iterate_chain_key(chain_key, hlock->class_idx); 2861 prev_hlock = hlock; 2862 } 2863 if (chain_key != curr->curr_chain_key) { 2864 debug_locks_off(); 2865 /* 2866 * More smoking hash instead of calculating it, damn see these 2867 * numbers float.. I bet that a pink elephant stepped on my memory. 
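 * In plain terms: the chain key recomputed here from the held-lock
 * stack does not match the incrementally maintained curr_chain_key,
 * so some update path forgot to keep the two in sync.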
2868 */ 2869 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", 2870 curr->lockdep_depth, i, 2871 (unsigned long long)chain_key, 2872 (unsigned long long)curr->curr_chain_key); 2873 } 2874 #endif 2875 } 2876 2877 static int mark_lock(struct task_struct *curr, struct held_lock *this, 2878 enum lock_usage_bit new_bit); 2879 2880 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 2881 2882 2883 static void 2884 print_usage_bug_scenario(struct held_lock *lock) 2885 { 2886 struct lock_class *class = hlock_class(lock); 2887 2888 printk(" Possible unsafe locking scenario:\n\n"); 2889 printk(" CPU0\n"); 2890 printk(" ----\n"); 2891 printk(" lock("); 2892 __print_lock_name(class); 2893 printk(KERN_CONT ");\n"); 2894 printk(" <Interrupt>\n"); 2895 printk(" lock("); 2896 __print_lock_name(class); 2897 printk(KERN_CONT ");\n"); 2898 printk("\n *** DEADLOCK ***\n\n"); 2899 } 2900 2901 static int 2902 print_usage_bug(struct task_struct *curr, struct held_lock *this, 2903 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) 2904 { 2905 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2906 return 0; 2907 2908 pr_warn("\n"); 2909 pr_warn("================================\n"); 2910 pr_warn("WARNING: inconsistent lock state\n"); 2911 print_kernel_ident(); 2912 pr_warn("--------------------------------\n"); 2913 2914 pr_warn("inconsistent {%s} -> {%s} usage.\n", 2915 usage_str[prev_bit], usage_str[new_bit]); 2916 2917 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", 2918 curr->comm, task_pid_nr(curr), 2919 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, 2920 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, 2921 trace_hardirqs_enabled(curr), 2922 trace_softirqs_enabled(curr)); 2923 print_lock(this); 2924 2925 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]); 2926 print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1); 2927 2928 print_irqtrace_events(curr); 2929 pr_warn("\nother info that might help us debug this:\n"); 2930 print_usage_bug_scenario(this); 2931 2932 lockdep_print_held_locks(curr); 2933 2934 pr_warn("\nstack backtrace:\n"); 2935 dump_stack(); 2936 2937 return 0; 2938 } 2939 2940 /* 2941 * Print out an error if an invalid bit is set: 2942 */ 2943 static inline int 2944 valid_state(struct task_struct *curr, struct held_lock *this, 2945 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) 2946 { 2947 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) 2948 return print_usage_bug(curr, this, bad_bit, new_bit); 2949 return 1; 2950 } 2951 2952 2953 /* 2954 * print irq inversion bug: 2955 */ 2956 static int 2957 print_irq_inversion_bug(struct task_struct *curr, 2958 struct lock_list *root, struct lock_list *other, 2959 struct held_lock *this, int forwards, 2960 const char *irqclass) 2961 { 2962 struct lock_list *entry = other; 2963 struct lock_list *middle = NULL; 2964 int depth; 2965 2966 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2967 return 0; 2968 2969 pr_warn("\n"); 2970 pr_warn("========================================================\n"); 2971 pr_warn("WARNING: possible irq lock inversion dependency detected\n"); 2972 print_kernel_ident(); 2973 pr_warn("--------------------------------------------------------\n"); 2974 pr_warn("%s/%d just changed the state of lock:\n", 2975 curr->comm, task_pid_nr(curr)); 2976 print_lock(this); 2977 if (forwards) 2978 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass); 2979 else 2980 pr_warn("but this lock was 
taken by another, %s-safe lock in the past:\n", irqclass); 2981 print_lock_name(other->class); 2982 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n"); 2983 2984 pr_warn("\nother info that might help us debug this:\n"); 2985 2986 /* Find a middle lock (if one exists) */ 2987 depth = get_lock_depth(other); 2988 do { 2989 if (depth == 0 && (entry != root)) { 2990 pr_warn("lockdep:%s bad path found in chain graph\n", __func__); 2991 break; 2992 } 2993 middle = entry; 2994 entry = get_lock_parent(entry); 2995 depth--; 2996 } while (entry && entry != root && (depth >= 0)); 2997 if (forwards) 2998 print_irq_lock_scenario(root, other, 2999 middle ? middle->class : root->class, other->class); 3000 else 3001 print_irq_lock_scenario(other, root, 3002 middle ? middle->class : other->class, root->class); 3003 3004 lockdep_print_held_locks(curr); 3005 3006 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); 3007 if (!save_trace(&root->trace)) 3008 return 0; 3009 print_shortest_lock_dependencies(other, root); 3010 3011 pr_warn("\nstack backtrace:\n"); 3012 dump_stack(); 3013 3014 return 0; 3015 } 3016 3017 /* 3018 * Prove that in the forwards-direction subgraph starting at <this> 3019 * there is no lock matching <mask>: 3020 */ 3021 static int 3022 check_usage_forwards(struct task_struct *curr, struct held_lock *this, 3023 enum lock_usage_bit bit, const char *irqclass) 3024 { 3025 int ret; 3026 struct lock_list root; 3027 struct lock_list *uninitialized_var(target_entry); 3028 3029 root.parent = NULL; 3030 root.class = hlock_class(this); 3031 ret = find_usage_forwards(&root, lock_flag(bit), &target_entry); 3032 if (ret < 0) 3033 return print_bfs_bug(ret); 3034 if (ret == 1) 3035 return ret; 3036 3037 return print_irq_inversion_bug(curr, &root, target_entry, 3038 this, 1, irqclass); 3039 } 3040 3041 /* 3042 * Prove that in the backwards-direction subgraph starting at <this> 3043 * there is no lock matching <mask>: 3044 */ 3045 static int 3046 check_usage_backwards(struct task_struct *curr, struct held_lock *this, 3047 enum lock_usage_bit bit, const char *irqclass) 3048 { 3049 int ret; 3050 struct lock_list root; 3051 struct lock_list *uninitialized_var(target_entry); 3052 3053 root.parent = NULL; 3054 root.class = hlock_class(this); 3055 ret = find_usage_backwards(&root, lock_flag(bit), &target_entry); 3056 if (ret < 0) 3057 return print_bfs_bug(ret); 3058 if (ret == 1) 3059 return ret; 3060 3061 return print_irq_inversion_bug(curr, &root, target_entry, 3062 this, 0, irqclass); 3063 } 3064 3065 void print_irqtrace_events(struct task_struct *curr) 3066 { 3067 printk("irq event stamp: %u\n", curr->irq_events); 3068 printk("hardirqs last enabled at (%u): [<%px>] %pS\n", 3069 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip, 3070 (void *)curr->hardirq_enable_ip); 3071 printk("hardirqs last disabled at (%u): [<%px>] %pS\n", 3072 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip, 3073 (void *)curr->hardirq_disable_ip); 3074 printk("softirqs last enabled at (%u): [<%px>] %pS\n", 3075 curr->softirq_enable_event, (void *)curr->softirq_enable_ip, 3076 (void *)curr->softirq_enable_ip); 3077 printk("softirqs last disabled at (%u): [<%px>] %pS\n", 3078 curr->softirq_disable_event, (void *)curr->softirq_disable_ip, 3079 (void *)curr->softirq_disable_ip); 3080 } 3081 3082 static int HARDIRQ_verbose(struct lock_class *class) 3083 { 3084 #if HARDIRQ_VERBOSE 3085 return class_filter(class); 3086 #endif 3087 return 0; 3088 } 3089 3090 static int 
SOFTIRQ_verbose(struct lock_class *class) 3091 { 3092 #if SOFTIRQ_VERBOSE 3093 return class_filter(class); 3094 #endif 3095 return 0; 3096 } 3097 3098 #define STRICT_READ_CHECKS 1 3099 3100 static int (*state_verbose_f[])(struct lock_class *class) = { 3101 #define LOCKDEP_STATE(__STATE) \ 3102 __STATE##_verbose, 3103 #include "lockdep_states.h" 3104 #undef LOCKDEP_STATE 3105 }; 3106 3107 static inline int state_verbose(enum lock_usage_bit bit, 3108 struct lock_class *class) 3109 { 3110 return state_verbose_f[bit >> LOCK_USAGE_DIR_MASK](class); 3111 } 3112 3113 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, 3114 enum lock_usage_bit bit, const char *name); 3115 3116 static int 3117 mark_lock_irq(struct task_struct *curr, struct held_lock *this, 3118 enum lock_usage_bit new_bit) 3119 { 3120 int excl_bit = exclusive_bit(new_bit); 3121 int read = new_bit & LOCK_USAGE_READ_MASK; 3122 int dir = new_bit & LOCK_USAGE_DIR_MASK; 3123 3124 /* 3125 * mark USED_IN has to look forwards -- to ensure no dependency 3126 * has ENABLED state, which would allow recursion deadlocks. 3127 * 3128 * mark ENABLED has to look backwards -- to ensure no dependee 3129 * has USED_IN state, which, again, would allow recursion deadlocks. 3130 */ 3131 check_usage_f usage = dir ? 3132 check_usage_backwards : check_usage_forwards; 3133 3134 /* 3135 * Validate that this particular lock does not have conflicting 3136 * usage states. 3137 */ 3138 if (!valid_state(curr, this, new_bit, excl_bit)) 3139 return 0; 3140 3141 /* 3142 * Validate that the lock dependencies don't have conflicting usage 3143 * states. 3144 */ 3145 if ((!read || !dir || STRICT_READ_CHECKS) && 3146 !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK))) 3147 return 0; 3148 3149 /* 3150 * Check for read in write conflicts 3151 */ 3152 if (!read) { 3153 if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK)) 3154 return 0; 3155 3156 if (STRICT_READ_CHECKS && 3157 !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK, 3158 state_name(new_bit + LOCK_USAGE_READ_MASK))) 3159 return 0; 3160 } 3161 3162 if (state_verbose(new_bit, hlock_class(this))) 3163 return 2; 3164 3165 return 1; 3166 } 3167 3168 /* 3169 * Mark all held locks with a usage bit: 3170 */ 3171 static int 3172 mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit) 3173 { 3174 struct held_lock *hlock; 3175 int i; 3176 3177 for (i = 0; i < curr->lockdep_depth; i++) { 3178 enum lock_usage_bit hlock_bit = base_bit; 3179 hlock = curr->held_locks + i; 3180 3181 if (hlock->read) 3182 hlock_bit += LOCK_USAGE_READ_MASK; 3183 3184 BUG_ON(hlock_bit >= LOCK_USAGE_STATES); 3185 3186 if (!hlock->check) 3187 continue; 3188 3189 if (!mark_lock(curr, hlock, hlock_bit)) 3190 return 0; 3191 } 3192 3193 return 1; 3194 } 3195 3196 /* 3197 * Hardirqs will be enabled: 3198 */ 3199 static void __trace_hardirqs_on_caller(unsigned long ip) 3200 { 3201 struct task_struct *curr = current; 3202 3203 /* we'll do an OFF -> ON transition: */ 3204 curr->hardirqs_enabled = 1; 3205 3206 /* 3207 * We are going to turn hardirqs on, so set the 3208 * usage bit for all held locks: 3209 */ 3210 if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ)) 3211 return; 3212 /* 3213 * If we have softirqs enabled, then set the usage 3214 * bit for all held locks. 
(disabled hardirqs prevented 3215 * this bit from being set before) 3216 */ 3217 if (curr->softirqs_enabled) 3218 if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ)) 3219 return; 3220 3221 curr->hardirq_enable_ip = ip; 3222 curr->hardirq_enable_event = ++curr->irq_events; 3223 debug_atomic_inc(hardirqs_on_events); 3224 } 3225 3226 void lockdep_hardirqs_on(unsigned long ip) 3227 { 3228 if (unlikely(!debug_locks || current->lockdep_recursion)) 3229 return; 3230 3231 if (unlikely(current->hardirqs_enabled)) { 3232 /* 3233 * Neither irq nor preemption are disabled here 3234 * so this is racy by nature but losing one hit 3235 * in a stat is not a big deal. 3236 */ 3237 __debug_atomic_inc(redundant_hardirqs_on); 3238 return; 3239 } 3240 3241 /* 3242 * We're enabling irqs and according to our state above irqs weren't 3243 * already enabled, yet we find the hardware thinks they are in fact 3244 * enabled.. someone messed up their IRQ state tracing. 3245 */ 3246 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3247 return; 3248 3249 /* 3250 * See the fine text that goes along with this variable definition. 3251 */ 3252 if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled)) 3253 return; 3254 3255 /* 3256 * Can't allow enabling interrupts while in an interrupt handler, 3257 * that's general bad form and such. Recursion, limited stack etc.. 3258 */ 3259 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) 3260 return; 3261 3262 current->lockdep_recursion = 1; 3263 __trace_hardirqs_on_caller(ip); 3264 current->lockdep_recursion = 0; 3265 } 3266 NOKPROBE_SYMBOL(lockdep_hardirqs_on); 3267 3268 /* 3269 * Hardirqs were disabled: 3270 */ 3271 void lockdep_hardirqs_off(unsigned long ip) 3272 { 3273 struct task_struct *curr = current; 3274 3275 if (unlikely(!debug_locks || current->lockdep_recursion)) 3276 return; 3277 3278 /* 3279 * So we're supposed to get called after you mask local IRQs, but for 3280 * some reason the hardware doesn't quite think you did a proper job. 3281 */ 3282 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3283 return; 3284 3285 if (curr->hardirqs_enabled) { 3286 /* 3287 * We have done an ON -> OFF transition: 3288 */ 3289 curr->hardirqs_enabled = 0; 3290 curr->hardirq_disable_ip = ip; 3291 curr->hardirq_disable_event = ++curr->irq_events; 3292 debug_atomic_inc(hardirqs_off_events); 3293 } else 3294 debug_atomic_inc(redundant_hardirqs_off); 3295 } 3296 NOKPROBE_SYMBOL(lockdep_hardirqs_off); 3297 3298 /* 3299 * Softirqs will be enabled: 3300 */ 3301 void trace_softirqs_on(unsigned long ip) 3302 { 3303 struct task_struct *curr = current; 3304 3305 if (unlikely(!debug_locks || current->lockdep_recursion)) 3306 return; 3307 3308 /* 3309 * We fancy IRQs being disabled here, see softirq.c, avoids 3310 * funny state and nesting things. 
3311 */ 3312 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3313 return; 3314 3315 if (curr->softirqs_enabled) { 3316 debug_atomic_inc(redundant_softirqs_on); 3317 return; 3318 } 3319 3320 current->lockdep_recursion = 1; 3321 /* 3322 * We'll do an OFF -> ON transition: 3323 */ 3324 curr->softirqs_enabled = 1; 3325 curr->softirq_enable_ip = ip; 3326 curr->softirq_enable_event = ++curr->irq_events; 3327 debug_atomic_inc(softirqs_on_events); 3328 /* 3329 * We are going to turn softirqs on, so set the 3330 * usage bit for all held locks, if hardirqs are 3331 * enabled too: 3332 */ 3333 if (curr->hardirqs_enabled) 3334 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); 3335 current->lockdep_recursion = 0; 3336 } 3337 3338 /* 3339 * Softirqs were disabled: 3340 */ 3341 void trace_softirqs_off(unsigned long ip) 3342 { 3343 struct task_struct *curr = current; 3344 3345 if (unlikely(!debug_locks || current->lockdep_recursion)) 3346 return; 3347 3348 /* 3349 * We fancy IRQs being disabled here, see softirq.c 3350 */ 3351 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3352 return; 3353 3354 if (curr->softirqs_enabled) { 3355 /* 3356 * We have done an ON -> OFF transition: 3357 */ 3358 curr->softirqs_enabled = 0; 3359 curr->softirq_disable_ip = ip; 3360 curr->softirq_disable_event = ++curr->irq_events; 3361 debug_atomic_inc(softirqs_off_events); 3362 /* 3363 * Whoops, we wanted softirqs off, so why aren't they? 3364 */ 3365 DEBUG_LOCKS_WARN_ON(!softirq_count()); 3366 } else 3367 debug_atomic_inc(redundant_softirqs_off); 3368 } 3369 3370 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) 3371 { 3372 /* 3373 * If non-trylock use in a hardirq or softirq context, then 3374 * mark the lock as used in these contexts: 3375 */ 3376 if (!hlock->trylock) { 3377 if (hlock->read) { 3378 if (curr->hardirq_context) 3379 if (!mark_lock(curr, hlock, 3380 LOCK_USED_IN_HARDIRQ_READ)) 3381 return 0; 3382 if (curr->softirq_context) 3383 if (!mark_lock(curr, hlock, 3384 LOCK_USED_IN_SOFTIRQ_READ)) 3385 return 0; 3386 } else { 3387 if (curr->hardirq_context) 3388 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) 3389 return 0; 3390 if (curr->softirq_context) 3391 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) 3392 return 0; 3393 } 3394 } 3395 if (!hlock->hardirqs_off) { 3396 if (hlock->read) { 3397 if (!mark_lock(curr, hlock, 3398 LOCK_ENABLED_HARDIRQ_READ)) 3399 return 0; 3400 if (curr->softirqs_enabled) 3401 if (!mark_lock(curr, hlock, 3402 LOCK_ENABLED_SOFTIRQ_READ)) 3403 return 0; 3404 } else { 3405 if (!mark_lock(curr, hlock, 3406 LOCK_ENABLED_HARDIRQ)) 3407 return 0; 3408 if (curr->softirqs_enabled) 3409 if (!mark_lock(curr, hlock, 3410 LOCK_ENABLED_SOFTIRQ)) 3411 return 0; 3412 } 3413 } 3414 3415 return 1; 3416 } 3417 3418 static inline unsigned int task_irq_context(struct task_struct *task) 3419 { 3420 return 2 * !!task->hardirq_context + !!task->softirq_context; 3421 } 3422 3423 static int separate_irq_context(struct task_struct *curr, 3424 struct held_lock *hlock) 3425 { 3426 unsigned int depth = curr->lockdep_depth; 3427 3428 /* 3429 * Keep track of points where we cross into an interrupt context: 3430 */ 3431 if (depth) { 3432 struct held_lock *prev_hlock; 3433 3434 prev_hlock = curr->held_locks + depth-1; 3435 /* 3436 * If we cross into another context, reset the 3437 * hash key (this also prevents the checking and the 3438 * adding of the dependency to 'prev'): 3439 */ 3440 if (prev_hlock->irq_context != hlock->irq_context) 3441 return 1; 3442 } 3443 return 0; 3444 } 3445 3446 #else /* 
defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 3447 3448 static inline 3449 int mark_lock_irq(struct task_struct *curr, struct held_lock *this, 3450 enum lock_usage_bit new_bit) 3451 { 3452 WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */ 3453 return 1; 3454 } 3455 3456 static inline int mark_irqflags(struct task_struct *curr, 3457 struct held_lock *hlock) 3458 { 3459 return 1; 3460 } 3461 3462 static inline unsigned int task_irq_context(struct task_struct *task) 3463 { 3464 return 0; 3465 } 3466 3467 static inline int separate_irq_context(struct task_struct *curr, 3468 struct held_lock *hlock) 3469 { 3470 return 0; 3471 } 3472 3473 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 3474 3475 /* 3476 * Mark a lock with a usage bit, and validate the state transition: 3477 */ 3478 static int mark_lock(struct task_struct *curr, struct held_lock *this, 3479 enum lock_usage_bit new_bit) 3480 { 3481 unsigned int new_mask = 1 << new_bit, ret = 1; 3482 3483 /* 3484 * If already set then do not dirty the cacheline, 3485 * nor do any checks: 3486 */ 3487 if (likely(hlock_class(this)->usage_mask & new_mask)) 3488 return 1; 3489 3490 if (!graph_lock()) 3491 return 0; 3492 /* 3493 * Make sure we didn't race: 3494 */ 3495 if (unlikely(hlock_class(this)->usage_mask & new_mask)) { 3496 graph_unlock(); 3497 return 1; 3498 } 3499 3500 hlock_class(this)->usage_mask |= new_mask; 3501 3502 if (!save_trace(hlock_class(this)->usage_traces + new_bit)) 3503 return 0; 3504 3505 switch (new_bit) { 3506 #define LOCKDEP_STATE(__STATE) \ 3507 case LOCK_USED_IN_##__STATE: \ 3508 case LOCK_USED_IN_##__STATE##_READ: \ 3509 case LOCK_ENABLED_##__STATE: \ 3510 case LOCK_ENABLED_##__STATE##_READ: 3511 #include "lockdep_states.h" 3512 #undef LOCKDEP_STATE 3513 ret = mark_lock_irq(curr, this, new_bit); 3514 if (!ret) 3515 return 0; 3516 break; 3517 case LOCK_USED: 3518 debug_atomic_dec(nr_unused_locks); 3519 break; 3520 default: 3521 if (!debug_locks_off_graph_unlock()) 3522 return 0; 3523 WARN_ON(1); 3524 return 0; 3525 } 3526 3527 graph_unlock(); 3528 3529 /* 3530 * We must printk outside of the graph_lock: 3531 */ 3532 if (ret == 2) { 3533 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); 3534 print_lock(this); 3535 print_irqtrace_events(curr); 3536 dump_stack(); 3537 } 3538 3539 return ret; 3540 } 3541 3542 /* 3543 * Initialize a lock instance's lock-class mapping info: 3544 */ 3545 void lockdep_init_map(struct lockdep_map *lock, const char *name, 3546 struct lock_class_key *key, int subclass) 3547 { 3548 int i; 3549 3550 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) 3551 lock->class_cache[i] = NULL; 3552 3553 #ifdef CONFIG_LOCK_STAT 3554 lock->cpu = raw_smp_processor_id(); 3555 #endif 3556 3557 /* 3558 * Can't be having no nameless bastards around this place! 3559 */ 3560 if (DEBUG_LOCKS_WARN_ON(!name)) { 3561 lock->name = "NULL"; 3562 return; 3563 } 3564 3565 lock->name = name; 3566 3567 /* 3568 * No key, no joy, we need to hash something. 3569 */ 3570 if (DEBUG_LOCKS_WARN_ON(!key)) 3571 return; 3572 /* 3573 * Sanity check, the lock-class key must either have been allocated 3574 * statically or must have been registered as a dynamic key. 
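 *
 * A dynamic key is one handed to lockdep_register_key() beforehand, as
 * done by code that embeds the key in a runtime-allocated object.
 * Rough sketch (hypothetical 'obj'):
 *
 *	lockdep_register_key(&obj->key);
 *	lockdep_init_map(&obj->dep_map, "obj->lock", &obj->key, 0);
 *	...
 *	lockdep_unregister_key(&obj->key);	(before freeing 'obj')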
3575 */ 3576 if (!static_obj(key) && !is_dynamic_key(key)) { 3577 if (debug_locks) 3578 printk(KERN_ERR "BUG: key %px has not been registered!\n", key); 3579 DEBUG_LOCKS_WARN_ON(1); 3580 return; 3581 } 3582 lock->key = key; 3583 3584 if (unlikely(!debug_locks)) 3585 return; 3586 3587 if (subclass) { 3588 unsigned long flags; 3589 3590 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) 3591 return; 3592 3593 raw_local_irq_save(flags); 3594 current->lockdep_recursion = 1; 3595 register_lock_class(lock, subclass, 1); 3596 current->lockdep_recursion = 0; 3597 raw_local_irq_restore(flags); 3598 } 3599 } 3600 EXPORT_SYMBOL_GPL(lockdep_init_map); 3601 3602 struct lock_class_key __lockdep_no_validate__; 3603 EXPORT_SYMBOL_GPL(__lockdep_no_validate__); 3604 3605 static int 3606 print_lock_nested_lock_not_held(struct task_struct *curr, 3607 struct held_lock *hlock, 3608 unsigned long ip) 3609 { 3610 if (!debug_locks_off()) 3611 return 0; 3612 if (debug_locks_silent) 3613 return 0; 3614 3615 pr_warn("\n"); 3616 pr_warn("==================================\n"); 3617 pr_warn("WARNING: Nested lock was not taken\n"); 3618 print_kernel_ident(); 3619 pr_warn("----------------------------------\n"); 3620 3621 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); 3622 print_lock(hlock); 3623 3624 pr_warn("\nbut this task is not holding:\n"); 3625 pr_warn("%s\n", hlock->nest_lock->name); 3626 3627 pr_warn("\nstack backtrace:\n"); 3628 dump_stack(); 3629 3630 pr_warn("\nother info that might help us debug this:\n"); 3631 lockdep_print_held_locks(curr); 3632 3633 pr_warn("\nstack backtrace:\n"); 3634 dump_stack(); 3635 3636 return 0; 3637 } 3638 3639 static int __lock_is_held(const struct lockdep_map *lock, int read); 3640 3641 /* 3642 * This gets called for every mutex_lock*()/spin_lock*() operation. 3643 * We maintain the dependency maps and validate the locking attempt: 3644 * 3645 * The callers must make sure that IRQs are disabled before calling it, 3646 * otherwise we could get an interrupt which would want to take locks, 3647 * which would end up in lockdep again. 3648 */ 3649 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, 3650 int trylock, int read, int check, int hardirqs_off, 3651 struct lockdep_map *nest_lock, unsigned long ip, 3652 int references, int pin_count) 3653 { 3654 struct task_struct *curr = current; 3655 struct lock_class *class = NULL; 3656 struct held_lock *hlock; 3657 unsigned int depth; 3658 int chain_head = 0; 3659 int class_idx; 3660 u64 chain_key; 3661 3662 if (unlikely(!debug_locks)) 3663 return 0; 3664 3665 if (!prove_locking || lock->key == &__lockdep_no_validate__) 3666 check = 0; 3667 3668 if (subclass < NR_LOCKDEP_CACHING_CLASSES) 3669 class = lock->class_cache[subclass]; 3670 /* 3671 * Not cached? 3672 */ 3673 if (unlikely(!class)) { 3674 class = register_lock_class(lock, subclass, 0); 3675 if (!class) 3676 return 0; 3677 } 3678 3679 debug_class_ops_inc(class); 3680 3681 if (very_verbose(class)) { 3682 printk("\nacquire class [%px] %s", class->key, class->name); 3683 if (class->name_version > 1) 3684 printk(KERN_CONT "#%d", class->name_version); 3685 printk(KERN_CONT "\n"); 3686 dump_stack(); 3687 } 3688 3689 /* 3690 * Add the lock to the list of currently held locks. 3691 * (we dont increase the depth just yet, up until the 3692 * dependency checks are done) 3693 */ 3694 depth = curr->lockdep_depth; 3695 /* 3696 * Ran out of static storage for our per-task lock stack again have we? 
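 * (held_locks[] is a fixed, MAX_LOCK_DEPTH-sized array in task_struct;
 * rather than overflow it we warn and disable lockdep.)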
3697 */ 3698 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) 3699 return 0; 3700 3701 class_idx = class - lock_classes + 1; 3702 3703 if (depth) { 3704 hlock = curr->held_locks + depth - 1; 3705 if (hlock->class_idx == class_idx && nest_lock) { 3706 if (hlock->references) { 3707 /* 3708 * Check: unsigned int references:12, overflow. 3709 */ 3710 if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1)) 3711 return 0; 3712 3713 hlock->references++; 3714 } else { 3715 hlock->references = 2; 3716 } 3717 3718 return 1; 3719 } 3720 } 3721 3722 hlock = curr->held_locks + depth; 3723 /* 3724 * Plain impossible, we just registered it and checked it weren't no 3725 * NULL like.. I bet this mushroom I ate was good! 3726 */ 3727 if (DEBUG_LOCKS_WARN_ON(!class)) 3728 return 0; 3729 hlock->class_idx = class_idx; 3730 hlock->acquire_ip = ip; 3731 hlock->instance = lock; 3732 hlock->nest_lock = nest_lock; 3733 hlock->irq_context = task_irq_context(curr); 3734 hlock->trylock = trylock; 3735 hlock->read = read; 3736 hlock->check = check; 3737 hlock->hardirqs_off = !!hardirqs_off; 3738 hlock->references = references; 3739 #ifdef CONFIG_LOCK_STAT 3740 hlock->waittime_stamp = 0; 3741 hlock->holdtime_stamp = lockstat_clock(); 3742 #endif 3743 hlock->pin_count = pin_count; 3744 3745 if (check && !mark_irqflags(curr, hlock)) 3746 return 0; 3747 3748 /* mark it as used: */ 3749 if (!mark_lock(curr, hlock, LOCK_USED)) 3750 return 0; 3751 3752 /* 3753 * Calculate the chain hash: it's the combined hash of all the 3754 * lock keys along the dependency chain. We save the hash value 3755 * at every step so that we can get the current hash easily 3756 * after unlock. The chain hash is then used to cache dependency 3757 * results. 3758 * 3759 * The 'key ID' is what is the most compact key value to drive 3760 * the hash, not class->key. 3761 */ 3762 /* 3763 * Whoops, we did it again.. ran straight out of our static allocation. 3764 */ 3765 if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS)) 3766 return 0; 3767 3768 chain_key = curr->curr_chain_key; 3769 if (!depth) { 3770 /* 3771 * How can we have a chain hash when we ain't got no keys?! 
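 * (At zero depth the incremental chain key must still be at its
 * initial value of 0; anything else means some earlier unlock failed
 * to restore prev_chain_key correctly.)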
3772 */ 3773 if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) 3774 return 0; 3775 chain_head = 1; 3776 } 3777 3778 hlock->prev_chain_key = chain_key; 3779 if (separate_irq_context(curr, hlock)) { 3780 chain_key = 0; 3781 chain_head = 1; 3782 } 3783 chain_key = iterate_chain_key(chain_key, class_idx); 3784 3785 if (nest_lock && !__lock_is_held(nest_lock, -1)) 3786 return print_lock_nested_lock_not_held(curr, hlock, ip); 3787 3788 if (!debug_locks_silent) { 3789 WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key); 3790 WARN_ON_ONCE(!hlock_class(hlock)->key); 3791 } 3792 3793 if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) 3794 return 0; 3795 3796 curr->curr_chain_key = chain_key; 3797 curr->lockdep_depth++; 3798 check_chain_key(curr); 3799 #ifdef CONFIG_DEBUG_LOCKDEP 3800 if (unlikely(!debug_locks)) 3801 return 0; 3802 #endif 3803 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { 3804 debug_locks_off(); 3805 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!"); 3806 printk(KERN_DEBUG "depth: %i max: %lu!\n", 3807 curr->lockdep_depth, MAX_LOCK_DEPTH); 3808 3809 lockdep_print_held_locks(current); 3810 debug_show_all_locks(); 3811 dump_stack(); 3812 3813 return 0; 3814 } 3815 3816 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) 3817 max_lockdep_depth = curr->lockdep_depth; 3818 3819 return 1; 3820 } 3821 3822 static int 3823 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock, 3824 unsigned long ip) 3825 { 3826 if (!debug_locks_off()) 3827 return 0; 3828 if (debug_locks_silent) 3829 return 0; 3830 3831 pr_warn("\n"); 3832 pr_warn("=====================================\n"); 3833 pr_warn("WARNING: bad unlock balance detected!\n"); 3834 print_kernel_ident(); 3835 pr_warn("-------------------------------------\n"); 3836 pr_warn("%s/%d is trying to release lock (", 3837 curr->comm, task_pid_nr(curr)); 3838 print_lockdep_cache(lock); 3839 pr_cont(") at:\n"); 3840 print_ip_sym(ip); 3841 pr_warn("but there are no more locks to release!\n"); 3842 pr_warn("\nother info that might help us debug this:\n"); 3843 lockdep_print_held_locks(curr); 3844 3845 pr_warn("\nstack backtrace:\n"); 3846 dump_stack(); 3847 3848 return 0; 3849 } 3850 3851 static int match_held_lock(const struct held_lock *hlock, 3852 const struct lockdep_map *lock) 3853 { 3854 if (hlock->instance == lock) 3855 return 1; 3856 3857 if (hlock->references) { 3858 const struct lock_class *class = lock->class_cache[0]; 3859 3860 if (!class) 3861 class = look_up_lock_class(lock, 0); 3862 3863 /* 3864 * If look_up_lock_class() failed to find a class, we're trying 3865 * to test if we hold a lock that has never yet been acquired. 3866 * Clearly if the lock hasn't been acquired _ever_, we're not 3867 * holding it either, so report failure. 3868 */ 3869 if (!class) 3870 return 0; 3871 3872 /* 3873 * References, but not a lock we're actually ref-counting? 3874 * State got messed up, follow the sites that change ->references 3875 * and try to make sense of it. 
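 * (References are only taken for acquisitions that pass a nest_lock,
 * see __lock_acquire(), so a referenced entry is expected to have
 * ->nest_lock set.)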
3876 */ 3877 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) 3878 return 0; 3879 3880 if (hlock->class_idx == class - lock_classes + 1) 3881 return 1; 3882 } 3883 3884 return 0; 3885 } 3886 3887 /* @depth must not be zero */ 3888 static struct held_lock *find_held_lock(struct task_struct *curr, 3889 struct lockdep_map *lock, 3890 unsigned int depth, int *idx) 3891 { 3892 struct held_lock *ret, *hlock, *prev_hlock; 3893 int i; 3894 3895 i = depth - 1; 3896 hlock = curr->held_locks + i; 3897 ret = hlock; 3898 if (match_held_lock(hlock, lock)) 3899 goto out; 3900 3901 ret = NULL; 3902 for (i--, prev_hlock = hlock--; 3903 i >= 0; 3904 i--, prev_hlock = hlock--) { 3905 /* 3906 * We must not cross into another context: 3907 */ 3908 if (prev_hlock->irq_context != hlock->irq_context) { 3909 ret = NULL; 3910 break; 3911 } 3912 if (match_held_lock(hlock, lock)) { 3913 ret = hlock; 3914 break; 3915 } 3916 } 3917 3918 out: 3919 *idx = i; 3920 return ret; 3921 } 3922 3923 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, 3924 int idx) 3925 { 3926 struct held_lock *hlock; 3927 3928 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3929 return 0; 3930 3931 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { 3932 if (!__lock_acquire(hlock->instance, 3933 hlock_class(hlock)->subclass, 3934 hlock->trylock, 3935 hlock->read, hlock->check, 3936 hlock->hardirqs_off, 3937 hlock->nest_lock, hlock->acquire_ip, 3938 hlock->references, hlock->pin_count)) 3939 return 1; 3940 } 3941 return 0; 3942 } 3943 3944 static int 3945 __lock_set_class(struct lockdep_map *lock, const char *name, 3946 struct lock_class_key *key, unsigned int subclass, 3947 unsigned long ip) 3948 { 3949 struct task_struct *curr = current; 3950 struct held_lock *hlock; 3951 struct lock_class *class; 3952 unsigned int depth; 3953 int i; 3954 3955 if (unlikely(!debug_locks)) 3956 return 0; 3957 3958 depth = curr->lockdep_depth; 3959 /* 3960 * This function is about (re)setting the class of a held lock, 3961 * yet we're not actually holding any locks. Naughty user! 3962 */ 3963 if (DEBUG_LOCKS_WARN_ON(!depth)) 3964 return 0; 3965 3966 hlock = find_held_lock(curr, lock, depth, &i); 3967 if (!hlock) 3968 return print_unlock_imbalance_bug(curr, lock, ip); 3969 3970 lockdep_init_map(lock, name, key, 0); 3971 class = register_lock_class(lock, subclass, 0); 3972 hlock->class_idx = class - lock_classes + 1; 3973 3974 curr->lockdep_depth = i; 3975 curr->curr_chain_key = hlock->prev_chain_key; 3976 3977 if (reacquire_held_locks(curr, depth, i)) 3978 return 0; 3979 3980 /* 3981 * I took it apart and put it back together again, except now I have 3982 * these 'spare' parts.. where shall I put them. 3983 */ 3984 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) 3985 return 0; 3986 return 1; 3987 } 3988 3989 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip) 3990 { 3991 struct task_struct *curr = current; 3992 struct held_lock *hlock; 3993 unsigned int depth; 3994 int i; 3995 3996 if (unlikely(!debug_locks)) 3997 return 0; 3998 3999 depth = curr->lockdep_depth; 4000 /* 4001 * This function is about (re)setting the class of a held lock, 4002 * yet we're not actually holding any locks. Naughty user! 
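 * (For this downgrade case: we can only downgrade a write lock that we
 * are actually holding.)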
4003 */ 4004 if (DEBUG_LOCKS_WARN_ON(!depth)) 4005 return 0; 4006 4007 hlock = find_held_lock(curr, lock, depth, &i); 4008 if (!hlock) 4009 return print_unlock_imbalance_bug(curr, lock, ip); 4010 4011 curr->lockdep_depth = i; 4012 curr->curr_chain_key = hlock->prev_chain_key; 4013 4014 WARN(hlock->read, "downgrading a read lock"); 4015 hlock->read = 1; 4016 hlock->acquire_ip = ip; 4017 4018 if (reacquire_held_locks(curr, depth, i)) 4019 return 0; 4020 4021 /* 4022 * I took it apart and put it back together again, except now I have 4023 * these 'spare' parts.. where shall I put them. 4024 */ 4025 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) 4026 return 0; 4027 return 1; 4028 } 4029 4030 /* 4031 * Remove the lock to the list of currently held locks - this gets 4032 * called on mutex_unlock()/spin_unlock*() (or on a failed 4033 * mutex_lock_interruptible()). 4034 * 4035 * @nested is an hysterical artifact, needs a tree wide cleanup. 4036 */ 4037 static int 4038 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) 4039 { 4040 struct task_struct *curr = current; 4041 struct held_lock *hlock; 4042 unsigned int depth; 4043 int i; 4044 4045 if (unlikely(!debug_locks)) 4046 return 0; 4047 4048 depth = curr->lockdep_depth; 4049 /* 4050 * So we're all set to release this lock.. wait what lock? We don't 4051 * own any locks, you've been drinking again? 4052 */ 4053 if (DEBUG_LOCKS_WARN_ON(depth <= 0)) 4054 return print_unlock_imbalance_bug(curr, lock, ip); 4055 4056 /* 4057 * Check whether the lock exists in the current stack 4058 * of held locks: 4059 */ 4060 hlock = find_held_lock(curr, lock, depth, &i); 4061 if (!hlock) 4062 return print_unlock_imbalance_bug(curr, lock, ip); 4063 4064 if (hlock->instance == lock) 4065 lock_release_holdtime(hlock); 4066 4067 WARN(hlock->pin_count, "releasing a pinned lock\n"); 4068 4069 if (hlock->references) { 4070 hlock->references--; 4071 if (hlock->references) { 4072 /* 4073 * We had, and after removing one, still have 4074 * references, the current lock stack is still 4075 * valid. We're done! 4076 */ 4077 return 1; 4078 } 4079 } 4080 4081 /* 4082 * We have the right lock to unlock, 'hlock' points to it. 4083 * Now we remove it from the stack, and add back the other 4084 * entries (if any), recalculating the hash along the way: 4085 */ 4086 4087 curr->lockdep_depth = i; 4088 curr->curr_chain_key = hlock->prev_chain_key; 4089 4090 /* 4091 * The most likely case is when the unlock is on the innermost 4092 * lock. In this case, we are done! 4093 */ 4094 if (i == depth-1) 4095 return 1; 4096 4097 if (reacquire_held_locks(curr, depth, i + 1)) 4098 return 0; 4099 4100 /* 4101 * We had N bottles of beer on the wall, we drank one, but now 4102 * there's not N-1 bottles of beer left on the wall... 4103 */ 4104 DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1); 4105 4106 /* 4107 * Since reacquire_held_locks() would have called check_chain_key() 4108 * indirectly via __lock_acquire(), we don't need to do it again 4109 * on return. 
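 * (Hence the 0 return: lock_release() only re-runs check_chain_key()
 * when nothing had to be reacquired.)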
4110 */ 4111 return 0; 4112 } 4113 4114 static nokprobe_inline 4115 int __lock_is_held(const struct lockdep_map *lock, int read) 4116 { 4117 struct task_struct *curr = current; 4118 int i; 4119 4120 for (i = 0; i < curr->lockdep_depth; i++) { 4121 struct held_lock *hlock = curr->held_locks + i; 4122 4123 if (match_held_lock(hlock, lock)) { 4124 if (read == -1 || hlock->read == read) 4125 return 1; 4126 4127 return 0; 4128 } 4129 } 4130 4131 return 0; 4132 } 4133 4134 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock) 4135 { 4136 struct pin_cookie cookie = NIL_COOKIE; 4137 struct task_struct *curr = current; 4138 int i; 4139 4140 if (unlikely(!debug_locks)) 4141 return cookie; 4142 4143 for (i = 0; i < curr->lockdep_depth; i++) { 4144 struct held_lock *hlock = curr->held_locks + i; 4145 4146 if (match_held_lock(hlock, lock)) { 4147 /* 4148 * Grab 16bits of randomness; this is sufficient to not 4149 * be guessable and still allows some pin nesting in 4150 * our u32 pin_count. 4151 */ 4152 cookie.val = 1 + (prandom_u32() >> 16); 4153 hlock->pin_count += cookie.val; 4154 return cookie; 4155 } 4156 } 4157 4158 WARN(1, "pinning an unheld lock\n"); 4159 return cookie; 4160 } 4161 4162 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 4163 { 4164 struct task_struct *curr = current; 4165 int i; 4166 4167 if (unlikely(!debug_locks)) 4168 return; 4169 4170 for (i = 0; i < curr->lockdep_depth; i++) { 4171 struct held_lock *hlock = curr->held_locks + i; 4172 4173 if (match_held_lock(hlock, lock)) { 4174 hlock->pin_count += cookie.val; 4175 return; 4176 } 4177 } 4178 4179 WARN(1, "pinning an unheld lock\n"); 4180 } 4181 4182 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 4183 { 4184 struct task_struct *curr = current; 4185 int i; 4186 4187 if (unlikely(!debug_locks)) 4188 return; 4189 4190 for (i = 0; i < curr->lockdep_depth; i++) { 4191 struct held_lock *hlock = curr->held_locks + i; 4192 4193 if (match_held_lock(hlock, lock)) { 4194 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n")) 4195 return; 4196 4197 hlock->pin_count -= cookie.val; 4198 4199 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n")) 4200 hlock->pin_count = 0; 4201 4202 return; 4203 } 4204 } 4205 4206 WARN(1, "unpinning an unheld lock\n"); 4207 } 4208 4209 /* 4210 * Check whether we follow the irq-flags state precisely: 4211 */ 4212 static void check_flags(unsigned long flags) 4213 { 4214 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \ 4215 defined(CONFIG_TRACE_IRQFLAGS) 4216 if (!debug_locks) 4217 return; 4218 4219 if (irqs_disabled_flags(flags)) { 4220 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { 4221 printk("possible reason: unannotated irqs-off.\n"); 4222 } 4223 } else { 4224 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { 4225 printk("possible reason: unannotated irqs-on.\n"); 4226 } 4227 } 4228 4229 /* 4230 * We dont accurately track softirq state in e.g. 4231 * hardirq contexts (such as on 4KSTACKS), so only 4232 * check if not in hardirq contexts: 4233 */ 4234 if (!hardirq_count()) { 4235 if (softirq_count()) { 4236 /* like the above, but with softirqs */ 4237 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); 4238 } else { 4239 /* lick the above, does it taste good? 
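 * (i.e. outside both hardirq and softirq context we expect
 *  softirqs to be enabled)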
*/ 4240 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); 4241 } 4242 } 4243 4244 if (!debug_locks) 4245 print_irqtrace_events(current); 4246 #endif 4247 } 4248 4249 void lock_set_class(struct lockdep_map *lock, const char *name, 4250 struct lock_class_key *key, unsigned int subclass, 4251 unsigned long ip) 4252 { 4253 unsigned long flags; 4254 4255 if (unlikely(current->lockdep_recursion)) 4256 return; 4257 4258 raw_local_irq_save(flags); 4259 current->lockdep_recursion = 1; 4260 check_flags(flags); 4261 if (__lock_set_class(lock, name, key, subclass, ip)) 4262 check_chain_key(current); 4263 current->lockdep_recursion = 0; 4264 raw_local_irq_restore(flags); 4265 } 4266 EXPORT_SYMBOL_GPL(lock_set_class); 4267 4268 void lock_downgrade(struct lockdep_map *lock, unsigned long ip) 4269 { 4270 unsigned long flags; 4271 4272 if (unlikely(current->lockdep_recursion)) 4273 return; 4274 4275 raw_local_irq_save(flags); 4276 current->lockdep_recursion = 1; 4277 check_flags(flags); 4278 if (__lock_downgrade(lock, ip)) 4279 check_chain_key(current); 4280 current->lockdep_recursion = 0; 4281 raw_local_irq_restore(flags); 4282 } 4283 EXPORT_SYMBOL_GPL(lock_downgrade); 4284 4285 /* 4286 * We are not always called with irqs disabled - do that here, 4287 * and also avoid lockdep recursion: 4288 */ 4289 void lock_acquire(struct lockdep_map *lock, unsigned int subclass, 4290 int trylock, int read, int check, 4291 struct lockdep_map *nest_lock, unsigned long ip) 4292 { 4293 unsigned long flags; 4294 4295 if (unlikely(current->lockdep_recursion)) 4296 return; 4297 4298 raw_local_irq_save(flags); 4299 check_flags(flags); 4300 4301 current->lockdep_recursion = 1; 4302 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); 4303 __lock_acquire(lock, subclass, trylock, read, check, 4304 irqs_disabled_flags(flags), nest_lock, ip, 0, 0); 4305 current->lockdep_recursion = 0; 4306 raw_local_irq_restore(flags); 4307 } 4308 EXPORT_SYMBOL_GPL(lock_acquire); 4309 4310 void lock_release(struct lockdep_map *lock, int nested, 4311 unsigned long ip) 4312 { 4313 unsigned long flags; 4314 4315 if (unlikely(current->lockdep_recursion)) 4316 return; 4317 4318 raw_local_irq_save(flags); 4319 check_flags(flags); 4320 current->lockdep_recursion = 1; 4321 trace_lock_release(lock, ip); 4322 if (__lock_release(lock, nested, ip)) 4323 check_chain_key(current); 4324 current->lockdep_recursion = 0; 4325 raw_local_irq_restore(flags); 4326 } 4327 EXPORT_SYMBOL_GPL(lock_release); 4328 4329 int lock_is_held_type(const struct lockdep_map *lock, int read) 4330 { 4331 unsigned long flags; 4332 int ret = 0; 4333 4334 if (unlikely(current->lockdep_recursion)) 4335 return 1; /* avoid false negative lockdep_assert_held() */ 4336 4337 raw_local_irq_save(flags); 4338 check_flags(flags); 4339 4340 current->lockdep_recursion = 1; 4341 ret = __lock_is_held(lock, read); 4342 current->lockdep_recursion = 0; 4343 raw_local_irq_restore(flags); 4344 4345 return ret; 4346 } 4347 EXPORT_SYMBOL_GPL(lock_is_held_type); 4348 NOKPROBE_SYMBOL(lock_is_held_type); 4349 4350 struct pin_cookie lock_pin_lock(struct lockdep_map *lock) 4351 { 4352 struct pin_cookie cookie = NIL_COOKIE; 4353 unsigned long flags; 4354 4355 if (unlikely(current->lockdep_recursion)) 4356 return cookie; 4357 4358 raw_local_irq_save(flags); 4359 check_flags(flags); 4360 4361 current->lockdep_recursion = 1; 4362 cookie = __lock_pin_lock(lock); 4363 current->lockdep_recursion = 0; 4364 raw_local_irq_restore(flags); 4365 4366 return cookie; 4367 } 4368 
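/*
 * Usage sketch (illustrative, not taken from this file): a caller pins a
 * lock across a region in which it must not be dropped, typically via the
 * lockdep_pin_lock()/lockdep_unpin_lock() wrappers:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
 *	...	rq->lock must remain held throughout this region
 *	lockdep_unpin_lock(&rq->lock, cookie);
 *
 * Unpinning with a missing or stale cookie trips the WARNs above.
 */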
EXPORT_SYMBOL_GPL(lock_pin_lock); 4369 4370 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 4371 { 4372 unsigned long flags; 4373 4374 if (unlikely(current->lockdep_recursion)) 4375 return; 4376 4377 raw_local_irq_save(flags); 4378 check_flags(flags); 4379 4380 current->lockdep_recursion = 1; 4381 __lock_repin_lock(lock, cookie); 4382 current->lockdep_recursion = 0; 4383 raw_local_irq_restore(flags); 4384 } 4385 EXPORT_SYMBOL_GPL(lock_repin_lock); 4386 4387 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 4388 { 4389 unsigned long flags; 4390 4391 if (unlikely(current->lockdep_recursion)) 4392 return; 4393 4394 raw_local_irq_save(flags); 4395 check_flags(flags); 4396 4397 current->lockdep_recursion = 1; 4398 __lock_unpin_lock(lock, cookie); 4399 current->lockdep_recursion = 0; 4400 raw_local_irq_restore(flags); 4401 } 4402 EXPORT_SYMBOL_GPL(lock_unpin_lock); 4403 4404 #ifdef CONFIG_LOCK_STAT 4405 static int 4406 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, 4407 unsigned long ip) 4408 { 4409 if (!debug_locks_off()) 4410 return 0; 4411 if (debug_locks_silent) 4412 return 0; 4413 4414 pr_warn("\n"); 4415 pr_warn("=================================\n"); 4416 pr_warn("WARNING: bad contention detected!\n"); 4417 print_kernel_ident(); 4418 pr_warn("---------------------------------\n"); 4419 pr_warn("%s/%d is trying to contend lock (", 4420 curr->comm, task_pid_nr(curr)); 4421 print_lockdep_cache(lock); 4422 pr_cont(") at:\n"); 4423 print_ip_sym(ip); 4424 pr_warn("but there are no locks held!\n"); 4425 pr_warn("\nother info that might help us debug this:\n"); 4426 lockdep_print_held_locks(curr); 4427 4428 pr_warn("\nstack backtrace:\n"); 4429 dump_stack(); 4430 4431 return 0; 4432 } 4433 4434 static void 4435 __lock_contended(struct lockdep_map *lock, unsigned long ip) 4436 { 4437 struct task_struct *curr = current; 4438 struct held_lock *hlock; 4439 struct lock_class_stats *stats; 4440 unsigned int depth; 4441 int i, contention_point, contending_point; 4442 4443 depth = curr->lockdep_depth; 4444 /* 4445 * Whee, we contended on this lock, except it seems we're not 4446 * actually trying to acquire anything much at all.. 4447 */ 4448 if (DEBUG_LOCKS_WARN_ON(!depth)) 4449 return; 4450 4451 hlock = find_held_lock(curr, lock, depth, &i); 4452 if (!hlock) { 4453 print_lock_contention_bug(curr, lock, ip); 4454 return; 4455 } 4456 4457 if (hlock->instance != lock) 4458 return; 4459 4460 hlock->waittime_stamp = lockstat_clock(); 4461 4462 contention_point = lock_point(hlock_class(hlock)->contention_point, ip); 4463 contending_point = lock_point(hlock_class(hlock)->contending_point, 4464 lock->ip); 4465 4466 stats = get_lock_stats(hlock_class(hlock)); 4467 if (contention_point < LOCKSTAT_POINTS) 4468 stats->contention_point[contention_point]++; 4469 if (contending_point < LOCKSTAT_POINTS) 4470 stats->contending_point[contending_point]++; 4471 if (lock->cpu != smp_processor_id()) 4472 stats->bounces[bounce_contended + !!hlock->read]++; 4473 } 4474 4475 static void 4476 __lock_acquired(struct lockdep_map *lock, unsigned long ip) 4477 { 4478 struct task_struct *curr = current; 4479 struct held_lock *hlock; 4480 struct lock_class_stats *stats; 4481 unsigned int depth; 4482 u64 now, waittime = 0; 4483 int i, cpu; 4484 4485 depth = curr->lockdep_depth; 4486 /* 4487 * Yay, we acquired ownership of this lock we didn't try to 4488 * acquire, how the heck did that happen? 
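 *
 * (For context, an illustrative sketch only: a sleeping-lock slow path
 *  normally pairs these hooks as in the LOCK_CONTENDED() helper,
 *
 *	lock_contended(&lock->dep_map, _RET_IP_);
 *	... block until the lock is obtained ...
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 *
 *  so by the time we get here at least one lock should be held.)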
4489 */ 4490 if (DEBUG_LOCKS_WARN_ON(!depth)) 4491 return; 4492 4493 hlock = find_held_lock(curr, lock, depth, &i); 4494 if (!hlock) { 4495 print_lock_contention_bug(curr, lock, _RET_IP_); 4496 return; 4497 } 4498 4499 if (hlock->instance != lock) 4500 return; 4501 4502 cpu = smp_processor_id(); 4503 if (hlock->waittime_stamp) { 4504 now = lockstat_clock(); 4505 waittime = now - hlock->waittime_stamp; 4506 hlock->holdtime_stamp = now; 4507 } 4508 4509 trace_lock_acquired(lock, ip); 4510 4511 stats = get_lock_stats(hlock_class(hlock)); 4512 if (waittime) { 4513 if (hlock->read) 4514 lock_time_inc(&stats->read_waittime, waittime); 4515 else 4516 lock_time_inc(&stats->write_waittime, waittime); 4517 } 4518 if (lock->cpu != cpu) 4519 stats->bounces[bounce_acquired + !!hlock->read]++; 4520 4521 lock->cpu = cpu; 4522 lock->ip = ip; 4523 } 4524 4525 void lock_contended(struct lockdep_map *lock, unsigned long ip) 4526 { 4527 unsigned long flags; 4528 4529 if (unlikely(!lock_stat || !debug_locks)) 4530 return; 4531 4532 if (unlikely(current->lockdep_recursion)) 4533 return; 4534 4535 raw_local_irq_save(flags); 4536 check_flags(flags); 4537 current->lockdep_recursion = 1; 4538 trace_lock_contended(lock, ip); 4539 __lock_contended(lock, ip); 4540 current->lockdep_recursion = 0; 4541 raw_local_irq_restore(flags); 4542 } 4543 EXPORT_SYMBOL_GPL(lock_contended); 4544 4545 void lock_acquired(struct lockdep_map *lock, unsigned long ip) 4546 { 4547 unsigned long flags; 4548 4549 if (unlikely(!lock_stat || !debug_locks)) 4550 return; 4551 4552 if (unlikely(current->lockdep_recursion)) 4553 return; 4554 4555 raw_local_irq_save(flags); 4556 check_flags(flags); 4557 current->lockdep_recursion = 1; 4558 __lock_acquired(lock, ip); 4559 current->lockdep_recursion = 0; 4560 raw_local_irq_restore(flags); 4561 } 4562 EXPORT_SYMBOL_GPL(lock_acquired); 4563 #endif 4564 4565 /* 4566 * Used by the testsuite, sanitize the validator state 4567 * after a simulated failure: 4568 */ 4569 4570 void lockdep_reset(void) 4571 { 4572 unsigned long flags; 4573 int i; 4574 4575 raw_local_irq_save(flags); 4576 current->curr_chain_key = 0; 4577 current->lockdep_depth = 0; 4578 current->lockdep_recursion = 0; 4579 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); 4580 nr_hardirq_chains = 0; 4581 nr_softirq_chains = 0; 4582 nr_process_chains = 0; 4583 debug_locks = 1; 4584 for (i = 0; i < CHAINHASH_SIZE; i++) 4585 INIT_HLIST_HEAD(chainhash_table + i); 4586 raw_local_irq_restore(flags); 4587 } 4588 4589 /* Remove a class from a lock chain. Must be called with the graph lock held. */ 4590 static void remove_class_from_lock_chain(struct pending_free *pf, 4591 struct lock_chain *chain, 4592 struct lock_class *class) 4593 { 4594 #ifdef CONFIG_PROVE_LOCKING 4595 struct lock_chain *new_chain; 4596 u64 chain_key; 4597 int i; 4598 4599 for (i = chain->base; i < chain->base + chain->depth; i++) { 4600 if (chain_hlocks[i] != class - lock_classes) 4601 continue; 4602 /* The code below leaks one chain_hlock[] entry. */ 4603 if (--chain->depth > 0) { 4604 memmove(&chain_hlocks[i], &chain_hlocks[i + 1], 4605 (chain->base + chain->depth - i) * 4606 sizeof(chain_hlocks[0])); 4607 } 4608 /* 4609 * Each lock class occurs at most once in a lock chain so once 4610 * we found a match we can break out of this loop. 4611 */ 4612 goto recalc; 4613 } 4614 /* Since the chain has not been modified, return. 
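 * (We only get here if @class was not found in this chain; a match above
 *  jumps straight to the recalc: label below instead.)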
*/ 4615 return; 4616 4617 recalc: 4618 chain_key = 0; 4619 for (i = chain->base; i < chain->base + chain->depth; i++) 4620 chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1); 4621 if (chain->depth && chain->chain_key == chain_key) 4622 return; 4623 /* Overwrite the chain key for concurrent RCU readers. */ 4624 WRITE_ONCE(chain->chain_key, chain_key); 4625 /* 4626 * Note: calling hlist_del_rcu() from inside a 4627 * hlist_for_each_entry_rcu() loop is safe. 4628 */ 4629 hlist_del_rcu(&chain->entry); 4630 __set_bit(chain - lock_chains, pf->lock_chains_being_freed); 4631 if (chain->depth == 0) 4632 return; 4633 /* 4634 * If the modified lock chain matches an existing lock chain, drop 4635 * the modified lock chain. 4636 */ 4637 if (lookup_chain_cache(chain_key)) 4638 return; 4639 new_chain = alloc_lock_chain(); 4640 if (WARN_ON_ONCE(!new_chain)) { 4641 debug_locks_off(); 4642 return; 4643 } 4644 *new_chain = *chain; 4645 hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key)); 4646 #endif 4647 } 4648 4649 /* Must be called with the graph lock held. */ 4650 static void remove_class_from_lock_chains(struct pending_free *pf, 4651 struct lock_class *class) 4652 { 4653 struct lock_chain *chain; 4654 struct hlist_head *head; 4655 int i; 4656 4657 for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) { 4658 head = chainhash_table + i; 4659 hlist_for_each_entry_rcu(chain, head, entry) { 4660 remove_class_from_lock_chain(pf, chain, class); 4661 } 4662 } 4663 } 4664 4665 /* 4666 * Remove all references to a lock class. The caller must hold the graph lock. 4667 */ 4668 static void zap_class(struct pending_free *pf, struct lock_class *class) 4669 { 4670 struct lock_list *entry; 4671 int i; 4672 4673 WARN_ON_ONCE(!class->key); 4674 4675 /* 4676 * Remove all dependencies this lock is 4677 * involved in: 4678 */ 4679 for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) { 4680 entry = list_entries + i; 4681 if (entry->class != class && entry->links_to != class) 4682 continue; 4683 __clear_bit(i, list_entries_in_use); 4684 nr_list_entries--; 4685 list_del_rcu(&entry->entry); 4686 } 4687 if (list_empty(&class->locks_after) && 4688 list_empty(&class->locks_before)) { 4689 list_move_tail(&class->lock_entry, &pf->zapped); 4690 hlist_del_rcu(&class->hash_entry); 4691 WRITE_ONCE(class->key, NULL); 4692 WRITE_ONCE(class->name, NULL); 4693 nr_lock_classes--; 4694 } else { 4695 WARN_ONCE(true, "%s() failed for class %s\n", __func__, 4696 class->name); 4697 } 4698 4699 remove_class_from_lock_chains(pf, class); 4700 } 4701 4702 static void reinit_class(struct lock_class *class) 4703 { 4704 void *const p = class; 4705 const unsigned int offset = offsetof(struct lock_class, key); 4706 4707 WARN_ON_ONCE(!class->lock_entry.next); 4708 WARN_ON_ONCE(!list_empty(&class->locks_after)); 4709 WARN_ON_ONCE(!list_empty(&class->locks_before)); 4710 memset(p + offset, 0, sizeof(*class) - offset); 4711 WARN_ON_ONCE(!class->lock_entry.next); 4712 WARN_ON_ONCE(!list_empty(&class->locks_after)); 4713 WARN_ON_ONCE(!list_empty(&class->locks_before)); 4714 } 4715 4716 static inline int within(const void *addr, void *start, unsigned long size) 4717 { 4718 return addr >= start && addr < start + size; 4719 } 4720 4721 static bool inside_selftest(void) 4722 { 4723 return current == lockdep_selftest_task_struct; 4724 } 4725 4726 /* The caller must hold the graph lock. 
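 * (delayed_free.pf[] holds two pending_free buffers: the one selected by
 *  delayed_free.index collects newly zapped classes, while the other one
 *  may still be draining in an RCU callback; call_rcu_zapped() flips the
 *  index when it schedules that callback.)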
*/ 4727 static struct pending_free *get_pending_free(void) 4728 { 4729 return delayed_free.pf + delayed_free.index; 4730 } 4731 4732 static void free_zapped_rcu(struct rcu_head *cb); 4733 4734 /* 4735 * Schedule an RCU callback if no RCU callback is pending. Must be called with 4736 * the graph lock held. 4737 */ 4738 static void call_rcu_zapped(struct pending_free *pf) 4739 { 4740 WARN_ON_ONCE(inside_selftest()); 4741 4742 if (list_empty(&pf->zapped)) 4743 return; 4744 4745 if (delayed_free.scheduled) 4746 return; 4747 4748 delayed_free.scheduled = true; 4749 4750 WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf); 4751 delayed_free.index ^= 1; 4752 4753 call_rcu(&delayed_free.rcu_head, free_zapped_rcu); 4754 } 4755 4756 /* The caller must hold the graph lock. May be called from RCU context. */ 4757 static void __free_zapped_classes(struct pending_free *pf) 4758 { 4759 struct lock_class *class; 4760 4761 check_data_structures(); 4762 4763 list_for_each_entry(class, &pf->zapped, lock_entry) 4764 reinit_class(class); 4765 4766 list_splice_init(&pf->zapped, &free_lock_classes); 4767 4768 #ifdef CONFIG_PROVE_LOCKING 4769 bitmap_andnot(lock_chains_in_use, lock_chains_in_use, 4770 pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains)); 4771 bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains)); 4772 #endif 4773 } 4774 4775 static void free_zapped_rcu(struct rcu_head *ch) 4776 { 4777 struct pending_free *pf; 4778 unsigned long flags; 4779 4780 if (WARN_ON_ONCE(ch != &delayed_free.rcu_head)) 4781 return; 4782 4783 raw_local_irq_save(flags); 4784 arch_spin_lock(&lockdep_lock); 4785 current->lockdep_recursion = 1; 4786 4787 /* closed head */ 4788 pf = delayed_free.pf + (delayed_free.index ^ 1); 4789 __free_zapped_classes(pf); 4790 delayed_free.scheduled = false; 4791 4792 /* 4793 * If there's anything on the open list, close and start a new callback. 4794 */ 4795 call_rcu_zapped(delayed_free.pf + delayed_free.index); 4796 4797 current->lockdep_recursion = 0; 4798 arch_spin_unlock(&lockdep_lock); 4799 raw_local_irq_restore(flags); 4800 } 4801 4802 /* 4803 * Remove all lock classes from the class hash table and from the 4804 * all_lock_classes list whose key or name is in the address range [start, 4805 * start + size). Move these lock classes to the zapped_classes list. Must 4806 * be called with the graph lock held. 4807 */ 4808 static void __lockdep_free_key_range(struct pending_free *pf, void *start, 4809 unsigned long size) 4810 { 4811 struct lock_class *class; 4812 struct hlist_head *head; 4813 int i; 4814 4815 /* Unhash all classes that were created by a module. */ 4816 for (i = 0; i < CLASSHASH_SIZE; i++) { 4817 head = classhash_table + i; 4818 hlist_for_each_entry_rcu(class, head, hash_entry) { 4819 if (!within(class->key, start, size) && 4820 !within(class->name, start, size)) 4821 continue; 4822 zap_class(pf, class); 4823 } 4824 } 4825 } 4826 4827 /* 4828 * Used in module.c to remove lock classes from memory that is going to be 4829 * freed; and possibly re-used by other modules. 4830 * 4831 * We will have had one synchronize_rcu() before getting here, so we're 4832 * guaranteed nobody will look up these exact classes -- they're properly dead 4833 * but still allocated. 
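 *
 * (Illustrative, simplified call path: on module unload,
 *
 *	free_module(mod)
 *	  -> lockdep_free_key_range(mod->core_layout.base,
 *				     mod->core_layout.size);
 *
 *  so any lock class whose key or name sits inside the module image is
 *  zapped before that memory can be reused.)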
4834 */ 4835 static void lockdep_free_key_range_reg(void *start, unsigned long size) 4836 { 4837 struct pending_free *pf; 4838 unsigned long flags; 4839 4840 init_data_structures_once(); 4841 4842 raw_local_irq_save(flags); 4843 arch_spin_lock(&lockdep_lock); 4844 current->lockdep_recursion = 1; 4845 pf = get_pending_free(); 4846 __lockdep_free_key_range(pf, start, size); 4847 call_rcu_zapped(pf); 4848 current->lockdep_recursion = 0; 4849 arch_spin_unlock(&lockdep_lock); 4850 raw_local_irq_restore(flags); 4851 4852 /* 4853 * Wait for any possible iterators from look_up_lock_class() to pass 4854 * before continuing to free the memory they refer to. 4855 */ 4856 synchronize_rcu(); 4857 } 4858 4859 /* 4860 * Free all lockdep keys in the range [start, start+size). Does not sleep. 4861 * Ignores debug_locks. Must only be used by the lockdep selftests. 4862 */ 4863 static void lockdep_free_key_range_imm(void *start, unsigned long size) 4864 { 4865 struct pending_free *pf = delayed_free.pf; 4866 unsigned long flags; 4867 4868 init_data_structures_once(); 4869 4870 raw_local_irq_save(flags); 4871 arch_spin_lock(&lockdep_lock); 4872 __lockdep_free_key_range(pf, start, size); 4873 __free_zapped_classes(pf); 4874 arch_spin_unlock(&lockdep_lock); 4875 raw_local_irq_restore(flags); 4876 } 4877 4878 void lockdep_free_key_range(void *start, unsigned long size) 4879 { 4880 init_data_structures_once(); 4881 4882 if (inside_selftest()) 4883 lockdep_free_key_range_imm(start, size); 4884 else 4885 lockdep_free_key_range_reg(start, size); 4886 } 4887 4888 /* 4889 * Check whether any element of the @lock->class_cache[] array refers to a 4890 * registered lock class. The caller must hold either the graph lock or the 4891 * RCU read lock. 4892 */ 4893 static bool lock_class_cache_is_registered(struct lockdep_map *lock) 4894 { 4895 struct lock_class *class; 4896 struct hlist_head *head; 4897 int i, j; 4898 4899 for (i = 0; i < CLASSHASH_SIZE; i++) { 4900 head = classhash_table + i; 4901 hlist_for_each_entry_rcu(class, head, hash_entry) { 4902 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 4903 if (lock->class_cache[j] == class) 4904 return true; 4905 } 4906 } 4907 return false; 4908 } 4909 4910 /* The caller must hold the graph lock. Does not sleep. */ 4911 static void __lockdep_reset_lock(struct pending_free *pf, 4912 struct lockdep_map *lock) 4913 { 4914 struct lock_class *class; 4915 int j; 4916 4917 /* 4918 * Remove all classes this lock might have: 4919 */ 4920 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { 4921 /* 4922 * If the class exists we look it up and zap it: 4923 */ 4924 class = look_up_lock_class(lock, j); 4925 if (class) 4926 zap_class(pf, class); 4927 } 4928 /* 4929 * Debug check: in the end all mapped classes should 4930 * be gone. 4931 */ 4932 if (WARN_ON_ONCE(lock_class_cache_is_registered(lock))) 4933 debug_locks_off(); 4934 } 4935 4936 /* 4937 * Remove all information lockdep has about a lock if debug_locks == 1. Free 4938 * released data structures from RCU context. 4939 */ 4940 static void lockdep_reset_lock_reg(struct lockdep_map *lock) 4941 { 4942 struct pending_free *pf; 4943 unsigned long flags; 4944 int locked; 4945 4946 raw_local_irq_save(flags); 4947 locked = graph_lock(); 4948 if (!locked) 4949 goto out_irq; 4950 4951 pf = get_pending_free(); 4952 __lockdep_reset_lock(pf, lock); 4953 call_rcu_zapped(pf); 4954 4955 graph_unlock(); 4956 out_irq: 4957 raw_local_irq_restore(flags); 4958 } 4959 4960 /* 4961 * Reset a lock. Does not sleep. Ignores debug_locks. 
Must only be used by the 4962 * lockdep selftests. 4963 */ 4964 static void lockdep_reset_lock_imm(struct lockdep_map *lock) 4965 { 4966 struct pending_free *pf = delayed_free.pf; 4967 unsigned long flags; 4968 4969 raw_local_irq_save(flags); 4970 arch_spin_lock(&lockdep_lock); 4971 __lockdep_reset_lock(pf, lock); 4972 __free_zapped_classes(pf); 4973 arch_spin_unlock(&lockdep_lock); 4974 raw_local_irq_restore(flags); 4975 } 4976 4977 void lockdep_reset_lock(struct lockdep_map *lock) 4978 { 4979 init_data_structures_once(); 4980 4981 if (inside_selftest()) 4982 lockdep_reset_lock_imm(lock); 4983 else 4984 lockdep_reset_lock_reg(lock); 4985 } 4986 4987 /* Unregister a dynamically allocated key. */ 4988 void lockdep_unregister_key(struct lock_class_key *key) 4989 { 4990 struct hlist_head *hash_head = keyhashentry(key); 4991 struct lock_class_key *k; 4992 struct pending_free *pf; 4993 unsigned long flags; 4994 bool found = false; 4995 4996 might_sleep(); 4997 4998 if (WARN_ON_ONCE(static_obj(key))) 4999 return; 5000 5001 raw_local_irq_save(flags); 5002 if (!graph_lock()) 5003 goto out_irq; 5004 5005 pf = get_pending_free(); 5006 hlist_for_each_entry_rcu(k, hash_head, hash_entry) { 5007 if (k == key) { 5008 hlist_del_rcu(&k->hash_entry); 5009 found = true; 5010 break; 5011 } 5012 } 5013 WARN_ON_ONCE(!found); 5014 __lockdep_free_key_range(pf, key, 1); 5015 call_rcu_zapped(pf); 5016 graph_unlock(); 5017 out_irq: 5018 raw_local_irq_restore(flags); 5019 5020 /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */ 5021 synchronize_rcu(); 5022 } 5023 EXPORT_SYMBOL_GPL(lockdep_unregister_key); 5024 5025 void __init lockdep_init(void) 5026 { 5027 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); 5028 5029 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES); 5030 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH); 5031 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS); 5032 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE); 5033 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES); 5034 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); 5035 printk("... 
CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); 5036 5037 printk(" memory used by lock dependency info: %zu kB\n", 5038 (sizeof(lock_classes) + 5039 sizeof(classhash_table) + 5040 sizeof(list_entries) + 5041 sizeof(list_entries_in_use) + 5042 sizeof(chainhash_table) + 5043 sizeof(delayed_free) 5044 #ifdef CONFIG_PROVE_LOCKING 5045 + sizeof(lock_cq) 5046 + sizeof(lock_chains) 5047 + sizeof(lock_chains_in_use) 5048 + sizeof(chain_hlocks) 5049 #endif 5050 ) / 1024 5051 ); 5052 5053 printk(" per task-struct memory footprint: %zu bytes\n", 5054 sizeof(((struct task_struct *)NULL)->held_locks)); 5055 } 5056 5057 static void 5058 print_freed_lock_bug(struct task_struct *curr, const void *mem_from, 5059 const void *mem_to, struct held_lock *hlock) 5060 { 5061 if (!debug_locks_off()) 5062 return; 5063 if (debug_locks_silent) 5064 return; 5065 5066 pr_warn("\n"); 5067 pr_warn("=========================\n"); 5068 pr_warn("WARNING: held lock freed!\n"); 5069 print_kernel_ident(); 5070 pr_warn("-------------------------\n"); 5071 pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n", 5072 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); 5073 print_lock(hlock); 5074 lockdep_print_held_locks(curr); 5075 5076 pr_warn("\nstack backtrace:\n"); 5077 dump_stack(); 5078 } 5079 5080 static inline int not_in_range(const void* mem_from, unsigned long mem_len, 5081 const void* lock_from, unsigned long lock_len) 5082 { 5083 return lock_from + lock_len <= mem_from || 5084 mem_from + mem_len <= lock_from; 5085 } 5086 5087 /* 5088 * Called when kernel memory is freed (or unmapped), or if a lock 5089 * is destroyed or reinitialized - this code checks whether there is 5090 * any held lock in the memory range of <from> to <to>: 5091 */ 5092 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) 5093 { 5094 struct task_struct *curr = current; 5095 struct held_lock *hlock; 5096 unsigned long flags; 5097 int i; 5098 5099 if (unlikely(!debug_locks)) 5100 return; 5101 5102 raw_local_irq_save(flags); 5103 for (i = 0; i < curr->lockdep_depth; i++) { 5104 hlock = curr->held_locks + i; 5105 5106 if (not_in_range(mem_from, mem_len, hlock->instance, 5107 sizeof(*hlock->instance))) 5108 continue; 5109 5110 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); 5111 break; 5112 } 5113 raw_local_irq_restore(flags); 5114 } 5115 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); 5116 5117 static void print_held_locks_bug(void) 5118 { 5119 if (!debug_locks_off()) 5120 return; 5121 if (debug_locks_silent) 5122 return; 5123 5124 pr_warn("\n"); 5125 pr_warn("====================================\n"); 5126 pr_warn("WARNING: %s/%d still has locks held!\n", 5127 current->comm, task_pid_nr(current)); 5128 print_kernel_ident(); 5129 pr_warn("------------------------------------\n"); 5130 lockdep_print_held_locks(current); 5131 pr_warn("\nstack backtrace:\n"); 5132 dump_stack(); 5133 } 5134 5135 void debug_check_no_locks_held(void) 5136 { 5137 if (unlikely(current->lockdep_depth > 0)) 5138 print_held_locks_bug(); 5139 } 5140 EXPORT_SYMBOL_GPL(debug_check_no_locks_held); 5141 5142 #ifdef __KERNEL__ 5143 void debug_show_all_locks(void) 5144 { 5145 struct task_struct *g, *p; 5146 5147 if (unlikely(!debug_locks)) { 5148 pr_warn("INFO: lockdep is turned off.\n"); 5149 return; 5150 } 5151 pr_warn("\nShowing all locks held in the system:\n"); 5152 5153 rcu_read_lock(); 5154 for_each_process_thread(g, p) { 5155 if (!p->lockdep_depth) 5156 continue; 5157 lockdep_print_held_locks(p); 5158 touch_nmi_watchdog(); 
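		/*
		 * Printing every task's held locks can take a while;
		 * keep the soft-lockup detectors quiet as well:
		 */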
5159 touch_all_softlockup_watchdogs(); 5160 } 5161 rcu_read_unlock(); 5162 5163 pr_warn("\n"); 5164 pr_warn("=============================================\n\n"); 5165 } 5166 EXPORT_SYMBOL_GPL(debug_show_all_locks); 5167 #endif 5168 5169 /* 5170 * Careful: only use this function if you are sure that 5171 * the task cannot run in parallel! 5172 */ 5173 void debug_show_held_locks(struct task_struct *task) 5174 { 5175 if (unlikely(!debug_locks)) { 5176 printk("INFO: lockdep is turned off.\n"); 5177 return; 5178 } 5179 lockdep_print_held_locks(task); 5180 } 5181 EXPORT_SYMBOL_GPL(debug_show_held_locks); 5182 5183 asmlinkage __visible void lockdep_sys_exit(void) 5184 { 5185 struct task_struct *curr = current; 5186 5187 if (unlikely(curr->lockdep_depth)) { 5188 if (!debug_locks_off()) 5189 return; 5190 pr_warn("\n"); 5191 pr_warn("================================================\n"); 5192 pr_warn("WARNING: lock held when returning to user space!\n"); 5193 print_kernel_ident(); 5194 pr_warn("------------------------------------------------\n"); 5195 pr_warn("%s/%d is leaving the kernel with locks still held!\n", 5196 curr->comm, curr->pid); 5197 lockdep_print_held_locks(curr); 5198 } 5199 5200 /* 5201 * The lock history for each syscall should be independent. So wipe the 5202 * slate clean on return to userspace. 5203 */ 5204 lockdep_invariant_state(false); 5205 } 5206 5207 void lockdep_rcu_suspicious(const char *file, const int line, const char *s) 5208 { 5209 struct task_struct *curr = current; 5210 5211 /* Note: the following can be executed concurrently, so be careful. */ 5212 pr_warn("\n"); 5213 pr_warn("=============================\n"); 5214 pr_warn("WARNING: suspicious RCU usage\n"); 5215 print_kernel_ident(); 5216 pr_warn("-----------------------------\n"); 5217 pr_warn("%s:%d %s!\n", file, line, s); 5218 pr_warn("\nother info that might help us debug this:\n\n"); 5219 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n", 5220 !rcu_lockdep_current_cpu_online() 5221 ? "RCU used illegally from offline CPU!\n" 5222 : !rcu_is_watching() 5223 ? "RCU used illegally from idle CPU!\n" 5224 : "", 5225 rcu_scheduler_active, debug_locks); 5226 5227 /* 5228 * If a CPU is in the RCU-free window in idle (i.e. in the section 5229 * between rcu_idle_enter() and rcu_idle_exit()), then RCU 5230 * considers that CPU to be in an "extended quiescent state", 5231 * which means that RCU will be completely ignoring that CPU. 5232 * Therefore, rcu_read_lock() and friends have absolutely no 5233 * effect on a CPU running in that state. In other words, even if 5234 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well 5235 * delete data structures out from under it. RCU really has no 5236 * choice here: we need to keep an RCU-free window in idle where 5237 * the CPU may possibly enter into low power mode. This way CPUs 5238 * that started a grace period can observe the extended quiescent 5239 * state without waiting for us. Otherwise we would delay any grace 5240 * period for as long as we run in the idle task. 5241 * 5242 * So complain bitterly if someone does call rcu_read_lock(), 5243 * rcu_read_lock_bh() and so on from extended quiescent states. 5244 */ 5245 if (!rcu_is_watching()) 5246 pr_warn("RCU used illegally from extended quiescent state!\n"); 5247 5248 lockdep_print_held_locks(curr); 5249 pr_warn("\nstack backtrace:\n"); 5250 dump_stack(); 5251 } 5252 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious); 5253
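/*
 * (Illustrative note: lockdep_rcu_suspicious() is normally reached via
 *  RCU_LOCKDEP_WARN(), e.g. from rcu_dereference_check() when a pointer
 *  is dereferenced without rcu_read_lock() or the documented lock held.)
 */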