/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 *  - lock inversion scenarios
 *  - circular lock dependencies
 *  - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/kmemcheck.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}
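/*
 * Illustrative sketch (not compiled): the calling pattern the helper above
 * and graph_unlock() below are designed for. Writers of the dependency
 * graph bracket their updates with graph_lock()/graph_unlock() and treat a
 * 0 return from graph_lock() as "lockdep has already been turned off, give
 * up quietly". The function name is hypothetical.
 */
#if 0
static int example_graph_update(void)
{
	if (!graph_lock())
		return 0;	/* another CPU hit a bug and cleared debug_locks */

	/* ... modify the classes/lists/hashes here, graph lock held ... */

	graph_unlock();
	return 1;
}
#endif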
static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
		/*
		 * The lockdep graph lock isn't locked while we expect it to
		 * be, we're confused now, bye!
		 */
		return DEBUG_LOCKS_WARN_ON(1);
	}

	current->lockdep_recursion--;
	arch_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	arch_spin_unlock(&lockdep_lock);

	return ret;
}

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated, once during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		/*
		 * Someone passed in garbage, we give up.
		 */
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}
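/*
 * Worked example (not compiled): how lock_time_inc() above folds individual
 * wait/hold samples into a struct lock_time. The "|| !lt->nr" test is what
 * seeds ->min from the very first sample; the numbers below are made up.
 */
#if 0
static void example_lock_time(void)
{
	struct lock_time lt = { };

	lock_time_inc(&lt, 30);	/* min=30 max=30 total=30 nr=1 */
	lock_time_inc(&lt, 10);	/* min=10 max=30 total=40 nr=2 */
	lock_time_inc(&lt, 20);	/* min=10 max=30 total=60 nr=3 */
}
#endif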
struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
	put_cpu_var(cpu_lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
	put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock lock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS - 1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64 - MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))
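/*
 * Worked example (not compiled): how the chain key above is grown one lock
 * at a time. Each step rotates the previous 64-bit key left by
 * MAX_LOCKDEP_KEYS_BITS and XORs in the next class index, so the result
 * depends on the *order* of the held locks, not just on the set of them.
 * The class indices below are made up.
 */
#if 0
static void example_chain_key(void)
{
	u64 chain_key = 0;

	chain_key = iterate_chain_key(chain_key, 17);	/* 1st lock's class */
	chain_key = iterate_chain_key(chain_key, 42);	/* 2nd lock's class */
	/* acquiring class 42 before class 17 yields a different key */
}
#endif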
void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
# define RECLAIM_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
# define RECLAIM_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static void print_lockdep_off(const char *bug_msg)
{
	printk(KERN_DEBUG "%s\n", bug_msg);
	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	/*
	 * Some daft arches put -1 at the end to indicate it's a full trace.
	 *
	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
	 * complete trace that maxes out the entries provided will be reported
	 * as incomplete, friggin useless </rant>
	 */
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries - 1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES - 1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE)						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}
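/*
 * Legend for the characters produced by get_usage_char() above, per irq
 * state (this follows directly from the code; the wording of the bit
 * semantics is paraphrased):
 *
 *   '.'	neither LOCK_USED_IN_* nor LOCK_ENABLED_* is set
 *   '+'	only LOCK_ENABLED_*: held while that irq state was enabled
 *   '-'	only LOCK_USED_IN_*: acquired in that irq context
 *   '?'	both bits set - the combination the irq checks warn about
 */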
static void __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk("%s", name);
	} else {
		printk("%s", name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		if (class->subclass)
			printk("/%d", class->subclass);
	}
}

static void print_lock_name(struct lock_class *class)
{
	char usage[LOCK_USAGE_CHARS];

	get_usage_chars(class, usage);

	printk(" (");
	__print_lock_name(class);
	printk("){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	/*
	 * We can be called locklessly through debug_show_all_locks() so be
	 * extra careful, the hlock might have been released and cleared.
	 */
	unsigned int class_idx = hlock->class_idx;

	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
	barrier();

	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
		printk("<RELEASED>\n");
		return;
	}

	print_lock_name(lock_classes + class_idx - 1);
	printk(", at: ");
	print_ip_sym(hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
		return;
	}
	printk("%d lock%s held by %s/%d:\n",
		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

static void print_kernel_ident(void)
{
	printk("%s %.*s %s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version,
		print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
#ifdef __KERNEL__
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}
#endif

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}
/*
 * Look up a lock's class in the hash-table. Classes are registered
 * lazily by register_lock_class() below; here we only search, and
 * return NULL if the class is not present yet.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself:
	 */
	if (unlikely(!lock->key))
		lock->key = (void *)lock;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
			 * on some memory? We're most confused.
			 */
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return NULL;
}
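/*
 * Illustrative sketch (not compiled) of the key rules described in the
 * NOTE above. A statically allocated lock uses its own address as the
 * class key; a dynamically allocated lock gets its key from the static
 * lock_class_key that the *_init() macro declares at the init call site,
 * so every instance initialized there shares one class, no matter how
 * many objects are allocated. The names below are hypothetical.
 */
#if 0
static DEFINE_SPINLOCK(example_static_lock);	/* key == &example_static_lock */

struct example_obj {
	spinlock_t lock;
};

static void example_obj_init(struct example_obj *obj)
{
	spin_lock_init(&obj->lock);	/* key: static, one per init site */
}
#endif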
/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct hlist_head *hash_head;
	struct lock_class *class;

	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		goto out_set_class_cache;

	/*
	 * Debug-check: all keys must be persistent!
	 */
	if (!static_obj(lock->key)) {
		debug_locks_off();
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	if (!graph_lock()) {
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key)
			goto out_unlock_set;
	}

	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			return NULL;
		}

		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
		dump_stack();
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	hlist_add_head_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();

		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();

		if (!graph_lock()) {
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();

out_set_class_cache:
	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	/*
	 * Hash collision, did we smoke some? We found a class with a matching
	 * hash but the subclass -- which is hashed in -- didn't match.
	 */
	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}
#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock is held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
		dump_stack();
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
			    struct list_head *head, unsigned long ip,
			    int distance, struct stack_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Both allocation and removal are done under the graph lock; but
	 * iteration is under RCU-sched; see look_up_lock_class() and
	 * lockdep_free_key_range().
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * For good efficiency of the modulo operation, we use a power of 2:
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE - 1)

/*
 * The circular_queue and its helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to a
 * previously held lock, if there is a circular dependency
 * between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}

static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}
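/*
 * Worked example (not compiled): the ring-buffer arithmetic above. With
 * MAX_CIRCULAR_QUEUE_SIZE a power of 2, "(x + 1) & CQ_MASK" wraps an
 * index without a modulo; one slot stays unused so that full and empty
 * remain distinguishable. The element values are made up.
 */
#if 0
static void example_circular_queue(void)
{
	struct circular_queue cq;
	unsigned long elem;

	__cq_init(&cq);			/* front == rear == 0: empty */
	__cq_enqueue(&cq, 0xa);		/* rear -> 1 */
	__cq_enqueue(&cq, 0xb);		/* rear -> 2 */
	__cq_dequeue(&cq, &elem);	/* elem == 0xa, front -> 1 */
	/* __cq_get_elem_count(&cq) == 1 at this point */
}
#endif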
static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		DEBUG_LOCKS_WARN_ON(!irqs_disabled());

		list_for_each_entry_rcu(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					ret = -1;
					goto exit;
				}
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}

static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);
}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);
}

/*
 * Forwards-direction lock-dependency checking (via the BFS above), used
 * for both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}
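/*
 * Illustrative sketch (not compiled): how a caller plugs a match callback
 * into the BFS above. check_noncircular() below does exactly this with
 * class_equal(); the wrapper name here is hypothetical.
 */
#if 0
static int example_reaches_class(struct lock_list *root,
				 struct lock_class *target,
				 struct lock_list **target_entry)
{
	/*
	 * 0: <root> reaches <target> via the locks_after lists and
	 * *target_entry is set; 1: unreachable; <0: graph corrupted.
	 */
	return __bfs_forwards(root, target, class_equal, target_entry);
}
#endif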
static void
print_circular_lock_scenario(struct held_lock *src,
			     struct held_lock *tgt,
			     struct lock_list *prt)
{
	struct lock_class *source = hlock_class(src);
	struct lock_class *target = hlock_class(tgt);
	struct lock_class *parent = prt->class;

	/*
	 * For a direct locking problem, where the unsafe_class lock is
	 * taken directly under the safe_class lock, all we need to show
	 * is the deadlock scenario, as it is obvious that the unsafe
	 * lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (parent != source) {
		printk("Chain exists of:\n  ");
		__print_lock_name(source);
		printk(" --> ");
		__print_lock_name(parent);
		printk(" --> ");
		__print_lock_name(target);
		printk("\n\n");
	}

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(target);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(parent);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(target);
	printk(");\n");
	printk("  lock(");
	__print_lock_name(source);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			struct held_lock *check_src,
			struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return 0;

	printk("\n");
	printk("======================================================\n");
	printk("[ INFO: possible circular locking dependency detected ]\n");
	print_kernel_ident();
	printk("-------------------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);
	printk("\nbut task is already holding lock:\n");
	print_lock(check_tgt);
	printk("\nwhich lock already depends on the new lock.\n\n");
	printk("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

static inline int class_equal(struct lock_list *entry, void *data)
{
	return entry->class == data;
}

static noinline int print_circular_bug(struct lock_list *this,
				struct lock_list *target,
				struct held_lock *check_src,
				struct held_lock *check_tgt)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	struct lock_list *first_parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);
	first_parent = parent;

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	print_circular_lock_scenario(check_src, check_tgt,
				     first_parent);

	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}
static noinline int print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return 0;

	/*
	 * Breadth-first-search failed, graph got corrupted?
	 */
	WARN(1, "lockdep bfs error:%d\n", ret);

	return 0;
}

static int noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_forward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_backward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

/*
 * Prove that the dependency graph starting at <root> can not
 * lead to <target>. Returns 0 and sets *target_entry if it can
 * (i.e. a circle was found); the caller prints the error then.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_cyclic_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */

static inline int usage_match(struct lock_list *entry, void *bit)
{
	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
}

/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_forwards_checks);

	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);

	return result;
}
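/*
 * Illustrative sketch (not compiled): the searches check_usage() further
 * below builds from usage_match(). To validate a new prev -> next
 * dependency it looks backwards from <prev> for an irq-safe class
 * (LOCK_USED_IN_*) and forwards from <next> for an irq-unsafe one
 * (LOCK_ENABLED_*); finding both means the new link would connect them.
 * The function name is hypothetical.
 */
#if 0
static int example_finds_hardirq_safe(struct lock_class *class)
{
	struct lock_list root = { .parent = NULL, .class = class };
	struct lock_list *target_entry;

	/* 0: found a hardirq-safe class behind us, 1: none, <0: error */
	return find_usage_backwards(&root, LOCK_USED_IN_HARDIRQ,
				    &target_entry);
}
#endif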
/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_backwards_checks);

	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

static void print_lock_class_header(struct lock_class *class, int depth)
{
	int bit;

	printk("%*s->", depth, "");
	print_lock_name(class);
	printk(" ops: %lu", class->ops);
	printk(" {\n");

	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
		if (class->usage_mask & (1 << bit)) {
			int len = depth;

			len += printk("%*s %s", depth, "", usage_str[bit]);
			len += printk(" at:\n");
			print_stack_trace(class->usage_traces + bit, len);
		}
	}
	printk("%*s }\n", depth, "");

	printk("%*s ... key at: ", depth, "");
	print_ip_sym((unsigned long)class->key);
}

/*
 * printk the shortest lock dependencies from @leaf to @root in reverse order:
 */
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
				struct lock_list *root)
{
	struct lock_list *entry = leaf;
	int depth;

	/* compute depth from the tree generated by the BFS */
	depth = get_lock_depth(leaf);

	do {
		print_lock_class_header(entry->class, depth);
		printk("%*s ... acquired at:\n", depth, "");
		print_stack_trace(&entry->trace, 2);
		printk("\n");

		if (depth == 0 && (entry != root)) {
			printk("lockdep:%s bad path found in chain graph\n", __func__);
			break;
		}

		entry = get_lock_parent(entry);
		depth--;
	} while (entry && (depth >= 0));

	return;
}
static void
print_irq_lock_scenario(struct lock_list *safe_entry,
			struct lock_list *unsafe_entry,
			struct lock_class *prev_class,
			struct lock_class *next_class)
{
	struct lock_class *safe_class = safe_entry->class;
	struct lock_class *unsafe_class = unsafe_entry->class;
	struct lock_class *middle_class = prev_class;

	if (middle_class == safe_class)
		middle_class = next_class;

	/*
	 * For a direct locking problem, where the unsafe_class lock is
	 * taken directly under the safe_class lock, all we need to show
	 * is the deadlock scenario, as it is obvious that the unsafe
	 * lock is taken under the safe lock.
	 *
	 * But if there is a chain instead, where the safe lock takes
	 * an intermediate lock (middle_class) where this lock is
	 * not the same as the safe lock, then the lock chain is
	 * used to describe the problem. Otherwise we would need
	 * to show a different CPU case for each link in the chain
	 * from the safe_class lock to the unsafe_class lock.
	 */
	if (middle_class != unsafe_class) {
		printk("Chain exists of:\n  ");
		__print_lock_name(safe_class);
		printk(" --> ");
		__print_lock_name(middle_class);
		printk(" --> ");
		__print_lock_name(unsafe_class);
		printk("\n\n");
	}

	printk(" Possible interrupt unsafe locking scenario:\n\n");
	printk("       CPU0                    CPU1\n");
	printk("       ----                    ----\n");
	printk("  lock(");
	__print_lock_name(unsafe_class);
	printk(");\n");
	printk("                               local_irq_disable();\n");
	printk("                               lock(");
	__print_lock_name(safe_class);
	printk(");\n");
	printk("                               lock(");
	__print_lock_name(middle_class);
	printk(");\n");
	printk("  <Interrupt>\n");
	printk("    lock(");
	__print_lock_name(safe_class);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
}
static int
print_bad_irq_dependency(struct task_struct *curr,
			 struct lock_list *prev_root,
			 struct lock_list *next_root,
			 struct lock_list *backwards_entry,
			 struct lock_list *forwards_entry,
			 struct held_lock *prev,
			 struct held_lock *next,
			 enum lock_usage_bit bit1,
			 enum lock_usage_bit bit2,
			 const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n");
	printk("======================================================\n");
	printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
		irqclass, irqclass);
	print_kernel_ident();
	printk("------------------------------------------------------\n");
	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
		curr->comm, task_pid_nr(curr),
		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
		curr->hardirqs_enabled,
		curr->softirqs_enabled);
	print_lock(next);

	printk("\nand this task is already holding:\n");
	print_lock(prev);
	printk("which would create a new lock dependency:\n");
	print_lock_name(hlock_class(prev));
	printk(" ->");
	print_lock_name(hlock_class(next));
	printk("\n");

	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
		irqclass);
	print_lock_name(backwards_entry->class);
	printk("\n... which became %s-irq-safe at:\n", irqclass);

	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);

	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
	print_lock_name(forwards_entry->class);
	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
	printk("...");

	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);

	printk("\nother info that might help us debug this:\n\n");
	print_irq_lock_scenario(backwards_entry, forwards_entry,
				hlock_class(prev), hlock_class(next));

	lockdep_print_held_locks(curr);

	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
	printk(" and the holding lock:\n");
	if (!save_trace(&prev_root->trace))
		return 0;
	print_shortest_lock_dependencies(backwards_entry, prev_root);

	printk("\nthe dependencies between the lock to be acquired");
	printk(" and %s-irq-unsafe lock:\n", irqclass);
	if (!save_trace(&next_root->trace))
		return 0;
	print_shortest_lock_dependencies(forwards_entry, next_root);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static int
check_usage(struct task_struct *curr, struct held_lock *prev,
	    struct held_lock *next, enum lock_usage_bit bit_backwards,
	    enum lock_usage_bit bit_forwards, const char *irqclass)
{
	int ret;
	struct lock_list this, that;
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *uninitialized_var(target_entry1);

	this.parent = NULL;

	this.class = hlock_class(prev);
	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	that.parent = NULL;
	that.class = hlock_class(next);
	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_bad_irq_dependency(curr, &this, &that,
			target_entry, target_entry1,
			prev, next,
			bit_backwards, bit_forwards, irqclass);
}

static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline const char *state_name(enum lock_usage_bit bit)
{
	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
}

static int exclusive_bit(int new_bit)
{
	/*
	 * USED_IN
	 * USED_IN_READ
	 * ENABLED
	 * ENABLED_READ
	 *
	 * bit 0 - write/read
	 * bit 1 - used_in/enabled
	 * bit 2+  state
	 */

	int state = new_bit & ~3;
	int dir = new_bit & 2;

	/*
	 * keep state, bit flip the direction and strip read.
	 */
	return state | (dir ^ 2);
}
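/*
 * Worked example: exclusive_bit() above, under the usual enum layout
 * generated from lockdep_states.h (per state: USED_IN, USED_IN_READ,
 * ENABLED, ENABLED_READ). Flipping bit 1 and stripping bit 0 maps a
 * usage bit to its conflicting counterpart:
 *
 *   exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *   exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ
 *   exclusive_bit(LOCK_ENABLED_HARDIRQ)      == LOCK_USED_IN_HARDIRQ
 *
 * i.e. "used in hardirq context" excludes "taken with hardirqs enabled".
 */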
static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
			   struct held_lock *next, enum lock_usage_bit bit)
{
	/*
	 * Prove that the new dependency does not connect a hardirq-safe
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	bit++; /* _READ */

	/*
	 * Prove that the new dependency does not connect a hardirq-safe-read
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	return 1;
}

static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
#define LOCKDEP_STATE(__STATE)						\
	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
		return 0;
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	return 1;
}

static void inc_chains(void)
{
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
}

#else

static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
	return 1;
}

static inline void inc_chains(void)
{
	nr_process_chains++;
}

#endif

static void
print_deadlock_scenario(struct held_lock *nxt,
			struct held_lock *prv)
{
	struct lock_class *next = hlock_class(nxt);
	struct lock_class *prev = hlock_class(prv);

	printk(" Possible unsafe locking scenario:\n\n");
	printk("       CPU0\n");
	printk("       ----\n");
	printk("  lock(");
	__print_lock_name(prev);
	printk(");\n");
	printk("  lock(");
	__print_lock_name(next);
	printk(");\n");
	printk("\n *** DEADLOCK ***\n\n");
	printk(" May be due to missing lock nesting notation\n\n");
}

static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
		   struct held_lock *next)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n");
	printk("=============================================\n");
	printk("[ INFO: possible recursive locking detected ]\n");
	print_kernel_ident();
	printk("---------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(next);
	printk("\nbut task is already holding lock:\n");
	print_lock(prev);

	printk("\nother info that might help us debug this:\n");
	print_deadlock_scenario(next, prev);
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}
/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	struct held_lock *nest = NULL;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
			return 2;

		/*
		 * We're holding the nest_lock, which serializes this lock's
		 * nesting behaviour.
		 */
		if (nest)
			return 2;

		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
}
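/*
 * Illustrative sketch (not compiled) of the two escapes in
 * check_deadlock() above; the lock and structure names are hypothetical.
 */
#if 0
static void example_same_class_nesting(void)
{
	/*
	 * (1) read-after-read recursion on one class is fine,
	 * check_deadlock() returns 2 for the second acquisition:
	 */
	read_lock(&example_rwlock);
	read_lock(&example_rwlock);

	/*
	 * (2) same-class nesting is fine while the declared nest_lock
	 * is held, e.g. via mutex_lock_nest_lock():
	 */
	mutex_lock(&parent->mutex);
	mutex_lock_nest_lock(&child_a->mutex, &parent->mutex);
	mutex_lock_nest_lock(&child_b->mutex, &parent->mutex);
}
#endif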
/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We recursively validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next, int distance, int *stack_saved)
{
	struct lock_list *entry;
	int ret;
	struct lock_list this;
	struct lock_list *uninitialized_var(target_entry);
	/*
	 * Static variable, serialized by the graph_lock().
	 *
	 * We use this static variable to save the stack trace in case
	 * we call into this function multiple times due to encountering
	 * trylocks in the held lock stack.
	 */
	static struct stack_trace trace;

	/*
	 * Prove that the new <prev> -> <next> dependency would not
	 * create a circular dependency in the graph. (We do this by
	 * forward-recursing into the graph starting at <next>, and
	 * checking whether we can reach <prev>.)
	 *
	 * We are using global variables to control the recursion, to
	 * keep the stackframe size of the recursive functions low:
	 */
	this.class = hlock_class(next);
	this.parent = NULL;
	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
	if (unlikely(!ret))
		return print_circular_bug(&this, target_entry, next, prev);
	else if (unlikely(ret < 0))
		return print_bfs_bug(ret);

	if (!check_prev_add_irq(curr, prev, next))
		return 0;

	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we don't store read-triggered dependencies (only
	 * write-triggered dependencies). This ensures that only the
	 * write-side dependencies matter, and that if for example a
	 * write-lock never takes any other locks, then the reads are
	 * equivalent to a NOP.
	 */
	if (next->read == 2 || prev->read == 2)
		return 1;
	/*
	 * Is the <prev> -> <next> dependency already present?
	 *
	 * (this may occur even though this is a new chain: consider
	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
	 *  chains - the second one will be new, but L1 already has
	 *  L2 added to its dependency list, due to the first chain.)
	 */
	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
		if (entry->class == hlock_class(next)) {
			if (distance == 1)
				entry->distance = 1;
			return 2;
		}
	}

	if (!*stack_saved) {
		if (!save_trace(&trace))
			return 0;
		*stack_saved = 1;
	}

	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
			       &hlock_class(prev)->locks_after,
			       next->acquire_ip, distance, &trace);

	if (!ret)
		return 0;

	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
			       &hlock_class(next)->locks_before,
			       next->acquire_ip, distance, &trace);
	if (!ret)
		return 0;

	/*
	 * Debugging printouts:
	 */
	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
		/* We drop graph lock, so another thread can overwrite trace. */
		*stack_saved = 0;
		graph_unlock();
		printk("\n new dependency: ");
		print_lock_name(hlock_class(prev));
		printk(" => ");
		print_lock_name(hlock_class(next));
		printk("\n");
		dump_stack();
		return graph_lock();
	}
	return 1;
}
/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	int depth = curr->lockdep_depth;
	int stack_saved = 0;
	struct held_lock *hlock;

	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth - 1;
		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2 && hlock->check) {
			if (!check_prev_add(curr, hlock, next,
						distance, &stack_saved))
				return 0;
			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}
		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	/*
	 * Clearly we all shouldn't be here, but since we made it we
	 * can reliably say we messed up our state. See the above two
	 * gotos for reasons why we could possibly end up here.
	 */
	WARN_ON(1);

	return 0;
}
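/*
 * Worked example of the "relevant locks" rule above: suppose a task
 * holds, in order, A, B (trylock), C (trylock) and now acquires D.
 * check_prevs_add() records C -> D (distance 1), B -> D (distance 2)
 * and A -> D (distance 3), then stops at A because it is the first
 * non-trylock entry; anything below A is already linked to A directly,
 * so D is connected to it transitively. The lock names are hypothetical.
 */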
If they are not it most likely means 2076 * that there was a collision during the calculation of the chain_key. 2077 * Returns: 0 not passed, 1 passed 2078 */ 2079 static int check_no_collision(struct task_struct *curr, 2080 struct held_lock *hlock, 2081 struct lock_chain *chain) 2082 { 2083 #ifdef CONFIG_DEBUG_LOCKDEP 2084 int i, j, id; 2085 2086 i = get_first_held_lock(curr, hlock); 2087 2088 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) { 2089 print_collision(curr, hlock, chain); 2090 return 0; 2091 } 2092 2093 for (j = 0; j < chain->depth - 1; j++, i++) { 2094 id = curr->held_locks[i].class_idx - 1; 2095 2096 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) { 2097 print_collision(curr, hlock, chain); 2098 return 0; 2099 } 2100 } 2101 #endif 2102 return 1; 2103 } 2104 2105 /* 2106 * Look up a dependency chain. If the key is not present yet then 2107 * add it and return 1 - in this case the new dependency chain is 2108 * validated. If the key is already hashed, return 0. 2109 * (On return with 1 graph_lock is held.) 2110 */ 2111 static inline int lookup_chain_cache(struct task_struct *curr, 2112 struct held_lock *hlock, 2113 u64 chain_key) 2114 { 2115 struct lock_class *class = hlock_class(hlock); 2116 struct hlist_head *hash_head = chainhashentry(chain_key); 2117 struct lock_chain *chain; 2118 int i, j; 2119 2120 /* 2121 * We might need to take the graph lock, ensure we've got IRQs 2122 * disabled to make this an IRQ-safe lock.. for recursion reasons 2123 * lockdep won't complain about its own locking errors. 2124 */ 2125 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2126 return 0; 2127 /* 2128 * We can walk it lock-free, because entries only get added 2129 * to the hash: 2130 */ 2131 hlist_for_each_entry_rcu(chain, hash_head, entry) { 2132 if (chain->chain_key == chain_key) { 2133 cache_hit: 2134 debug_atomic_inc(chain_lookup_hits); 2135 if (!check_no_collision(curr, hlock, chain)) 2136 return 0; 2137 2138 if (very_verbose(class)) 2139 printk("\nhash chain already cached, key: " 2140 "%016Lx tail class: [%p] %s\n", 2141 (unsigned long long)chain_key, 2142 class->key, class->name); 2143 return 0; 2144 } 2145 } 2146 if (very_verbose(class)) 2147 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n", 2148 (unsigned long long)chain_key, class->key, class->name); 2149 /* 2150 * Allocate a new chain entry from the static array, and add 2151 * it to the hash: 2152 */ 2153 if (!graph_lock()) 2154 return 0; 2155 /* 2156 * We have to walk the chain again locked - to avoid duplicates: 2157 */ 2158 hlist_for_each_entry(chain, hash_head, entry) { 2159 if (chain->chain_key == chain_key) { 2160 graph_unlock(); 2161 goto cache_hit; 2162 } 2163 } 2164 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { 2165 if (!debug_locks_off_graph_unlock()) 2166 return 0; 2167 2168 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); 2169 dump_stack(); 2170 return 0; 2171 } 2172 chain = lock_chains + nr_lock_chains++; 2173 chain->chain_key = chain_key; 2174 chain->irq_context = hlock->irq_context; 2175 i = get_first_held_lock(curr, hlock); 2176 chain->depth = curr->lockdep_depth + 1 - i; 2177 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { 2178 chain->base = nr_chain_hlocks; 2179 nr_chain_hlocks += chain->depth; 2180 for (j = 0; j < chain->depth - 1; j++, i++) { 2181 int lock_id = curr->held_locks[i].class_idx - 1; 2182 chain_hlocks[chain->base + j] = lock_id; 2183 } 2184 chain_hlocks[chain->base + j] = class - lock_classes; 2185 } 2186 
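	/*
	 * Illustration: for a new chain h1 -> h2 -> h3, the loop above
	 * stored the class indices of h1 and h2 in chain_hlocks[base] and
	 * chain_hlocks[base + 1], and the tail class of h3 in
	 * chain_hlocks[base + 2]; lock_chain_get_class() reads them back.
	 * Publishing the chain is the last step, since the hash is walked
	 * locklessly -- entries only ever get added:
	 */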
	hlist_add_head_rcu(&chain->entry, hash_head);
2187 	debug_atomic_inc(chain_lookup_misses);
2188 	inc_chains();
2189 
2190 	return 1;
2191 }
2192 
2193 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2194 		struct held_lock *hlock, int chain_head, u64 chain_key)
2195 {
2196 	/*
2197 	 * Trylock needs to maintain the stack of held locks, but it
2198 	 * does not add new dependencies, because trylock can be done
2199 	 * in any order.
2200 	 *
2201 	 * We look up the chain_key and do the O(N^2) check and update of
2202 	 * the dependencies only if this is a new dependency chain.
2203 	 * (If lookup_chain_cache() returns with 1 it acquires
2204 	 * graph_lock for us)
2205 	 */
2206 	if (!hlock->trylock && hlock->check &&
2207 	    lookup_chain_cache(curr, hlock, chain_key)) {
2208 		/*
2209 		 * Check whether last held lock:
2210 		 *
2211 		 * - is irq-safe, if this lock is irq-unsafe
2212 		 * - is softirq-safe, if this lock is hardirq-unsafe
2213 		 *
2214 		 * And check whether the new lock's dependency graph
2215 		 * could lead back to the previous lock.
2216 		 *
2217 		 * Any of these scenarios could lead to a deadlock. If
2218 		 * all validations pass, the new dependencies get added below.
2219 		 */
2220 		int ret = check_deadlock(curr, hlock, lock, hlock->read);
2221 
2222 		if (!ret)
2223 			return 0;
2224 		/*
2225 		 * Mark recursive read, as we jump over it when
2226 		 * building dependencies (just like we jump over
2227 		 * trylock entries):
2228 		 */
2229 		if (ret == 2)
2230 			hlock->read = 2;
2231 		/*
2232 		 * Add dependency only if this lock is not the head
2233 		 * of the chain, and if it's not a secondary read-lock:
2234 		 */
2235 		if (!chain_head && ret != 2)
2236 			if (!check_prevs_add(curr, hlock))
2237 				return 0;
2238 		graph_unlock();
2239 	} else
2240 		/* after lookup_chain_cache(): */
2241 		if (unlikely(!debug_locks))
2242 			return 0;
2243 
2244 	return 1;
2245 }
2246 #else
2247 static inline int validate_chain(struct task_struct *curr,
2248 		struct lockdep_map *lock, struct held_lock *hlock,
2249 		int chain_head, u64 chain_key)
2250 {
2251 	return 1;
2252 }
2253 #endif
2254 
2255 /*
2256  * We are building curr_chain_key incrementally, so double-check
2257  * it from scratch, to make sure that it's done correctly:
2258  */
2259 static void check_chain_key(struct task_struct *curr)
2260 {
2261 #ifdef CONFIG_DEBUG_LOCKDEP
2262 	struct held_lock *hlock, *prev_hlock = NULL;
2263 	unsigned int i;
2264 	u64 chain_key = 0;
2265 
2266 	for (i = 0; i < curr->lockdep_depth; i++) {
2267 		hlock = curr->held_locks + i;
2268 		if (chain_key != hlock->prev_chain_key) {
2269 			debug_locks_off();
2270 			/*
2271 			 * We got mighty confused, our chain keys don't match
2272 			 * with what we expect, did someone trample on our task state?
2273 			 */
2274 			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2275 				curr->lockdep_depth, i,
2276 				(unsigned long long)chain_key,
2277 				(unsigned long long)hlock->prev_chain_key);
2278 			return;
2279 		}
2280 		/*
2281 		 * Whoops, ran out of static storage again?
2282 		 */
2283 		if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2284 			return;
2285 
2286 		if (prev_hlock && (prev_hlock->irq_context !=
2287 							hlock->irq_context))
2288 			chain_key = 0;
2289 		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2290 		prev_hlock = hlock;
2291 	}
2292 	if (chain_key != curr->curr_chain_key) {
2293 		debug_locks_off();
2294 		/*
2295 		 * More smoking hash instead of calculating it, damn see these
2296 		 * numbers float.. I bet that a pink elephant stepped on my memory.
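		 * (Translation: the incrementally maintained curr_chain_key no
		 * longer matches the value just recomputed from scratch.)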
2297 */ 2298 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", 2299 curr->lockdep_depth, i, 2300 (unsigned long long)chain_key, 2301 (unsigned long long)curr->curr_chain_key); 2302 } 2303 #endif 2304 } 2305 2306 static void 2307 print_usage_bug_scenario(struct held_lock *lock) 2308 { 2309 struct lock_class *class = hlock_class(lock); 2310 2311 printk(" Possible unsafe locking scenario:\n\n"); 2312 printk(" CPU0\n"); 2313 printk(" ----\n"); 2314 printk(" lock("); 2315 __print_lock_name(class); 2316 printk(");\n"); 2317 printk(" <Interrupt>\n"); 2318 printk(" lock("); 2319 __print_lock_name(class); 2320 printk(");\n"); 2321 printk("\n *** DEADLOCK ***\n\n"); 2322 } 2323 2324 static int 2325 print_usage_bug(struct task_struct *curr, struct held_lock *this, 2326 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) 2327 { 2328 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2329 return 0; 2330 2331 printk("\n"); 2332 printk("=================================\n"); 2333 printk("[ INFO: inconsistent lock state ]\n"); 2334 print_kernel_ident(); 2335 printk("---------------------------------\n"); 2336 2337 printk("inconsistent {%s} -> {%s} usage.\n", 2338 usage_str[prev_bit], usage_str[new_bit]); 2339 2340 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", 2341 curr->comm, task_pid_nr(curr), 2342 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, 2343 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, 2344 trace_hardirqs_enabled(curr), 2345 trace_softirqs_enabled(curr)); 2346 print_lock(this); 2347 2348 printk("{%s} state was registered at:\n", usage_str[prev_bit]); 2349 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1); 2350 2351 print_irqtrace_events(curr); 2352 printk("\nother info that might help us debug this:\n"); 2353 print_usage_bug_scenario(this); 2354 2355 lockdep_print_held_locks(curr); 2356 2357 printk("\nstack backtrace:\n"); 2358 dump_stack(); 2359 2360 return 0; 2361 } 2362 2363 /* 2364 * Print out an error if an invalid bit is set: 2365 */ 2366 static inline int 2367 valid_state(struct task_struct *curr, struct held_lock *this, 2368 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) 2369 { 2370 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) 2371 return print_usage_bug(curr, this, bad_bit, new_bit); 2372 return 1; 2373 } 2374 2375 static int mark_lock(struct task_struct *curr, struct held_lock *this, 2376 enum lock_usage_bit new_bit); 2377 2378 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 2379 2380 /* 2381 * print irq inversion bug: 2382 */ 2383 static int 2384 print_irq_inversion_bug(struct task_struct *curr, 2385 struct lock_list *root, struct lock_list *other, 2386 struct held_lock *this, int forwards, 2387 const char *irqclass) 2388 { 2389 struct lock_list *entry = other; 2390 struct lock_list *middle = NULL; 2391 int depth; 2392 2393 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2394 return 0; 2395 2396 printk("\n"); 2397 printk("=========================================================\n"); 2398 printk("[ INFO: possible irq lock inversion dependency detected ]\n"); 2399 print_kernel_ident(); 2400 printk("---------------------------------------------------------\n"); 2401 printk("%s/%d just changed the state of lock:\n", 2402 curr->comm, task_pid_nr(curr)); 2403 print_lock(this); 2404 if (forwards) 2405 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); 2406 else 2407 printk("but this lock was taken by another, %s-safe lock in the 
past:\n", irqclass); 2408 print_lock_name(other->class); 2409 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); 2410 2411 printk("\nother info that might help us debug this:\n"); 2412 2413 /* Find a middle lock (if one exists) */ 2414 depth = get_lock_depth(other); 2415 do { 2416 if (depth == 0 && (entry != root)) { 2417 printk("lockdep:%s bad path found in chain graph\n", __func__); 2418 break; 2419 } 2420 middle = entry; 2421 entry = get_lock_parent(entry); 2422 depth--; 2423 } while (entry && entry != root && (depth >= 0)); 2424 if (forwards) 2425 print_irq_lock_scenario(root, other, 2426 middle ? middle->class : root->class, other->class); 2427 else 2428 print_irq_lock_scenario(other, root, 2429 middle ? middle->class : other->class, root->class); 2430 2431 lockdep_print_held_locks(curr); 2432 2433 printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); 2434 if (!save_trace(&root->trace)) 2435 return 0; 2436 print_shortest_lock_dependencies(other, root); 2437 2438 printk("\nstack backtrace:\n"); 2439 dump_stack(); 2440 2441 return 0; 2442 } 2443 2444 /* 2445 * Prove that in the forwards-direction subgraph starting at <this> 2446 * there is no lock matching <mask>: 2447 */ 2448 static int 2449 check_usage_forwards(struct task_struct *curr, struct held_lock *this, 2450 enum lock_usage_bit bit, const char *irqclass) 2451 { 2452 int ret; 2453 struct lock_list root; 2454 struct lock_list *uninitialized_var(target_entry); 2455 2456 root.parent = NULL; 2457 root.class = hlock_class(this); 2458 ret = find_usage_forwards(&root, bit, &target_entry); 2459 if (ret < 0) 2460 return print_bfs_bug(ret); 2461 if (ret == 1) 2462 return ret; 2463 2464 return print_irq_inversion_bug(curr, &root, target_entry, 2465 this, 1, irqclass); 2466 } 2467 2468 /* 2469 * Prove that in the backwards-direction subgraph starting at <this> 2470 * there is no lock matching <mask>: 2471 */ 2472 static int 2473 check_usage_backwards(struct task_struct *curr, struct held_lock *this, 2474 enum lock_usage_bit bit, const char *irqclass) 2475 { 2476 int ret; 2477 struct lock_list root; 2478 struct lock_list *uninitialized_var(target_entry); 2479 2480 root.parent = NULL; 2481 root.class = hlock_class(this); 2482 ret = find_usage_backwards(&root, bit, &target_entry); 2483 if (ret < 0) 2484 return print_bfs_bug(ret); 2485 if (ret == 1) 2486 return ret; 2487 2488 return print_irq_inversion_bug(curr, &root, target_entry, 2489 this, 0, irqclass); 2490 } 2491 2492 void print_irqtrace_events(struct task_struct *curr) 2493 { 2494 printk("irq event stamp: %u\n", curr->irq_events); 2495 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event); 2496 print_ip_sym(curr->hardirq_enable_ip); 2497 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event); 2498 print_ip_sym(curr->hardirq_disable_ip); 2499 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event); 2500 print_ip_sym(curr->softirq_enable_ip); 2501 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event); 2502 print_ip_sym(curr->softirq_disable_ip); 2503 } 2504 2505 static int HARDIRQ_verbose(struct lock_class *class) 2506 { 2507 #if HARDIRQ_VERBOSE 2508 return class_filter(class); 2509 #endif 2510 return 0; 2511 } 2512 2513 static int SOFTIRQ_verbose(struct lock_class *class) 2514 { 2515 #if SOFTIRQ_VERBOSE 2516 return class_filter(class); 2517 #endif 2518 return 0; 2519 } 2520 2521 static int RECLAIM_FS_verbose(struct lock_class *class) 2522 { 2523 #if RECLAIM_VERBOSE 2524 
return class_filter(class); 2525 #endif 2526 return 0; 2527 } 2528 2529 #define STRICT_READ_CHECKS 1 2530 2531 static int (*state_verbose_f[])(struct lock_class *class) = { 2532 #define LOCKDEP_STATE(__STATE) \ 2533 __STATE##_verbose, 2534 #include "lockdep_states.h" 2535 #undef LOCKDEP_STATE 2536 }; 2537 2538 static inline int state_verbose(enum lock_usage_bit bit, 2539 struct lock_class *class) 2540 { 2541 return state_verbose_f[bit >> 2](class); 2542 } 2543 2544 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, 2545 enum lock_usage_bit bit, const char *name); 2546 2547 static int 2548 mark_lock_irq(struct task_struct *curr, struct held_lock *this, 2549 enum lock_usage_bit new_bit) 2550 { 2551 int excl_bit = exclusive_bit(new_bit); 2552 int read = new_bit & 1; 2553 int dir = new_bit & 2; 2554 2555 /* 2556 * mark USED_IN has to look forwards -- to ensure no dependency 2557 * has ENABLED state, which would allow recursion deadlocks. 2558 * 2559 * mark ENABLED has to look backwards -- to ensure no dependee 2560 * has USED_IN state, which, again, would allow recursion deadlocks. 2561 */ 2562 check_usage_f usage = dir ? 2563 check_usage_backwards : check_usage_forwards; 2564 2565 /* 2566 * Validate that this particular lock does not have conflicting 2567 * usage states. 2568 */ 2569 if (!valid_state(curr, this, new_bit, excl_bit)) 2570 return 0; 2571 2572 /* 2573 * Validate that the lock dependencies don't have conflicting usage 2574 * states. 2575 */ 2576 if ((!read || !dir || STRICT_READ_CHECKS) && 2577 !usage(curr, this, excl_bit, state_name(new_bit & ~1))) 2578 return 0; 2579 2580 /* 2581 * Check for read in write conflicts 2582 */ 2583 if (!read) { 2584 if (!valid_state(curr, this, new_bit, excl_bit + 1)) 2585 return 0; 2586 2587 if (STRICT_READ_CHECKS && 2588 !usage(curr, this, excl_bit + 1, 2589 state_name(new_bit + 1))) 2590 return 0; 2591 } 2592 2593 if (state_verbose(new_bit, hlock_class(this))) 2594 return 2; 2595 2596 return 1; 2597 } 2598 2599 enum mark_type { 2600 #define LOCKDEP_STATE(__STATE) __STATE, 2601 #include "lockdep_states.h" 2602 #undef LOCKDEP_STATE 2603 }; 2604 2605 /* 2606 * Mark all held locks with a usage bit: 2607 */ 2608 static int 2609 mark_held_locks(struct task_struct *curr, enum mark_type mark) 2610 { 2611 enum lock_usage_bit usage_bit; 2612 struct held_lock *hlock; 2613 int i; 2614 2615 for (i = 0; i < curr->lockdep_depth; i++) { 2616 hlock = curr->held_locks + i; 2617 2618 usage_bit = 2 + (mark << 2); /* ENABLED */ 2619 if (hlock->read) 2620 usage_bit += 1; /* READ */ 2621 2622 BUG_ON(usage_bit >= LOCK_USAGE_STATES); 2623 2624 if (!hlock->check) 2625 continue; 2626 2627 if (!mark_lock(curr, hlock, usage_bit)) 2628 return 0; 2629 } 2630 2631 return 1; 2632 } 2633 2634 /* 2635 * Hardirqs will be enabled: 2636 */ 2637 static void __trace_hardirqs_on_caller(unsigned long ip) 2638 { 2639 struct task_struct *curr = current; 2640 2641 /* we'll do an OFF -> ON transition: */ 2642 curr->hardirqs_enabled = 1; 2643 2644 /* 2645 * We are going to turn hardirqs on, so set the 2646 * usage bit for all held locks: 2647 */ 2648 if (!mark_held_locks(curr, HARDIRQ)) 2649 return; 2650 /* 2651 * If we have softirqs enabled, then set the usage 2652 * bit for all held locks. 
(disabled hardirqs prevented 2653 * this bit from being set before) 2654 */ 2655 if (curr->softirqs_enabled) 2656 if (!mark_held_locks(curr, SOFTIRQ)) 2657 return; 2658 2659 curr->hardirq_enable_ip = ip; 2660 curr->hardirq_enable_event = ++curr->irq_events; 2661 debug_atomic_inc(hardirqs_on_events); 2662 } 2663 2664 __visible void trace_hardirqs_on_caller(unsigned long ip) 2665 { 2666 time_hardirqs_on(CALLER_ADDR0, ip); 2667 2668 if (unlikely(!debug_locks || current->lockdep_recursion)) 2669 return; 2670 2671 if (unlikely(current->hardirqs_enabled)) { 2672 /* 2673 * Neither irq nor preemption are disabled here 2674 * so this is racy by nature but losing one hit 2675 * in a stat is not a big deal. 2676 */ 2677 __debug_atomic_inc(redundant_hardirqs_on); 2678 return; 2679 } 2680 2681 /* 2682 * We're enabling irqs and according to our state above irqs weren't 2683 * already enabled, yet we find the hardware thinks they are in fact 2684 * enabled.. someone messed up their IRQ state tracing. 2685 */ 2686 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2687 return; 2688 2689 /* 2690 * See the fine text that goes along with this variable definition. 2691 */ 2692 if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled))) 2693 return; 2694 2695 /* 2696 * Can't allow enabling interrupts while in an interrupt handler, 2697 * that's general bad form and such. Recursion, limited stack etc.. 2698 */ 2699 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) 2700 return; 2701 2702 current->lockdep_recursion = 1; 2703 __trace_hardirqs_on_caller(ip); 2704 current->lockdep_recursion = 0; 2705 } 2706 EXPORT_SYMBOL(trace_hardirqs_on_caller); 2707 2708 void trace_hardirqs_on(void) 2709 { 2710 trace_hardirqs_on_caller(CALLER_ADDR0); 2711 } 2712 EXPORT_SYMBOL(trace_hardirqs_on); 2713 2714 /* 2715 * Hardirqs were disabled: 2716 */ 2717 __visible void trace_hardirqs_off_caller(unsigned long ip) 2718 { 2719 struct task_struct *curr = current; 2720 2721 time_hardirqs_off(CALLER_ADDR0, ip); 2722 2723 if (unlikely(!debug_locks || current->lockdep_recursion)) 2724 return; 2725 2726 /* 2727 * So we're supposed to get called after you mask local IRQs, but for 2728 * some reason the hardware doesn't quite think you did a proper job. 2729 */ 2730 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2731 return; 2732 2733 if (curr->hardirqs_enabled) { 2734 /* 2735 * We have done an ON -> OFF transition: 2736 */ 2737 curr->hardirqs_enabled = 0; 2738 curr->hardirq_disable_ip = ip; 2739 curr->hardirq_disable_event = ++curr->irq_events; 2740 debug_atomic_inc(hardirqs_off_events); 2741 } else 2742 debug_atomic_inc(redundant_hardirqs_off); 2743 } 2744 EXPORT_SYMBOL(trace_hardirqs_off_caller); 2745 2746 void trace_hardirqs_off(void) 2747 { 2748 trace_hardirqs_off_caller(CALLER_ADDR0); 2749 } 2750 EXPORT_SYMBOL(trace_hardirqs_off); 2751 2752 /* 2753 * Softirqs will be enabled: 2754 */ 2755 void trace_softirqs_on(unsigned long ip) 2756 { 2757 struct task_struct *curr = current; 2758 2759 if (unlikely(!debug_locks || current->lockdep_recursion)) 2760 return; 2761 2762 /* 2763 * We fancy IRQs being disabled here, see softirq.c, avoids 2764 * funny state and nesting things. 
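 * (the DEBUG_LOCKS_WARN_ON() below enforces that expectation)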
2765 	 */
2766 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2767 		return;
2768 
2769 	if (curr->softirqs_enabled) {
2770 		debug_atomic_inc(redundant_softirqs_on);
2771 		return;
2772 	}
2773 
2774 	current->lockdep_recursion = 1;
2775 	/*
2776 	 * We'll do an OFF -> ON transition:
2777 	 */
2778 	curr->softirqs_enabled = 1;
2779 	curr->softirq_enable_ip = ip;
2780 	curr->softirq_enable_event = ++curr->irq_events;
2781 	debug_atomic_inc(softirqs_on_events);
2782 	/*
2783 	 * We are going to turn softirqs on, so set the
2784 	 * usage bit for all held locks, if hardirqs are
2785 	 * enabled too:
2786 	 */
2787 	if (curr->hardirqs_enabled)
2788 		mark_held_locks(curr, SOFTIRQ);
2789 	current->lockdep_recursion = 0;
2790 }
2791 
2792 /*
2793  * Softirqs were disabled:
2794  */
2795 void trace_softirqs_off(unsigned long ip)
2796 {
2797 	struct task_struct *curr = current;
2798 
2799 	if (unlikely(!debug_locks || current->lockdep_recursion))
2800 		return;
2801 
2802 	/*
2803 	 * We fancy IRQs being disabled here, see softirq.c
2804 	 */
2805 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2806 		return;
2807 
2808 	if (curr->softirqs_enabled) {
2809 		/*
2810 		 * We have done an ON -> OFF transition:
2811 		 */
2812 		curr->softirqs_enabled = 0;
2813 		curr->softirq_disable_ip = ip;
2814 		curr->softirq_disable_event = ++curr->irq_events;
2815 		debug_atomic_inc(softirqs_off_events);
2816 		/*
2817 		 * Whoops, we wanted softirqs off, so why aren't they?
2818 		 */
2819 		DEBUG_LOCKS_WARN_ON(!softirq_count());
2820 	} else
2821 		debug_atomic_inc(redundant_softirqs_off);
2822 }
2823 
2824 static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2825 {
2826 	struct task_struct *curr = current;
2827 
2828 	if (unlikely(!debug_locks))
2829 		return;
2830 
2831 	/* no reclaim without waiting on it */
2832 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
2833 		return;
2834 
2835 	/* this guy won't enter reclaim */
2836 	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2837 		return;
2838 
2839 	/* We're only interested in __GFP_FS allocations for now */
2840 	if (!(gfp_mask & __GFP_FS))
2841 		return;
2842 
2843 	/*
2844 	 * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
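	 * (such an allocation may recurse into filesystem reclaim and
	 * sleep, which an IRQs-off context must never do)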
2845 */ 2846 if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags))) 2847 return; 2848 2849 mark_held_locks(curr, RECLAIM_FS); 2850 } 2851 2852 static void check_flags(unsigned long flags); 2853 2854 void lockdep_trace_alloc(gfp_t gfp_mask) 2855 { 2856 unsigned long flags; 2857 2858 if (unlikely(current->lockdep_recursion)) 2859 return; 2860 2861 raw_local_irq_save(flags); 2862 check_flags(flags); 2863 current->lockdep_recursion = 1; 2864 __lockdep_trace_alloc(gfp_mask, flags); 2865 current->lockdep_recursion = 0; 2866 raw_local_irq_restore(flags); 2867 } 2868 2869 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) 2870 { 2871 /* 2872 * If non-trylock use in a hardirq or softirq context, then 2873 * mark the lock as used in these contexts: 2874 */ 2875 if (!hlock->trylock) { 2876 if (hlock->read) { 2877 if (curr->hardirq_context) 2878 if (!mark_lock(curr, hlock, 2879 LOCK_USED_IN_HARDIRQ_READ)) 2880 return 0; 2881 if (curr->softirq_context) 2882 if (!mark_lock(curr, hlock, 2883 LOCK_USED_IN_SOFTIRQ_READ)) 2884 return 0; 2885 } else { 2886 if (curr->hardirq_context) 2887 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) 2888 return 0; 2889 if (curr->softirq_context) 2890 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) 2891 return 0; 2892 } 2893 } 2894 if (!hlock->hardirqs_off) { 2895 if (hlock->read) { 2896 if (!mark_lock(curr, hlock, 2897 LOCK_ENABLED_HARDIRQ_READ)) 2898 return 0; 2899 if (curr->softirqs_enabled) 2900 if (!mark_lock(curr, hlock, 2901 LOCK_ENABLED_SOFTIRQ_READ)) 2902 return 0; 2903 } else { 2904 if (!mark_lock(curr, hlock, 2905 LOCK_ENABLED_HARDIRQ)) 2906 return 0; 2907 if (curr->softirqs_enabled) 2908 if (!mark_lock(curr, hlock, 2909 LOCK_ENABLED_SOFTIRQ)) 2910 return 0; 2911 } 2912 } 2913 2914 /* 2915 * We reuse the irq context infrastructure more broadly as a general 2916 * context checking code. This tests GFP_FS recursion (a lock taken 2917 * during reclaim for a GFP_FS allocation is held over a GFP_FS 2918 * allocation). 2919 */ 2920 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) { 2921 if (hlock->read) { 2922 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ)) 2923 return 0; 2924 } else { 2925 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS)) 2926 return 0; 2927 } 2928 } 2929 2930 return 1; 2931 } 2932 2933 static int separate_irq_context(struct task_struct *curr, 2934 struct held_lock *hlock) 2935 { 2936 unsigned int depth = curr->lockdep_depth; 2937 2938 /* 2939 * Keep track of points where we cross into an interrupt context: 2940 */ 2941 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + 2942 curr->softirq_context; 2943 if (depth) { 2944 struct held_lock *prev_hlock; 2945 2946 prev_hlock = curr->held_locks + depth-1; 2947 /* 2948 * If we cross into another context, reset the 2949 * hash key (this also prevents the checking and the 2950 * adding of the dependency to 'prev'): 2951 */ 2952 if (prev_hlock->irq_context != hlock->irq_context) 2953 return 1; 2954 } 2955 return 0; 2956 } 2957 2958 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 2959 2960 static inline 2961 int mark_lock_irq(struct task_struct *curr, struct held_lock *this, 2962 enum lock_usage_bit new_bit) 2963 { 2964 WARN_ON(1); /* Impossible innit? 
when we don't have TRACE_IRQFLAG */ 2965 return 1; 2966 } 2967 2968 static inline int mark_irqflags(struct task_struct *curr, 2969 struct held_lock *hlock) 2970 { 2971 return 1; 2972 } 2973 2974 static inline int separate_irq_context(struct task_struct *curr, 2975 struct held_lock *hlock) 2976 { 2977 return 0; 2978 } 2979 2980 void lockdep_trace_alloc(gfp_t gfp_mask) 2981 { 2982 } 2983 2984 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 2985 2986 /* 2987 * Mark a lock with a usage bit, and validate the state transition: 2988 */ 2989 static int mark_lock(struct task_struct *curr, struct held_lock *this, 2990 enum lock_usage_bit new_bit) 2991 { 2992 unsigned int new_mask = 1 << new_bit, ret = 1; 2993 2994 /* 2995 * If already set then do not dirty the cacheline, 2996 * nor do any checks: 2997 */ 2998 if (likely(hlock_class(this)->usage_mask & new_mask)) 2999 return 1; 3000 3001 if (!graph_lock()) 3002 return 0; 3003 /* 3004 * Make sure we didn't race: 3005 */ 3006 if (unlikely(hlock_class(this)->usage_mask & new_mask)) { 3007 graph_unlock(); 3008 return 1; 3009 } 3010 3011 hlock_class(this)->usage_mask |= new_mask; 3012 3013 if (!save_trace(hlock_class(this)->usage_traces + new_bit)) 3014 return 0; 3015 3016 switch (new_bit) { 3017 #define LOCKDEP_STATE(__STATE) \ 3018 case LOCK_USED_IN_##__STATE: \ 3019 case LOCK_USED_IN_##__STATE##_READ: \ 3020 case LOCK_ENABLED_##__STATE: \ 3021 case LOCK_ENABLED_##__STATE##_READ: 3022 #include "lockdep_states.h" 3023 #undef LOCKDEP_STATE 3024 ret = mark_lock_irq(curr, this, new_bit); 3025 if (!ret) 3026 return 0; 3027 break; 3028 case LOCK_USED: 3029 debug_atomic_dec(nr_unused_locks); 3030 break; 3031 default: 3032 if (!debug_locks_off_graph_unlock()) 3033 return 0; 3034 WARN_ON(1); 3035 return 0; 3036 } 3037 3038 graph_unlock(); 3039 3040 /* 3041 * We must printk outside of the graph_lock: 3042 */ 3043 if (ret == 2) { 3044 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); 3045 print_lock(this); 3046 print_irqtrace_events(curr); 3047 dump_stack(); 3048 } 3049 3050 return ret; 3051 } 3052 3053 /* 3054 * Initialize a lock instance's lock-class mapping info: 3055 */ 3056 void lockdep_init_map(struct lockdep_map *lock, const char *name, 3057 struct lock_class_key *key, int subclass) 3058 { 3059 int i; 3060 3061 kmemcheck_mark_initialized(lock, sizeof(*lock)); 3062 3063 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) 3064 lock->class_cache[i] = NULL; 3065 3066 #ifdef CONFIG_LOCK_STAT 3067 lock->cpu = raw_smp_processor_id(); 3068 #endif 3069 3070 /* 3071 * Can't be having no nameless bastards around this place! 3072 */ 3073 if (DEBUG_LOCKS_WARN_ON(!name)) { 3074 lock->name = "NULL"; 3075 return; 3076 } 3077 3078 lock->name = name; 3079 3080 /* 3081 * No key, no joy, we need to hash something. 3082 */ 3083 if (DEBUG_LOCKS_WARN_ON(!key)) 3084 return; 3085 /* 3086 * Sanity check, the lock-class key must be persistent: 3087 */ 3088 if (!static_obj(key)) { 3089 printk("BUG: key %p not in .data!\n", key); 3090 /* 3091 * What it says above ^^^^^, I suggest you read it. 
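	 * (A typical way to get here: a lock_class_key on the stack or
	 * embedded in kmalloc()'d memory. Keys must live in static
	 * storage so they stay unique and valid for the system lifetime.)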
3092 	 */
3093 		DEBUG_LOCKS_WARN_ON(1);
3094 		return;
3095 	}
3096 	lock->key = key;
3097 
3098 	if (unlikely(!debug_locks))
3099 		return;
3100 
3101 	if (subclass) {
3102 		unsigned long flags;
3103 
3104 		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3105 			return;
3106 
3107 		raw_local_irq_save(flags);
3108 		current->lockdep_recursion = 1;
3109 		register_lock_class(lock, subclass, 1);
3110 		current->lockdep_recursion = 0;
3111 		raw_local_irq_restore(flags);
3112 	}
3113 }
3114 EXPORT_SYMBOL_GPL(lockdep_init_map);
3115 
3116 struct lock_class_key __lockdep_no_validate__;
3117 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3118 
3119 static int
3120 print_lock_nested_lock_not_held(struct task_struct *curr,
3121 				struct held_lock *hlock,
3122 				unsigned long ip)
3123 {
3124 	if (!debug_locks_off())
3125 		return 0;
3126 	if (debug_locks_silent)
3127 		return 0;
3128 
3129 	printk("\n");
3130 	printk("==================================\n");
3131 	printk("[ BUG: Nested lock was not taken ]\n");
3132 	print_kernel_ident();
3133 	printk("----------------------------------\n");
3134 
3135 	printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3136 	print_lock(hlock);
3137 
3138 	printk("\nbut this task is not holding:\n");
3139 	printk("%s\n", hlock->nest_lock->name);
3140 
3144 	printk("\nother info that might help us debug this:\n");
3145 	lockdep_print_held_locks(curr);
3146 
3147 	printk("\nstack backtrace:\n");
3148 	dump_stack();
3149 
3150 	return 0;
3151 }
3152 
3153 static int __lock_is_held(struct lockdep_map *lock);
3154 
3155 /*
3156  * This gets called for every mutex_lock*()/spin_lock*() operation.
3157  * We maintain the dependency maps and validate the locking attempt:
3158  */
3159 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3160 			  int trylock, int read, int check, int hardirqs_off,
3161 			  struct lockdep_map *nest_lock, unsigned long ip,
3162 			  int references, int pin_count)
3163 {
3164 	struct task_struct *curr = current;
3165 	struct lock_class *class = NULL;
3166 	struct held_lock *hlock;
3167 	unsigned int depth;
3168 	int chain_head = 0;
3169 	int class_idx;
3170 	u64 chain_key;
3171 
3172 	if (unlikely(!debug_locks))
3173 		return 0;
3174 
3175 	/*
3176 	 * Lockdep should run with IRQs disabled, otherwise we could
3177 	 * get an interrupt which would want to take locks, which would
3178 	 * end up in lockdep and have you got a head-ache already?
3179 	 */
3180 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3181 		return 0;
3182 
3183 	if (!prove_locking || lock->key == &__lockdep_no_validate__)
3184 		check = 0;
3185 
3186 	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3187 		class = lock->class_cache[subclass];
3188 	/*
3189 	 * Not cached?
3190 	 */
3191 	if (unlikely(!class)) {
3192 		class = register_lock_class(lock, subclass, 0);
3193 		if (!class)
3194 			return 0;
3195 	}
3196 	atomic_inc((atomic_t *)&class->ops);
3197 	if (very_verbose(class)) {
3198 		printk("\nacquire class [%p] %s", class->key, class->name);
3199 		if (class->name_version > 1)
3200 			printk("#%d", class->name_version);
3201 		printk("\n");
3202 		dump_stack();
3203 	}
3204 
3205 	/*
3206 	 * Add the lock to the list of currently held locks.
3207 	 * (we dont increase the depth just yet, up until the
3208 	 * dependency checks are done)
3209 	 */
3210 	depth = curr->lockdep_depth;
3211 	/*
3212 	 * Ran out of static storage for our per-task lock stack again have we?
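	 * (i.e. lockdep_depth reached MAX_LOCK_DEPTH, checked just below)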
3213 */ 3214 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) 3215 return 0; 3216 3217 class_idx = class - lock_classes + 1; 3218 3219 if (depth) { 3220 hlock = curr->held_locks + depth - 1; 3221 if (hlock->class_idx == class_idx && nest_lock) { 3222 if (hlock->references) 3223 hlock->references++; 3224 else 3225 hlock->references = 2; 3226 3227 return 1; 3228 } 3229 } 3230 3231 hlock = curr->held_locks + depth; 3232 /* 3233 * Plain impossible, we just registered it and checked it weren't no 3234 * NULL like.. I bet this mushroom I ate was good! 3235 */ 3236 if (DEBUG_LOCKS_WARN_ON(!class)) 3237 return 0; 3238 hlock->class_idx = class_idx; 3239 hlock->acquire_ip = ip; 3240 hlock->instance = lock; 3241 hlock->nest_lock = nest_lock; 3242 hlock->trylock = trylock; 3243 hlock->read = read; 3244 hlock->check = check; 3245 hlock->hardirqs_off = !!hardirqs_off; 3246 hlock->references = references; 3247 #ifdef CONFIG_LOCK_STAT 3248 hlock->waittime_stamp = 0; 3249 hlock->holdtime_stamp = lockstat_clock(); 3250 #endif 3251 hlock->pin_count = pin_count; 3252 3253 if (check && !mark_irqflags(curr, hlock)) 3254 return 0; 3255 3256 /* mark it as used: */ 3257 if (!mark_lock(curr, hlock, LOCK_USED)) 3258 return 0; 3259 3260 /* 3261 * Calculate the chain hash: it's the combined hash of all the 3262 * lock keys along the dependency chain. We save the hash value 3263 * at every step so that we can get the current hash easily 3264 * after unlock. The chain hash is then used to cache dependency 3265 * results. 3266 * 3267 * The 'key ID' is what is the most compact key value to drive 3268 * the hash, not class->key. 3269 */ 3270 /* 3271 * Whoops, we did it again.. ran straight out of our static allocation. 3272 */ 3273 if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS)) 3274 return 0; 3275 3276 chain_key = curr->curr_chain_key; 3277 if (!depth) { 3278 /* 3279 * How can we have a chain hash when we ain't got no keys?! 3280 */ 3281 if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) 3282 return 0; 3283 chain_head = 1; 3284 } 3285 3286 hlock->prev_chain_key = chain_key; 3287 if (separate_irq_context(curr, hlock)) { 3288 chain_key = 0; 3289 chain_head = 1; 3290 } 3291 chain_key = iterate_chain_key(chain_key, class_idx); 3292 3293 if (nest_lock && !__lock_is_held(nest_lock)) 3294 return print_lock_nested_lock_not_held(curr, hlock, ip); 3295 3296 if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) 3297 return 0; 3298 3299 curr->curr_chain_key = chain_key; 3300 curr->lockdep_depth++; 3301 check_chain_key(curr); 3302 #ifdef CONFIG_DEBUG_LOCKDEP 3303 if (unlikely(!debug_locks)) 3304 return 0; 3305 #endif 3306 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { 3307 debug_locks_off(); 3308 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!"); 3309 printk(KERN_DEBUG "depth: %i max: %lu!\n", 3310 curr->lockdep_depth, MAX_LOCK_DEPTH); 3311 3312 lockdep_print_held_locks(current); 3313 debug_show_all_locks(); 3314 dump_stack(); 3315 3316 return 0; 3317 } 3318 3319 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) 3320 max_lockdep_depth = curr->lockdep_depth; 3321 3322 return 1; 3323 } 3324 3325 static int 3326 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock, 3327 unsigned long ip) 3328 { 3329 if (!debug_locks_off()) 3330 return 0; 3331 if (debug_locks_silent) 3332 return 0; 3333 3334 printk("\n"); 3335 printk("=====================================\n"); 3336 printk("[ BUG: bad unlock balance detected! 
]\n"); 3337 print_kernel_ident(); 3338 printk("-------------------------------------\n"); 3339 printk("%s/%d is trying to release lock (", 3340 curr->comm, task_pid_nr(curr)); 3341 print_lockdep_cache(lock); 3342 printk(") at:\n"); 3343 print_ip_sym(ip); 3344 printk("but there are no more locks to release!\n"); 3345 printk("\nother info that might help us debug this:\n"); 3346 lockdep_print_held_locks(curr); 3347 3348 printk("\nstack backtrace:\n"); 3349 dump_stack(); 3350 3351 return 0; 3352 } 3353 3354 static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) 3355 { 3356 if (hlock->instance == lock) 3357 return 1; 3358 3359 if (hlock->references) { 3360 struct lock_class *class = lock->class_cache[0]; 3361 3362 if (!class) 3363 class = look_up_lock_class(lock, 0); 3364 3365 /* 3366 * If look_up_lock_class() failed to find a class, we're trying 3367 * to test if we hold a lock that has never yet been acquired. 3368 * Clearly if the lock hasn't been acquired _ever_, we're not 3369 * holding it either, so report failure. 3370 */ 3371 if (!class) 3372 return 0; 3373 3374 /* 3375 * References, but not a lock we're actually ref-counting? 3376 * State got messed up, follow the sites that change ->references 3377 * and try to make sense of it. 3378 */ 3379 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) 3380 return 0; 3381 3382 if (hlock->class_idx == class - lock_classes + 1) 3383 return 1; 3384 } 3385 3386 return 0; 3387 } 3388 3389 static int 3390 __lock_set_class(struct lockdep_map *lock, const char *name, 3391 struct lock_class_key *key, unsigned int subclass, 3392 unsigned long ip) 3393 { 3394 struct task_struct *curr = current; 3395 struct held_lock *hlock, *prev_hlock; 3396 struct lock_class *class; 3397 unsigned int depth; 3398 int i; 3399 3400 depth = curr->lockdep_depth; 3401 /* 3402 * This function is about (re)setting the class of a held lock, 3403 * yet we're not actually holding any locks. Naughty user! 3404 */ 3405 if (DEBUG_LOCKS_WARN_ON(!depth)) 3406 return 0; 3407 3408 prev_hlock = NULL; 3409 for (i = depth-1; i >= 0; i--) { 3410 hlock = curr->held_locks + i; 3411 /* 3412 * We must not cross into another context: 3413 */ 3414 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3415 break; 3416 if (match_held_lock(hlock, lock)) 3417 goto found_it; 3418 prev_hlock = hlock; 3419 } 3420 return print_unlock_imbalance_bug(curr, lock, ip); 3421 3422 found_it: 3423 lockdep_init_map(lock, name, key, 0); 3424 class = register_lock_class(lock, subclass, 0); 3425 hlock->class_idx = class - lock_classes + 1; 3426 3427 curr->lockdep_depth = i; 3428 curr->curr_chain_key = hlock->prev_chain_key; 3429 3430 for (; i < depth; i++) { 3431 hlock = curr->held_locks + i; 3432 if (!__lock_acquire(hlock->instance, 3433 hlock_class(hlock)->subclass, hlock->trylock, 3434 hlock->read, hlock->check, hlock->hardirqs_off, 3435 hlock->nest_lock, hlock->acquire_ip, 3436 hlock->references, hlock->pin_count)) 3437 return 0; 3438 } 3439 3440 /* 3441 * I took it apart and put it back together again, except now I have 3442 * these 'spare' parts.. where shall I put them. 3443 */ 3444 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) 3445 return 0; 3446 return 1; 3447 } 3448 3449 /* 3450 * Remove the lock to the list of currently held locks - this gets 3451 * called on mutex_unlock()/spin_unlock*() (or on a failed 3452 * mutex_lock_interruptible()). 3453 * 3454 * @nested is an hysterical artifact, needs a tree wide cleanup. 
3455 */ 3456 static int 3457 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) 3458 { 3459 struct task_struct *curr = current; 3460 struct held_lock *hlock, *prev_hlock; 3461 unsigned int depth; 3462 int i; 3463 3464 if (unlikely(!debug_locks)) 3465 return 0; 3466 3467 depth = curr->lockdep_depth; 3468 /* 3469 * So we're all set to release this lock.. wait what lock? We don't 3470 * own any locks, you've been drinking again? 3471 */ 3472 if (DEBUG_LOCKS_WARN_ON(depth <= 0)) 3473 return print_unlock_imbalance_bug(curr, lock, ip); 3474 3475 /* 3476 * Check whether the lock exists in the current stack 3477 * of held locks: 3478 */ 3479 prev_hlock = NULL; 3480 for (i = depth-1; i >= 0; i--) { 3481 hlock = curr->held_locks + i; 3482 /* 3483 * We must not cross into another context: 3484 */ 3485 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3486 break; 3487 if (match_held_lock(hlock, lock)) 3488 goto found_it; 3489 prev_hlock = hlock; 3490 } 3491 return print_unlock_imbalance_bug(curr, lock, ip); 3492 3493 found_it: 3494 if (hlock->instance == lock) 3495 lock_release_holdtime(hlock); 3496 3497 WARN(hlock->pin_count, "releasing a pinned lock\n"); 3498 3499 if (hlock->references) { 3500 hlock->references--; 3501 if (hlock->references) { 3502 /* 3503 * We had, and after removing one, still have 3504 * references, the current lock stack is still 3505 * valid. We're done! 3506 */ 3507 return 1; 3508 } 3509 } 3510 3511 /* 3512 * We have the right lock to unlock, 'hlock' points to it. 3513 * Now we remove it from the stack, and add back the other 3514 * entries (if any), recalculating the hash along the way: 3515 */ 3516 3517 curr->lockdep_depth = i; 3518 curr->curr_chain_key = hlock->prev_chain_key; 3519 3520 for (i++; i < depth; i++) { 3521 hlock = curr->held_locks + i; 3522 if (!__lock_acquire(hlock->instance, 3523 hlock_class(hlock)->subclass, hlock->trylock, 3524 hlock->read, hlock->check, hlock->hardirqs_off, 3525 hlock->nest_lock, hlock->acquire_ip, 3526 hlock->references, hlock->pin_count)) 3527 return 0; 3528 } 3529 3530 /* 3531 * We had N bottles of beer on the wall, we drank one, but now 3532 * there's not N-1 bottles of beer left on the wall... 
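	 * (the re-acquisition loop above must leave lockdep_depth exactly
	 * one smaller than it was when we entered)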
3533 */ 3534 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) 3535 return 0; 3536 3537 return 1; 3538 } 3539 3540 static int __lock_is_held(struct lockdep_map *lock) 3541 { 3542 struct task_struct *curr = current; 3543 int i; 3544 3545 for (i = 0; i < curr->lockdep_depth; i++) { 3546 struct held_lock *hlock = curr->held_locks + i; 3547 3548 if (match_held_lock(hlock, lock)) 3549 return 1; 3550 } 3551 3552 return 0; 3553 } 3554 3555 static void __lock_pin_lock(struct lockdep_map *lock) 3556 { 3557 struct task_struct *curr = current; 3558 int i; 3559 3560 if (unlikely(!debug_locks)) 3561 return; 3562 3563 for (i = 0; i < curr->lockdep_depth; i++) { 3564 struct held_lock *hlock = curr->held_locks + i; 3565 3566 if (match_held_lock(hlock, lock)) { 3567 hlock->pin_count++; 3568 return; 3569 } 3570 } 3571 3572 WARN(1, "pinning an unheld lock\n"); 3573 } 3574 3575 static void __lock_unpin_lock(struct lockdep_map *lock) 3576 { 3577 struct task_struct *curr = current; 3578 int i; 3579 3580 if (unlikely(!debug_locks)) 3581 return; 3582 3583 for (i = 0; i < curr->lockdep_depth; i++) { 3584 struct held_lock *hlock = curr->held_locks + i; 3585 3586 if (match_held_lock(hlock, lock)) { 3587 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n")) 3588 return; 3589 3590 hlock->pin_count--; 3591 return; 3592 } 3593 } 3594 3595 WARN(1, "unpinning an unheld lock\n"); 3596 } 3597 3598 /* 3599 * Check whether we follow the irq-flags state precisely: 3600 */ 3601 static void check_flags(unsigned long flags) 3602 { 3603 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \ 3604 defined(CONFIG_TRACE_IRQFLAGS) 3605 if (!debug_locks) 3606 return; 3607 3608 if (irqs_disabled_flags(flags)) { 3609 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { 3610 printk("possible reason: unannotated irqs-off.\n"); 3611 } 3612 } else { 3613 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { 3614 printk("possible reason: unannotated irqs-on.\n"); 3615 } 3616 } 3617 3618 /* 3619 * We dont accurately track softirq state in e.g. 3620 * hardirq contexts (such as on 4KSTACKS), so only 3621 * check if not in hardirq contexts: 3622 */ 3623 if (!hardirq_count()) { 3624 if (softirq_count()) { 3625 /* like the above, but with softirqs */ 3626 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); 3627 } else { 3628 /* lick the above, does it taste good? 
*/ 3629 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); 3630 } 3631 } 3632 3633 if (!debug_locks) 3634 print_irqtrace_events(current); 3635 #endif 3636 } 3637 3638 void lock_set_class(struct lockdep_map *lock, const char *name, 3639 struct lock_class_key *key, unsigned int subclass, 3640 unsigned long ip) 3641 { 3642 unsigned long flags; 3643 3644 if (unlikely(current->lockdep_recursion)) 3645 return; 3646 3647 raw_local_irq_save(flags); 3648 current->lockdep_recursion = 1; 3649 check_flags(flags); 3650 if (__lock_set_class(lock, name, key, subclass, ip)) 3651 check_chain_key(current); 3652 current->lockdep_recursion = 0; 3653 raw_local_irq_restore(flags); 3654 } 3655 EXPORT_SYMBOL_GPL(lock_set_class); 3656 3657 /* 3658 * We are not always called with irqs disabled - do that here, 3659 * and also avoid lockdep recursion: 3660 */ 3661 void lock_acquire(struct lockdep_map *lock, unsigned int subclass, 3662 int trylock, int read, int check, 3663 struct lockdep_map *nest_lock, unsigned long ip) 3664 { 3665 unsigned long flags; 3666 3667 if (unlikely(current->lockdep_recursion)) 3668 return; 3669 3670 raw_local_irq_save(flags); 3671 check_flags(flags); 3672 3673 current->lockdep_recursion = 1; 3674 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); 3675 __lock_acquire(lock, subclass, trylock, read, check, 3676 irqs_disabled_flags(flags), nest_lock, ip, 0, 0); 3677 current->lockdep_recursion = 0; 3678 raw_local_irq_restore(flags); 3679 } 3680 EXPORT_SYMBOL_GPL(lock_acquire); 3681 3682 void lock_release(struct lockdep_map *lock, int nested, 3683 unsigned long ip) 3684 { 3685 unsigned long flags; 3686 3687 if (unlikely(current->lockdep_recursion)) 3688 return; 3689 3690 raw_local_irq_save(flags); 3691 check_flags(flags); 3692 current->lockdep_recursion = 1; 3693 trace_lock_release(lock, ip); 3694 if (__lock_release(lock, nested, ip)) 3695 check_chain_key(current); 3696 current->lockdep_recursion = 0; 3697 raw_local_irq_restore(flags); 3698 } 3699 EXPORT_SYMBOL_GPL(lock_release); 3700 3701 int lock_is_held(struct lockdep_map *lock) 3702 { 3703 unsigned long flags; 3704 int ret = 0; 3705 3706 if (unlikely(current->lockdep_recursion)) 3707 return 1; /* avoid false negative lockdep_assert_held() */ 3708 3709 raw_local_irq_save(flags); 3710 check_flags(flags); 3711 3712 current->lockdep_recursion = 1; 3713 ret = __lock_is_held(lock); 3714 current->lockdep_recursion = 0; 3715 raw_local_irq_restore(flags); 3716 3717 return ret; 3718 } 3719 EXPORT_SYMBOL_GPL(lock_is_held); 3720 3721 void lock_pin_lock(struct lockdep_map *lock) 3722 { 3723 unsigned long flags; 3724 3725 if (unlikely(current->lockdep_recursion)) 3726 return; 3727 3728 raw_local_irq_save(flags); 3729 check_flags(flags); 3730 3731 current->lockdep_recursion = 1; 3732 __lock_pin_lock(lock); 3733 current->lockdep_recursion = 0; 3734 raw_local_irq_restore(flags); 3735 } 3736 EXPORT_SYMBOL_GPL(lock_pin_lock); 3737 3738 void lock_unpin_lock(struct lockdep_map *lock) 3739 { 3740 unsigned long flags; 3741 3742 if (unlikely(current->lockdep_recursion)) 3743 return; 3744 3745 raw_local_irq_save(flags); 3746 check_flags(flags); 3747 3748 current->lockdep_recursion = 1; 3749 __lock_unpin_lock(lock); 3750 current->lockdep_recursion = 0; 3751 raw_local_irq_restore(flags); 3752 } 3753 EXPORT_SYMBOL_GPL(lock_unpin_lock); 3754 3755 void lockdep_set_current_reclaim_state(gfp_t gfp_mask) 3756 { 3757 current->lockdep_reclaim_gfp = gfp_mask; 3758 } 3759 3760 void lockdep_clear_current_reclaim_state(void) 3761 { 3762 
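	/* leaving the reclaim context: locks acquired from here on are no
	 * longer marked USED_IN_RECLAIM_FS by mark_irqflags() */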
current->lockdep_reclaim_gfp = 0; 3763 } 3764 3765 #ifdef CONFIG_LOCK_STAT 3766 static int 3767 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, 3768 unsigned long ip) 3769 { 3770 if (!debug_locks_off()) 3771 return 0; 3772 if (debug_locks_silent) 3773 return 0; 3774 3775 printk("\n"); 3776 printk("=================================\n"); 3777 printk("[ BUG: bad contention detected! ]\n"); 3778 print_kernel_ident(); 3779 printk("---------------------------------\n"); 3780 printk("%s/%d is trying to contend lock (", 3781 curr->comm, task_pid_nr(curr)); 3782 print_lockdep_cache(lock); 3783 printk(") at:\n"); 3784 print_ip_sym(ip); 3785 printk("but there are no locks held!\n"); 3786 printk("\nother info that might help us debug this:\n"); 3787 lockdep_print_held_locks(curr); 3788 3789 printk("\nstack backtrace:\n"); 3790 dump_stack(); 3791 3792 return 0; 3793 } 3794 3795 static void 3796 __lock_contended(struct lockdep_map *lock, unsigned long ip) 3797 { 3798 struct task_struct *curr = current; 3799 struct held_lock *hlock, *prev_hlock; 3800 struct lock_class_stats *stats; 3801 unsigned int depth; 3802 int i, contention_point, contending_point; 3803 3804 depth = curr->lockdep_depth; 3805 /* 3806 * Whee, we contended on this lock, except it seems we're not 3807 * actually trying to acquire anything much at all.. 3808 */ 3809 if (DEBUG_LOCKS_WARN_ON(!depth)) 3810 return; 3811 3812 prev_hlock = NULL; 3813 for (i = depth-1; i >= 0; i--) { 3814 hlock = curr->held_locks + i; 3815 /* 3816 * We must not cross into another context: 3817 */ 3818 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3819 break; 3820 if (match_held_lock(hlock, lock)) 3821 goto found_it; 3822 prev_hlock = hlock; 3823 } 3824 print_lock_contention_bug(curr, lock, ip); 3825 return; 3826 3827 found_it: 3828 if (hlock->instance != lock) 3829 return; 3830 3831 hlock->waittime_stamp = lockstat_clock(); 3832 3833 contention_point = lock_point(hlock_class(hlock)->contention_point, ip); 3834 contending_point = lock_point(hlock_class(hlock)->contending_point, 3835 lock->ip); 3836 3837 stats = get_lock_stats(hlock_class(hlock)); 3838 if (contention_point < LOCKSTAT_POINTS) 3839 stats->contention_point[contention_point]++; 3840 if (contending_point < LOCKSTAT_POINTS) 3841 stats->contending_point[contending_point]++; 3842 if (lock->cpu != smp_processor_id()) 3843 stats->bounces[bounce_contended + !!hlock->read]++; 3844 put_lock_stats(stats); 3845 } 3846 3847 static void 3848 __lock_acquired(struct lockdep_map *lock, unsigned long ip) 3849 { 3850 struct task_struct *curr = current; 3851 struct held_lock *hlock, *prev_hlock; 3852 struct lock_class_stats *stats; 3853 unsigned int depth; 3854 u64 now, waittime = 0; 3855 int i, cpu; 3856 3857 depth = curr->lockdep_depth; 3858 /* 3859 * Yay, we acquired ownership of this lock we didn't try to 3860 * acquire, how the heck did that happen? 
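	 * (lock_acquired() was called with an empty held-lock stack)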
3861 */ 3862 if (DEBUG_LOCKS_WARN_ON(!depth)) 3863 return; 3864 3865 prev_hlock = NULL; 3866 for (i = depth-1; i >= 0; i--) { 3867 hlock = curr->held_locks + i; 3868 /* 3869 * We must not cross into another context: 3870 */ 3871 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3872 break; 3873 if (match_held_lock(hlock, lock)) 3874 goto found_it; 3875 prev_hlock = hlock; 3876 } 3877 print_lock_contention_bug(curr, lock, _RET_IP_); 3878 return; 3879 3880 found_it: 3881 if (hlock->instance != lock) 3882 return; 3883 3884 cpu = smp_processor_id(); 3885 if (hlock->waittime_stamp) { 3886 now = lockstat_clock(); 3887 waittime = now - hlock->waittime_stamp; 3888 hlock->holdtime_stamp = now; 3889 } 3890 3891 trace_lock_acquired(lock, ip); 3892 3893 stats = get_lock_stats(hlock_class(hlock)); 3894 if (waittime) { 3895 if (hlock->read) 3896 lock_time_inc(&stats->read_waittime, waittime); 3897 else 3898 lock_time_inc(&stats->write_waittime, waittime); 3899 } 3900 if (lock->cpu != cpu) 3901 stats->bounces[bounce_acquired + !!hlock->read]++; 3902 put_lock_stats(stats); 3903 3904 lock->cpu = cpu; 3905 lock->ip = ip; 3906 } 3907 3908 void lock_contended(struct lockdep_map *lock, unsigned long ip) 3909 { 3910 unsigned long flags; 3911 3912 if (unlikely(!lock_stat)) 3913 return; 3914 3915 if (unlikely(current->lockdep_recursion)) 3916 return; 3917 3918 raw_local_irq_save(flags); 3919 check_flags(flags); 3920 current->lockdep_recursion = 1; 3921 trace_lock_contended(lock, ip); 3922 __lock_contended(lock, ip); 3923 current->lockdep_recursion = 0; 3924 raw_local_irq_restore(flags); 3925 } 3926 EXPORT_SYMBOL_GPL(lock_contended); 3927 3928 void lock_acquired(struct lockdep_map *lock, unsigned long ip) 3929 { 3930 unsigned long flags; 3931 3932 if (unlikely(!lock_stat)) 3933 return; 3934 3935 if (unlikely(current->lockdep_recursion)) 3936 return; 3937 3938 raw_local_irq_save(flags); 3939 check_flags(flags); 3940 current->lockdep_recursion = 1; 3941 __lock_acquired(lock, ip); 3942 current->lockdep_recursion = 0; 3943 raw_local_irq_restore(flags); 3944 } 3945 EXPORT_SYMBOL_GPL(lock_acquired); 3946 #endif 3947 3948 /* 3949 * Used by the testsuite, sanitize the validator state 3950 * after a simulated failure: 3951 */ 3952 3953 void lockdep_reset(void) 3954 { 3955 unsigned long flags; 3956 int i; 3957 3958 raw_local_irq_save(flags); 3959 current->curr_chain_key = 0; 3960 current->lockdep_depth = 0; 3961 current->lockdep_recursion = 0; 3962 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); 3963 nr_hardirq_chains = 0; 3964 nr_softirq_chains = 0; 3965 nr_process_chains = 0; 3966 debug_locks = 1; 3967 for (i = 0; i < CHAINHASH_SIZE; i++) 3968 INIT_HLIST_HEAD(chainhash_table + i); 3969 raw_local_irq_restore(flags); 3970 } 3971 3972 static void zap_class(struct lock_class *class) 3973 { 3974 int i; 3975 3976 /* 3977 * Remove all dependencies this lock is 3978 * involved in: 3979 */ 3980 for (i = 0; i < nr_list_entries; i++) { 3981 if (list_entries[i].class == class) 3982 list_del_rcu(&list_entries[i].entry); 3983 } 3984 /* 3985 * Unhash the class and remove it from the all_lock_classes list: 3986 */ 3987 hlist_del_rcu(&class->hash_entry); 3988 list_del_rcu(&class->lock_entry); 3989 3990 RCU_INIT_POINTER(class->key, NULL); 3991 RCU_INIT_POINTER(class->name, NULL); 3992 } 3993 3994 static inline int within(const void *addr, void *start, unsigned long size) 3995 { 3996 return addr >= start && addr < start + size; 3997 } 3998 3999 /* 4000 * Used in module.c to remove lock classes from 
memory that is going to be
4001  * freed, and possibly re-used by other modules.
4002  *
4003  * We will have had one sync_sched() before getting here, so we're guaranteed
4004  * nobody will look up these exact classes -- they're properly dead but still
4005  * allocated.
4006  */
4007 void lockdep_free_key_range(void *start, unsigned long size)
4008 {
4009 	struct lock_class *class;
4010 	struct hlist_head *head;
4011 	unsigned long flags;
4012 	int i;
4013 	int locked;
4014 
4015 	raw_local_irq_save(flags);
4016 	locked = graph_lock();
4017 
4018 	/*
4019 	 * Unhash all classes that were created by this module:
4020 	 */
4021 	for (i = 0; i < CLASSHASH_SIZE; i++) {
4022 		head = classhash_table + i;
4023 		hlist_for_each_entry_rcu(class, head, hash_entry) {
4024 			if (within(class->key, start, size))
4025 				zap_class(class);
4026 			else if (within(class->name, start, size))
4027 				zap_class(class);
4028 		}
4029 	}
4030 
4031 	if (locked)
4032 		graph_unlock();
4033 	raw_local_irq_restore(flags);
4034 
4035 	/*
4036 	 * Wait for any possible iterators from look_up_lock_class() to pass
4037 	 * before continuing to free the memory they refer to.
4038 	 *
4039 	 * sync_sched() is sufficient because the read-side runs with IRQs disabled.
4040 	 */
4041 	synchronize_sched();
4042 
4043 	/*
4044 	 * XXX at this point we could return the resources to the pool;
4045 	 * instead we leak them. We would need to change to bitmap allocators
4046 	 * instead of the linear allocators we have now.
4047 	 */
4048 }
4049 
4050 void lockdep_reset_lock(struct lockdep_map *lock)
4051 {
4052 	struct lock_class *class;
4053 	struct hlist_head *head;
4054 	unsigned long flags;
4055 	int i, j;
4056 	int locked;
4057 
4058 	raw_local_irq_save(flags);
4059 
4060 	/*
4061 	 * Remove all classes this lock might have:
4062 	 */
4063 	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4064 		/*
4065 		 * If the class exists we look it up and zap it:
4066 		 */
4067 		class = look_up_lock_class(lock, j);
4068 		if (class)
4069 			zap_class(class);
4070 	}
4071 	/*
4072 	 * Debug check: in the end all mapped classes should
4073 	 * be gone.
4074 	 */
4075 	locked = graph_lock();
4076 	for (i = 0; i < CLASSHASH_SIZE; i++) {
4077 		head = classhash_table + i;
4078 		hlist_for_each_entry_rcu(class, head, hash_entry) {
4079 			int match = 0;
4080 
4081 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4082 				match |= class == lock->class_cache[j];
4083 
4084 			if (unlikely(match)) {
4085 				if (debug_locks_off_graph_unlock()) {
4086 					/*
4087 					 * We all just reset everything, how did it match?
4088 					 */
4089 					WARN_ON(1);
4090 				}
4091 				goto out_restore;
4092 			}
4093 		}
4094 	}
4095 	if (locked)
4096 		graph_unlock();
4097 
4098 out_restore:
4099 	raw_local_irq_restore(flags);
4100 }
4101 
4102 void __init lockdep_info(void)
4103 {
4104 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4105 
4106 	printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
4107 	printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
4108 	printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
4109 	printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
4110 	printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
4111 	printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
4112 	printk("... 
void __init lockdep_info(void)
{
	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");

	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);

	printk(" memory used by lock dependency info: %lu kB\n",
		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
		sizeof(struct list_head) * CLASSHASH_SIZE +
		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
		sizeof(struct list_head) * CHAINHASH_SIZE
#ifdef CONFIG_PROVE_LOCKING
		+ sizeof(struct circular_queue)
#endif
		) / 1024
		);

	printk(" per task-struct memory footprint: %lu bytes\n",
		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
}

static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
		     const void *mem_to, struct held_lock *hlock)
{
	if (!debug_locks_off())
		return;
	if (debug_locks_silent)
		return;

	printk("\n");
	printk("=========================\n");
	printk("[ BUG: held lock freed! ]\n");
	print_kernel_ident();
	printk("-------------------------\n");
	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
	print_lock(hlock);
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();
}

static inline int not_in_range(const void *mem_from, unsigned long mem_len,
				const void *lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
		mem_from + mem_len <= lock_from;
}

/*
 * Called when kernel memory is freed (or unmapped), or if a lock
 * is destroyed or reinitialized - this code checks whether there is
 * any held lock in the memory range [mem_from, mem_from + mem_len):
 */
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
	struct task_struct *curr = current;
	struct held_lock *hlock;
	unsigned long flags;
	int i;

	if (unlikely(!debug_locks))
		return;

	local_irq_save(flags);
	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		if (not_in_range(mem_from, mem_len, hlock->instance,
					sizeof(*hlock->instance)))
			continue;

		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
		break;
	}
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
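/*
 * Example (sketch): how an allocator's free path is expected to use
 * the check above. my_free() and obj_size are illustrative names,
 * not an actual allocator entry point:
 *
 *	void my_free(void *obj, unsigned long obj_size)
 *	{
 *		debug_check_no_locks_freed(obj, obj_size);
 *		... return the object to the freelist ...
 *	}
 *
 * If the current task still holds a lock that lives inside
 * [obj, obj + obj_size), this prints the "held lock freed!" report
 * before the memory disappears out from under the lock.
 */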
]\n", 4200 current->comm, task_pid_nr(current)); 4201 print_kernel_ident(); 4202 printk("-------------------------------------\n"); 4203 lockdep_print_held_locks(current); 4204 printk("\nstack backtrace:\n"); 4205 dump_stack(); 4206 } 4207 4208 void debug_check_no_locks_held(void) 4209 { 4210 if (unlikely(current->lockdep_depth > 0)) 4211 print_held_locks_bug(); 4212 } 4213 EXPORT_SYMBOL_GPL(debug_check_no_locks_held); 4214 4215 #ifdef __KERNEL__ 4216 void debug_show_all_locks(void) 4217 { 4218 struct task_struct *g, *p; 4219 int count = 10; 4220 int unlock = 1; 4221 4222 if (unlikely(!debug_locks)) { 4223 printk("INFO: lockdep is turned off.\n"); 4224 return; 4225 } 4226 printk("\nShowing all locks held in the system:\n"); 4227 4228 /* 4229 * Here we try to get the tasklist_lock as hard as possible, 4230 * if not successful after 2 seconds we ignore it (but keep 4231 * trying). This is to enable a debug printout even if a 4232 * tasklist_lock-holding task deadlocks or crashes. 4233 */ 4234 retry: 4235 if (!read_trylock(&tasklist_lock)) { 4236 if (count == 10) 4237 printk("hm, tasklist_lock locked, retrying... "); 4238 if (count) { 4239 count--; 4240 printk(" #%d", 10-count); 4241 mdelay(200); 4242 goto retry; 4243 } 4244 printk(" ignoring it.\n"); 4245 unlock = 0; 4246 } else { 4247 if (count != 10) 4248 printk(KERN_CONT " locked it.\n"); 4249 } 4250 4251 do_each_thread(g, p) { 4252 /* 4253 * It's not reliable to print a task's held locks 4254 * if it's not sleeping (or if it's not the current 4255 * task): 4256 */ 4257 if (p->state == TASK_RUNNING && p != current) 4258 continue; 4259 if (p->lockdep_depth) 4260 lockdep_print_held_locks(p); 4261 if (!unlock) 4262 if (read_trylock(&tasklist_lock)) 4263 unlock = 1; 4264 } while_each_thread(g, p); 4265 4266 printk("\n"); 4267 printk("=============================================\n\n"); 4268 4269 if (unlock) 4270 read_unlock(&tasklist_lock); 4271 } 4272 EXPORT_SYMBOL_GPL(debug_show_all_locks); 4273 #endif 4274 4275 /* 4276 * Careful: only use this function if you are sure that 4277 * the task cannot run in parallel! 4278 */ 4279 void debug_show_held_locks(struct task_struct *task) 4280 { 4281 if (unlikely(!debug_locks)) { 4282 printk("INFO: lockdep is turned off.\n"); 4283 return; 4284 } 4285 lockdep_print_held_locks(task); 4286 } 4287 EXPORT_SYMBOL_GPL(debug_show_held_locks); 4288 4289 asmlinkage __visible void lockdep_sys_exit(void) 4290 { 4291 struct task_struct *curr = current; 4292 4293 if (unlikely(curr->lockdep_depth)) { 4294 if (!debug_locks_off()) 4295 return; 4296 printk("\n"); 4297 printk("================================================\n"); 4298 printk("[ BUG: lock held when returning to user space! ]\n"); 4299 print_kernel_ident(); 4300 printk("------------------------------------------------\n"); 4301 printk("%s/%d is leaving the kernel with locks still held!\n", 4302 curr->comm, curr->pid); 4303 lockdep_print_held_locks(curr); 4304 } 4305 } 4306 4307 void lockdep_rcu_suspicious(const char *file, const int line, const char *s) 4308 { 4309 struct task_struct *curr = current; 4310 4311 #ifndef CONFIG_PROVE_RCU_REPEATEDLY 4312 if (!debug_locks_off()) 4313 return; 4314 #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */ 4315 /* Note: the following can be executed concurrently, so be careful. */ 4316 printk("\n"); 4317 printk("===============================\n"); 4318 printk("[ INFO: suspicious RCU usage. 
]\n"); 4319 print_kernel_ident(); 4320 printk("-------------------------------\n"); 4321 printk("%s:%d %s!\n", file, line, s); 4322 printk("\nother info that might help us debug this:\n\n"); 4323 printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n", 4324 !rcu_lockdep_current_cpu_online() 4325 ? "RCU used illegally from offline CPU!\n" 4326 : !rcu_is_watching() 4327 ? "RCU used illegally from idle CPU!\n" 4328 : "", 4329 rcu_scheduler_active, debug_locks); 4330 4331 /* 4332 * If a CPU is in the RCU-free window in idle (ie: in the section 4333 * between rcu_idle_enter() and rcu_idle_exit(), then RCU 4334 * considers that CPU to be in an "extended quiescent state", 4335 * which means that RCU will be completely ignoring that CPU. 4336 * Therefore, rcu_read_lock() and friends have absolutely no 4337 * effect on a CPU running in that state. In other words, even if 4338 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well 4339 * delete data structures out from under it. RCU really has no 4340 * choice here: we need to keep an RCU-free window in idle where 4341 * the CPU may possibly enter into low power mode. This way we can 4342 * notice an extended quiescent state to other CPUs that started a grace 4343 * period. Otherwise we would delay any grace period as long as we run 4344 * in the idle task. 4345 * 4346 * So complain bitterly if someone does call rcu_read_lock(), 4347 * rcu_read_lock_bh() and so on from extended quiescent states. 4348 */ 4349 if (!rcu_is_watching()) 4350 printk("RCU used illegally from extended quiescent state!\n"); 4351 4352 lockdep_print_held_locks(curr); 4353 printk("\nstack backtrace:\n"); 4354 dump_stack(); 4355 } 4356 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious); 4357