1 /* 2 * kernel/lockdep.c 3 * 4 * Runtime locking correctness validator 5 * 6 * Started by Ingo Molnar: 7 * 8 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 9 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra 10 * 11 * this code maps all the lock dependencies as they occur in a live kernel 12 * and will warn about the following classes of locking bugs: 13 * 14 * - lock inversion scenarios 15 * - circular lock dependencies 16 * - hardirq/softirq safe/unsafe locking bugs 17 * 18 * Bugs are reported even if the current locking scenario does not cause 19 * any deadlock at this point. 20 * 21 * I.e. if anytime in the past two locks were taken in a different order, 22 * even if it happened for another task, even if those were different 23 * locks (but of the same class as this lock), this code will detect it. 24 * 25 * Thanks to Arjan van de Ven for coming up with the initial idea of 26 * mapping lock dependencies runtime. 27 */ 28 #define DISABLE_BRANCH_PROFILING 29 #include <linux/mutex.h> 30 #include <linux/sched.h> 31 #include <linux/sched/clock.h> 32 #include <linux/sched/task.h> 33 #include <linux/sched/mm.h> 34 #include <linux/delay.h> 35 #include <linux/module.h> 36 #include <linux/proc_fs.h> 37 #include <linux/seq_file.h> 38 #include <linux/spinlock.h> 39 #include <linux/kallsyms.h> 40 #include <linux/interrupt.h> 41 #include <linux/stacktrace.h> 42 #include <linux/debug_locks.h> 43 #include <linux/irqflags.h> 44 #include <linux/utsname.h> 45 #include <linux/hash.h> 46 #include <linux/ftrace.h> 47 #include <linux/stringify.h> 48 #include <linux/bitops.h> 49 #include <linux/gfp.h> 50 #include <linux/kmemcheck.h> 51 #include <linux/random.h> 52 #include <linux/jhash.h> 53 54 #include <asm/sections.h> 55 56 #include "lockdep_internals.h" 57 58 #define CREATE_TRACE_POINTS 59 #include <trace/events/lock.h> 60 61 #ifdef CONFIG_LOCKDEP_CROSSRELEASE 62 #include <linux/slab.h> 63 #endif 64 65 #ifdef CONFIG_PROVE_LOCKING 66 int prove_locking = 1; 67 module_param(prove_locking, int, 0644); 68 #else 69 #define prove_locking 0 70 #endif 71 72 #ifdef CONFIG_LOCK_STAT 73 int lock_stat = 1; 74 module_param(lock_stat, int, 0644); 75 #else 76 #define lock_stat 0 77 #endif 78 79 #ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK 80 static int crossrelease_fullstack = 1; 81 #else 82 static int crossrelease_fullstack; 83 #endif 84 static int __init allow_crossrelease_fullstack(char *str) 85 { 86 crossrelease_fullstack = 1; 87 return 0; 88 } 89 90 early_param("crossrelease_fullstack", allow_crossrelease_fullstack); 91 92 /* 93 * lockdep_lock: protects the lockdep graph, the hashes and the 94 * class/list/hash allocators. 95 * 96 * This is one of the rare exceptions where it's justified 97 * to use a raw spinlock - we really dont want the spinlock 98 * code to recurse back into the lockdep code... 99 */ 100 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 101 102 static int graph_lock(void) 103 { 104 arch_spin_lock(&lockdep_lock); 105 /* 106 * Make sure that if another CPU detected a bug while 107 * walking the graph we dont change it (while the other 108 * CPU is busy printing out stuff with the graph lock 109 * dropped already) 110 */ 111 if (!debug_locks) { 112 arch_spin_unlock(&lockdep_lock); 113 return 0; 114 } 115 /* prevent any recursions within lockdep from causing deadlocks */ 116 current->lockdep_recursion++; 117 return 1; 118 } 119 120 static inline int graph_unlock(void) 121 { 122 if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) { 123 /* 124 * The lockdep graph lock isn't locked while we expect it to 125 * be, we're confused now, bye! 126 */ 127 return DEBUG_LOCKS_WARN_ON(1); 128 } 129 130 current->lockdep_recursion--; 131 arch_spin_unlock(&lockdep_lock); 132 return 0; 133 } 134 135 /* 136 * Turn lock debugging off and return with 0 if it was off already, 137 * and also release the graph lock: 138 */ 139 static inline int debug_locks_off_graph_unlock(void) 140 { 141 int ret = debug_locks_off(); 142 143 arch_spin_unlock(&lockdep_lock); 144 145 return ret; 146 } 147 148 unsigned long nr_list_entries; 149 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; 150 151 /* 152 * All data structures here are protected by the global debug_lock. 153 * 154 * Mutex key structs only get allocated, once during bootup, and never 155 * get freed - this significantly simplifies the debugging code. 156 */ 157 unsigned long nr_lock_classes; 158 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; 159 160 static inline struct lock_class *hlock_class(struct held_lock *hlock) 161 { 162 if (!hlock->class_idx) { 163 /* 164 * Someone passed in garbage, we give up. 165 */ 166 DEBUG_LOCKS_WARN_ON(1); 167 return NULL; 168 } 169 return lock_classes + hlock->class_idx - 1; 170 } 171 172 #ifdef CONFIG_LOCK_STAT 173 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats); 174 175 static inline u64 lockstat_clock(void) 176 { 177 return local_clock(); 178 } 179 180 static int lock_point(unsigned long points[], unsigned long ip) 181 { 182 int i; 183 184 for (i = 0; i < LOCKSTAT_POINTS; i++) { 185 if (points[i] == 0) { 186 points[i] = ip; 187 break; 188 } 189 if (points[i] == ip) 190 break; 191 } 192 193 return i; 194 } 195 196 static void lock_time_inc(struct lock_time *lt, u64 time) 197 { 198 if (time > lt->max) 199 lt->max = time; 200 201 if (time < lt->min || !lt->nr) 202 lt->min = time; 203 204 lt->total += time; 205 lt->nr++; 206 } 207 208 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst) 209 { 210 if (!src->nr) 211 return; 212 213 if (src->max > dst->max) 214 dst->max = src->max; 215 216 if (src->min < dst->min || !dst->nr) 217 dst->min = src->min; 218 219 dst->total += src->total; 220 dst->nr += src->nr; 221 } 222 223 struct lock_class_stats lock_stats(struct lock_class *class) 224 { 225 struct lock_class_stats stats; 226 int cpu, i; 227 228 memset(&stats, 0, sizeof(struct lock_class_stats)); 229 for_each_possible_cpu(cpu) { 230 struct lock_class_stats *pcs = 231 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; 232 233 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++) 234 stats.contention_point[i] += pcs->contention_point[i]; 235 236 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++) 237 stats.contending_point[i] += pcs->contending_point[i]; 238 239 lock_time_add(&pcs->read_waittime, &stats.read_waittime); 240 lock_time_add(&pcs->write_waittime, &stats.write_waittime); 241 242 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime); 243 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime); 244 245 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++) 246 stats.bounces[i] += pcs->bounces[i]; 247 } 248 249 return stats; 250 } 251 252 void clear_lock_stats(struct lock_class *class) 253 { 254 int cpu; 255 256 for_each_possible_cpu(cpu) { 257 struct lock_class_stats *cpu_stats = 258 &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; 259 260 memset(cpu_stats, 0, sizeof(struct lock_class_stats)); 261 } 262 memset(class->contention_point, 0, sizeof(class->contention_point)); 263 memset(class->contending_point, 0, sizeof(class->contending_point)); 264 } 265 266 static struct lock_class_stats *get_lock_stats(struct lock_class *class) 267 { 268 return &get_cpu_var(cpu_lock_stats)[class - lock_classes]; 269 } 270 271 static void put_lock_stats(struct lock_class_stats *stats) 272 { 273 put_cpu_var(cpu_lock_stats); 274 } 275 276 static void lock_release_holdtime(struct held_lock *hlock) 277 { 278 struct lock_class_stats *stats; 279 u64 holdtime; 280 281 if (!lock_stat) 282 return; 283 284 holdtime = lockstat_clock() - hlock->holdtime_stamp; 285 286 stats = get_lock_stats(hlock_class(hlock)); 287 if (hlock->read) 288 lock_time_inc(&stats->read_holdtime, holdtime); 289 else 290 lock_time_inc(&stats->write_holdtime, holdtime); 291 put_lock_stats(stats); 292 } 293 #else 294 static inline void lock_release_holdtime(struct held_lock *hlock) 295 { 296 } 297 #endif 298 299 /* 300 * We keep a global list of all lock classes. The list only grows, 301 * never shrinks. The list is only accessed with the lockdep 302 * spinlock lock held. 303 */ 304 LIST_HEAD(all_lock_classes); 305 306 /* 307 * The lockdep classes are in a hash-table as well, for fast lookup: 308 */ 309 #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) 310 #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS) 311 #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS) 312 #define classhashentry(key) (classhash_table + __classhashfn((key))) 313 314 static struct hlist_head classhash_table[CLASSHASH_SIZE]; 315 316 /* 317 * We put the lock dependency chains into a hash-table as well, to cache 318 * their existence: 319 */ 320 #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1) 321 #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS) 322 #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS) 323 #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain))) 324 325 static struct hlist_head chainhash_table[CHAINHASH_SIZE]; 326 327 /* 328 * The hash key of the lock dependency chains is a hash itself too: 329 * it's a hash of all locks taken up to that lock, including that lock. 330 * It's a 64-bit hash, because it's important for the keys to be 331 * unique. 332 */ 333 static inline u64 iterate_chain_key(u64 key, u32 idx) 334 { 335 u32 k0 = key, k1 = key >> 32; 336 337 __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */ 338 339 return k0 | (u64)k1 << 32; 340 } 341 342 void lockdep_off(void) 343 { 344 current->lockdep_recursion++; 345 } 346 EXPORT_SYMBOL(lockdep_off); 347 348 void lockdep_on(void) 349 { 350 current->lockdep_recursion--; 351 } 352 EXPORT_SYMBOL(lockdep_on); 353 354 /* 355 * Debugging switches: 356 */ 357 358 #define VERBOSE 0 359 #define VERY_VERBOSE 0 360 361 #if VERBOSE 362 # define HARDIRQ_VERBOSE 1 363 # define SOFTIRQ_VERBOSE 1 364 #else 365 # define HARDIRQ_VERBOSE 0 366 # define SOFTIRQ_VERBOSE 0 367 #endif 368 369 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE 370 /* 371 * Quick filtering for interesting events: 372 */ 373 static int class_filter(struct lock_class *class) 374 { 375 #if 0 376 /* Example */ 377 if (class->name_version == 1 && 378 !strcmp(class->name, "lockname")) 379 return 1; 380 if (class->name_version == 1 && 381 !strcmp(class->name, "&struct->lockfield")) 382 return 1; 383 #endif 384 /* Filter everything else. 1 would be to allow everything else */ 385 return 0; 386 } 387 #endif 388 389 static int verbose(struct lock_class *class) 390 { 391 #if VERBOSE 392 return class_filter(class); 393 #endif 394 return 0; 395 } 396 397 /* 398 * Stack-trace: tightly packed array of stack backtrace 399 * addresses. Protected by the graph_lock. 400 */ 401 unsigned long nr_stack_trace_entries; 402 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; 403 404 static void print_lockdep_off(const char *bug_msg) 405 { 406 printk(KERN_DEBUG "%s\n", bug_msg); 407 printk(KERN_DEBUG "turning off the locking correctness validator.\n"); 408 #ifdef CONFIG_LOCK_STAT 409 printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n"); 410 #endif 411 } 412 413 static int save_trace(struct stack_trace *trace) 414 { 415 trace->nr_entries = 0; 416 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; 417 trace->entries = stack_trace + nr_stack_trace_entries; 418 419 trace->skip = 3; 420 421 save_stack_trace(trace); 422 423 /* 424 * Some daft arches put -1 at the end to indicate its a full trace. 425 * 426 * <rant> this is buggy anyway, since it takes a whole extra entry so a 427 * complete trace that maxes out the entries provided will be reported 428 * as incomplete, friggin useless </rant> 429 */ 430 if (trace->nr_entries != 0 && 431 trace->entries[trace->nr_entries-1] == ULONG_MAX) 432 trace->nr_entries--; 433 434 trace->max_entries = trace->nr_entries; 435 436 nr_stack_trace_entries += trace->nr_entries; 437 438 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) { 439 if (!debug_locks_off_graph_unlock()) 440 return 0; 441 442 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!"); 443 dump_stack(); 444 445 return 0; 446 } 447 448 return 1; 449 } 450 451 unsigned int nr_hardirq_chains; 452 unsigned int nr_softirq_chains; 453 unsigned int nr_process_chains; 454 unsigned int max_lockdep_depth; 455 456 #ifdef CONFIG_DEBUG_LOCKDEP 457 /* 458 * Various lockdep statistics: 459 */ 460 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); 461 #endif 462 463 /* 464 * Locking printouts: 465 */ 466 467 #define __USAGE(__STATE) \ 468 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \ 469 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \ 470 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\ 471 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R", 472 473 static const char *usage_str[] = 474 { 475 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE) 476 #include "lockdep_states.h" 477 #undef LOCKDEP_STATE 478 [LOCK_USED] = "INITIAL USE", 479 }; 480 481 const char * __get_key_name(struct lockdep_subclass_key *key, char *str) 482 { 483 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str); 484 } 485 486 static inline unsigned long lock_flag(enum lock_usage_bit bit) 487 { 488 return 1UL << bit; 489 } 490 491 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit) 492 { 493 char c = '.'; 494 495 if (class->usage_mask & lock_flag(bit + 2)) 496 c = '+'; 497 if (class->usage_mask & lock_flag(bit)) { 498 c = '-'; 499 if (class->usage_mask & lock_flag(bit + 2)) 500 c = '?'; 501 } 502 503 return c; 504 } 505 506 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) 507 { 508 int i = 0; 509 510 #define LOCKDEP_STATE(__STATE) \ 511 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \ 512 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ); 513 #include "lockdep_states.h" 514 #undef LOCKDEP_STATE 515 516 usage[i] = '\0'; 517 } 518 519 static void __print_lock_name(struct lock_class *class) 520 { 521 char str[KSYM_NAME_LEN]; 522 const char *name; 523 524 name = class->name; 525 if (!name) { 526 name = __get_key_name(class->key, str); 527 printk(KERN_CONT "%s", name); 528 } else { 529 printk(KERN_CONT "%s", name); 530 if (class->name_version > 1) 531 printk(KERN_CONT "#%d", class->name_version); 532 if (class->subclass) 533 printk(KERN_CONT "/%d", class->subclass); 534 } 535 } 536 537 static void print_lock_name(struct lock_class *class) 538 { 539 char usage[LOCK_USAGE_CHARS]; 540 541 get_usage_chars(class, usage); 542 543 printk(KERN_CONT " ("); 544 __print_lock_name(class); 545 printk(KERN_CONT "){%s}", usage); 546 } 547 548 static void print_lockdep_cache(struct lockdep_map *lock) 549 { 550 const char *name; 551 char str[KSYM_NAME_LEN]; 552 553 name = lock->name; 554 if (!name) 555 name = __get_key_name(lock->key->subkeys, str); 556 557 printk(KERN_CONT "%s", name); 558 } 559 560 static void print_lock(struct held_lock *hlock) 561 { 562 /* 563 * We can be called locklessly through debug_show_all_locks() so be 564 * extra careful, the hlock might have been released and cleared. 565 */ 566 unsigned int class_idx = hlock->class_idx; 567 568 /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */ 569 barrier(); 570 571 if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) { 572 printk(KERN_CONT "<RELEASED>\n"); 573 return; 574 } 575 576 print_lock_name(lock_classes + class_idx - 1); 577 printk(KERN_CONT ", at: [<%p>] %pS\n", 578 (void *)hlock->acquire_ip, (void *)hlock->acquire_ip); 579 } 580 581 static void lockdep_print_held_locks(struct task_struct *curr) 582 { 583 int i, depth = curr->lockdep_depth; 584 585 if (!depth) { 586 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr)); 587 return; 588 } 589 printk("%d lock%s held by %s/%d:\n", 590 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr)); 591 592 for (i = 0; i < depth; i++) { 593 printk(" #%d: ", i); 594 print_lock(curr->held_locks + i); 595 } 596 } 597 598 static void print_kernel_ident(void) 599 { 600 printk("%s %.*s %s\n", init_utsname()->release, 601 (int)strcspn(init_utsname()->version, " "), 602 init_utsname()->version, 603 print_tainted()); 604 } 605 606 static int very_verbose(struct lock_class *class) 607 { 608 #if VERY_VERBOSE 609 return class_filter(class); 610 #endif 611 return 0; 612 } 613 614 /* 615 * Is this the address of a static object: 616 */ 617 #ifdef __KERNEL__ 618 static int static_obj(void *obj) 619 { 620 unsigned long start = (unsigned long) &_stext, 621 end = (unsigned long) &_end, 622 addr = (unsigned long) obj; 623 624 /* 625 * static variable? 626 */ 627 if ((addr >= start) && (addr < end)) 628 return 1; 629 630 if (arch_is_kernel_data(addr)) 631 return 1; 632 633 /* 634 * in-kernel percpu var? 635 */ 636 if (is_kernel_percpu_address(addr)) 637 return 1; 638 639 /* 640 * module static or percpu var? 641 */ 642 return is_module_address(addr) || is_module_percpu_address(addr); 643 } 644 #endif 645 646 /* 647 * To make lock name printouts unique, we calculate a unique 648 * class->name_version generation counter: 649 */ 650 static int count_matching_names(struct lock_class *new_class) 651 { 652 struct lock_class *class; 653 int count = 0; 654 655 if (!new_class->name) 656 return 0; 657 658 list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) { 659 if (new_class->key - new_class->subclass == class->key) 660 return class->name_version; 661 if (class->name && !strcmp(class->name, new_class->name)) 662 count = max(count, class->name_version); 663 } 664 665 return count + 1; 666 } 667 668 /* 669 * Register a lock's class in the hash-table, if the class is not present 670 * yet. Otherwise we look it up. We cache the result in the lock object 671 * itself, so actual lookup of the hash should be once per lock object. 672 */ 673 static inline struct lock_class * 674 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) 675 { 676 struct lockdep_subclass_key *key; 677 struct hlist_head *hash_head; 678 struct lock_class *class; 679 bool is_static = false; 680 681 if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { 682 debug_locks_off(); 683 printk(KERN_ERR 684 "BUG: looking up invalid subclass: %u\n", subclass); 685 printk(KERN_ERR 686 "turning off the locking correctness validator.\n"); 687 dump_stack(); 688 return NULL; 689 } 690 691 /* 692 * Static locks do not have their class-keys yet - for them the key 693 * is the lock object itself. If the lock is in the per cpu area, 694 * the canonical address of the lock (per cpu offset removed) is 695 * used. 696 */ 697 if (unlikely(!lock->key)) { 698 unsigned long can_addr, addr = (unsigned long)lock; 699 700 if (__is_kernel_percpu_address(addr, &can_addr)) 701 lock->key = (void *)can_addr; 702 else if (__is_module_percpu_address(addr, &can_addr)) 703 lock->key = (void *)can_addr; 704 else if (static_obj(lock)) 705 lock->key = (void *)lock; 706 else 707 return ERR_PTR(-EINVAL); 708 is_static = true; 709 } 710 711 /* 712 * NOTE: the class-key must be unique. For dynamic locks, a static 713 * lock_class_key variable is passed in through the mutex_init() 714 * (or spin_lock_init()) call - which acts as the key. For static 715 * locks we use the lock object itself as the key. 716 */ 717 BUILD_BUG_ON(sizeof(struct lock_class_key) > 718 sizeof(struct lockdep_map)); 719 720 key = lock->key->subkeys + subclass; 721 722 hash_head = classhashentry(key); 723 724 /* 725 * We do an RCU walk of the hash, see lockdep_free_key_range(). 726 */ 727 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 728 return NULL; 729 730 hlist_for_each_entry_rcu(class, hash_head, hash_entry) { 731 if (class->key == key) { 732 /* 733 * Huh! same key, different name? Did someone trample 734 * on some memory? We're most confused. 735 */ 736 WARN_ON_ONCE(class->name != lock->name); 737 return class; 738 } 739 } 740 741 return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL); 742 } 743 744 #ifdef CONFIG_LOCKDEP_CROSSRELEASE 745 static void cross_init(struct lockdep_map *lock, int cross); 746 static int cross_lock(struct lockdep_map *lock); 747 static int lock_acquire_crosslock(struct held_lock *hlock); 748 static int lock_release_crosslock(struct lockdep_map *lock); 749 #else 750 static inline void cross_init(struct lockdep_map *lock, int cross) {} 751 static inline int cross_lock(struct lockdep_map *lock) { return 0; } 752 static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; } 753 static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; } 754 #endif 755 756 /* 757 * Register a lock's class in the hash-table, if the class is not present 758 * yet. Otherwise we look it up. We cache the result in the lock object 759 * itself, so actual lookup of the hash should be once per lock object. 760 */ 761 static struct lock_class * 762 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) 763 { 764 struct lockdep_subclass_key *key; 765 struct hlist_head *hash_head; 766 struct lock_class *class; 767 768 DEBUG_LOCKS_WARN_ON(!irqs_disabled()); 769 770 class = look_up_lock_class(lock, subclass); 771 if (likely(!IS_ERR_OR_NULL(class))) 772 goto out_set_class_cache; 773 774 /* 775 * Debug-check: all keys must be persistent! 776 */ 777 if (IS_ERR(class)) { 778 debug_locks_off(); 779 printk("INFO: trying to register non-static key.\n"); 780 printk("the code is fine but needs lockdep annotation.\n"); 781 printk("turning off the locking correctness validator.\n"); 782 dump_stack(); 783 return NULL; 784 } 785 786 key = lock->key->subkeys + subclass; 787 hash_head = classhashentry(key); 788 789 if (!graph_lock()) { 790 return NULL; 791 } 792 /* 793 * We have to do the hash-walk again, to avoid races 794 * with another CPU: 795 */ 796 hlist_for_each_entry_rcu(class, hash_head, hash_entry) { 797 if (class->key == key) 798 goto out_unlock_set; 799 } 800 801 /* 802 * Allocate a new key from the static array, and add it to 803 * the hash: 804 */ 805 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { 806 if (!debug_locks_off_graph_unlock()) { 807 return NULL; 808 } 809 810 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); 811 dump_stack(); 812 return NULL; 813 } 814 class = lock_classes + nr_lock_classes++; 815 debug_atomic_inc(nr_unused_locks); 816 class->key = key; 817 class->name = lock->name; 818 class->subclass = subclass; 819 INIT_LIST_HEAD(&class->lock_entry); 820 INIT_LIST_HEAD(&class->locks_before); 821 INIT_LIST_HEAD(&class->locks_after); 822 class->name_version = count_matching_names(class); 823 /* 824 * We use RCU's safe list-add method to make 825 * parallel walking of the hash-list safe: 826 */ 827 hlist_add_head_rcu(&class->hash_entry, hash_head); 828 /* 829 * Add it to the global list of classes: 830 */ 831 list_add_tail_rcu(&class->lock_entry, &all_lock_classes); 832 833 if (verbose(class)) { 834 graph_unlock(); 835 836 printk("\nnew class %p: %s", class->key, class->name); 837 if (class->name_version > 1) 838 printk(KERN_CONT "#%d", class->name_version); 839 printk(KERN_CONT "\n"); 840 dump_stack(); 841 842 if (!graph_lock()) { 843 return NULL; 844 } 845 } 846 out_unlock_set: 847 graph_unlock(); 848 849 out_set_class_cache: 850 if (!subclass || force) 851 lock->class_cache[0] = class; 852 else if (subclass < NR_LOCKDEP_CACHING_CLASSES) 853 lock->class_cache[subclass] = class; 854 855 /* 856 * Hash collision, did we smoke some? We found a class with a matching 857 * hash but the subclass -- which is hashed in -- didn't match. 858 */ 859 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) 860 return NULL; 861 862 return class; 863 } 864 865 #ifdef CONFIG_PROVE_LOCKING 866 /* 867 * Allocate a lockdep entry. (assumes the graph_lock held, returns 868 * with NULL on failure) 869 */ 870 static struct lock_list *alloc_list_entry(void) 871 { 872 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) { 873 if (!debug_locks_off_graph_unlock()) 874 return NULL; 875 876 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!"); 877 dump_stack(); 878 return NULL; 879 } 880 return list_entries + nr_list_entries++; 881 } 882 883 /* 884 * Add a new dependency to the head of the list: 885 */ 886 static int add_lock_to_list(struct lock_class *this, struct list_head *head, 887 unsigned long ip, int distance, 888 struct stack_trace *trace) 889 { 890 struct lock_list *entry; 891 /* 892 * Lock not present yet - get a new dependency struct and 893 * add it to the list: 894 */ 895 entry = alloc_list_entry(); 896 if (!entry) 897 return 0; 898 899 entry->class = this; 900 entry->distance = distance; 901 entry->trace = *trace; 902 /* 903 * Both allocation and removal are done under the graph lock; but 904 * iteration is under RCU-sched; see look_up_lock_class() and 905 * lockdep_free_key_range(). 906 */ 907 list_add_tail_rcu(&entry->entry, head); 908 909 return 1; 910 } 911 912 /* 913 * For good efficiency of modular, we use power of 2 914 */ 915 #define MAX_CIRCULAR_QUEUE_SIZE 4096UL 916 #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1) 917 918 /* 919 * The circular_queue and helpers is used to implement the 920 * breadth-first search(BFS)algorithem, by which we can build 921 * the shortest path from the next lock to be acquired to the 922 * previous held lock if there is a circular between them. 923 */ 924 struct circular_queue { 925 unsigned long element[MAX_CIRCULAR_QUEUE_SIZE]; 926 unsigned int front, rear; 927 }; 928 929 static struct circular_queue lock_cq; 930 931 unsigned int max_bfs_queue_depth; 932 933 static unsigned int lockdep_dependency_gen_id; 934 935 static inline void __cq_init(struct circular_queue *cq) 936 { 937 cq->front = cq->rear = 0; 938 lockdep_dependency_gen_id++; 939 } 940 941 static inline int __cq_empty(struct circular_queue *cq) 942 { 943 return (cq->front == cq->rear); 944 } 945 946 static inline int __cq_full(struct circular_queue *cq) 947 { 948 return ((cq->rear + 1) & CQ_MASK) == cq->front; 949 } 950 951 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem) 952 { 953 if (__cq_full(cq)) 954 return -1; 955 956 cq->element[cq->rear] = elem; 957 cq->rear = (cq->rear + 1) & CQ_MASK; 958 return 0; 959 } 960 961 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem) 962 { 963 if (__cq_empty(cq)) 964 return -1; 965 966 *elem = cq->element[cq->front]; 967 cq->front = (cq->front + 1) & CQ_MASK; 968 return 0; 969 } 970 971 static inline unsigned int __cq_get_elem_count(struct circular_queue *cq) 972 { 973 return (cq->rear - cq->front) & CQ_MASK; 974 } 975 976 static inline void mark_lock_accessed(struct lock_list *lock, 977 struct lock_list *parent) 978 { 979 unsigned long nr; 980 981 nr = lock - list_entries; 982 WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ 983 lock->parent = parent; 984 lock->class->dep_gen_id = lockdep_dependency_gen_id; 985 } 986 987 static inline unsigned long lock_accessed(struct lock_list *lock) 988 { 989 unsigned long nr; 990 991 nr = lock - list_entries; 992 WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ 993 return lock->class->dep_gen_id == lockdep_dependency_gen_id; 994 } 995 996 static inline struct lock_list *get_lock_parent(struct lock_list *child) 997 { 998 return child->parent; 999 } 1000 1001 static inline int get_lock_depth(struct lock_list *child) 1002 { 1003 int depth = 0; 1004 struct lock_list *parent; 1005 1006 while ((parent = get_lock_parent(child))) { 1007 child = parent; 1008 depth++; 1009 } 1010 return depth; 1011 } 1012 1013 static int __bfs(struct lock_list *source_entry, 1014 void *data, 1015 int (*match)(struct lock_list *entry, void *data), 1016 struct lock_list **target_entry, 1017 int forward) 1018 { 1019 struct lock_list *entry; 1020 struct list_head *head; 1021 struct circular_queue *cq = &lock_cq; 1022 int ret = 1; 1023 1024 if (match(source_entry, data)) { 1025 *target_entry = source_entry; 1026 ret = 0; 1027 goto exit; 1028 } 1029 1030 if (forward) 1031 head = &source_entry->class->locks_after; 1032 else 1033 head = &source_entry->class->locks_before; 1034 1035 if (list_empty(head)) 1036 goto exit; 1037 1038 __cq_init(cq); 1039 __cq_enqueue(cq, (unsigned long)source_entry); 1040 1041 while (!__cq_empty(cq)) { 1042 struct lock_list *lock; 1043 1044 __cq_dequeue(cq, (unsigned long *)&lock); 1045 1046 if (!lock->class) { 1047 ret = -2; 1048 goto exit; 1049 } 1050 1051 if (forward) 1052 head = &lock->class->locks_after; 1053 else 1054 head = &lock->class->locks_before; 1055 1056 DEBUG_LOCKS_WARN_ON(!irqs_disabled()); 1057 1058 list_for_each_entry_rcu(entry, head, entry) { 1059 if (!lock_accessed(entry)) { 1060 unsigned int cq_depth; 1061 mark_lock_accessed(entry, lock); 1062 if (match(entry, data)) { 1063 *target_entry = entry; 1064 ret = 0; 1065 goto exit; 1066 } 1067 1068 if (__cq_enqueue(cq, (unsigned long)entry)) { 1069 ret = -1; 1070 goto exit; 1071 } 1072 cq_depth = __cq_get_elem_count(cq); 1073 if (max_bfs_queue_depth < cq_depth) 1074 max_bfs_queue_depth = cq_depth; 1075 } 1076 } 1077 } 1078 exit: 1079 return ret; 1080 } 1081 1082 static inline int __bfs_forwards(struct lock_list *src_entry, 1083 void *data, 1084 int (*match)(struct lock_list *entry, void *data), 1085 struct lock_list **target_entry) 1086 { 1087 return __bfs(src_entry, data, match, target_entry, 1); 1088 1089 } 1090 1091 static inline int __bfs_backwards(struct lock_list *src_entry, 1092 void *data, 1093 int (*match)(struct lock_list *entry, void *data), 1094 struct lock_list **target_entry) 1095 { 1096 return __bfs(src_entry, data, match, target_entry, 0); 1097 1098 } 1099 1100 /* 1101 * Recursive, forwards-direction lock-dependency checking, used for 1102 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe 1103 * checking. 1104 */ 1105 1106 /* 1107 * Print a dependency chain entry (this is only done when a deadlock 1108 * has been detected): 1109 */ 1110 static noinline int 1111 print_circular_bug_entry(struct lock_list *target, int depth) 1112 { 1113 if (debug_locks_silent) 1114 return 0; 1115 printk("\n-> #%u", depth); 1116 print_lock_name(target->class); 1117 printk(KERN_CONT ":\n"); 1118 print_stack_trace(&target->trace, 6); 1119 1120 return 0; 1121 } 1122 1123 static void 1124 print_circular_lock_scenario(struct held_lock *src, 1125 struct held_lock *tgt, 1126 struct lock_list *prt) 1127 { 1128 struct lock_class *source = hlock_class(src); 1129 struct lock_class *target = hlock_class(tgt); 1130 struct lock_class *parent = prt->class; 1131 1132 /* 1133 * A direct locking problem where unsafe_class lock is taken 1134 * directly by safe_class lock, then all we need to show 1135 * is the deadlock scenario, as it is obvious that the 1136 * unsafe lock is taken under the safe lock. 1137 * 1138 * But if there is a chain instead, where the safe lock takes 1139 * an intermediate lock (middle_class) where this lock is 1140 * not the same as the safe lock, then the lock chain is 1141 * used to describe the problem. Otherwise we would need 1142 * to show a different CPU case for each link in the chain 1143 * from the safe_class lock to the unsafe_class lock. 1144 */ 1145 if (parent != source) { 1146 printk("Chain exists of:\n "); 1147 __print_lock_name(source); 1148 printk(KERN_CONT " --> "); 1149 __print_lock_name(parent); 1150 printk(KERN_CONT " --> "); 1151 __print_lock_name(target); 1152 printk(KERN_CONT "\n\n"); 1153 } 1154 1155 if (cross_lock(tgt->instance)) { 1156 printk(" Possible unsafe locking scenario by crosslock:\n\n"); 1157 printk(" CPU0 CPU1\n"); 1158 printk(" ---- ----\n"); 1159 printk(" lock("); 1160 __print_lock_name(parent); 1161 printk(KERN_CONT ");\n"); 1162 printk(" lock("); 1163 __print_lock_name(target); 1164 printk(KERN_CONT ");\n"); 1165 printk(" lock("); 1166 __print_lock_name(source); 1167 printk(KERN_CONT ");\n"); 1168 printk(" unlock("); 1169 __print_lock_name(target); 1170 printk(KERN_CONT ");\n"); 1171 printk("\n *** DEADLOCK ***\n\n"); 1172 } else { 1173 printk(" Possible unsafe locking scenario:\n\n"); 1174 printk(" CPU0 CPU1\n"); 1175 printk(" ---- ----\n"); 1176 printk(" lock("); 1177 __print_lock_name(target); 1178 printk(KERN_CONT ");\n"); 1179 printk(" lock("); 1180 __print_lock_name(parent); 1181 printk(KERN_CONT ");\n"); 1182 printk(" lock("); 1183 __print_lock_name(target); 1184 printk(KERN_CONT ");\n"); 1185 printk(" lock("); 1186 __print_lock_name(source); 1187 printk(KERN_CONT ");\n"); 1188 printk("\n *** DEADLOCK ***\n\n"); 1189 } 1190 } 1191 1192 /* 1193 * When a circular dependency is detected, print the 1194 * header first: 1195 */ 1196 static noinline int 1197 print_circular_bug_header(struct lock_list *entry, unsigned int depth, 1198 struct held_lock *check_src, 1199 struct held_lock *check_tgt) 1200 { 1201 struct task_struct *curr = current; 1202 1203 if (debug_locks_silent) 1204 return 0; 1205 1206 pr_warn("\n"); 1207 pr_warn("======================================================\n"); 1208 pr_warn("WARNING: possible circular locking dependency detected\n"); 1209 print_kernel_ident(); 1210 pr_warn("------------------------------------------------------\n"); 1211 pr_warn("%s/%d is trying to acquire lock:\n", 1212 curr->comm, task_pid_nr(curr)); 1213 print_lock(check_src); 1214 1215 if (cross_lock(check_tgt->instance)) 1216 pr_warn("\nbut now in release context of a crosslock acquired at the following:\n"); 1217 else 1218 pr_warn("\nbut task is already holding lock:\n"); 1219 1220 print_lock(check_tgt); 1221 pr_warn("\nwhich lock already depends on the new lock.\n\n"); 1222 pr_warn("\nthe existing dependency chain (in reverse order) is:\n"); 1223 1224 print_circular_bug_entry(entry, depth); 1225 1226 return 0; 1227 } 1228 1229 static inline int class_equal(struct lock_list *entry, void *data) 1230 { 1231 return entry->class == data; 1232 } 1233 1234 static noinline int print_circular_bug(struct lock_list *this, 1235 struct lock_list *target, 1236 struct held_lock *check_src, 1237 struct held_lock *check_tgt, 1238 struct stack_trace *trace) 1239 { 1240 struct task_struct *curr = current; 1241 struct lock_list *parent; 1242 struct lock_list *first_parent; 1243 int depth; 1244 1245 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1246 return 0; 1247 1248 if (cross_lock(check_tgt->instance)) 1249 this->trace = *trace; 1250 else if (!save_trace(&this->trace)) 1251 return 0; 1252 1253 depth = get_lock_depth(target); 1254 1255 print_circular_bug_header(target, depth, check_src, check_tgt); 1256 1257 parent = get_lock_parent(target); 1258 first_parent = parent; 1259 1260 while (parent) { 1261 print_circular_bug_entry(parent, --depth); 1262 parent = get_lock_parent(parent); 1263 } 1264 1265 printk("\nother info that might help us debug this:\n\n"); 1266 print_circular_lock_scenario(check_src, check_tgt, 1267 first_parent); 1268 1269 lockdep_print_held_locks(curr); 1270 1271 printk("\nstack backtrace:\n"); 1272 dump_stack(); 1273 1274 return 0; 1275 } 1276 1277 static noinline int print_bfs_bug(int ret) 1278 { 1279 if (!debug_locks_off_graph_unlock()) 1280 return 0; 1281 1282 /* 1283 * Breadth-first-search failed, graph got corrupted? 1284 */ 1285 WARN(1, "lockdep bfs error:%d\n", ret); 1286 1287 return 0; 1288 } 1289 1290 static int noop_count(struct lock_list *entry, void *data) 1291 { 1292 (*(unsigned long *)data)++; 1293 return 0; 1294 } 1295 1296 static unsigned long __lockdep_count_forward_deps(struct lock_list *this) 1297 { 1298 unsigned long count = 0; 1299 struct lock_list *uninitialized_var(target_entry); 1300 1301 __bfs_forwards(this, (void *)&count, noop_count, &target_entry); 1302 1303 return count; 1304 } 1305 unsigned long lockdep_count_forward_deps(struct lock_class *class) 1306 { 1307 unsigned long ret, flags; 1308 struct lock_list this; 1309 1310 this.parent = NULL; 1311 this.class = class; 1312 1313 local_irq_save(flags); 1314 arch_spin_lock(&lockdep_lock); 1315 ret = __lockdep_count_forward_deps(&this); 1316 arch_spin_unlock(&lockdep_lock); 1317 local_irq_restore(flags); 1318 1319 return ret; 1320 } 1321 1322 static unsigned long __lockdep_count_backward_deps(struct lock_list *this) 1323 { 1324 unsigned long count = 0; 1325 struct lock_list *uninitialized_var(target_entry); 1326 1327 __bfs_backwards(this, (void *)&count, noop_count, &target_entry); 1328 1329 return count; 1330 } 1331 1332 unsigned long lockdep_count_backward_deps(struct lock_class *class) 1333 { 1334 unsigned long ret, flags; 1335 struct lock_list this; 1336 1337 this.parent = NULL; 1338 this.class = class; 1339 1340 local_irq_save(flags); 1341 arch_spin_lock(&lockdep_lock); 1342 ret = __lockdep_count_backward_deps(&this); 1343 arch_spin_unlock(&lockdep_lock); 1344 local_irq_restore(flags); 1345 1346 return ret; 1347 } 1348 1349 /* 1350 * Prove that the dependency graph starting at <entry> can not 1351 * lead to <target>. Print an error and return 0 if it does. 1352 */ 1353 static noinline int 1354 check_noncircular(struct lock_list *root, struct lock_class *target, 1355 struct lock_list **target_entry) 1356 { 1357 int result; 1358 1359 debug_atomic_inc(nr_cyclic_checks); 1360 1361 result = __bfs_forwards(root, target, class_equal, target_entry); 1362 1363 return result; 1364 } 1365 1366 static noinline int 1367 check_redundant(struct lock_list *root, struct lock_class *target, 1368 struct lock_list **target_entry) 1369 { 1370 int result; 1371 1372 debug_atomic_inc(nr_redundant_checks); 1373 1374 result = __bfs_forwards(root, target, class_equal, target_entry); 1375 1376 return result; 1377 } 1378 1379 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 1380 /* 1381 * Forwards and backwards subgraph searching, for the purposes of 1382 * proving that two subgraphs can be connected by a new dependency 1383 * without creating any illegal irq-safe -> irq-unsafe lock dependency. 1384 */ 1385 1386 static inline int usage_match(struct lock_list *entry, void *bit) 1387 { 1388 return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit); 1389 } 1390 1391 1392 1393 /* 1394 * Find a node in the forwards-direction dependency sub-graph starting 1395 * at @root->class that matches @bit. 1396 * 1397 * Return 0 if such a node exists in the subgraph, and put that node 1398 * into *@target_entry. 1399 * 1400 * Return 1 otherwise and keep *@target_entry unchanged. 1401 * Return <0 on error. 1402 */ 1403 static int 1404 find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit, 1405 struct lock_list **target_entry) 1406 { 1407 int result; 1408 1409 debug_atomic_inc(nr_find_usage_forwards_checks); 1410 1411 result = __bfs_forwards(root, (void *)bit, usage_match, target_entry); 1412 1413 return result; 1414 } 1415 1416 /* 1417 * Find a node in the backwards-direction dependency sub-graph starting 1418 * at @root->class that matches @bit. 1419 * 1420 * Return 0 if such a node exists in the subgraph, and put that node 1421 * into *@target_entry. 1422 * 1423 * Return 1 otherwise and keep *@target_entry unchanged. 1424 * Return <0 on error. 1425 */ 1426 static int 1427 find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit, 1428 struct lock_list **target_entry) 1429 { 1430 int result; 1431 1432 debug_atomic_inc(nr_find_usage_backwards_checks); 1433 1434 result = __bfs_backwards(root, (void *)bit, usage_match, target_entry); 1435 1436 return result; 1437 } 1438 1439 static void print_lock_class_header(struct lock_class *class, int depth) 1440 { 1441 int bit; 1442 1443 printk("%*s->", depth, ""); 1444 print_lock_name(class); 1445 printk(KERN_CONT " ops: %lu", class->ops); 1446 printk(KERN_CONT " {\n"); 1447 1448 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) { 1449 if (class->usage_mask & (1 << bit)) { 1450 int len = depth; 1451 1452 len += printk("%*s %s", depth, "", usage_str[bit]); 1453 len += printk(KERN_CONT " at:\n"); 1454 print_stack_trace(class->usage_traces + bit, len); 1455 } 1456 } 1457 printk("%*s }\n", depth, ""); 1458 1459 printk("%*s ... key at: [<%p>] %pS\n", 1460 depth, "", class->key, class->key); 1461 } 1462 1463 /* 1464 * printk the shortest lock dependencies from @start to @end in reverse order: 1465 */ 1466 static void __used 1467 print_shortest_lock_dependencies(struct lock_list *leaf, 1468 struct lock_list *root) 1469 { 1470 struct lock_list *entry = leaf; 1471 int depth; 1472 1473 /*compute depth from generated tree by BFS*/ 1474 depth = get_lock_depth(leaf); 1475 1476 do { 1477 print_lock_class_header(entry->class, depth); 1478 printk("%*s ... acquired at:\n", depth, ""); 1479 print_stack_trace(&entry->trace, 2); 1480 printk("\n"); 1481 1482 if (depth == 0 && (entry != root)) { 1483 printk("lockdep:%s bad path found in chain graph\n", __func__); 1484 break; 1485 } 1486 1487 entry = get_lock_parent(entry); 1488 depth--; 1489 } while (entry && (depth >= 0)); 1490 1491 return; 1492 } 1493 1494 static void 1495 print_irq_lock_scenario(struct lock_list *safe_entry, 1496 struct lock_list *unsafe_entry, 1497 struct lock_class *prev_class, 1498 struct lock_class *next_class) 1499 { 1500 struct lock_class *safe_class = safe_entry->class; 1501 struct lock_class *unsafe_class = unsafe_entry->class; 1502 struct lock_class *middle_class = prev_class; 1503 1504 if (middle_class == safe_class) 1505 middle_class = next_class; 1506 1507 /* 1508 * A direct locking problem where unsafe_class lock is taken 1509 * directly by safe_class lock, then all we need to show 1510 * is the deadlock scenario, as it is obvious that the 1511 * unsafe lock is taken under the safe lock. 1512 * 1513 * But if there is a chain instead, where the safe lock takes 1514 * an intermediate lock (middle_class) where this lock is 1515 * not the same as the safe lock, then the lock chain is 1516 * used to describe the problem. Otherwise we would need 1517 * to show a different CPU case for each link in the chain 1518 * from the safe_class lock to the unsafe_class lock. 1519 */ 1520 if (middle_class != unsafe_class) { 1521 printk("Chain exists of:\n "); 1522 __print_lock_name(safe_class); 1523 printk(KERN_CONT " --> "); 1524 __print_lock_name(middle_class); 1525 printk(KERN_CONT " --> "); 1526 __print_lock_name(unsafe_class); 1527 printk(KERN_CONT "\n\n"); 1528 } 1529 1530 printk(" Possible interrupt unsafe locking scenario:\n\n"); 1531 printk(" CPU0 CPU1\n"); 1532 printk(" ---- ----\n"); 1533 printk(" lock("); 1534 __print_lock_name(unsafe_class); 1535 printk(KERN_CONT ");\n"); 1536 printk(" local_irq_disable();\n"); 1537 printk(" lock("); 1538 __print_lock_name(safe_class); 1539 printk(KERN_CONT ");\n"); 1540 printk(" lock("); 1541 __print_lock_name(middle_class); 1542 printk(KERN_CONT ");\n"); 1543 printk(" <Interrupt>\n"); 1544 printk(" lock("); 1545 __print_lock_name(safe_class); 1546 printk(KERN_CONT ");\n"); 1547 printk("\n *** DEADLOCK ***\n\n"); 1548 } 1549 1550 static int 1551 print_bad_irq_dependency(struct task_struct *curr, 1552 struct lock_list *prev_root, 1553 struct lock_list *next_root, 1554 struct lock_list *backwards_entry, 1555 struct lock_list *forwards_entry, 1556 struct held_lock *prev, 1557 struct held_lock *next, 1558 enum lock_usage_bit bit1, 1559 enum lock_usage_bit bit2, 1560 const char *irqclass) 1561 { 1562 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1563 return 0; 1564 1565 pr_warn("\n"); 1566 pr_warn("=====================================================\n"); 1567 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n", 1568 irqclass, irqclass); 1569 print_kernel_ident(); 1570 pr_warn("-----------------------------------------------------\n"); 1571 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", 1572 curr->comm, task_pid_nr(curr), 1573 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, 1574 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, 1575 curr->hardirqs_enabled, 1576 curr->softirqs_enabled); 1577 print_lock(next); 1578 1579 pr_warn("\nand this task is already holding:\n"); 1580 print_lock(prev); 1581 pr_warn("which would create a new lock dependency:\n"); 1582 print_lock_name(hlock_class(prev)); 1583 pr_cont(" ->"); 1584 print_lock_name(hlock_class(next)); 1585 pr_cont("\n"); 1586 1587 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n", 1588 irqclass); 1589 print_lock_name(backwards_entry->class); 1590 pr_warn("\n... which became %s-irq-safe at:\n", irqclass); 1591 1592 print_stack_trace(backwards_entry->class->usage_traces + bit1, 1); 1593 1594 pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass); 1595 print_lock_name(forwards_entry->class); 1596 pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass); 1597 pr_warn("..."); 1598 1599 print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); 1600 1601 pr_warn("\nother info that might help us debug this:\n\n"); 1602 print_irq_lock_scenario(backwards_entry, forwards_entry, 1603 hlock_class(prev), hlock_class(next)); 1604 1605 lockdep_print_held_locks(curr); 1606 1607 pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass); 1608 if (!save_trace(&prev_root->trace)) 1609 return 0; 1610 print_shortest_lock_dependencies(backwards_entry, prev_root); 1611 1612 pr_warn("\nthe dependencies between the lock to be acquired"); 1613 pr_warn(" and %s-irq-unsafe lock:\n", irqclass); 1614 if (!save_trace(&next_root->trace)) 1615 return 0; 1616 print_shortest_lock_dependencies(forwards_entry, next_root); 1617 1618 pr_warn("\nstack backtrace:\n"); 1619 dump_stack(); 1620 1621 return 0; 1622 } 1623 1624 static int 1625 check_usage(struct task_struct *curr, struct held_lock *prev, 1626 struct held_lock *next, enum lock_usage_bit bit_backwards, 1627 enum lock_usage_bit bit_forwards, const char *irqclass) 1628 { 1629 int ret; 1630 struct lock_list this, that; 1631 struct lock_list *uninitialized_var(target_entry); 1632 struct lock_list *uninitialized_var(target_entry1); 1633 1634 this.parent = NULL; 1635 1636 this.class = hlock_class(prev); 1637 ret = find_usage_backwards(&this, bit_backwards, &target_entry); 1638 if (ret < 0) 1639 return print_bfs_bug(ret); 1640 if (ret == 1) 1641 return ret; 1642 1643 that.parent = NULL; 1644 that.class = hlock_class(next); 1645 ret = find_usage_forwards(&that, bit_forwards, &target_entry1); 1646 if (ret < 0) 1647 return print_bfs_bug(ret); 1648 if (ret == 1) 1649 return ret; 1650 1651 return print_bad_irq_dependency(curr, &this, &that, 1652 target_entry, target_entry1, 1653 prev, next, 1654 bit_backwards, bit_forwards, irqclass); 1655 } 1656 1657 static const char *state_names[] = { 1658 #define LOCKDEP_STATE(__STATE) \ 1659 __stringify(__STATE), 1660 #include "lockdep_states.h" 1661 #undef LOCKDEP_STATE 1662 }; 1663 1664 static const char *state_rnames[] = { 1665 #define LOCKDEP_STATE(__STATE) \ 1666 __stringify(__STATE)"-READ", 1667 #include "lockdep_states.h" 1668 #undef LOCKDEP_STATE 1669 }; 1670 1671 static inline const char *state_name(enum lock_usage_bit bit) 1672 { 1673 return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2]; 1674 } 1675 1676 static int exclusive_bit(int new_bit) 1677 { 1678 /* 1679 * USED_IN 1680 * USED_IN_READ 1681 * ENABLED 1682 * ENABLED_READ 1683 * 1684 * bit 0 - write/read 1685 * bit 1 - used_in/enabled 1686 * bit 2+ state 1687 */ 1688 1689 int state = new_bit & ~3; 1690 int dir = new_bit & 2; 1691 1692 /* 1693 * keep state, bit flip the direction and strip read. 1694 */ 1695 return state | (dir ^ 2); 1696 } 1697 1698 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, 1699 struct held_lock *next, enum lock_usage_bit bit) 1700 { 1701 /* 1702 * Prove that the new dependency does not connect a hardirq-safe 1703 * lock with a hardirq-unsafe lock - to achieve this we search 1704 * the backwards-subgraph starting at <prev>, and the 1705 * forwards-subgraph starting at <next>: 1706 */ 1707 if (!check_usage(curr, prev, next, bit, 1708 exclusive_bit(bit), state_name(bit))) 1709 return 0; 1710 1711 bit++; /* _READ */ 1712 1713 /* 1714 * Prove that the new dependency does not connect a hardirq-safe-read 1715 * lock with a hardirq-unsafe lock - to achieve this we search 1716 * the backwards-subgraph starting at <prev>, and the 1717 * forwards-subgraph starting at <next>: 1718 */ 1719 if (!check_usage(curr, prev, next, bit, 1720 exclusive_bit(bit), state_name(bit))) 1721 return 0; 1722 1723 return 1; 1724 } 1725 1726 static int 1727 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, 1728 struct held_lock *next) 1729 { 1730 #define LOCKDEP_STATE(__STATE) \ 1731 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \ 1732 return 0; 1733 #include "lockdep_states.h" 1734 #undef LOCKDEP_STATE 1735 1736 return 1; 1737 } 1738 1739 static void inc_chains(void) 1740 { 1741 if (current->hardirq_context) 1742 nr_hardirq_chains++; 1743 else { 1744 if (current->softirq_context) 1745 nr_softirq_chains++; 1746 else 1747 nr_process_chains++; 1748 } 1749 } 1750 1751 #else 1752 1753 static inline int 1754 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, 1755 struct held_lock *next) 1756 { 1757 return 1; 1758 } 1759 1760 static inline void inc_chains(void) 1761 { 1762 nr_process_chains++; 1763 } 1764 1765 #endif 1766 1767 static void 1768 print_deadlock_scenario(struct held_lock *nxt, 1769 struct held_lock *prv) 1770 { 1771 struct lock_class *next = hlock_class(nxt); 1772 struct lock_class *prev = hlock_class(prv); 1773 1774 printk(" Possible unsafe locking scenario:\n\n"); 1775 printk(" CPU0\n"); 1776 printk(" ----\n"); 1777 printk(" lock("); 1778 __print_lock_name(prev); 1779 printk(KERN_CONT ");\n"); 1780 printk(" lock("); 1781 __print_lock_name(next); 1782 printk(KERN_CONT ");\n"); 1783 printk("\n *** DEADLOCK ***\n\n"); 1784 printk(" May be due to missing lock nesting notation\n\n"); 1785 } 1786 1787 static int 1788 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, 1789 struct held_lock *next) 1790 { 1791 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1792 return 0; 1793 1794 pr_warn("\n"); 1795 pr_warn("============================================\n"); 1796 pr_warn("WARNING: possible recursive locking detected\n"); 1797 print_kernel_ident(); 1798 pr_warn("--------------------------------------------\n"); 1799 pr_warn("%s/%d is trying to acquire lock:\n", 1800 curr->comm, task_pid_nr(curr)); 1801 print_lock(next); 1802 pr_warn("\nbut task is already holding lock:\n"); 1803 print_lock(prev); 1804 1805 pr_warn("\nother info that might help us debug this:\n"); 1806 print_deadlock_scenario(next, prev); 1807 lockdep_print_held_locks(curr); 1808 1809 pr_warn("\nstack backtrace:\n"); 1810 dump_stack(); 1811 1812 return 0; 1813 } 1814 1815 /* 1816 * Check whether we are holding such a class already. 1817 * 1818 * (Note that this has to be done separately, because the graph cannot 1819 * detect such classes of deadlocks.) 1820 * 1821 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read 1822 */ 1823 static int 1824 check_deadlock(struct task_struct *curr, struct held_lock *next, 1825 struct lockdep_map *next_instance, int read) 1826 { 1827 struct held_lock *prev; 1828 struct held_lock *nest = NULL; 1829 int i; 1830 1831 for (i = 0; i < curr->lockdep_depth; i++) { 1832 prev = curr->held_locks + i; 1833 1834 if (prev->instance == next->nest_lock) 1835 nest = prev; 1836 1837 if (hlock_class(prev) != hlock_class(next)) 1838 continue; 1839 1840 /* 1841 * Allow read-after-read recursion of the same 1842 * lock class (i.e. read_lock(lock)+read_lock(lock)): 1843 */ 1844 if ((read == 2) && prev->read) 1845 return 2; 1846 1847 /* 1848 * We're holding the nest_lock, which serializes this lock's 1849 * nesting behaviour. 1850 */ 1851 if (nest) 1852 return 2; 1853 1854 if (cross_lock(prev->instance)) 1855 continue; 1856 1857 return print_deadlock_bug(curr, prev, next); 1858 } 1859 return 1; 1860 } 1861 1862 /* 1863 * There was a chain-cache miss, and we are about to add a new dependency 1864 * to a previous lock. We recursively validate the following rules: 1865 * 1866 * - would the adding of the <prev> -> <next> dependency create a 1867 * circular dependency in the graph? [== circular deadlock] 1868 * 1869 * - does the new prev->next dependency connect any hardirq-safe lock 1870 * (in the full backwards-subgraph starting at <prev>) with any 1871 * hardirq-unsafe lock (in the full forwards-subgraph starting at 1872 * <next>)? [== illegal lock inversion with hardirq contexts] 1873 * 1874 * - does the new prev->next dependency connect any softirq-safe lock 1875 * (in the full backwards-subgraph starting at <prev>) with any 1876 * softirq-unsafe lock (in the full forwards-subgraph starting at 1877 * <next>)? [== illegal lock inversion with softirq contexts] 1878 * 1879 * any of these scenarios could lead to a deadlock. 1880 * 1881 * Then if all the validations pass, we add the forwards and backwards 1882 * dependency. 1883 */ 1884 static int 1885 check_prev_add(struct task_struct *curr, struct held_lock *prev, 1886 struct held_lock *next, int distance, struct stack_trace *trace, 1887 int (*save)(struct stack_trace *trace)) 1888 { 1889 struct lock_list *uninitialized_var(target_entry); 1890 struct lock_list *entry; 1891 struct lock_list this; 1892 int ret; 1893 1894 /* 1895 * Prove that the new <prev> -> <next> dependency would not 1896 * create a circular dependency in the graph. (We do this by 1897 * forward-recursing into the graph starting at <next>, and 1898 * checking whether we can reach <prev>.) 1899 * 1900 * We are using global variables to control the recursion, to 1901 * keep the stackframe size of the recursive functions low: 1902 */ 1903 this.class = hlock_class(next); 1904 this.parent = NULL; 1905 ret = check_noncircular(&this, hlock_class(prev), &target_entry); 1906 if (unlikely(!ret)) { 1907 if (!trace->entries) { 1908 /* 1909 * If @save fails here, the printing might trigger 1910 * a WARN but because of the !nr_entries it should 1911 * not do bad things. 1912 */ 1913 save(trace); 1914 } 1915 return print_circular_bug(&this, target_entry, next, prev, trace); 1916 } 1917 else if (unlikely(ret < 0)) 1918 return print_bfs_bug(ret); 1919 1920 if (!check_prev_add_irq(curr, prev, next)) 1921 return 0; 1922 1923 /* 1924 * For recursive read-locks we do all the dependency checks, 1925 * but we dont store read-triggered dependencies (only 1926 * write-triggered dependencies). This ensures that only the 1927 * write-side dependencies matter, and that if for example a 1928 * write-lock never takes any other locks, then the reads are 1929 * equivalent to a NOP. 1930 */ 1931 if (next->read == 2 || prev->read == 2) 1932 return 1; 1933 /* 1934 * Is the <prev> -> <next> dependency already present? 1935 * 1936 * (this may occur even though this is a new chain: consider 1937 * e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3 1938 * chains - the second one will be new, but L1 already has 1939 * L2 added to its dependency list, due to the first chain.) 1940 */ 1941 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { 1942 if (entry->class == hlock_class(next)) { 1943 if (distance == 1) 1944 entry->distance = 1; 1945 return 1; 1946 } 1947 } 1948 1949 /* 1950 * Is the <prev> -> <next> link redundant? 1951 */ 1952 this.class = hlock_class(prev); 1953 this.parent = NULL; 1954 ret = check_redundant(&this, hlock_class(next), &target_entry); 1955 if (!ret) { 1956 debug_atomic_inc(nr_redundant); 1957 return 2; 1958 } 1959 if (ret < 0) 1960 return print_bfs_bug(ret); 1961 1962 1963 if (!trace->entries && !save(trace)) 1964 return 0; 1965 1966 /* 1967 * Ok, all validations passed, add the new lock 1968 * to the previous lock's dependency list: 1969 */ 1970 ret = add_lock_to_list(hlock_class(next), 1971 &hlock_class(prev)->locks_after, 1972 next->acquire_ip, distance, trace); 1973 1974 if (!ret) 1975 return 0; 1976 1977 ret = add_lock_to_list(hlock_class(prev), 1978 &hlock_class(next)->locks_before, 1979 next->acquire_ip, distance, trace); 1980 if (!ret) 1981 return 0; 1982 1983 return 2; 1984 } 1985 1986 /* 1987 * Add the dependency to all directly-previous locks that are 'relevant'. 1988 * The ones that are relevant are (in increasing distance from curr): 1989 * all consecutive trylock entries and the final non-trylock entry - or 1990 * the end of this context's lock-chain - whichever comes first. 1991 */ 1992 static int 1993 check_prevs_add(struct task_struct *curr, struct held_lock *next) 1994 { 1995 int depth = curr->lockdep_depth; 1996 struct held_lock *hlock; 1997 struct stack_trace trace = { 1998 .nr_entries = 0, 1999 .max_entries = 0, 2000 .entries = NULL, 2001 .skip = 0, 2002 }; 2003 2004 /* 2005 * Debugging checks. 2006 * 2007 * Depth must not be zero for a non-head lock: 2008 */ 2009 if (!depth) 2010 goto out_bug; 2011 /* 2012 * At least two relevant locks must exist for this 2013 * to be a head: 2014 */ 2015 if (curr->held_locks[depth].irq_context != 2016 curr->held_locks[depth-1].irq_context) 2017 goto out_bug; 2018 2019 for (;;) { 2020 int distance = curr->lockdep_depth - depth + 1; 2021 hlock = curr->held_locks + depth - 1; 2022 /* 2023 * Only non-crosslock entries get new dependencies added. 2024 * Crosslock entries will be added by commit later: 2025 */ 2026 if (!cross_lock(hlock->instance)) { 2027 /* 2028 * Only non-recursive-read entries get new dependencies 2029 * added: 2030 */ 2031 if (hlock->read != 2 && hlock->check) { 2032 int ret = check_prev_add(curr, hlock, next, 2033 distance, &trace, save_trace); 2034 if (!ret) 2035 return 0; 2036 2037 /* 2038 * Stop after the first non-trylock entry, 2039 * as non-trylock entries have added their 2040 * own direct dependencies already, so this 2041 * lock is connected to them indirectly: 2042 */ 2043 if (!hlock->trylock) 2044 break; 2045 } 2046 } 2047 depth--; 2048 /* 2049 * End of lock-stack? 2050 */ 2051 if (!depth) 2052 break; 2053 /* 2054 * Stop the search if we cross into another context: 2055 */ 2056 if (curr->held_locks[depth].irq_context != 2057 curr->held_locks[depth-1].irq_context) 2058 break; 2059 } 2060 return 1; 2061 out_bug: 2062 if (!debug_locks_off_graph_unlock()) 2063 return 0; 2064 2065 /* 2066 * Clearly we all shouldn't be here, but since we made it we 2067 * can reliable say we messed up our state. See the above two 2068 * gotos for reasons why we could possibly end up here. 2069 */ 2070 WARN_ON(1); 2071 2072 return 0; 2073 } 2074 2075 unsigned long nr_lock_chains; 2076 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS]; 2077 int nr_chain_hlocks; 2078 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS]; 2079 2080 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i) 2081 { 2082 return lock_classes + chain_hlocks[chain->base + i]; 2083 } 2084 2085 /* 2086 * Returns the index of the first held_lock of the current chain 2087 */ 2088 static inline int get_first_held_lock(struct task_struct *curr, 2089 struct held_lock *hlock) 2090 { 2091 int i; 2092 struct held_lock *hlock_curr; 2093 2094 for (i = curr->lockdep_depth - 1; i >= 0; i--) { 2095 hlock_curr = curr->held_locks + i; 2096 if (hlock_curr->irq_context != hlock->irq_context) 2097 break; 2098 2099 } 2100 2101 return ++i; 2102 } 2103 2104 #ifdef CONFIG_DEBUG_LOCKDEP 2105 /* 2106 * Returns the next chain_key iteration 2107 */ 2108 static u64 print_chain_key_iteration(int class_idx, u64 chain_key) 2109 { 2110 u64 new_chain_key = iterate_chain_key(chain_key, class_idx); 2111 2112 printk(" class_idx:%d -> chain_key:%016Lx", 2113 class_idx, 2114 (unsigned long long)new_chain_key); 2115 return new_chain_key; 2116 } 2117 2118 static void 2119 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next) 2120 { 2121 struct held_lock *hlock; 2122 u64 chain_key = 0; 2123 int depth = curr->lockdep_depth; 2124 int i; 2125 2126 printk("depth: %u\n", depth + 1); 2127 for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) { 2128 hlock = curr->held_locks + i; 2129 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key); 2130 2131 print_lock(hlock); 2132 } 2133 2134 print_chain_key_iteration(hlock_next->class_idx, chain_key); 2135 print_lock(hlock_next); 2136 } 2137 2138 static void print_chain_keys_chain(struct lock_chain *chain) 2139 { 2140 int i; 2141 u64 chain_key = 0; 2142 int class_id; 2143 2144 printk("depth: %u\n", chain->depth); 2145 for (i = 0; i < chain->depth; i++) { 2146 class_id = chain_hlocks[chain->base + i]; 2147 chain_key = print_chain_key_iteration(class_id + 1, chain_key); 2148 2149 print_lock_name(lock_classes + class_id); 2150 printk("\n"); 2151 } 2152 } 2153 2154 static void print_collision(struct task_struct *curr, 2155 struct held_lock *hlock_next, 2156 struct lock_chain *chain) 2157 { 2158 pr_warn("\n"); 2159 pr_warn("============================\n"); 2160 pr_warn("WARNING: chain_key collision\n"); 2161 print_kernel_ident(); 2162 pr_warn("----------------------------\n"); 2163 pr_warn("%s/%d: ", current->comm, task_pid_nr(current)); 2164 pr_warn("Hash chain already cached but the contents don't match!\n"); 2165 2166 pr_warn("Held locks:"); 2167 print_chain_keys_held_locks(curr, hlock_next); 2168 2169 pr_warn("Locks in cached chain:"); 2170 print_chain_keys_chain(chain); 2171 2172 pr_warn("\nstack backtrace:\n"); 2173 dump_stack(); 2174 } 2175 #endif 2176 2177 /* 2178 * Checks whether the chain and the current held locks are consistent 2179 * in depth and also in content. If they are not it most likely means 2180 * that there was a collision during the calculation of the chain_key. 2181 * Returns: 0 not passed, 1 passed 2182 */ 2183 static int check_no_collision(struct task_struct *curr, 2184 struct held_lock *hlock, 2185 struct lock_chain *chain) 2186 { 2187 #ifdef CONFIG_DEBUG_LOCKDEP 2188 int i, j, id; 2189 2190 i = get_first_held_lock(curr, hlock); 2191 2192 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) { 2193 print_collision(curr, hlock, chain); 2194 return 0; 2195 } 2196 2197 for (j = 0; j < chain->depth - 1; j++, i++) { 2198 id = curr->held_locks[i].class_idx - 1; 2199 2200 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) { 2201 print_collision(curr, hlock, chain); 2202 return 0; 2203 } 2204 } 2205 #endif 2206 return 1; 2207 } 2208 2209 /* 2210 * This is for building a chain between just two different classes, 2211 * instead of adding a new hlock upon current, which is done by 2212 * add_chain_cache(). 2213 * 2214 * This can be called in any context with two classes, while 2215 * add_chain_cache() must be done within the lock owener's context 2216 * since it uses hlock which might be racy in another context. 2217 */ 2218 static inline int add_chain_cache_classes(unsigned int prev, 2219 unsigned int next, 2220 unsigned int irq_context, 2221 u64 chain_key) 2222 { 2223 struct hlist_head *hash_head = chainhashentry(chain_key); 2224 struct lock_chain *chain; 2225 2226 /* 2227 * Allocate a new chain entry from the static array, and add 2228 * it to the hash: 2229 */ 2230 2231 /* 2232 * We might need to take the graph lock, ensure we've got IRQs 2233 * disabled to make this an IRQ-safe lock.. for recursion reasons 2234 * lockdep won't complain about its own locking errors. 2235 */ 2236 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2237 return 0; 2238 2239 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { 2240 if (!debug_locks_off_graph_unlock()) 2241 return 0; 2242 2243 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); 2244 dump_stack(); 2245 return 0; 2246 } 2247 2248 chain = lock_chains + nr_lock_chains++; 2249 chain->chain_key = chain_key; 2250 chain->irq_context = irq_context; 2251 chain->depth = 2; 2252 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { 2253 chain->base = nr_chain_hlocks; 2254 nr_chain_hlocks += chain->depth; 2255 chain_hlocks[chain->base] = prev - 1; 2256 chain_hlocks[chain->base + 1] = next -1; 2257 } 2258 #ifdef CONFIG_DEBUG_LOCKDEP 2259 /* 2260 * Important for check_no_collision(). 2261 */ 2262 else { 2263 if (!debug_locks_off_graph_unlock()) 2264 return 0; 2265 2266 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); 2267 dump_stack(); 2268 return 0; 2269 } 2270 #endif 2271 2272 hlist_add_head_rcu(&chain->entry, hash_head); 2273 debug_atomic_inc(chain_lookup_misses); 2274 inc_chains(); 2275 2276 return 1; 2277 } 2278 2279 /* 2280 * Adds a dependency chain into chain hashtable. And must be called with 2281 * graph_lock held. 2282 * 2283 * Return 0 if fail, and graph_lock is released. 2284 * Return 1 if succeed, with graph_lock held. 2285 */ 2286 static inline int add_chain_cache(struct task_struct *curr, 2287 struct held_lock *hlock, 2288 u64 chain_key) 2289 { 2290 struct lock_class *class = hlock_class(hlock); 2291 struct hlist_head *hash_head = chainhashentry(chain_key); 2292 struct lock_chain *chain; 2293 int i, j; 2294 2295 /* 2296 * Allocate a new chain entry from the static array, and add 2297 * it to the hash: 2298 */ 2299 2300 /* 2301 * We might need to take the graph lock, ensure we've got IRQs 2302 * disabled to make this an IRQ-safe lock.. for recursion reasons 2303 * lockdep won't complain about its own locking errors. 2304 */ 2305 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2306 return 0; 2307 2308 if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) { 2309 if (!debug_locks_off_graph_unlock()) 2310 return 0; 2311 2312 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!"); 2313 dump_stack(); 2314 return 0; 2315 } 2316 chain = lock_chains + nr_lock_chains++; 2317 chain->chain_key = chain_key; 2318 chain->irq_context = hlock->irq_context; 2319 i = get_first_held_lock(curr, hlock); 2320 chain->depth = curr->lockdep_depth + 1 - i; 2321 2322 BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks)); 2323 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); 2324 BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes)); 2325 2326 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { 2327 chain->base = nr_chain_hlocks; 2328 for (j = 0; j < chain->depth - 1; j++, i++) { 2329 int lock_id = curr->held_locks[i].class_idx - 1; 2330 chain_hlocks[chain->base + j] = lock_id; 2331 } 2332 chain_hlocks[chain->base + j] = class - lock_classes; 2333 } 2334 2335 if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS) 2336 nr_chain_hlocks += chain->depth; 2337 2338 #ifdef CONFIG_DEBUG_LOCKDEP 2339 /* 2340 * Important for check_no_collision(). 2341 */ 2342 if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) { 2343 if (!debug_locks_off_graph_unlock()) 2344 return 0; 2345 2346 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); 2347 dump_stack(); 2348 return 0; 2349 } 2350 #endif 2351 2352 hlist_add_head_rcu(&chain->entry, hash_head); 2353 debug_atomic_inc(chain_lookup_misses); 2354 inc_chains(); 2355 2356 return 1; 2357 } 2358 2359 /* 2360 * Look up a dependency chain. 2361 */ 2362 static inline struct lock_chain *lookup_chain_cache(u64 chain_key) 2363 { 2364 struct hlist_head *hash_head = chainhashentry(chain_key); 2365 struct lock_chain *chain; 2366 2367 /* 2368 * We can walk it lock-free, because entries only get added 2369 * to the hash: 2370 */ 2371 hlist_for_each_entry_rcu(chain, hash_head, entry) { 2372 if (chain->chain_key == chain_key) { 2373 debug_atomic_inc(chain_lookup_hits); 2374 return chain; 2375 } 2376 } 2377 return NULL; 2378 } 2379 2380 /* 2381 * If the key is not present yet in dependency chain cache then 2382 * add it and return 1 - in this case the new dependency chain is 2383 * validated. If the key is already hashed, return 0. 2384 * (On return with 1 graph_lock is held.) 2385 */ 2386 static inline int lookup_chain_cache_add(struct task_struct *curr, 2387 struct held_lock *hlock, 2388 u64 chain_key) 2389 { 2390 struct lock_class *class = hlock_class(hlock); 2391 struct lock_chain *chain = lookup_chain_cache(chain_key); 2392 2393 if (chain) { 2394 cache_hit: 2395 if (!check_no_collision(curr, hlock, chain)) 2396 return 0; 2397 2398 if (very_verbose(class)) { 2399 printk("\nhash chain already cached, key: " 2400 "%016Lx tail class: [%p] %s\n", 2401 (unsigned long long)chain_key, 2402 class->key, class->name); 2403 } 2404 2405 return 0; 2406 } 2407 2408 if (very_verbose(class)) { 2409 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n", 2410 (unsigned long long)chain_key, class->key, class->name); 2411 } 2412 2413 if (!graph_lock()) 2414 return 0; 2415 2416 /* 2417 * We have to walk the chain again locked - to avoid duplicates: 2418 */ 2419 chain = lookup_chain_cache(chain_key); 2420 if (chain) { 2421 graph_unlock(); 2422 goto cache_hit; 2423 } 2424 2425 if (!add_chain_cache(curr, hlock, chain_key)) 2426 return 0; 2427 2428 return 1; 2429 } 2430 2431 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, 2432 struct held_lock *hlock, int chain_head, u64 chain_key) 2433 { 2434 /* 2435 * Trylock needs to maintain the stack of held locks, but it 2436 * does not add new dependencies, because trylock can be done 2437 * in any order. 2438 * 2439 * We look up the chain_key and do the O(N^2) check and update of 2440 * the dependencies only if this is a new dependency chain. 2441 * (If lookup_chain_cache_add() return with 1 it acquires 2442 * graph_lock for us) 2443 */ 2444 if (!hlock->trylock && hlock->check && 2445 lookup_chain_cache_add(curr, hlock, chain_key)) { 2446 /* 2447 * Check whether last held lock: 2448 * 2449 * - is irq-safe, if this lock is irq-unsafe 2450 * - is softirq-safe, if this lock is hardirq-unsafe 2451 * 2452 * And check whether the new lock's dependency graph 2453 * could lead back to the previous lock. 2454 * 2455 * any of these scenarios could lead to a deadlock. If 2456 * All validations 2457 */ 2458 int ret = check_deadlock(curr, hlock, lock, hlock->read); 2459 2460 if (!ret) 2461 return 0; 2462 /* 2463 * Mark recursive read, as we jump over it when 2464 * building dependencies (just like we jump over 2465 * trylock entries): 2466 */ 2467 if (ret == 2) 2468 hlock->read = 2; 2469 /* 2470 * Add dependency only if this lock is not the head 2471 * of the chain, and if it's not a secondary read-lock: 2472 */ 2473 if (!chain_head && ret != 2) { 2474 if (!check_prevs_add(curr, hlock)) 2475 return 0; 2476 } 2477 2478 graph_unlock(); 2479 } else { 2480 /* after lookup_chain_cache_add(): */ 2481 if (unlikely(!debug_locks)) 2482 return 0; 2483 } 2484 2485 return 1; 2486 } 2487 #else 2488 static inline int validate_chain(struct task_struct *curr, 2489 struct lockdep_map *lock, struct held_lock *hlock, 2490 int chain_head, u64 chain_key) 2491 { 2492 return 1; 2493 } 2494 #endif 2495 2496 /* 2497 * We are building curr_chain_key incrementally, so double-check 2498 * it from scratch, to make sure that it's done correctly: 2499 */ 2500 static void check_chain_key(struct task_struct *curr) 2501 { 2502 #ifdef CONFIG_DEBUG_LOCKDEP 2503 struct held_lock *hlock, *prev_hlock = NULL; 2504 unsigned int i; 2505 u64 chain_key = 0; 2506 2507 for (i = 0; i < curr->lockdep_depth; i++) { 2508 hlock = curr->held_locks + i; 2509 if (chain_key != hlock->prev_chain_key) { 2510 debug_locks_off(); 2511 /* 2512 * We got mighty confused, our chain keys don't match 2513 * with what we expect, someone trample on our task state? 2514 */ 2515 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", 2516 curr->lockdep_depth, i, 2517 (unsigned long long)chain_key, 2518 (unsigned long long)hlock->prev_chain_key); 2519 return; 2520 } 2521 /* 2522 * Whoops ran out of static storage again? 2523 */ 2524 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS)) 2525 return; 2526 2527 if (prev_hlock && (prev_hlock->irq_context != 2528 hlock->irq_context)) 2529 chain_key = 0; 2530 chain_key = iterate_chain_key(chain_key, hlock->class_idx); 2531 prev_hlock = hlock; 2532 } 2533 if (chain_key != curr->curr_chain_key) { 2534 debug_locks_off(); 2535 /* 2536 * More smoking hash instead of calculating it, damn see these 2537 * numbers float.. I bet that a pink elephant stepped on my memory. 2538 */ 2539 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", 2540 curr->lockdep_depth, i, 2541 (unsigned long long)chain_key, 2542 (unsigned long long)curr->curr_chain_key); 2543 } 2544 #endif 2545 } 2546 2547 static void 2548 print_usage_bug_scenario(struct held_lock *lock) 2549 { 2550 struct lock_class *class = hlock_class(lock); 2551 2552 printk(" Possible unsafe locking scenario:\n\n"); 2553 printk(" CPU0\n"); 2554 printk(" ----\n"); 2555 printk(" lock("); 2556 __print_lock_name(class); 2557 printk(KERN_CONT ");\n"); 2558 printk(" <Interrupt>\n"); 2559 printk(" lock("); 2560 __print_lock_name(class); 2561 printk(KERN_CONT ");\n"); 2562 printk("\n *** DEADLOCK ***\n\n"); 2563 } 2564 2565 static int 2566 print_usage_bug(struct task_struct *curr, struct held_lock *this, 2567 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit) 2568 { 2569 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2570 return 0; 2571 2572 pr_warn("\n"); 2573 pr_warn("================================\n"); 2574 pr_warn("WARNING: inconsistent lock state\n"); 2575 print_kernel_ident(); 2576 pr_warn("--------------------------------\n"); 2577 2578 pr_warn("inconsistent {%s} -> {%s} usage.\n", 2579 usage_str[prev_bit], usage_str[new_bit]); 2580 2581 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", 2582 curr->comm, task_pid_nr(curr), 2583 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, 2584 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, 2585 trace_hardirqs_enabled(curr), 2586 trace_softirqs_enabled(curr)); 2587 print_lock(this); 2588 2589 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]); 2590 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1); 2591 2592 print_irqtrace_events(curr); 2593 pr_warn("\nother info that might help us debug this:\n"); 2594 print_usage_bug_scenario(this); 2595 2596 lockdep_print_held_locks(curr); 2597 2598 pr_warn("\nstack backtrace:\n"); 2599 dump_stack(); 2600 2601 return 0; 2602 } 2603 2604 /* 2605 * Print out an error if an invalid bit is set: 2606 */ 2607 static inline int 2608 valid_state(struct task_struct *curr, struct held_lock *this, 2609 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) 2610 { 2611 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) 2612 return print_usage_bug(curr, this, bad_bit, new_bit); 2613 return 1; 2614 } 2615 2616 static int mark_lock(struct task_struct *curr, struct held_lock *this, 2617 enum lock_usage_bit new_bit); 2618 2619 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 2620 2621 /* 2622 * print irq inversion bug: 2623 */ 2624 static int 2625 print_irq_inversion_bug(struct task_struct *curr, 2626 struct lock_list *root, struct lock_list *other, 2627 struct held_lock *this, int forwards, 2628 const char *irqclass) 2629 { 2630 struct lock_list *entry = other; 2631 struct lock_list *middle = NULL; 2632 int depth; 2633 2634 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 2635 return 0; 2636 2637 pr_warn("\n"); 2638 pr_warn("========================================================\n"); 2639 pr_warn("WARNING: possible irq lock inversion dependency detected\n"); 2640 print_kernel_ident(); 2641 pr_warn("--------------------------------------------------------\n"); 2642 pr_warn("%s/%d just changed the state of lock:\n", 2643 curr->comm, task_pid_nr(curr)); 2644 print_lock(this); 2645 if (forwards) 2646 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass); 2647 else 2648 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); 2649 print_lock_name(other->class); 2650 pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n"); 2651 2652 pr_warn("\nother info that might help us debug this:\n"); 2653 2654 /* Find a middle lock (if one exists) */ 2655 depth = get_lock_depth(other); 2656 do { 2657 if (depth == 0 && (entry != root)) { 2658 pr_warn("lockdep:%s bad path found in chain graph\n", __func__); 2659 break; 2660 } 2661 middle = entry; 2662 entry = get_lock_parent(entry); 2663 depth--; 2664 } while (entry && entry != root && (depth >= 0)); 2665 if (forwards) 2666 print_irq_lock_scenario(root, other, 2667 middle ? middle->class : root->class, other->class); 2668 else 2669 print_irq_lock_scenario(other, root, 2670 middle ? middle->class : other->class, root->class); 2671 2672 lockdep_print_held_locks(curr); 2673 2674 pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); 2675 if (!save_trace(&root->trace)) 2676 return 0; 2677 print_shortest_lock_dependencies(other, root); 2678 2679 pr_warn("\nstack backtrace:\n"); 2680 dump_stack(); 2681 2682 return 0; 2683 } 2684 2685 /* 2686 * Prove that in the forwards-direction subgraph starting at <this> 2687 * there is no lock matching <mask>: 2688 */ 2689 static int 2690 check_usage_forwards(struct task_struct *curr, struct held_lock *this, 2691 enum lock_usage_bit bit, const char *irqclass) 2692 { 2693 int ret; 2694 struct lock_list root; 2695 struct lock_list *uninitialized_var(target_entry); 2696 2697 root.parent = NULL; 2698 root.class = hlock_class(this); 2699 ret = find_usage_forwards(&root, bit, &target_entry); 2700 if (ret < 0) 2701 return print_bfs_bug(ret); 2702 if (ret == 1) 2703 return ret; 2704 2705 return print_irq_inversion_bug(curr, &root, target_entry, 2706 this, 1, irqclass); 2707 } 2708 2709 /* 2710 * Prove that in the backwards-direction subgraph starting at <this> 2711 * there is no lock matching <mask>: 2712 */ 2713 static int 2714 check_usage_backwards(struct task_struct *curr, struct held_lock *this, 2715 enum lock_usage_bit bit, const char *irqclass) 2716 { 2717 int ret; 2718 struct lock_list root; 2719 struct lock_list *uninitialized_var(target_entry); 2720 2721 root.parent = NULL; 2722 root.class = hlock_class(this); 2723 ret = find_usage_backwards(&root, bit, &target_entry); 2724 if (ret < 0) 2725 return print_bfs_bug(ret); 2726 if (ret == 1) 2727 return ret; 2728 2729 return print_irq_inversion_bug(curr, &root, target_entry, 2730 this, 0, irqclass); 2731 } 2732 2733 void print_irqtrace_events(struct task_struct *curr) 2734 { 2735 printk("irq event stamp: %u\n", curr->irq_events); 2736 printk("hardirqs last enabled at (%u): [<%p>] %pS\n", 2737 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip, 2738 (void *)curr->hardirq_enable_ip); 2739 printk("hardirqs last disabled at (%u): [<%p>] %pS\n", 2740 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip, 2741 (void *)curr->hardirq_disable_ip); 2742 printk("softirqs last enabled at (%u): [<%p>] %pS\n", 2743 curr->softirq_enable_event, (void *)curr->softirq_enable_ip, 2744 (void *)curr->softirq_enable_ip); 2745 printk("softirqs last disabled at (%u): [<%p>] %pS\n", 2746 curr->softirq_disable_event, (void *)curr->softirq_disable_ip, 2747 (void *)curr->softirq_disable_ip); 2748 } 2749 2750 static int HARDIRQ_verbose(struct lock_class *class) 2751 { 2752 #if HARDIRQ_VERBOSE 2753 return class_filter(class); 2754 #endif 2755 return 0; 2756 } 2757 2758 static int SOFTIRQ_verbose(struct lock_class *class) 2759 { 2760 #if SOFTIRQ_VERBOSE 2761 return class_filter(class); 2762 #endif 2763 return 0; 2764 } 2765 2766 #define STRICT_READ_CHECKS 1 2767 2768 static int (*state_verbose_f[])(struct lock_class *class) = { 2769 #define LOCKDEP_STATE(__STATE) \ 2770 __STATE##_verbose, 2771 #include "lockdep_states.h" 2772 #undef LOCKDEP_STATE 2773 }; 2774 2775 static inline int state_verbose(enum lock_usage_bit bit, 2776 struct lock_class *class) 2777 { 2778 return state_verbose_f[bit >> 2](class); 2779 } 2780 2781 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *, 2782 enum lock_usage_bit bit, const char *name); 2783 2784 static int 2785 mark_lock_irq(struct task_struct *curr, struct held_lock *this, 2786 enum lock_usage_bit new_bit) 2787 { 2788 int excl_bit = exclusive_bit(new_bit); 2789 int read = new_bit & 1; 2790 int dir = new_bit & 2; 2791 2792 /* 2793 * mark USED_IN has to look forwards -- to ensure no dependency 2794 * has ENABLED state, which would allow recursion deadlocks. 2795 * 2796 * mark ENABLED has to look backwards -- to ensure no dependee 2797 * has USED_IN state, which, again, would allow recursion deadlocks. 2798 */ 2799 check_usage_f usage = dir ? 2800 check_usage_backwards : check_usage_forwards; 2801 2802 /* 2803 * Validate that this particular lock does not have conflicting 2804 * usage states. 2805 */ 2806 if (!valid_state(curr, this, new_bit, excl_bit)) 2807 return 0; 2808 2809 /* 2810 * Validate that the lock dependencies don't have conflicting usage 2811 * states. 2812 */ 2813 if ((!read || !dir || STRICT_READ_CHECKS) && 2814 !usage(curr, this, excl_bit, state_name(new_bit & ~1))) 2815 return 0; 2816 2817 /* 2818 * Check for read in write conflicts 2819 */ 2820 if (!read) { 2821 if (!valid_state(curr, this, new_bit, excl_bit + 1)) 2822 return 0; 2823 2824 if (STRICT_READ_CHECKS && 2825 !usage(curr, this, excl_bit + 1, 2826 state_name(new_bit + 1))) 2827 return 0; 2828 } 2829 2830 if (state_verbose(new_bit, hlock_class(this))) 2831 return 2; 2832 2833 return 1; 2834 } 2835 2836 enum mark_type { 2837 #define LOCKDEP_STATE(__STATE) __STATE, 2838 #include "lockdep_states.h" 2839 #undef LOCKDEP_STATE 2840 }; 2841 2842 /* 2843 * Mark all held locks with a usage bit: 2844 */ 2845 static int 2846 mark_held_locks(struct task_struct *curr, enum mark_type mark) 2847 { 2848 enum lock_usage_bit usage_bit; 2849 struct held_lock *hlock; 2850 int i; 2851 2852 for (i = 0; i < curr->lockdep_depth; i++) { 2853 hlock = curr->held_locks + i; 2854 2855 usage_bit = 2 + (mark << 2); /* ENABLED */ 2856 if (hlock->read) 2857 usage_bit += 1; /* READ */ 2858 2859 BUG_ON(usage_bit >= LOCK_USAGE_STATES); 2860 2861 if (!hlock->check) 2862 continue; 2863 2864 if (!mark_lock(curr, hlock, usage_bit)) 2865 return 0; 2866 } 2867 2868 return 1; 2869 } 2870 2871 /* 2872 * Hardirqs will be enabled: 2873 */ 2874 static void __trace_hardirqs_on_caller(unsigned long ip) 2875 { 2876 struct task_struct *curr = current; 2877 2878 /* we'll do an OFF -> ON transition: */ 2879 curr->hardirqs_enabled = 1; 2880 2881 /* 2882 * We are going to turn hardirqs on, so set the 2883 * usage bit for all held locks: 2884 */ 2885 if (!mark_held_locks(curr, HARDIRQ)) 2886 return; 2887 /* 2888 * If we have softirqs enabled, then set the usage 2889 * bit for all held locks. (disabled hardirqs prevented 2890 * this bit from being set before) 2891 */ 2892 if (curr->softirqs_enabled) 2893 if (!mark_held_locks(curr, SOFTIRQ)) 2894 return; 2895 2896 curr->hardirq_enable_ip = ip; 2897 curr->hardirq_enable_event = ++curr->irq_events; 2898 debug_atomic_inc(hardirqs_on_events); 2899 } 2900 2901 __visible void trace_hardirqs_on_caller(unsigned long ip) 2902 { 2903 time_hardirqs_on(CALLER_ADDR0, ip); 2904 2905 if (unlikely(!debug_locks || current->lockdep_recursion)) 2906 return; 2907 2908 if (unlikely(current->hardirqs_enabled)) { 2909 /* 2910 * Neither irq nor preemption are disabled here 2911 * so this is racy by nature but losing one hit 2912 * in a stat is not a big deal. 2913 */ 2914 __debug_atomic_inc(redundant_hardirqs_on); 2915 return; 2916 } 2917 2918 /* 2919 * We're enabling irqs and according to our state above irqs weren't 2920 * already enabled, yet we find the hardware thinks they are in fact 2921 * enabled.. someone messed up their IRQ state tracing. 2922 */ 2923 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2924 return; 2925 2926 /* 2927 * See the fine text that goes along with this variable definition. 2928 */ 2929 if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled))) 2930 return; 2931 2932 /* 2933 * Can't allow enabling interrupts while in an interrupt handler, 2934 * that's general bad form and such. Recursion, limited stack etc.. 2935 */ 2936 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) 2937 return; 2938 2939 current->lockdep_recursion = 1; 2940 __trace_hardirqs_on_caller(ip); 2941 current->lockdep_recursion = 0; 2942 } 2943 EXPORT_SYMBOL(trace_hardirqs_on_caller); 2944 2945 void trace_hardirqs_on(void) 2946 { 2947 trace_hardirqs_on_caller(CALLER_ADDR0); 2948 } 2949 EXPORT_SYMBOL(trace_hardirqs_on); 2950 2951 /* 2952 * Hardirqs were disabled: 2953 */ 2954 __visible void trace_hardirqs_off_caller(unsigned long ip) 2955 { 2956 struct task_struct *curr = current; 2957 2958 time_hardirqs_off(CALLER_ADDR0, ip); 2959 2960 if (unlikely(!debug_locks || current->lockdep_recursion)) 2961 return; 2962 2963 /* 2964 * So we're supposed to get called after you mask local IRQs, but for 2965 * some reason the hardware doesn't quite think you did a proper job. 2966 */ 2967 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2968 return; 2969 2970 if (curr->hardirqs_enabled) { 2971 /* 2972 * We have done an ON -> OFF transition: 2973 */ 2974 curr->hardirqs_enabled = 0; 2975 curr->hardirq_disable_ip = ip; 2976 curr->hardirq_disable_event = ++curr->irq_events; 2977 debug_atomic_inc(hardirqs_off_events); 2978 } else 2979 debug_atomic_inc(redundant_hardirqs_off); 2980 } 2981 EXPORT_SYMBOL(trace_hardirqs_off_caller); 2982 2983 void trace_hardirqs_off(void) 2984 { 2985 trace_hardirqs_off_caller(CALLER_ADDR0); 2986 } 2987 EXPORT_SYMBOL(trace_hardirqs_off); 2988 2989 /* 2990 * Softirqs will be enabled: 2991 */ 2992 void trace_softirqs_on(unsigned long ip) 2993 { 2994 struct task_struct *curr = current; 2995 2996 if (unlikely(!debug_locks || current->lockdep_recursion)) 2997 return; 2998 2999 /* 3000 * We fancy IRQs being disabled here, see softirq.c, avoids 3001 * funny state and nesting things. 3002 */ 3003 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3004 return; 3005 3006 if (curr->softirqs_enabled) { 3007 debug_atomic_inc(redundant_softirqs_on); 3008 return; 3009 } 3010 3011 current->lockdep_recursion = 1; 3012 /* 3013 * We'll do an OFF -> ON transition: 3014 */ 3015 curr->softirqs_enabled = 1; 3016 curr->softirq_enable_ip = ip; 3017 curr->softirq_enable_event = ++curr->irq_events; 3018 debug_atomic_inc(softirqs_on_events); 3019 /* 3020 * We are going to turn softirqs on, so set the 3021 * usage bit for all held locks, if hardirqs are 3022 * enabled too: 3023 */ 3024 if (curr->hardirqs_enabled) 3025 mark_held_locks(curr, SOFTIRQ); 3026 current->lockdep_recursion = 0; 3027 } 3028 3029 /* 3030 * Softirqs were disabled: 3031 */ 3032 void trace_softirqs_off(unsigned long ip) 3033 { 3034 struct task_struct *curr = current; 3035 3036 if (unlikely(!debug_locks || current->lockdep_recursion)) 3037 return; 3038 3039 /* 3040 * We fancy IRQs being disabled here, see softirq.c 3041 */ 3042 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3043 return; 3044 3045 if (curr->softirqs_enabled) { 3046 /* 3047 * We have done an ON -> OFF transition: 3048 */ 3049 curr->softirqs_enabled = 0; 3050 curr->softirq_disable_ip = ip; 3051 curr->softirq_disable_event = ++curr->irq_events; 3052 debug_atomic_inc(softirqs_off_events); 3053 /* 3054 * Whoops, we wanted softirqs off, so why aren't they? 3055 */ 3056 DEBUG_LOCKS_WARN_ON(!softirq_count()); 3057 } else 3058 debug_atomic_inc(redundant_softirqs_off); 3059 } 3060 3061 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) 3062 { 3063 /* 3064 * If non-trylock use in a hardirq or softirq context, then 3065 * mark the lock as used in these contexts: 3066 */ 3067 if (!hlock->trylock) { 3068 if (hlock->read) { 3069 if (curr->hardirq_context) 3070 if (!mark_lock(curr, hlock, 3071 LOCK_USED_IN_HARDIRQ_READ)) 3072 return 0; 3073 if (curr->softirq_context) 3074 if (!mark_lock(curr, hlock, 3075 LOCK_USED_IN_SOFTIRQ_READ)) 3076 return 0; 3077 } else { 3078 if (curr->hardirq_context) 3079 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) 3080 return 0; 3081 if (curr->softirq_context) 3082 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) 3083 return 0; 3084 } 3085 } 3086 if (!hlock->hardirqs_off) { 3087 if (hlock->read) { 3088 if (!mark_lock(curr, hlock, 3089 LOCK_ENABLED_HARDIRQ_READ)) 3090 return 0; 3091 if (curr->softirqs_enabled) 3092 if (!mark_lock(curr, hlock, 3093 LOCK_ENABLED_SOFTIRQ_READ)) 3094 return 0; 3095 } else { 3096 if (!mark_lock(curr, hlock, 3097 LOCK_ENABLED_HARDIRQ)) 3098 return 0; 3099 if (curr->softirqs_enabled) 3100 if (!mark_lock(curr, hlock, 3101 LOCK_ENABLED_SOFTIRQ)) 3102 return 0; 3103 } 3104 } 3105 3106 return 1; 3107 } 3108 3109 static inline unsigned int task_irq_context(struct task_struct *task) 3110 { 3111 return 2 * !!task->hardirq_context + !!task->softirq_context; 3112 } 3113 3114 static int separate_irq_context(struct task_struct *curr, 3115 struct held_lock *hlock) 3116 { 3117 unsigned int depth = curr->lockdep_depth; 3118 3119 /* 3120 * Keep track of points where we cross into an interrupt context: 3121 */ 3122 if (depth) { 3123 struct held_lock *prev_hlock; 3124 3125 prev_hlock = curr->held_locks + depth-1; 3126 /* 3127 * If we cross into another context, reset the 3128 * hash key (this also prevents the checking and the 3129 * adding of the dependency to 'prev'): 3130 */ 3131 if (prev_hlock->irq_context != hlock->irq_context) 3132 return 1; 3133 } 3134 return 0; 3135 } 3136 3137 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 3138 3139 static inline 3140 int mark_lock_irq(struct task_struct *curr, struct held_lock *this, 3141 enum lock_usage_bit new_bit) 3142 { 3143 WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */ 3144 return 1; 3145 } 3146 3147 static inline int mark_irqflags(struct task_struct *curr, 3148 struct held_lock *hlock) 3149 { 3150 return 1; 3151 } 3152 3153 static inline unsigned int task_irq_context(struct task_struct *task) 3154 { 3155 return 0; 3156 } 3157 3158 static inline int separate_irq_context(struct task_struct *curr, 3159 struct held_lock *hlock) 3160 { 3161 return 0; 3162 } 3163 3164 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 3165 3166 /* 3167 * Mark a lock with a usage bit, and validate the state transition: 3168 */ 3169 static int mark_lock(struct task_struct *curr, struct held_lock *this, 3170 enum lock_usage_bit new_bit) 3171 { 3172 unsigned int new_mask = 1 << new_bit, ret = 1; 3173 3174 /* 3175 * If already set then do not dirty the cacheline, 3176 * nor do any checks: 3177 */ 3178 if (likely(hlock_class(this)->usage_mask & new_mask)) 3179 return 1; 3180 3181 if (!graph_lock()) 3182 return 0; 3183 /* 3184 * Make sure we didn't race: 3185 */ 3186 if (unlikely(hlock_class(this)->usage_mask & new_mask)) { 3187 graph_unlock(); 3188 return 1; 3189 } 3190 3191 hlock_class(this)->usage_mask |= new_mask; 3192 3193 if (!save_trace(hlock_class(this)->usage_traces + new_bit)) 3194 return 0; 3195 3196 switch (new_bit) { 3197 #define LOCKDEP_STATE(__STATE) \ 3198 case LOCK_USED_IN_##__STATE: \ 3199 case LOCK_USED_IN_##__STATE##_READ: \ 3200 case LOCK_ENABLED_##__STATE: \ 3201 case LOCK_ENABLED_##__STATE##_READ: 3202 #include "lockdep_states.h" 3203 #undef LOCKDEP_STATE 3204 ret = mark_lock_irq(curr, this, new_bit); 3205 if (!ret) 3206 return 0; 3207 break; 3208 case LOCK_USED: 3209 debug_atomic_dec(nr_unused_locks); 3210 break; 3211 default: 3212 if (!debug_locks_off_graph_unlock()) 3213 return 0; 3214 WARN_ON(1); 3215 return 0; 3216 } 3217 3218 graph_unlock(); 3219 3220 /* 3221 * We must printk outside of the graph_lock: 3222 */ 3223 if (ret == 2) { 3224 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]); 3225 print_lock(this); 3226 print_irqtrace_events(curr); 3227 dump_stack(); 3228 } 3229 3230 return ret; 3231 } 3232 3233 /* 3234 * Initialize a lock instance's lock-class mapping info: 3235 */ 3236 static void __lockdep_init_map(struct lockdep_map *lock, const char *name, 3237 struct lock_class_key *key, int subclass) 3238 { 3239 int i; 3240 3241 kmemcheck_mark_initialized(lock, sizeof(*lock)); 3242 3243 for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) 3244 lock->class_cache[i] = NULL; 3245 3246 #ifdef CONFIG_LOCK_STAT 3247 lock->cpu = raw_smp_processor_id(); 3248 #endif 3249 3250 /* 3251 * Can't be having no nameless bastards around this place! 3252 */ 3253 if (DEBUG_LOCKS_WARN_ON(!name)) { 3254 lock->name = "NULL"; 3255 return; 3256 } 3257 3258 lock->name = name; 3259 3260 /* 3261 * No key, no joy, we need to hash something. 3262 */ 3263 if (DEBUG_LOCKS_WARN_ON(!key)) 3264 return; 3265 /* 3266 * Sanity check, the lock-class key must be persistent: 3267 */ 3268 if (!static_obj(key)) { 3269 printk("BUG: key %p not in .data!\n", key); 3270 /* 3271 * What it says above ^^^^^, I suggest you read it. 3272 */ 3273 DEBUG_LOCKS_WARN_ON(1); 3274 return; 3275 } 3276 lock->key = key; 3277 3278 if (unlikely(!debug_locks)) 3279 return; 3280 3281 if (subclass) { 3282 unsigned long flags; 3283 3284 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) 3285 return; 3286 3287 raw_local_irq_save(flags); 3288 current->lockdep_recursion = 1; 3289 register_lock_class(lock, subclass, 1); 3290 current->lockdep_recursion = 0; 3291 raw_local_irq_restore(flags); 3292 } 3293 } 3294 3295 void lockdep_init_map(struct lockdep_map *lock, const char *name, 3296 struct lock_class_key *key, int subclass) 3297 { 3298 cross_init(lock, 0); 3299 __lockdep_init_map(lock, name, key, subclass); 3300 } 3301 EXPORT_SYMBOL_GPL(lockdep_init_map); 3302 3303 #ifdef CONFIG_LOCKDEP_CROSSRELEASE 3304 void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name, 3305 struct lock_class_key *key, int subclass) 3306 { 3307 cross_init(lock, 1); 3308 __lockdep_init_map(lock, name, key, subclass); 3309 } 3310 EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock); 3311 #endif 3312 3313 struct lock_class_key __lockdep_no_validate__; 3314 EXPORT_SYMBOL_GPL(__lockdep_no_validate__); 3315 3316 static int 3317 print_lock_nested_lock_not_held(struct task_struct *curr, 3318 struct held_lock *hlock, 3319 unsigned long ip) 3320 { 3321 if (!debug_locks_off()) 3322 return 0; 3323 if (debug_locks_silent) 3324 return 0; 3325 3326 pr_warn("\n"); 3327 pr_warn("==================================\n"); 3328 pr_warn("WARNING: Nested lock was not taken\n"); 3329 print_kernel_ident(); 3330 pr_warn("----------------------------------\n"); 3331 3332 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); 3333 print_lock(hlock); 3334 3335 pr_warn("\nbut this task is not holding:\n"); 3336 pr_warn("%s\n", hlock->nest_lock->name); 3337 3338 pr_warn("\nstack backtrace:\n"); 3339 dump_stack(); 3340 3341 pr_warn("\nother info that might help us debug this:\n"); 3342 lockdep_print_held_locks(curr); 3343 3344 pr_warn("\nstack backtrace:\n"); 3345 dump_stack(); 3346 3347 return 0; 3348 } 3349 3350 static int __lock_is_held(struct lockdep_map *lock, int read); 3351 3352 /* 3353 * This gets called for every mutex_lock*()/spin_lock*() operation. 3354 * We maintain the dependency maps and validate the locking attempt: 3355 */ 3356 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, 3357 int trylock, int read, int check, int hardirqs_off, 3358 struct lockdep_map *nest_lock, unsigned long ip, 3359 int references, int pin_count) 3360 { 3361 struct task_struct *curr = current; 3362 struct lock_class *class = NULL; 3363 struct held_lock *hlock; 3364 unsigned int depth; 3365 int chain_head = 0; 3366 int class_idx; 3367 u64 chain_key; 3368 int ret; 3369 3370 if (unlikely(!debug_locks)) 3371 return 0; 3372 3373 /* 3374 * Lockdep should run with IRQs disabled, otherwise we could 3375 * get an interrupt which would want to take locks, which would 3376 * end up in lockdep and have you got a head-ache already? 3377 */ 3378 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3379 return 0; 3380 3381 if (!prove_locking || lock->key == &__lockdep_no_validate__) 3382 check = 0; 3383 3384 if (subclass < NR_LOCKDEP_CACHING_CLASSES) 3385 class = lock->class_cache[subclass]; 3386 /* 3387 * Not cached? 3388 */ 3389 if (unlikely(!class)) { 3390 class = register_lock_class(lock, subclass, 0); 3391 if (!class) 3392 return 0; 3393 } 3394 atomic_inc((atomic_t *)&class->ops); 3395 if (very_verbose(class)) { 3396 printk("\nacquire class [%p] %s", class->key, class->name); 3397 if (class->name_version > 1) 3398 printk(KERN_CONT "#%d", class->name_version); 3399 printk(KERN_CONT "\n"); 3400 dump_stack(); 3401 } 3402 3403 /* 3404 * Add the lock to the list of currently held locks. 3405 * (we dont increase the depth just yet, up until the 3406 * dependency checks are done) 3407 */ 3408 depth = curr->lockdep_depth; 3409 /* 3410 * Ran out of static storage for our per-task lock stack again have we? 3411 */ 3412 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) 3413 return 0; 3414 3415 class_idx = class - lock_classes + 1; 3416 3417 /* TODO: nest_lock is not implemented for crosslock yet. */ 3418 if (depth && !cross_lock(lock)) { 3419 hlock = curr->held_locks + depth - 1; 3420 if (hlock->class_idx == class_idx && nest_lock) { 3421 if (hlock->references) { 3422 /* 3423 * Check: unsigned int references:12, overflow. 3424 */ 3425 if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1)) 3426 return 0; 3427 3428 hlock->references++; 3429 } else { 3430 hlock->references = 2; 3431 } 3432 3433 return 1; 3434 } 3435 } 3436 3437 hlock = curr->held_locks + depth; 3438 /* 3439 * Plain impossible, we just registered it and checked it weren't no 3440 * NULL like.. I bet this mushroom I ate was good! 3441 */ 3442 if (DEBUG_LOCKS_WARN_ON(!class)) 3443 return 0; 3444 hlock->class_idx = class_idx; 3445 hlock->acquire_ip = ip; 3446 hlock->instance = lock; 3447 hlock->nest_lock = nest_lock; 3448 hlock->irq_context = task_irq_context(curr); 3449 hlock->trylock = trylock; 3450 hlock->read = read; 3451 hlock->check = check; 3452 hlock->hardirqs_off = !!hardirqs_off; 3453 hlock->references = references; 3454 #ifdef CONFIG_LOCK_STAT 3455 hlock->waittime_stamp = 0; 3456 hlock->holdtime_stamp = lockstat_clock(); 3457 #endif 3458 hlock->pin_count = pin_count; 3459 3460 if (check && !mark_irqflags(curr, hlock)) 3461 return 0; 3462 3463 /* mark it as used: */ 3464 if (!mark_lock(curr, hlock, LOCK_USED)) 3465 return 0; 3466 3467 /* 3468 * Calculate the chain hash: it's the combined hash of all the 3469 * lock keys along the dependency chain. We save the hash value 3470 * at every step so that we can get the current hash easily 3471 * after unlock. The chain hash is then used to cache dependency 3472 * results. 3473 * 3474 * The 'key ID' is what is the most compact key value to drive 3475 * the hash, not class->key. 3476 */ 3477 /* 3478 * Whoops, we did it again.. ran straight out of our static allocation. 3479 */ 3480 if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS)) 3481 return 0; 3482 3483 chain_key = curr->curr_chain_key; 3484 if (!depth) { 3485 /* 3486 * How can we have a chain hash when we ain't got no keys?! 3487 */ 3488 if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) 3489 return 0; 3490 chain_head = 1; 3491 } 3492 3493 hlock->prev_chain_key = chain_key; 3494 if (separate_irq_context(curr, hlock)) { 3495 chain_key = 0; 3496 chain_head = 1; 3497 } 3498 chain_key = iterate_chain_key(chain_key, class_idx); 3499 3500 if (nest_lock && !__lock_is_held(nest_lock, -1)) 3501 return print_lock_nested_lock_not_held(curr, hlock, ip); 3502 3503 if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) 3504 return 0; 3505 3506 ret = lock_acquire_crosslock(hlock); 3507 /* 3508 * 2 means normal acquire operations are needed. Otherwise, it's 3509 * ok just to return with '0:fail, 1:success'. 3510 */ 3511 if (ret != 2) 3512 return ret; 3513 3514 curr->curr_chain_key = chain_key; 3515 curr->lockdep_depth++; 3516 check_chain_key(curr); 3517 #ifdef CONFIG_DEBUG_LOCKDEP 3518 if (unlikely(!debug_locks)) 3519 return 0; 3520 #endif 3521 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { 3522 debug_locks_off(); 3523 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!"); 3524 printk(KERN_DEBUG "depth: %i max: %lu!\n", 3525 curr->lockdep_depth, MAX_LOCK_DEPTH); 3526 3527 lockdep_print_held_locks(current); 3528 debug_show_all_locks(); 3529 dump_stack(); 3530 3531 return 0; 3532 } 3533 3534 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) 3535 max_lockdep_depth = curr->lockdep_depth; 3536 3537 return 1; 3538 } 3539 3540 static int 3541 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock, 3542 unsigned long ip) 3543 { 3544 if (!debug_locks_off()) 3545 return 0; 3546 if (debug_locks_silent) 3547 return 0; 3548 3549 pr_warn("\n"); 3550 pr_warn("=====================================\n"); 3551 pr_warn("WARNING: bad unlock balance detected!\n"); 3552 print_kernel_ident(); 3553 pr_warn("-------------------------------------\n"); 3554 pr_warn("%s/%d is trying to release lock (", 3555 curr->comm, task_pid_nr(curr)); 3556 print_lockdep_cache(lock); 3557 pr_cont(") at:\n"); 3558 print_ip_sym(ip); 3559 pr_warn("but there are no more locks to release!\n"); 3560 pr_warn("\nother info that might help us debug this:\n"); 3561 lockdep_print_held_locks(curr); 3562 3563 pr_warn("\nstack backtrace:\n"); 3564 dump_stack(); 3565 3566 return 0; 3567 } 3568 3569 static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) 3570 { 3571 if (hlock->instance == lock) 3572 return 1; 3573 3574 if (hlock->references) { 3575 struct lock_class *class = lock->class_cache[0]; 3576 3577 if (!class) 3578 class = look_up_lock_class(lock, 0); 3579 3580 /* 3581 * If look_up_lock_class() failed to find a class, we're trying 3582 * to test if we hold a lock that has never yet been acquired. 3583 * Clearly if the lock hasn't been acquired _ever_, we're not 3584 * holding it either, so report failure. 3585 */ 3586 if (IS_ERR_OR_NULL(class)) 3587 return 0; 3588 3589 /* 3590 * References, but not a lock we're actually ref-counting? 3591 * State got messed up, follow the sites that change ->references 3592 * and try to make sense of it. 3593 */ 3594 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) 3595 return 0; 3596 3597 if (hlock->class_idx == class - lock_classes + 1) 3598 return 1; 3599 } 3600 3601 return 0; 3602 } 3603 3604 /* @depth must not be zero */ 3605 static struct held_lock *find_held_lock(struct task_struct *curr, 3606 struct lockdep_map *lock, 3607 unsigned int depth, int *idx) 3608 { 3609 struct held_lock *ret, *hlock, *prev_hlock; 3610 int i; 3611 3612 i = depth - 1; 3613 hlock = curr->held_locks + i; 3614 ret = hlock; 3615 if (match_held_lock(hlock, lock)) 3616 goto out; 3617 3618 ret = NULL; 3619 for (i--, prev_hlock = hlock--; 3620 i >= 0; 3621 i--, prev_hlock = hlock--) { 3622 /* 3623 * We must not cross into another context: 3624 */ 3625 if (prev_hlock->irq_context != hlock->irq_context) { 3626 ret = NULL; 3627 break; 3628 } 3629 if (match_held_lock(hlock, lock)) { 3630 ret = hlock; 3631 break; 3632 } 3633 } 3634 3635 out: 3636 *idx = i; 3637 return ret; 3638 } 3639 3640 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, 3641 int idx) 3642 { 3643 struct held_lock *hlock; 3644 3645 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { 3646 if (!__lock_acquire(hlock->instance, 3647 hlock_class(hlock)->subclass, 3648 hlock->trylock, 3649 hlock->read, hlock->check, 3650 hlock->hardirqs_off, 3651 hlock->nest_lock, hlock->acquire_ip, 3652 hlock->references, hlock->pin_count)) 3653 return 1; 3654 } 3655 return 0; 3656 } 3657 3658 static int 3659 __lock_set_class(struct lockdep_map *lock, const char *name, 3660 struct lock_class_key *key, unsigned int subclass, 3661 unsigned long ip) 3662 { 3663 struct task_struct *curr = current; 3664 struct held_lock *hlock; 3665 struct lock_class *class; 3666 unsigned int depth; 3667 int i; 3668 3669 depth = curr->lockdep_depth; 3670 /* 3671 * This function is about (re)setting the class of a held lock, 3672 * yet we're not actually holding any locks. Naughty user! 3673 */ 3674 if (DEBUG_LOCKS_WARN_ON(!depth)) 3675 return 0; 3676 3677 hlock = find_held_lock(curr, lock, depth, &i); 3678 if (!hlock) 3679 return print_unlock_imbalance_bug(curr, lock, ip); 3680 3681 lockdep_init_map(lock, name, key, 0); 3682 class = register_lock_class(lock, subclass, 0); 3683 hlock->class_idx = class - lock_classes + 1; 3684 3685 curr->lockdep_depth = i; 3686 curr->curr_chain_key = hlock->prev_chain_key; 3687 3688 if (reacquire_held_locks(curr, depth, i)) 3689 return 0; 3690 3691 /* 3692 * I took it apart and put it back together again, except now I have 3693 * these 'spare' parts.. where shall I put them. 3694 */ 3695 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) 3696 return 0; 3697 return 1; 3698 } 3699 3700 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip) 3701 { 3702 struct task_struct *curr = current; 3703 struct held_lock *hlock; 3704 unsigned int depth; 3705 int i; 3706 3707 depth = curr->lockdep_depth; 3708 /* 3709 * This function is about (re)setting the class of a held lock, 3710 * yet we're not actually holding any locks. Naughty user! 3711 */ 3712 if (DEBUG_LOCKS_WARN_ON(!depth)) 3713 return 0; 3714 3715 hlock = find_held_lock(curr, lock, depth, &i); 3716 if (!hlock) 3717 return print_unlock_imbalance_bug(curr, lock, ip); 3718 3719 curr->lockdep_depth = i; 3720 curr->curr_chain_key = hlock->prev_chain_key; 3721 3722 WARN(hlock->read, "downgrading a read lock"); 3723 hlock->read = 1; 3724 hlock->acquire_ip = ip; 3725 3726 if (reacquire_held_locks(curr, depth, i)) 3727 return 0; 3728 3729 /* 3730 * I took it apart and put it back together again, except now I have 3731 * these 'spare' parts.. where shall I put them. 3732 */ 3733 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) 3734 return 0; 3735 return 1; 3736 } 3737 3738 /* 3739 * Remove the lock to the list of currently held locks - this gets 3740 * called on mutex_unlock()/spin_unlock*() (or on a failed 3741 * mutex_lock_interruptible()). 3742 * 3743 * @nested is an hysterical artifact, needs a tree wide cleanup. 3744 */ 3745 static int 3746 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) 3747 { 3748 struct task_struct *curr = current; 3749 struct held_lock *hlock; 3750 unsigned int depth; 3751 int ret, i; 3752 3753 if (unlikely(!debug_locks)) 3754 return 0; 3755 3756 ret = lock_release_crosslock(lock); 3757 /* 3758 * 2 means normal release operations are needed. Otherwise, it's 3759 * ok just to return with '0:fail, 1:success'. 3760 */ 3761 if (ret != 2) 3762 return ret; 3763 3764 depth = curr->lockdep_depth; 3765 /* 3766 * So we're all set to release this lock.. wait what lock? We don't 3767 * own any locks, you've been drinking again? 3768 */ 3769 if (DEBUG_LOCKS_WARN_ON(depth <= 0)) 3770 return print_unlock_imbalance_bug(curr, lock, ip); 3771 3772 /* 3773 * Check whether the lock exists in the current stack 3774 * of held locks: 3775 */ 3776 hlock = find_held_lock(curr, lock, depth, &i); 3777 if (!hlock) 3778 return print_unlock_imbalance_bug(curr, lock, ip); 3779 3780 if (hlock->instance == lock) 3781 lock_release_holdtime(hlock); 3782 3783 WARN(hlock->pin_count, "releasing a pinned lock\n"); 3784 3785 if (hlock->references) { 3786 hlock->references--; 3787 if (hlock->references) { 3788 /* 3789 * We had, and after removing one, still have 3790 * references, the current lock stack is still 3791 * valid. We're done! 3792 */ 3793 return 1; 3794 } 3795 } 3796 3797 /* 3798 * We have the right lock to unlock, 'hlock' points to it. 3799 * Now we remove it from the stack, and add back the other 3800 * entries (if any), recalculating the hash along the way: 3801 */ 3802 3803 curr->lockdep_depth = i; 3804 curr->curr_chain_key = hlock->prev_chain_key; 3805 3806 if (reacquire_held_locks(curr, depth, i + 1)) 3807 return 0; 3808 3809 /* 3810 * We had N bottles of beer on the wall, we drank one, but now 3811 * there's not N-1 bottles of beer left on the wall... 3812 */ 3813 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) 3814 return 0; 3815 3816 return 1; 3817 } 3818 3819 static int __lock_is_held(struct lockdep_map *lock, int read) 3820 { 3821 struct task_struct *curr = current; 3822 int i; 3823 3824 for (i = 0; i < curr->lockdep_depth; i++) { 3825 struct held_lock *hlock = curr->held_locks + i; 3826 3827 if (match_held_lock(hlock, lock)) { 3828 if (read == -1 || hlock->read == read) 3829 return 1; 3830 3831 return 0; 3832 } 3833 } 3834 3835 return 0; 3836 } 3837 3838 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock) 3839 { 3840 struct pin_cookie cookie = NIL_COOKIE; 3841 struct task_struct *curr = current; 3842 int i; 3843 3844 if (unlikely(!debug_locks)) 3845 return cookie; 3846 3847 for (i = 0; i < curr->lockdep_depth; i++) { 3848 struct held_lock *hlock = curr->held_locks + i; 3849 3850 if (match_held_lock(hlock, lock)) { 3851 /* 3852 * Grab 16bits of randomness; this is sufficient to not 3853 * be guessable and still allows some pin nesting in 3854 * our u32 pin_count. 3855 */ 3856 cookie.val = 1 + (prandom_u32() >> 16); 3857 hlock->pin_count += cookie.val; 3858 return cookie; 3859 } 3860 } 3861 3862 WARN(1, "pinning an unheld lock\n"); 3863 return cookie; 3864 } 3865 3866 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 3867 { 3868 struct task_struct *curr = current; 3869 int i; 3870 3871 if (unlikely(!debug_locks)) 3872 return; 3873 3874 for (i = 0; i < curr->lockdep_depth; i++) { 3875 struct held_lock *hlock = curr->held_locks + i; 3876 3877 if (match_held_lock(hlock, lock)) { 3878 hlock->pin_count += cookie.val; 3879 return; 3880 } 3881 } 3882 3883 WARN(1, "pinning an unheld lock\n"); 3884 } 3885 3886 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 3887 { 3888 struct task_struct *curr = current; 3889 int i; 3890 3891 if (unlikely(!debug_locks)) 3892 return; 3893 3894 for (i = 0; i < curr->lockdep_depth; i++) { 3895 struct held_lock *hlock = curr->held_locks + i; 3896 3897 if (match_held_lock(hlock, lock)) { 3898 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n")) 3899 return; 3900 3901 hlock->pin_count -= cookie.val; 3902 3903 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n")) 3904 hlock->pin_count = 0; 3905 3906 return; 3907 } 3908 } 3909 3910 WARN(1, "unpinning an unheld lock\n"); 3911 } 3912 3913 /* 3914 * Check whether we follow the irq-flags state precisely: 3915 */ 3916 static void check_flags(unsigned long flags) 3917 { 3918 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \ 3919 defined(CONFIG_TRACE_IRQFLAGS) 3920 if (!debug_locks) 3921 return; 3922 3923 if (irqs_disabled_flags(flags)) { 3924 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { 3925 printk("possible reason: unannotated irqs-off.\n"); 3926 } 3927 } else { 3928 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { 3929 printk("possible reason: unannotated irqs-on.\n"); 3930 } 3931 } 3932 3933 /* 3934 * We dont accurately track softirq state in e.g. 3935 * hardirq contexts (such as on 4KSTACKS), so only 3936 * check if not in hardirq contexts: 3937 */ 3938 if (!hardirq_count()) { 3939 if (softirq_count()) { 3940 /* like the above, but with softirqs */ 3941 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); 3942 } else { 3943 /* lick the above, does it taste good? */ 3944 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); 3945 } 3946 } 3947 3948 if (!debug_locks) 3949 print_irqtrace_events(current); 3950 #endif 3951 } 3952 3953 void lock_set_class(struct lockdep_map *lock, const char *name, 3954 struct lock_class_key *key, unsigned int subclass, 3955 unsigned long ip) 3956 { 3957 unsigned long flags; 3958 3959 if (unlikely(current->lockdep_recursion)) 3960 return; 3961 3962 raw_local_irq_save(flags); 3963 current->lockdep_recursion = 1; 3964 check_flags(flags); 3965 if (__lock_set_class(lock, name, key, subclass, ip)) 3966 check_chain_key(current); 3967 current->lockdep_recursion = 0; 3968 raw_local_irq_restore(flags); 3969 } 3970 EXPORT_SYMBOL_GPL(lock_set_class); 3971 3972 void lock_downgrade(struct lockdep_map *lock, unsigned long ip) 3973 { 3974 unsigned long flags; 3975 3976 if (unlikely(current->lockdep_recursion)) 3977 return; 3978 3979 raw_local_irq_save(flags); 3980 current->lockdep_recursion = 1; 3981 check_flags(flags); 3982 if (__lock_downgrade(lock, ip)) 3983 check_chain_key(current); 3984 current->lockdep_recursion = 0; 3985 raw_local_irq_restore(flags); 3986 } 3987 EXPORT_SYMBOL_GPL(lock_downgrade); 3988 3989 /* 3990 * We are not always called with irqs disabled - do that here, 3991 * and also avoid lockdep recursion: 3992 */ 3993 void lock_acquire(struct lockdep_map *lock, unsigned int subclass, 3994 int trylock, int read, int check, 3995 struct lockdep_map *nest_lock, unsigned long ip) 3996 { 3997 unsigned long flags; 3998 3999 if (unlikely(current->lockdep_recursion)) 4000 return; 4001 4002 raw_local_irq_save(flags); 4003 check_flags(flags); 4004 4005 current->lockdep_recursion = 1; 4006 trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); 4007 __lock_acquire(lock, subclass, trylock, read, check, 4008 irqs_disabled_flags(flags), nest_lock, ip, 0, 0); 4009 current->lockdep_recursion = 0; 4010 raw_local_irq_restore(flags); 4011 } 4012 EXPORT_SYMBOL_GPL(lock_acquire); 4013 4014 void lock_release(struct lockdep_map *lock, int nested, 4015 unsigned long ip) 4016 { 4017 unsigned long flags; 4018 4019 if (unlikely(current->lockdep_recursion)) 4020 return; 4021 4022 raw_local_irq_save(flags); 4023 check_flags(flags); 4024 current->lockdep_recursion = 1; 4025 trace_lock_release(lock, ip); 4026 if (__lock_release(lock, nested, ip)) 4027 check_chain_key(current); 4028 current->lockdep_recursion = 0; 4029 raw_local_irq_restore(flags); 4030 } 4031 EXPORT_SYMBOL_GPL(lock_release); 4032 4033 int lock_is_held_type(struct lockdep_map *lock, int read) 4034 { 4035 unsigned long flags; 4036 int ret = 0; 4037 4038 if (unlikely(current->lockdep_recursion)) 4039 return 1; /* avoid false negative lockdep_assert_held() */ 4040 4041 raw_local_irq_save(flags); 4042 check_flags(flags); 4043 4044 current->lockdep_recursion = 1; 4045 ret = __lock_is_held(lock, read); 4046 current->lockdep_recursion = 0; 4047 raw_local_irq_restore(flags); 4048 4049 return ret; 4050 } 4051 EXPORT_SYMBOL_GPL(lock_is_held_type); 4052 4053 struct pin_cookie lock_pin_lock(struct lockdep_map *lock) 4054 { 4055 struct pin_cookie cookie = NIL_COOKIE; 4056 unsigned long flags; 4057 4058 if (unlikely(current->lockdep_recursion)) 4059 return cookie; 4060 4061 raw_local_irq_save(flags); 4062 check_flags(flags); 4063 4064 current->lockdep_recursion = 1; 4065 cookie = __lock_pin_lock(lock); 4066 current->lockdep_recursion = 0; 4067 raw_local_irq_restore(flags); 4068 4069 return cookie; 4070 } 4071 EXPORT_SYMBOL_GPL(lock_pin_lock); 4072 4073 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 4074 { 4075 unsigned long flags; 4076 4077 if (unlikely(current->lockdep_recursion)) 4078 return; 4079 4080 raw_local_irq_save(flags); 4081 check_flags(flags); 4082 4083 current->lockdep_recursion = 1; 4084 __lock_repin_lock(lock, cookie); 4085 current->lockdep_recursion = 0; 4086 raw_local_irq_restore(flags); 4087 } 4088 EXPORT_SYMBOL_GPL(lock_repin_lock); 4089 4090 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie) 4091 { 4092 unsigned long flags; 4093 4094 if (unlikely(current->lockdep_recursion)) 4095 return; 4096 4097 raw_local_irq_save(flags); 4098 check_flags(flags); 4099 4100 current->lockdep_recursion = 1; 4101 __lock_unpin_lock(lock, cookie); 4102 current->lockdep_recursion = 0; 4103 raw_local_irq_restore(flags); 4104 } 4105 EXPORT_SYMBOL_GPL(lock_unpin_lock); 4106 4107 #ifdef CONFIG_LOCK_STAT 4108 static int 4109 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, 4110 unsigned long ip) 4111 { 4112 if (!debug_locks_off()) 4113 return 0; 4114 if (debug_locks_silent) 4115 return 0; 4116 4117 pr_warn("\n"); 4118 pr_warn("=================================\n"); 4119 pr_warn("WARNING: bad contention detected!\n"); 4120 print_kernel_ident(); 4121 pr_warn("---------------------------------\n"); 4122 pr_warn("%s/%d is trying to contend lock (", 4123 curr->comm, task_pid_nr(curr)); 4124 print_lockdep_cache(lock); 4125 pr_cont(") at:\n"); 4126 print_ip_sym(ip); 4127 pr_warn("but there are no locks held!\n"); 4128 pr_warn("\nother info that might help us debug this:\n"); 4129 lockdep_print_held_locks(curr); 4130 4131 pr_warn("\nstack backtrace:\n"); 4132 dump_stack(); 4133 4134 return 0; 4135 } 4136 4137 static void 4138 __lock_contended(struct lockdep_map *lock, unsigned long ip) 4139 { 4140 struct task_struct *curr = current; 4141 struct held_lock *hlock; 4142 struct lock_class_stats *stats; 4143 unsigned int depth; 4144 int i, contention_point, contending_point; 4145 4146 depth = curr->lockdep_depth; 4147 /* 4148 * Whee, we contended on this lock, except it seems we're not 4149 * actually trying to acquire anything much at all.. 4150 */ 4151 if (DEBUG_LOCKS_WARN_ON(!depth)) 4152 return; 4153 4154 hlock = find_held_lock(curr, lock, depth, &i); 4155 if (!hlock) { 4156 print_lock_contention_bug(curr, lock, ip); 4157 return; 4158 } 4159 4160 if (hlock->instance != lock) 4161 return; 4162 4163 hlock->waittime_stamp = lockstat_clock(); 4164 4165 contention_point = lock_point(hlock_class(hlock)->contention_point, ip); 4166 contending_point = lock_point(hlock_class(hlock)->contending_point, 4167 lock->ip); 4168 4169 stats = get_lock_stats(hlock_class(hlock)); 4170 if (contention_point < LOCKSTAT_POINTS) 4171 stats->contention_point[contention_point]++; 4172 if (contending_point < LOCKSTAT_POINTS) 4173 stats->contending_point[contending_point]++; 4174 if (lock->cpu != smp_processor_id()) 4175 stats->bounces[bounce_contended + !!hlock->read]++; 4176 put_lock_stats(stats); 4177 } 4178 4179 static void 4180 __lock_acquired(struct lockdep_map *lock, unsigned long ip) 4181 { 4182 struct task_struct *curr = current; 4183 struct held_lock *hlock; 4184 struct lock_class_stats *stats; 4185 unsigned int depth; 4186 u64 now, waittime = 0; 4187 int i, cpu; 4188 4189 depth = curr->lockdep_depth; 4190 /* 4191 * Yay, we acquired ownership of this lock we didn't try to 4192 * acquire, how the heck did that happen? 4193 */ 4194 if (DEBUG_LOCKS_WARN_ON(!depth)) 4195 return; 4196 4197 hlock = find_held_lock(curr, lock, depth, &i); 4198 if (!hlock) { 4199 print_lock_contention_bug(curr, lock, _RET_IP_); 4200 return; 4201 } 4202 4203 if (hlock->instance != lock) 4204 return; 4205 4206 cpu = smp_processor_id(); 4207 if (hlock->waittime_stamp) { 4208 now = lockstat_clock(); 4209 waittime = now - hlock->waittime_stamp; 4210 hlock->holdtime_stamp = now; 4211 } 4212 4213 trace_lock_acquired(lock, ip); 4214 4215 stats = get_lock_stats(hlock_class(hlock)); 4216 if (waittime) { 4217 if (hlock->read) 4218 lock_time_inc(&stats->read_waittime, waittime); 4219 else 4220 lock_time_inc(&stats->write_waittime, waittime); 4221 } 4222 if (lock->cpu != cpu) 4223 stats->bounces[bounce_acquired + !!hlock->read]++; 4224 put_lock_stats(stats); 4225 4226 lock->cpu = cpu; 4227 lock->ip = ip; 4228 } 4229 4230 void lock_contended(struct lockdep_map *lock, unsigned long ip) 4231 { 4232 unsigned long flags; 4233 4234 if (unlikely(!lock_stat)) 4235 return; 4236 4237 if (unlikely(current->lockdep_recursion)) 4238 return; 4239 4240 raw_local_irq_save(flags); 4241 check_flags(flags); 4242 current->lockdep_recursion = 1; 4243 trace_lock_contended(lock, ip); 4244 __lock_contended(lock, ip); 4245 current->lockdep_recursion = 0; 4246 raw_local_irq_restore(flags); 4247 } 4248 EXPORT_SYMBOL_GPL(lock_contended); 4249 4250 void lock_acquired(struct lockdep_map *lock, unsigned long ip) 4251 { 4252 unsigned long flags; 4253 4254 if (unlikely(!lock_stat)) 4255 return; 4256 4257 if (unlikely(current->lockdep_recursion)) 4258 return; 4259 4260 raw_local_irq_save(flags); 4261 check_flags(flags); 4262 current->lockdep_recursion = 1; 4263 __lock_acquired(lock, ip); 4264 current->lockdep_recursion = 0; 4265 raw_local_irq_restore(flags); 4266 } 4267 EXPORT_SYMBOL_GPL(lock_acquired); 4268 #endif 4269 4270 /* 4271 * Used by the testsuite, sanitize the validator state 4272 * after a simulated failure: 4273 */ 4274 4275 void lockdep_reset(void) 4276 { 4277 unsigned long flags; 4278 int i; 4279 4280 raw_local_irq_save(flags); 4281 current->curr_chain_key = 0; 4282 current->lockdep_depth = 0; 4283 current->lockdep_recursion = 0; 4284 memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); 4285 nr_hardirq_chains = 0; 4286 nr_softirq_chains = 0; 4287 nr_process_chains = 0; 4288 debug_locks = 1; 4289 for (i = 0; i < CHAINHASH_SIZE; i++) 4290 INIT_HLIST_HEAD(chainhash_table + i); 4291 raw_local_irq_restore(flags); 4292 } 4293 4294 static void zap_class(struct lock_class *class) 4295 { 4296 int i; 4297 4298 /* 4299 * Remove all dependencies this lock is 4300 * involved in: 4301 */ 4302 for (i = 0; i < nr_list_entries; i++) { 4303 if (list_entries[i].class == class) 4304 list_del_rcu(&list_entries[i].entry); 4305 } 4306 /* 4307 * Unhash the class and remove it from the all_lock_classes list: 4308 */ 4309 hlist_del_rcu(&class->hash_entry); 4310 list_del_rcu(&class->lock_entry); 4311 4312 RCU_INIT_POINTER(class->key, NULL); 4313 RCU_INIT_POINTER(class->name, NULL); 4314 } 4315 4316 static inline int within(const void *addr, void *start, unsigned long size) 4317 { 4318 return addr >= start && addr < start + size; 4319 } 4320 4321 /* 4322 * Used in module.c to remove lock classes from memory that is going to be 4323 * freed; and possibly re-used by other modules. 4324 * 4325 * We will have had one sync_sched() before getting here, so we're guaranteed 4326 * nobody will look up these exact classes -- they're properly dead but still 4327 * allocated. 4328 */ 4329 void lockdep_free_key_range(void *start, unsigned long size) 4330 { 4331 struct lock_class *class; 4332 struct hlist_head *head; 4333 unsigned long flags; 4334 int i; 4335 int locked; 4336 4337 raw_local_irq_save(flags); 4338 locked = graph_lock(); 4339 4340 /* 4341 * Unhash all classes that were created by this module: 4342 */ 4343 for (i = 0; i < CLASSHASH_SIZE; i++) { 4344 head = classhash_table + i; 4345 hlist_for_each_entry_rcu(class, head, hash_entry) { 4346 if (within(class->key, start, size)) 4347 zap_class(class); 4348 else if (within(class->name, start, size)) 4349 zap_class(class); 4350 } 4351 } 4352 4353 if (locked) 4354 graph_unlock(); 4355 raw_local_irq_restore(flags); 4356 4357 /* 4358 * Wait for any possible iterators from look_up_lock_class() to pass 4359 * before continuing to free the memory they refer to. 4360 * 4361 * sync_sched() is sufficient because the read-side is IRQ disable. 4362 */ 4363 synchronize_sched(); 4364 4365 /* 4366 * XXX at this point we could return the resources to the pool; 4367 * instead we leak them. We would need to change to bitmap allocators 4368 * instead of the linear allocators we have now. 4369 */ 4370 } 4371 4372 void lockdep_reset_lock(struct lockdep_map *lock) 4373 { 4374 struct lock_class *class; 4375 struct hlist_head *head; 4376 unsigned long flags; 4377 int i, j; 4378 int locked; 4379 4380 raw_local_irq_save(flags); 4381 4382 /* 4383 * Remove all classes this lock might have: 4384 */ 4385 for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { 4386 /* 4387 * If the class exists we look it up and zap it: 4388 */ 4389 class = look_up_lock_class(lock, j); 4390 if (!IS_ERR_OR_NULL(class)) 4391 zap_class(class); 4392 } 4393 /* 4394 * Debug check: in the end all mapped classes should 4395 * be gone. 4396 */ 4397 locked = graph_lock(); 4398 for (i = 0; i < CLASSHASH_SIZE; i++) { 4399 head = classhash_table + i; 4400 hlist_for_each_entry_rcu(class, head, hash_entry) { 4401 int match = 0; 4402 4403 for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 4404 match |= class == lock->class_cache[j]; 4405 4406 if (unlikely(match)) { 4407 if (debug_locks_off_graph_unlock()) { 4408 /* 4409 * We all just reset everything, how did it match? 4410 */ 4411 WARN_ON(1); 4412 } 4413 goto out_restore; 4414 } 4415 } 4416 } 4417 if (locked) 4418 graph_unlock(); 4419 4420 out_restore: 4421 raw_local_irq_restore(flags); 4422 } 4423 4424 void __init lockdep_info(void) 4425 { 4426 printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); 4427 4428 printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES); 4429 printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH); 4430 printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS); 4431 printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE); 4432 printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES); 4433 printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); 4434 printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); 4435 4436 printk(" memory used by lock dependency info: %lu kB\n", 4437 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + 4438 sizeof(struct list_head) * CLASSHASH_SIZE + 4439 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + 4440 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + 4441 sizeof(struct list_head) * CHAINHASH_SIZE 4442 #ifdef CONFIG_PROVE_LOCKING 4443 + sizeof(struct circular_queue) 4444 #endif 4445 ) / 1024 4446 ); 4447 4448 printk(" per task-struct memory footprint: %lu bytes\n", 4449 sizeof(struct held_lock) * MAX_LOCK_DEPTH); 4450 } 4451 4452 static void 4453 print_freed_lock_bug(struct task_struct *curr, const void *mem_from, 4454 const void *mem_to, struct held_lock *hlock) 4455 { 4456 if (!debug_locks_off()) 4457 return; 4458 if (debug_locks_silent) 4459 return; 4460 4461 pr_warn("\n"); 4462 pr_warn("=========================\n"); 4463 pr_warn("WARNING: held lock freed!\n"); 4464 print_kernel_ident(); 4465 pr_warn("-------------------------\n"); 4466 pr_warn("%s/%d is freeing memory %p-%p, with a lock still held there!\n", 4467 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); 4468 print_lock(hlock); 4469 lockdep_print_held_locks(curr); 4470 4471 pr_warn("\nstack backtrace:\n"); 4472 dump_stack(); 4473 } 4474 4475 static inline int not_in_range(const void* mem_from, unsigned long mem_len, 4476 const void* lock_from, unsigned long lock_len) 4477 { 4478 return lock_from + lock_len <= mem_from || 4479 mem_from + mem_len <= lock_from; 4480 } 4481 4482 /* 4483 * Called when kernel memory is freed (or unmapped), or if a lock 4484 * is destroyed or reinitialized - this code checks whether there is 4485 * any held lock in the memory range of <from> to <to>: 4486 */ 4487 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) 4488 { 4489 struct task_struct *curr = current; 4490 struct held_lock *hlock; 4491 unsigned long flags; 4492 int i; 4493 4494 if (unlikely(!debug_locks)) 4495 return; 4496 4497 local_irq_save(flags); 4498 for (i = 0; i < curr->lockdep_depth; i++) { 4499 hlock = curr->held_locks + i; 4500 4501 if (not_in_range(mem_from, mem_len, hlock->instance, 4502 sizeof(*hlock->instance))) 4503 continue; 4504 4505 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); 4506 break; 4507 } 4508 local_irq_restore(flags); 4509 } 4510 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); 4511 4512 static void print_held_locks_bug(void) 4513 { 4514 if (!debug_locks_off()) 4515 return; 4516 if (debug_locks_silent) 4517 return; 4518 4519 pr_warn("\n"); 4520 pr_warn("====================================\n"); 4521 pr_warn("WARNING: %s/%d still has locks held!\n", 4522 current->comm, task_pid_nr(current)); 4523 print_kernel_ident(); 4524 pr_warn("------------------------------------\n"); 4525 lockdep_print_held_locks(current); 4526 pr_warn("\nstack backtrace:\n"); 4527 dump_stack(); 4528 } 4529 4530 void debug_check_no_locks_held(void) 4531 { 4532 if (unlikely(current->lockdep_depth > 0)) 4533 print_held_locks_bug(); 4534 } 4535 EXPORT_SYMBOL_GPL(debug_check_no_locks_held); 4536 4537 #ifdef __KERNEL__ 4538 void debug_show_all_locks(void) 4539 { 4540 struct task_struct *g, *p; 4541 int count = 10; 4542 int unlock = 1; 4543 4544 if (unlikely(!debug_locks)) { 4545 pr_warn("INFO: lockdep is turned off.\n"); 4546 return; 4547 } 4548 pr_warn("\nShowing all locks held in the system:\n"); 4549 4550 /* 4551 * Here we try to get the tasklist_lock as hard as possible, 4552 * if not successful after 2 seconds we ignore it (but keep 4553 * trying). This is to enable a debug printout even if a 4554 * tasklist_lock-holding task deadlocks or crashes. 4555 */ 4556 retry: 4557 if (!read_trylock(&tasklist_lock)) { 4558 if (count == 10) 4559 pr_warn("hm, tasklist_lock locked, retrying... "); 4560 if (count) { 4561 count--; 4562 pr_cont(" #%d", 10-count); 4563 mdelay(200); 4564 goto retry; 4565 } 4566 pr_cont(" ignoring it.\n"); 4567 unlock = 0; 4568 } else { 4569 if (count != 10) 4570 pr_cont(" locked it.\n"); 4571 } 4572 4573 do_each_thread(g, p) { 4574 /* 4575 * It's not reliable to print a task's held locks 4576 * if it's not sleeping (or if it's not the current 4577 * task): 4578 */ 4579 if (p->state == TASK_RUNNING && p != current) 4580 continue; 4581 if (p->lockdep_depth) 4582 lockdep_print_held_locks(p); 4583 if (!unlock) 4584 if (read_trylock(&tasklist_lock)) 4585 unlock = 1; 4586 } while_each_thread(g, p); 4587 4588 pr_warn("\n"); 4589 pr_warn("=============================================\n\n"); 4590 4591 if (unlock) 4592 read_unlock(&tasklist_lock); 4593 } 4594 EXPORT_SYMBOL_GPL(debug_show_all_locks); 4595 #endif 4596 4597 /* 4598 * Careful: only use this function if you are sure that 4599 * the task cannot run in parallel! 4600 */ 4601 void debug_show_held_locks(struct task_struct *task) 4602 { 4603 if (unlikely(!debug_locks)) { 4604 printk("INFO: lockdep is turned off.\n"); 4605 return; 4606 } 4607 lockdep_print_held_locks(task); 4608 } 4609 EXPORT_SYMBOL_GPL(debug_show_held_locks); 4610 4611 asmlinkage __visible void lockdep_sys_exit(void) 4612 { 4613 struct task_struct *curr = current; 4614 4615 if (unlikely(curr->lockdep_depth)) { 4616 if (!debug_locks_off()) 4617 return; 4618 pr_warn("\n"); 4619 pr_warn("================================================\n"); 4620 pr_warn("WARNING: lock held when returning to user space!\n"); 4621 print_kernel_ident(); 4622 pr_warn("------------------------------------------------\n"); 4623 pr_warn("%s/%d is leaving the kernel with locks still held!\n", 4624 curr->comm, curr->pid); 4625 lockdep_print_held_locks(curr); 4626 } 4627 4628 /* 4629 * The lock history for each syscall should be independent. So wipe the 4630 * slate clean on return to userspace. 4631 */ 4632 lockdep_invariant_state(false); 4633 } 4634 4635 void lockdep_rcu_suspicious(const char *file, const int line, const char *s) 4636 { 4637 struct task_struct *curr = current; 4638 4639 /* Note: the following can be executed concurrently, so be careful. */ 4640 pr_warn("\n"); 4641 pr_warn("=============================\n"); 4642 pr_warn("WARNING: suspicious RCU usage\n"); 4643 print_kernel_ident(); 4644 pr_warn("-----------------------------\n"); 4645 pr_warn("%s:%d %s!\n", file, line, s); 4646 pr_warn("\nother info that might help us debug this:\n\n"); 4647 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n", 4648 !rcu_lockdep_current_cpu_online() 4649 ? "RCU used illegally from offline CPU!\n" 4650 : !rcu_is_watching() 4651 ? "RCU used illegally from idle CPU!\n" 4652 : "", 4653 rcu_scheduler_active, debug_locks); 4654 4655 /* 4656 * If a CPU is in the RCU-free window in idle (ie: in the section 4657 * between rcu_idle_enter() and rcu_idle_exit(), then RCU 4658 * considers that CPU to be in an "extended quiescent state", 4659 * which means that RCU will be completely ignoring that CPU. 4660 * Therefore, rcu_read_lock() and friends have absolutely no 4661 * effect on a CPU running in that state. In other words, even if 4662 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well 4663 * delete data structures out from under it. RCU really has no 4664 * choice here: we need to keep an RCU-free window in idle where 4665 * the CPU may possibly enter into low power mode. This way we can 4666 * notice an extended quiescent state to other CPUs that started a grace 4667 * period. Otherwise we would delay any grace period as long as we run 4668 * in the idle task. 4669 * 4670 * So complain bitterly if someone does call rcu_read_lock(), 4671 * rcu_read_lock_bh() and so on from extended quiescent states. 4672 */ 4673 if (!rcu_is_watching()) 4674 pr_warn("RCU used illegally from extended quiescent state!\n"); 4675 4676 lockdep_print_held_locks(curr); 4677 pr_warn("\nstack backtrace:\n"); 4678 dump_stack(); 4679 } 4680 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious); 4681 4682 #ifdef CONFIG_LOCKDEP_CROSSRELEASE 4683 4684 /* 4685 * Crossrelease works by recording a lock history for each thread and 4686 * connecting those historic locks that were taken after the 4687 * wait_for_completion() in the complete() context. 4688 * 4689 * Task-A Task-B 4690 * 4691 * mutex_lock(&A); 4692 * mutex_unlock(&A); 4693 * 4694 * wait_for_completion(&C); 4695 * lock_acquire_crosslock(); 4696 * atomic_inc_return(&cross_gen_id); 4697 * | 4698 * | mutex_lock(&B); 4699 * | mutex_unlock(&B); 4700 * | 4701 * | complete(&C); 4702 * `-- lock_commit_crosslock(); 4703 * 4704 * Which will then add a dependency between B and C. 4705 */ 4706 4707 #define xhlock(i) (current->xhlocks[(i) % MAX_XHLOCKS_NR]) 4708 4709 /* 4710 * Whenever a crosslock is held, cross_gen_id will be increased. 4711 */ 4712 static atomic_t cross_gen_id; /* Can be wrapped */ 4713 4714 /* 4715 * Make an entry of the ring buffer invalid. 4716 */ 4717 static inline void invalidate_xhlock(struct hist_lock *xhlock) 4718 { 4719 /* 4720 * Normally, xhlock->hlock.instance must be !NULL. 4721 */ 4722 xhlock->hlock.instance = NULL; 4723 } 4724 4725 /* 4726 * Lock history stacks; we have 2 nested lock history stacks: 4727 * 4728 * HARD(IRQ) 4729 * SOFT(IRQ) 4730 * 4731 * The thing is that once we complete a HARD/SOFT IRQ the future task locks 4732 * should not depend on any of the locks observed while running the IRQ. So 4733 * what we do is rewind the history buffer and erase all our knowledge of that 4734 * temporal event. 4735 */ 4736 4737 void crossrelease_hist_start(enum xhlock_context_t c) 4738 { 4739 struct task_struct *cur = current; 4740 4741 if (!cur->xhlocks) 4742 return; 4743 4744 cur->xhlock_idx_hist[c] = cur->xhlock_idx; 4745 cur->hist_id_save[c] = cur->hist_id; 4746 } 4747 4748 void crossrelease_hist_end(enum xhlock_context_t c) 4749 { 4750 struct task_struct *cur = current; 4751 4752 if (cur->xhlocks) { 4753 unsigned int idx = cur->xhlock_idx_hist[c]; 4754 struct hist_lock *h = &xhlock(idx); 4755 4756 cur->xhlock_idx = idx; 4757 4758 /* Check if the ring was overwritten. */ 4759 if (h->hist_id != cur->hist_id_save[c]) 4760 invalidate_xhlock(h); 4761 } 4762 } 4763 4764 /* 4765 * lockdep_invariant_state() is used to annotate independence inside a task, to 4766 * make one task look like multiple independent 'tasks'. 4767 * 4768 * Take for instance workqueues; each work is independent of the last. The 4769 * completion of a future work does not depend on the completion of a past work 4770 * (in general). Therefore we must not carry that (lock) dependency across 4771 * works. 4772 * 4773 * This is true for many things; pretty much all kthreads fall into this 4774 * pattern, where they have an invariant state and future completions do not 4775 * depend on past completions. Its just that since they all have the 'same' 4776 * form -- the kthread does the same over and over -- it doesn't typically 4777 * matter. 4778 * 4779 * The same is true for system-calls, once a system call is completed (we've 4780 * returned to userspace) the next system call does not depend on the lock 4781 * history of the previous system call. 4782 * 4783 * They key property for independence, this invariant state, is that it must be 4784 * a point where we hold no locks and have no history. Because if we were to 4785 * hold locks, the restore at _end() would not necessarily recover it's history 4786 * entry. Similarly, independence per-definition means it does not depend on 4787 * prior state. 4788 */ 4789 void lockdep_invariant_state(bool force) 4790 { 4791 /* 4792 * We call this at an invariant point, no current state, no history. 4793 * Verify the former, enforce the latter. 4794 */ 4795 WARN_ON_ONCE(!force && current->lockdep_depth); 4796 invalidate_xhlock(&xhlock(current->xhlock_idx)); 4797 } 4798 4799 static int cross_lock(struct lockdep_map *lock) 4800 { 4801 return lock ? lock->cross : 0; 4802 } 4803 4804 /* 4805 * This is needed to decide the relationship between wrapable variables. 4806 */ 4807 static inline int before(unsigned int a, unsigned int b) 4808 { 4809 return (int)(a - b) < 0; 4810 } 4811 4812 static inline struct lock_class *xhlock_class(struct hist_lock *xhlock) 4813 { 4814 return hlock_class(&xhlock->hlock); 4815 } 4816 4817 static inline struct lock_class *xlock_class(struct cross_lock *xlock) 4818 { 4819 return hlock_class(&xlock->hlock); 4820 } 4821 4822 /* 4823 * Should we check a dependency with previous one? 4824 */ 4825 static inline int depend_before(struct held_lock *hlock) 4826 { 4827 return hlock->read != 2 && hlock->check && !hlock->trylock; 4828 } 4829 4830 /* 4831 * Should we check a dependency with next one? 4832 */ 4833 static inline int depend_after(struct held_lock *hlock) 4834 { 4835 return hlock->read != 2 && hlock->check; 4836 } 4837 4838 /* 4839 * Check if the xhlock is valid, which would be false if, 4840 * 4841 * 1. Has not used after initializaion yet. 4842 * 2. Got invalidated. 4843 * 4844 * Remind hist_lock is implemented as a ring buffer. 4845 */ 4846 static inline int xhlock_valid(struct hist_lock *xhlock) 4847 { 4848 /* 4849 * xhlock->hlock.instance must be !NULL. 4850 */ 4851 return !!xhlock->hlock.instance; 4852 } 4853 4854 /* 4855 * Record a hist_lock entry. 4856 * 4857 * Irq disable is only required. 4858 */ 4859 static void add_xhlock(struct held_lock *hlock) 4860 { 4861 unsigned int idx = ++current->xhlock_idx; 4862 struct hist_lock *xhlock = &xhlock(idx); 4863 4864 #ifdef CONFIG_DEBUG_LOCKDEP 4865 /* 4866 * This can be done locklessly because they are all task-local 4867 * state, we must however ensure IRQs are disabled. 4868 */ 4869 WARN_ON_ONCE(!irqs_disabled()); 4870 #endif 4871 4872 /* Initialize hist_lock's members */ 4873 xhlock->hlock = *hlock; 4874 xhlock->hist_id = ++current->hist_id; 4875 4876 xhlock->trace.nr_entries = 0; 4877 xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES; 4878 xhlock->trace.entries = xhlock->trace_entries; 4879 4880 if (crossrelease_fullstack) { 4881 xhlock->trace.skip = 3; 4882 save_stack_trace(&xhlock->trace); 4883 } else { 4884 xhlock->trace.nr_entries = 1; 4885 xhlock->trace.entries[0] = hlock->acquire_ip; 4886 } 4887 } 4888 4889 static inline int same_context_xhlock(struct hist_lock *xhlock) 4890 { 4891 return xhlock->hlock.irq_context == task_irq_context(current); 4892 } 4893 4894 /* 4895 * This should be lockless as far as possible because this would be 4896 * called very frequently. 4897 */ 4898 static void check_add_xhlock(struct held_lock *hlock) 4899 { 4900 /* 4901 * Record a hist_lock, only in case that acquisitions ahead 4902 * could depend on the held_lock. For example, if the held_lock 4903 * is trylock then acquisitions ahead never depends on that. 4904 * In that case, we don't need to record it. Just return. 4905 */ 4906 if (!current->xhlocks || !depend_before(hlock)) 4907 return; 4908 4909 add_xhlock(hlock); 4910 } 4911 4912 /* 4913 * For crosslock. 4914 */ 4915 static int add_xlock(struct held_lock *hlock) 4916 { 4917 struct cross_lock *xlock; 4918 unsigned int gen_id; 4919 4920 if (!graph_lock()) 4921 return 0; 4922 4923 xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock; 4924 4925 /* 4926 * When acquisitions for a crosslock are overlapped, we use 4927 * nr_acquire to perform commit for them, based on cross_gen_id 4928 * of the first acquisition, which allows to add additional 4929 * dependencies. 4930 * 4931 * Moreover, when no acquisition of a crosslock is in progress, 4932 * we should not perform commit because the lock might not exist 4933 * any more, which might cause incorrect memory access. So we 4934 * have to track the number of acquisitions of a crosslock. 4935 * 4936 * depend_after() is necessary to initialize only the first 4937 * valid xlock so that the xlock can be used on its commit. 4938 */ 4939 if (xlock->nr_acquire++ && depend_after(&xlock->hlock)) 4940 goto unlock; 4941 4942 gen_id = (unsigned int)atomic_inc_return(&cross_gen_id); 4943 xlock->hlock = *hlock; 4944 xlock->hlock.gen_id = gen_id; 4945 unlock: 4946 graph_unlock(); 4947 return 1; 4948 } 4949 4950 /* 4951 * Called for both normal and crosslock acquires. Normal locks will be 4952 * pushed on the hist_lock queue. Cross locks will record state and 4953 * stop regular lock_acquire() to avoid being placed on the held_lock 4954 * stack. 4955 * 4956 * Return: 0 - failure; 4957 * 1 - crosslock, done; 4958 * 2 - normal lock, continue to held_lock[] ops. 4959 */ 4960 static int lock_acquire_crosslock(struct held_lock *hlock) 4961 { 4962 /* 4963 * CONTEXT 1 CONTEXT 2 4964 * --------- --------- 4965 * lock A (cross) 4966 * X = atomic_inc_return(&cross_gen_id) 4967 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 4968 * Y = atomic_read_acquire(&cross_gen_id) 4969 * lock B 4970 * 4971 * atomic_read_acquire() is for ordering between A and B, 4972 * IOW, A happens before B, when CONTEXT 2 see Y >= X. 4973 * 4974 * Pairs with atomic_inc_return() in add_xlock(). 4975 */ 4976 hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id); 4977 4978 if (cross_lock(hlock->instance)) 4979 return add_xlock(hlock); 4980 4981 check_add_xhlock(hlock); 4982 return 2; 4983 } 4984 4985 static int copy_trace(struct stack_trace *trace) 4986 { 4987 unsigned long *buf = stack_trace + nr_stack_trace_entries; 4988 unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; 4989 unsigned int nr = min(max_nr, trace->nr_entries); 4990 4991 trace->nr_entries = nr; 4992 memcpy(buf, trace->entries, nr * sizeof(trace->entries[0])); 4993 trace->entries = buf; 4994 nr_stack_trace_entries += nr; 4995 4996 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) { 4997 if (!debug_locks_off_graph_unlock()) 4998 return 0; 4999 5000 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!"); 5001 dump_stack(); 5002 5003 return 0; 5004 } 5005 5006 return 1; 5007 } 5008 5009 static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock) 5010 { 5011 unsigned int xid, pid; 5012 u64 chain_key; 5013 5014 xid = xlock_class(xlock) - lock_classes; 5015 chain_key = iterate_chain_key((u64)0, xid); 5016 pid = xhlock_class(xhlock) - lock_classes; 5017 chain_key = iterate_chain_key(chain_key, pid); 5018 5019 if (lookup_chain_cache(chain_key)) 5020 return 1; 5021 5022 if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context, 5023 chain_key)) 5024 return 0; 5025 5026 if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1, 5027 &xhlock->trace, copy_trace)) 5028 return 0; 5029 5030 return 1; 5031 } 5032 5033 static void commit_xhlocks(struct cross_lock *xlock) 5034 { 5035 unsigned int cur = current->xhlock_idx; 5036 unsigned int prev_hist_id = xhlock(cur).hist_id; 5037 unsigned int i; 5038 5039 if (!graph_lock()) 5040 return; 5041 5042 if (xlock->nr_acquire) { 5043 for (i = 0; i < MAX_XHLOCKS_NR; i++) { 5044 struct hist_lock *xhlock = &xhlock(cur - i); 5045 5046 if (!xhlock_valid(xhlock)) 5047 break; 5048 5049 if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id)) 5050 break; 5051 5052 if (!same_context_xhlock(xhlock)) 5053 break; 5054 5055 /* 5056 * Filter out the cases where the ring buffer was 5057 * overwritten and the current entry has a bigger 5058 * hist_id than the previous one, which is impossible 5059 * otherwise: 5060 */ 5061 if (unlikely(before(prev_hist_id, xhlock->hist_id))) 5062 break; 5063 5064 prev_hist_id = xhlock->hist_id; 5065 5066 /* 5067 * commit_xhlock() returns 0 with graph_lock already 5068 * released if fail. 5069 */ 5070 if (!commit_xhlock(xlock, xhlock)) 5071 return; 5072 } 5073 } 5074 5075 graph_unlock(); 5076 } 5077 5078 void lock_commit_crosslock(struct lockdep_map *lock) 5079 { 5080 struct cross_lock *xlock; 5081 unsigned long flags; 5082 5083 if (unlikely(!debug_locks || current->lockdep_recursion)) 5084 return; 5085 5086 if (!current->xhlocks) 5087 return; 5088 5089 /* 5090 * Do commit hist_locks with the cross_lock, only in case that 5091 * the cross_lock could depend on acquisitions after that. 5092 * 5093 * For example, if the cross_lock does not have the 'check' flag 5094 * then we don't need to check dependencies and commit for that. 5095 * Just skip it. In that case, of course, the cross_lock does 5096 * not depend on acquisitions ahead, either. 5097 * 5098 * WARNING: Don't do that in add_xlock() in advance. When an 5099 * acquisition context is different from the commit context, 5100 * invalid(skipped) cross_lock might be accessed. 5101 */ 5102 if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock)) 5103 return; 5104 5105 raw_local_irq_save(flags); 5106 check_flags(flags); 5107 current->lockdep_recursion = 1; 5108 xlock = &((struct lockdep_map_cross *)lock)->xlock; 5109 commit_xhlocks(xlock); 5110 current->lockdep_recursion = 0; 5111 raw_local_irq_restore(flags); 5112 } 5113 EXPORT_SYMBOL_GPL(lock_commit_crosslock); 5114 5115 /* 5116 * Return: 0 - failure; 5117 * 1 - crosslock, done; 5118 * 2 - normal lock, continue to held_lock[] ops. 5119 */ 5120 static int lock_release_crosslock(struct lockdep_map *lock) 5121 { 5122 if (cross_lock(lock)) { 5123 if (!graph_lock()) 5124 return 0; 5125 ((struct lockdep_map_cross *)lock)->xlock.nr_acquire--; 5126 graph_unlock(); 5127 return 1; 5128 } 5129 return 2; 5130 } 5131 5132 static void cross_init(struct lockdep_map *lock, int cross) 5133 { 5134 if (cross) 5135 ((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0; 5136 5137 lock->cross = cross; 5138 5139 /* 5140 * Crossrelease assumes that the ring buffer size of xhlocks 5141 * is aligned with power of 2. So force it on build. 5142 */ 5143 BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1)); 5144 } 5145 5146 void lockdep_init_task(struct task_struct *task) 5147 { 5148 int i; 5149 5150 task->xhlock_idx = UINT_MAX; 5151 task->hist_id = 0; 5152 5153 for (i = 0; i < XHLOCK_CTX_NR; i++) { 5154 task->xhlock_idx_hist[i] = UINT_MAX; 5155 task->hist_id_save[i] = 0; 5156 } 5157 5158 task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR, 5159 GFP_KERNEL); 5160 } 5161 5162 void lockdep_free_task(struct task_struct *task) 5163 { 5164 if (task->xhlocks) { 5165 void *tmp = task->xhlocks; 5166 /* Diable crossrelease for current */ 5167 task->xhlocks = NULL; 5168 kfree(tmp); 5169 } 5170 } 5171 #endif 5172