xref: /openbmc/linux/kernel/locking/lockdep.c (revision d4ea45e8)
1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e. if at any time in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies at runtime.
27  */
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/sched/clock.h>
32 #include <linux/sched/task.h>
33 #include <linux/sched/mm.h>
34 #include <linux/delay.h>
35 #include <linux/module.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/spinlock.h>
39 #include <linux/kallsyms.h>
40 #include <linux/interrupt.h>
41 #include <linux/stacktrace.h>
42 #include <linux/debug_locks.h>
43 #include <linux/irqflags.h>
44 #include <linux/utsname.h>
45 #include <linux/hash.h>
46 #include <linux/ftrace.h>
47 #include <linux/stringify.h>
48 #include <linux/bitops.h>
49 #include <linux/gfp.h>
50 #include <linux/random.h>
51 #include <linux/jhash.h>
52 #include <linux/nmi.h>
53 
54 #include <asm/sections.h>
55 
56 #include "lockdep_internals.h"
57 
58 #define CREATE_TRACE_POINTS
59 #include <trace/events/lock.h>
60 
61 #ifdef CONFIG_PROVE_LOCKING
62 int prove_locking = 1;
63 module_param(prove_locking, int, 0644);
64 #else
65 #define prove_locking 0
66 #endif
67 
68 #ifdef CONFIG_LOCK_STAT
69 int lock_stat = 1;
70 module_param(lock_stat, int, 0644);
71 #else
72 #define lock_stat 0
73 #endif
74 
75 /*
76  * lockdep_lock: protects the lockdep graph, the hashes and the
77  *               class/list/hash allocators.
78  *
79  * This is one of the rare exceptions where it's justified
80  * to use a raw spinlock - we really don't want the spinlock
81  * code to recurse back into the lockdep code...
82  */
83 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
84 
85 static int graph_lock(void)
86 {
87 	arch_spin_lock(&lockdep_lock);
88 	/*
89 	 * Make sure that if another CPU detected a bug while
90 	 * walking the graph we don't change it (while the other
91 	 * CPU is busy printing out stuff with the graph lock
92 	 * dropped already)
93 	 */
94 	if (!debug_locks) {
95 		arch_spin_unlock(&lockdep_lock);
96 		return 0;
97 	}
98 	/* prevent any recursions within lockdep from causing deadlocks */
99 	current->lockdep_recursion++;
100 	return 1;
101 }
102 
103 static inline int graph_unlock(void)
104 {
105 	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
106 		/*
107 		 * The lockdep graph lock isn't locked while we expect it to
108 		 * be; we're confused now, bye!
109 		 */
110 		return DEBUG_LOCKS_WARN_ON(1);
111 	}
112 
113 	current->lockdep_recursion--;
114 	arch_spin_unlock(&lockdep_lock);
115 	return 0;
116 }
117 
118 /*
119  * Turn lock debugging off and return 0 if it was off already,
120  * and also release the graph lock:
121  */
122 static inline int debug_locks_off_graph_unlock(void)
123 {
124 	int ret = debug_locks_off();
125 
126 	arch_spin_unlock(&lockdep_lock);
127 
128 	return ret;
129 }
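
/*
 * A note on how the helpers above are used throughout this file:
 * code that modifies the dependency graph does so under graph_lock(),
 * and error paths that want to report a problem typically switch to
 * debug_locks_off_graph_unlock() + print_lockdep_off() + dump_stack()
 * so the report is printed with lockdep already disabled (see e.g.
 * save_trace() and alloc_list_entry() below).
 */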
130 
131 unsigned long nr_list_entries;
132 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
133 
134 /*
135  * All data structures here are protected by the global lockdep graph lock.
136  *
137  * Mutex key structs only get allocated once, during bootup, and never
138  * get freed - this significantly simplifies the debugging code.
139  */
140 unsigned long nr_lock_classes;
141 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
142 
143 static inline struct lock_class *hlock_class(struct held_lock *hlock)
144 {
145 	if (!hlock->class_idx) {
146 		/*
147 		 * Someone passed in garbage, we give up.
148 		 */
149 		DEBUG_LOCKS_WARN_ON(1);
150 		return NULL;
151 	}
152 	return lock_classes + hlock->class_idx - 1;
153 }
154 
155 #ifdef CONFIG_LOCK_STAT
156 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
157 
158 static inline u64 lockstat_clock(void)
159 {
160 	return local_clock();
161 }
162 
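/*
 * Find the slot for @ip in a per-class array of contention/contending
 * points: reuse the slot if @ip is already recorded, otherwise claim the
 * first empty slot. Returns the slot index (LOCKSTAT_POINTS if the array
 * is full).
 */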
163 static int lock_point(unsigned long points[], unsigned long ip)
164 {
165 	int i;
166 
167 	for (i = 0; i < LOCKSTAT_POINTS; i++) {
168 		if (points[i] == 0) {
169 			points[i] = ip;
170 			break;
171 		}
172 		if (points[i] == ip)
173 			break;
174 	}
175 
176 	return i;
177 }
178 
179 static void lock_time_inc(struct lock_time *lt, u64 time)
180 {
181 	if (time > lt->max)
182 		lt->max = time;
183 
184 	if (time < lt->min || !lt->nr)
185 		lt->min = time;
186 
187 	lt->total += time;
188 	lt->nr++;
189 }
190 
191 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
192 {
193 	if (!src->nr)
194 		return;
195 
196 	if (src->max > dst->max)
197 		dst->max = src->max;
198 
199 	if (src->min < dst->min || !dst->nr)
200 		dst->min = src->min;
201 
202 	dst->total += src->total;
203 	dst->nr += src->nr;
204 }
205 
206 struct lock_class_stats lock_stats(struct lock_class *class)
207 {
208 	struct lock_class_stats stats;
209 	int cpu, i;
210 
211 	memset(&stats, 0, sizeof(struct lock_class_stats));
212 	for_each_possible_cpu(cpu) {
213 		struct lock_class_stats *pcs =
214 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
215 
216 		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
217 			stats.contention_point[i] += pcs->contention_point[i];
218 
219 		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
220 			stats.contending_point[i] += pcs->contending_point[i];
221 
222 		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
223 		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
224 
225 		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
226 		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
227 
228 		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
229 			stats.bounces[i] += pcs->bounces[i];
230 	}
231 
232 	return stats;
233 }
234 
235 void clear_lock_stats(struct lock_class *class)
236 {
237 	int cpu;
238 
239 	for_each_possible_cpu(cpu) {
240 		struct lock_class_stats *cpu_stats =
241 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
242 
243 		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
244 	}
245 	memset(class->contention_point, 0, sizeof(class->contention_point));
246 	memset(class->contending_point, 0, sizeof(class->contending_point));
247 }
248 
249 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
250 {
251 	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
252 }
253 
254 static void lock_release_holdtime(struct held_lock *hlock)
255 {
256 	struct lock_class_stats *stats;
257 	u64 holdtime;
258 
259 	if (!lock_stat)
260 		return;
261 
262 	holdtime = lockstat_clock() - hlock->holdtime_stamp;
263 
264 	stats = get_lock_stats(hlock_class(hlock));
265 	if (hlock->read)
266 		lock_time_inc(&stats->read_holdtime, holdtime);
267 	else
268 		lock_time_inc(&stats->write_holdtime, holdtime);
269 }
270 #else
271 static inline void lock_release_holdtime(struct held_lock *hlock)
272 {
273 }
274 #endif
275 
276 /*
277  * We keep a global list of all lock classes. The list only grows,
278  * never shrinks. The list is only accessed with the lockdep
279  * spinlock held.
280  */
281 LIST_HEAD(all_lock_classes);
282 
283 /*
284  * The lockdep classes are in a hash-table as well, for fast lookup:
285  */
286 #define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
287 #define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
288 #define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
289 #define classhashentry(key)	(classhash_table + __classhashfn((key)))
290 
291 static struct hlist_head classhash_table[CLASSHASH_SIZE];
292 
293 /*
294  * We put the lock dependency chains into a hash-table as well, to cache
295  * their existence:
296  */
297 #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
298 #define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
299 #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
300 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
301 
302 static struct hlist_head chainhash_table[CHAINHASH_SIZE];
303 
304 /*
305  * The hash key of the lock dependency chains is a hash itself too:
306  * it's a hash of all locks taken up to that lock, including that lock.
307  * It's a 64-bit hash, because it's important for the keys to be
308  * unique.
309  */
310 static inline u64 iterate_chain_key(u64 key, u32 idx)
311 {
312 	u32 k0 = key, k1 = key >> 32;
313 
314 	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
315 
316 	return k0 | (u64)k1 << 32;
317 }
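
/*
 * Illustrative note on iterate_chain_key(): the validator builds a
 * per-context chain key by repeatedly mixing in the class index of each
 * held lock, i.e.
 *
 *	chain_key = iterate_chain_key(chain_key, class_idx);
 *
 * (see print_chain_key_iteration() further down), so a given sequence of
 * lock classes always maps to the same 64-bit key, which is then used to
 * look up the chain in chainhash_table.
 */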
318 
319 void lockdep_off(void)
320 {
321 	current->lockdep_recursion++;
322 }
323 EXPORT_SYMBOL(lockdep_off);
324 
325 void lockdep_on(void)
326 {
327 	current->lockdep_recursion--;
328 }
329 EXPORT_SYMBOL(lockdep_on);
330 
331 /*
332  * Debugging switches:
333  */
334 
335 #define VERBOSE			0
336 #define VERY_VERBOSE		0
337 
338 #if VERBOSE
339 # define HARDIRQ_VERBOSE	1
340 # define SOFTIRQ_VERBOSE	1
341 #else
342 # define HARDIRQ_VERBOSE	0
343 # define SOFTIRQ_VERBOSE	0
344 #endif
345 
346 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
347 /*
348  * Quick filtering for interesting events:
349  */
350 static int class_filter(struct lock_class *class)
351 {
352 #if 0
353 	/* Example */
354 	if (class->name_version == 1 &&
355 			!strcmp(class->name, "lockname"))
356 		return 1;
357 	if (class->name_version == 1 &&
358 			!strcmp(class->name, "&struct->lockfield"))
359 		return 1;
360 #endif
361 	/* Filter everything else. Returning 1 here would allow everything else. */
362 	return 0;
363 }
364 #endif
365 
366 static int verbose(struct lock_class *class)
367 {
368 #if VERBOSE
369 	return class_filter(class);
370 #endif
371 	return 0;
372 }
373 
374 /*
375  * Stack-trace: tightly packed array of stack backtrace
376  * addresses. Protected by the graph_lock.
377  */
378 unsigned long nr_stack_trace_entries;
379 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
380 
381 static void print_lockdep_off(const char *bug_msg)
382 {
383 	printk(KERN_DEBUG "%s\n", bug_msg);
384 	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
385 #ifdef CONFIG_LOCK_STAT
386 	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
387 #endif
388 }
389 
390 static int save_trace(struct stack_trace *trace)
391 {
392 	trace->nr_entries = 0;
393 	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
394 	trace->entries = stack_trace + nr_stack_trace_entries;
395 
396 	trace->skip = 3;
397 
398 	save_stack_trace(trace);
399 
400 	/*
401 	 * Some daft arches put -1 at the end to indicate it's a full trace.
402 	 *
403 	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
404 	 * complete trace that maxes out the entries provided will be reported
405 	 * as incomplete, friggin useless </rant>
406 	 */
407 	if (trace->nr_entries != 0 &&
408 	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
409 		trace->nr_entries--;
410 
411 	trace->max_entries = trace->nr_entries;
412 
413 	nr_stack_trace_entries += trace->nr_entries;
414 
415 	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
416 		if (!debug_locks_off_graph_unlock())
417 			return 0;
418 
419 		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
420 		dump_stack();
421 
422 		return 0;
423 	}
424 
425 	return 1;
426 }
427 
428 unsigned int nr_hardirq_chains;
429 unsigned int nr_softirq_chains;
430 unsigned int nr_process_chains;
431 unsigned int max_lockdep_depth;
432 
433 #ifdef CONFIG_DEBUG_LOCKDEP
434 /*
435  * Various lockdep statistics:
436  */
437 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
438 #endif
439 
440 /*
441  * Locking printouts:
442  */
443 
444 #define __USAGE(__STATE)						\
445 	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
446 	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
447 	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
448 	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
449 
450 static const char *usage_str[] =
451 {
452 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
453 #include "lockdep_states.h"
454 #undef LOCKDEP_STATE
455 	[LOCK_USED] = "INITIAL USE",
456 };
457 
458 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
459 {
460 	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
461 }
462 
463 static inline unsigned long lock_flag(enum lock_usage_bit bit)
464 {
465 	return 1UL << bit;
466 }
467 
468 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
469 {
470 	char c = '.';
471 
472 	if (class->usage_mask & lock_flag(bit + 2))
473 		c = '+';
474 	if (class->usage_mask & lock_flag(bit)) {
475 		c = '-';
476 		if (class->usage_mask & lock_flag(bit + 2))
477 			c = '?';
478 	}
479 
480 	return c;
481 }
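
/*
 * The characters produced by get_usage_char() above, as tested against
 * the class usage bits:
 *
 *	'.'  neither the USED_IN nor the ENABLED bit is set
 *	'+'  only the ENABLED bit is set (acquired with that irq type enabled)
 *	'-'  only the USED_IN bit is set (acquired in that irq context)
 *	'?'  both bits are set
 */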
482 
483 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
484 {
485 	int i = 0;
486 
487 #define LOCKDEP_STATE(__STATE) 						\
488 	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
489 	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
490 #include "lockdep_states.h"
491 #undef LOCKDEP_STATE
492 
493 	usage[i] = '\0';
494 }
495 
496 static void __print_lock_name(struct lock_class *class)
497 {
498 	char str[KSYM_NAME_LEN];
499 	const char *name;
500 
501 	name = class->name;
502 	if (!name) {
503 		name = __get_key_name(class->key, str);
504 		printk(KERN_CONT "%s", name);
505 	} else {
506 		printk(KERN_CONT "%s", name);
507 		if (class->name_version > 1)
508 			printk(KERN_CONT "#%d", class->name_version);
509 		if (class->subclass)
510 			printk(KERN_CONT "/%d", class->subclass);
511 	}
512 }
513 
514 static void print_lock_name(struct lock_class *class)
515 {
516 	char usage[LOCK_USAGE_CHARS];
517 
518 	get_usage_chars(class, usage);
519 
520 	printk(KERN_CONT " (");
521 	__print_lock_name(class);
522 	printk(KERN_CONT "){%s}", usage);
523 }
524 
525 static void print_lockdep_cache(struct lockdep_map *lock)
526 {
527 	const char *name;
528 	char str[KSYM_NAME_LEN];
529 
530 	name = lock->name;
531 	if (!name)
532 		name = __get_key_name(lock->key->subkeys, str);
533 
534 	printk(KERN_CONT "%s", name);
535 }
536 
537 static void print_lock(struct held_lock *hlock)
538 {
539 	/*
540 	 * We can be called locklessly through debug_show_all_locks() so be
541 	 * extra careful: the hlock might have been released and cleared.
542 	 */
543 	unsigned int class_idx = hlock->class_idx;
544 
545 	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
546 	barrier();
547 
548 	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
549 		printk(KERN_CONT "<RELEASED>\n");
550 		return;
551 	}
552 
553 	printk(KERN_CONT "%p", hlock->instance);
554 	print_lock_name(lock_classes + class_idx - 1);
555 	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
556 }
557 
558 static void lockdep_print_held_locks(struct task_struct *p)
559 {
560 	int i, depth = READ_ONCE(p->lockdep_depth);
561 
562 	if (!depth)
563 		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
564 	else
565 		printk("%d lock%s held by %s/%d:\n", depth,
566 		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
567 	/*
568 	 * It's not reliable to print a task's held locks if it's not sleeping
569 	 * and it's not the current task.
570 	 */
571 	if (p->state == TASK_RUNNING && p != current)
572 		return;
573 	for (i = 0; i < depth; i++) {
574 		printk(" #%d: ", i);
575 		print_lock(p->held_locks + i);
576 	}
577 }
578 
579 static void print_kernel_ident(void)
580 {
581 	printk("%s %.*s %s\n", init_utsname()->release,
582 		(int)strcspn(init_utsname()->version, " "),
583 		init_utsname()->version,
584 		print_tainted());
585 }
586 
587 static int very_verbose(struct lock_class *class)
588 {
589 #if VERY_VERBOSE
590 	return class_filter(class);
591 #endif
592 	return 0;
593 }
594 
595 /*
596  * Is this the address of a static object:
597  */
598 #ifdef __KERNEL__
599 static int static_obj(void *obj)
600 {
601 	unsigned long start = (unsigned long) &_stext,
602 		      end   = (unsigned long) &_end,
603 		      addr  = (unsigned long) obj;
604 
605 	/*
606 	 * static variable?
607 	 */
608 	if ((addr >= start) && (addr < end))
609 		return 1;
610 
611 	if (arch_is_kernel_data(addr))
612 		return 1;
613 
614 	/*
615 	 * in-kernel percpu var?
616 	 */
617 	if (is_kernel_percpu_address(addr))
618 		return 1;
619 
620 	/*
621 	 * module static or percpu var?
622 	 */
623 	return is_module_address(addr) || is_module_percpu_address(addr);
624 }
625 #endif
626 
627 /*
628  * To make lock name printouts unique, we calculate a unique
629  * class->name_version generation counter:
630  */
631 static int count_matching_names(struct lock_class *new_class)
632 {
633 	struct lock_class *class;
634 	int count = 0;
635 
636 	if (!new_class->name)
637 		return 0;
638 
639 	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
640 		if (new_class->key - new_class->subclass == class->key)
641 			return class->name_version;
642 		if (class->name && !strcmp(class->name, new_class->name))
643 			count = max(count, class->name_version);
644 	}
645 
646 	return count + 1;
647 }
648 
649 static inline struct lock_class *
650 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
651 {
652 	struct lockdep_subclass_key *key;
653 	struct hlist_head *hash_head;
654 	struct lock_class *class;
655 
656 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
657 		debug_locks_off();
658 		printk(KERN_ERR
659 			"BUG: looking up invalid subclass: %u\n", subclass);
660 		printk(KERN_ERR
661 			"turning off the locking correctness validator.\n");
662 		dump_stack();
663 		return NULL;
664 	}
665 
666 	/*
667 	 * If it is not initialised then it has never been locked,
668 	 * so it won't be present in the hash table.
669 	 */
670 	if (unlikely(!lock->key))
671 		return NULL;
672 
673 	/*
674 	 * NOTE: the class-key must be unique. For dynamic locks, a static
675 	 * lock_class_key variable is passed in through the mutex_init()
676 	 * (or spin_lock_init()) call - which acts as the key. For static
677 	 * locks we use the lock object itself as the key.
678 	 */
679 	BUILD_BUG_ON(sizeof(struct lock_class_key) >
680 			sizeof(struct lockdep_map));
681 
682 	key = lock->key->subkeys + subclass;
683 
684 	hash_head = classhashentry(key);
685 
686 	/*
687 	 * We do an RCU walk of the hash, see lockdep_free_key_range().
688 	 */
689 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
690 		return NULL;
691 
692 	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
693 		if (class->key == key) {
694 			/*
695 			 * Huh! same key, different name? Did someone trample
696 			 * on some memory? We're most confused.
697 			 */
698 			WARN_ON_ONCE(class->name != lock->name);
699 			return class;
700 		}
701 	}
702 
703 	return NULL;
704 }
705 
706 /*
707  * Static locks do not have their class-keys yet - for them the key is
708  * the lock object itself. If the lock is in the per cpu area, the
709  * canonical address of the lock (per cpu offset removed) is used.
710  */
711 static bool assign_lock_key(struct lockdep_map *lock)
712 {
713 	unsigned long can_addr, addr = (unsigned long)lock;
714 
715 	if (__is_kernel_percpu_address(addr, &can_addr))
716 		lock->key = (void *)can_addr;
717 	else if (__is_module_percpu_address(addr, &can_addr))
718 		lock->key = (void *)can_addr;
719 	else if (static_obj(lock))
720 		lock->key = (void *)lock;
721 	else {
722 		/* Debug-check: all keys must be persistent! */
723 		debug_locks_off();
724 		pr_err("INFO: trying to register non-static key.\n");
725 		pr_err("the code is fine but needs lockdep annotation.\n");
726 		pr_err("turning off the locking correctness validator.\n");
727 		dump_stack();
728 		return false;
729 	}
730 
731 	return true;
732 }
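
/*
 * Example of the key assignment above (illustrative, the names are
 * hypothetical): a lock defined at file scope with DEFINE_SPINLOCK(foo_lock)
 * is a static object, so its own address becomes the class key; a spinlock
 * embedded in a kmalloc()ed structure must be set up via spin_lock_init(),
 * which supplies a static lock_class_key instead, otherwise the
 * "non-static key" error path above triggers.
 */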
733 
734 /*
735  * Register a lock's class in the hash-table, if the class is not present
736  * yet. Otherwise we look it up. We cache the result in the lock object
737  * itself, so the actual hash lookup should happen only once per lock object.
738  */
739 static struct lock_class *
740 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
741 {
742 	struct lockdep_subclass_key *key;
743 	struct hlist_head *hash_head;
744 	struct lock_class *class;
745 
746 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
747 
748 	class = look_up_lock_class(lock, subclass);
749 	if (likely(class))
750 		goto out_set_class_cache;
751 
752 	if (!lock->key) {
753 		if (!assign_lock_key(lock))
754 			return NULL;
755 	} else if (!static_obj(lock->key)) {
756 		return NULL;
757 	}
758 
759 	key = lock->key->subkeys + subclass;
760 	hash_head = classhashentry(key);
761 
762 	if (!graph_lock()) {
763 		return NULL;
764 	}
765 	/*
766 	 * We have to do the hash-walk again, to avoid races
767 	 * with another CPU:
768 	 */
769 	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
770 		if (class->key == key)
771 			goto out_unlock_set;
772 	}
773 
774 	/*
775 	 * Allocate a new key from the static array, and add it to
776 	 * the hash:
777 	 */
778 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
779 		if (!debug_locks_off_graph_unlock()) {
780 			return NULL;
781 		}
782 
783 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
784 		dump_stack();
785 		return NULL;
786 	}
787 	class = lock_classes + nr_lock_classes++;
788 	debug_atomic_inc(nr_unused_locks);
789 	class->key = key;
790 	class->name = lock->name;
791 	class->subclass = subclass;
792 	INIT_LIST_HEAD(&class->lock_entry);
793 	INIT_LIST_HEAD(&class->locks_before);
794 	INIT_LIST_HEAD(&class->locks_after);
795 	class->name_version = count_matching_names(class);
796 	/*
797 	 * We use RCU's safe list-add method to make
798 	 * parallel walking of the hash-list safe:
799 	 */
800 	hlist_add_head_rcu(&class->hash_entry, hash_head);
801 	/*
802 	 * Add it to the global list of classes:
803 	 */
804 	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
805 
806 	if (verbose(class)) {
807 		graph_unlock();
808 
809 		printk("\nnew class %px: %s", class->key, class->name);
810 		if (class->name_version > 1)
811 			printk(KERN_CONT "#%d", class->name_version);
812 		printk(KERN_CONT "\n");
813 		dump_stack();
814 
815 		if (!graph_lock()) {
816 			return NULL;
817 		}
818 	}
819 out_unlock_set:
820 	graph_unlock();
821 
822 out_set_class_cache:
823 	if (!subclass || force)
824 		lock->class_cache[0] = class;
825 	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
826 		lock->class_cache[subclass] = class;
827 
828 	/*
829 	 * Hash collision, did we smoke some? We found a class with a matching
830 	 * hash but the subclass -- which is hashed in -- didn't match.
831 	 */
832 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
833 		return NULL;
834 
835 	return class;
836 }
837 
838 #ifdef CONFIG_PROVE_LOCKING
839 /*
840  * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
841  * NULL on failure.)
842  */
843 static struct lock_list *alloc_list_entry(void)
844 {
845 	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
846 		if (!debug_locks_off_graph_unlock())
847 			return NULL;
848 
849 		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
850 		dump_stack();
851 		return NULL;
852 	}
853 	return list_entries + nr_list_entries++;
854 }
855 
856 /*
857  * Add a new dependency to the head of the list:
858  */
859 static int add_lock_to_list(struct lock_class *this, struct list_head *head,
860 			    unsigned long ip, int distance,
861 			    struct stack_trace *trace)
862 {
863 	struct lock_list *entry;
864 	/*
865 	 * Lock not present yet - get a new dependency struct and
866 	 * add it to the list:
867 	 */
868 	entry = alloc_list_entry();
869 	if (!entry)
870 		return 0;
871 
872 	entry->class = this;
873 	entry->distance = distance;
874 	entry->trace = *trace;
875 	/*
876 	 * Both allocation and removal are done under the graph lock; but
877 	 * iteration is under RCU-sched; see look_up_lock_class() and
878 	 * lockdep_free_key_range().
879 	 */
880 	list_add_tail_rcu(&entry->entry, head);
881 
882 	return 1;
883 }
884 
885 /*
886  * For good efficiency of the modulo operation, we use a power of 2
887  */
888 #define MAX_CIRCULAR_QUEUE_SIZE		4096UL
889 #define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
890 
891 /*
892  * The circular_queue and its helpers are used to implement the
893  * breadth-first search (BFS) algorithm, by which we can build
894  * the shortest path from the next lock to be acquired to a
895  * previously held lock if there is a circular dependency between them.
896  */
897 struct circular_queue {
898 	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
899 	unsigned int  front, rear;
900 };
901 
902 static struct circular_queue lock_cq;
903 
904 unsigned int max_bfs_queue_depth;
905 
906 static unsigned int lockdep_dependency_gen_id;
907 
908 static inline void __cq_init(struct circular_queue *cq)
909 {
910 	cq->front = cq->rear = 0;
911 	lockdep_dependency_gen_id++;
912 }
913 
914 static inline int __cq_empty(struct circular_queue *cq)
915 {
916 	return (cq->front == cq->rear);
917 }
918 
919 static inline int __cq_full(struct circular_queue *cq)
920 {
921 	return ((cq->rear + 1) & CQ_MASK) == cq->front;
922 }
923 
924 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
925 {
926 	if (__cq_full(cq))
927 		return -1;
928 
929 	cq->element[cq->rear] = elem;
930 	cq->rear = (cq->rear + 1) & CQ_MASK;
931 	return 0;
932 }
933 
934 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
935 {
936 	if (__cq_empty(cq))
937 		return -1;
938 
939 	*elem = cq->element[cq->front];
940 	cq->front = (cq->front + 1) & CQ_MASK;
941 	return 0;
942 }
943 
944 static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
945 {
946 	return (cq->rear - cq->front) & CQ_MASK;
947 }
948 
949 static inline void mark_lock_accessed(struct lock_list *lock,
950 					struct lock_list *parent)
951 {
952 	unsigned long nr;
953 
954 	nr = lock - list_entries;
955 	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
956 	lock->parent = parent;
957 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
958 }
959 
960 static inline unsigned long lock_accessed(struct lock_list *lock)
961 {
962 	unsigned long nr;
963 
964 	nr = lock - list_entries;
965 	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
966 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
967 }
968 
969 static inline struct lock_list *get_lock_parent(struct lock_list *child)
970 {
971 	return child->parent;
972 }
973 
974 static inline int get_lock_depth(struct lock_list *child)
975 {
976 	int depth = 0;
977 	struct lock_list *parent;
978 
979 	while ((parent = get_lock_parent(child))) {
980 		child = parent;
981 		depth++;
982 	}
983 	return depth;
984 }
985 
986 static int __bfs(struct lock_list *source_entry,
987 		 void *data,
988 		 int (*match)(struct lock_list *entry, void *data),
989 		 struct lock_list **target_entry,
990 		 int forward)
991 {
992 	struct lock_list *entry;
993 	struct list_head *head;
994 	struct circular_queue *cq = &lock_cq;
995 	int ret = 1;
996 
997 	if (match(source_entry, data)) {
998 		*target_entry = source_entry;
999 		ret = 0;
1000 		goto exit;
1001 	}
1002 
1003 	if (forward)
1004 		head = &source_entry->class->locks_after;
1005 	else
1006 		head = &source_entry->class->locks_before;
1007 
1008 	if (list_empty(head))
1009 		goto exit;
1010 
1011 	__cq_init(cq);
1012 	__cq_enqueue(cq, (unsigned long)source_entry);
1013 
1014 	while (!__cq_empty(cq)) {
1015 		struct lock_list *lock;
1016 
1017 		__cq_dequeue(cq, (unsigned long *)&lock);
1018 
1019 		if (!lock->class) {
1020 			ret = -2;
1021 			goto exit;
1022 		}
1023 
1024 		if (forward)
1025 			head = &lock->class->locks_after;
1026 		else
1027 			head = &lock->class->locks_before;
1028 
1029 		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1030 
1031 		list_for_each_entry_rcu(entry, head, entry) {
1032 			if (!lock_accessed(entry)) {
1033 				unsigned int cq_depth;
1034 				mark_lock_accessed(entry, lock);
1035 				if (match(entry, data)) {
1036 					*target_entry = entry;
1037 					ret = 0;
1038 					goto exit;
1039 				}
1040 
1041 				if (__cq_enqueue(cq, (unsigned long)entry)) {
1042 					ret = -1;
1043 					goto exit;
1044 				}
1045 				cq_depth = __cq_get_elem_count(cq);
1046 				if (max_bfs_queue_depth < cq_depth)
1047 					max_bfs_queue_depth = cq_depth;
1048 			}
1049 		}
1050 	}
1051 exit:
1052 	return ret;
1053 }
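
/*
 * __bfs() return convention, relied upon by the wrappers and callers
 * below: 0 means a matching entry was found and *target_entry points to
 * it, 1 means the whole subgraph was searched without a match, and a
 * negative value means the search itself failed (corrupted graph entry
 * or circular-queue overflow).
 */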
1054 
1055 static inline int __bfs_forwards(struct lock_list *src_entry,
1056 			void *data,
1057 			int (*match)(struct lock_list *entry, void *data),
1058 			struct lock_list **target_entry)
1059 {
1060 	return __bfs(src_entry, data, match, target_entry, 1);
1061 
1062 }
1063 
1064 static inline int __bfs_backwards(struct lock_list *src_entry,
1065 			void *data,
1066 			int (*match)(struct lock_list *entry, void *data),
1067 			struct lock_list **target_entry)
1068 {
1069 	return __bfs(src_entry, data, match, target_entry, 0);
1070 
1071 }
1072 
1073 /*
1074  * Recursive, forwards-direction lock-dependency checking, used for
1075  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1076  * checking.
1077  */
1078 
1079 /*
1080  * Print a dependency chain entry (this is only done when a deadlock
1081  * has been detected):
1082  */
1083 static noinline int
1084 print_circular_bug_entry(struct lock_list *target, int depth)
1085 {
1086 	if (debug_locks_silent)
1087 		return 0;
1088 	printk("\n-> #%u", depth);
1089 	print_lock_name(target->class);
1090 	printk(KERN_CONT ":\n");
1091 	print_stack_trace(&target->trace, 6);
1092 
1093 	return 0;
1094 }
1095 
1096 static void
1097 print_circular_lock_scenario(struct held_lock *src,
1098 			     struct held_lock *tgt,
1099 			     struct lock_list *prt)
1100 {
1101 	struct lock_class *source = hlock_class(src);
1102 	struct lock_class *target = hlock_class(tgt);
1103 	struct lock_class *parent = prt->class;
1104 
1105 	/*
1106 	 * For a direct locking problem, where the unsafe_class lock is taken
1107 	 * directly under the safe_class lock, all we need to show
1108 	 * is the deadlock scenario, as it is obvious that the
1109 	 * unsafe lock is taken under the safe lock.
1110 	 *
1111 	 * But if there is a chain instead, where the safe lock takes
1112 	 * an intermediate lock (middle_class) where this lock is
1113 	 * not the same as the safe lock, then the lock chain is
1114 	 * used to describe the problem. Otherwise we would need
1115 	 * to show a different CPU case for each link in the chain
1116 	 * from the safe_class lock to the unsafe_class lock.
1117 	 */
1118 	if (parent != source) {
1119 		printk("Chain exists of:\n  ");
1120 		__print_lock_name(source);
1121 		printk(KERN_CONT " --> ");
1122 		__print_lock_name(parent);
1123 		printk(KERN_CONT " --> ");
1124 		__print_lock_name(target);
1125 		printk(KERN_CONT "\n\n");
1126 	}
1127 
1128 	printk(" Possible unsafe locking scenario:\n\n");
1129 	printk("       CPU0                    CPU1\n");
1130 	printk("       ----                    ----\n");
1131 	printk("  lock(");
1132 	__print_lock_name(target);
1133 	printk(KERN_CONT ");\n");
1134 	printk("                               lock(");
1135 	__print_lock_name(parent);
1136 	printk(KERN_CONT ");\n");
1137 	printk("                               lock(");
1138 	__print_lock_name(target);
1139 	printk(KERN_CONT ");\n");
1140 	printk("  lock(");
1141 	__print_lock_name(source);
1142 	printk(KERN_CONT ");\n");
1143 	printk("\n *** DEADLOCK ***\n\n");
1144 }
1145 
1146 /*
1147  * When a circular dependency is detected, print the
1148  * header first:
1149  */
1150 static noinline int
1151 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1152 			struct held_lock *check_src,
1153 			struct held_lock *check_tgt)
1154 {
1155 	struct task_struct *curr = current;
1156 
1157 	if (debug_locks_silent)
1158 		return 0;
1159 
1160 	pr_warn("\n");
1161 	pr_warn("======================================================\n");
1162 	pr_warn("WARNING: possible circular locking dependency detected\n");
1163 	print_kernel_ident();
1164 	pr_warn("------------------------------------------------------\n");
1165 	pr_warn("%s/%d is trying to acquire lock:\n",
1166 		curr->comm, task_pid_nr(curr));
1167 	print_lock(check_src);
1168 
1169 	pr_warn("\nbut task is already holding lock:\n");
1170 
1171 	print_lock(check_tgt);
1172 	pr_warn("\nwhich lock already depends on the new lock.\n\n");
1173 	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1174 
1175 	print_circular_bug_entry(entry, depth);
1176 
1177 	return 0;
1178 }
1179 
1180 static inline int class_equal(struct lock_list *entry, void *data)
1181 {
1182 	return entry->class == data;
1183 }
1184 
1185 static noinline int print_circular_bug(struct lock_list *this,
1186 				struct lock_list *target,
1187 				struct held_lock *check_src,
1188 				struct held_lock *check_tgt,
1189 				struct stack_trace *trace)
1190 {
1191 	struct task_struct *curr = current;
1192 	struct lock_list *parent;
1193 	struct lock_list *first_parent;
1194 	int depth;
1195 
1196 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1197 		return 0;
1198 
1199 	if (!save_trace(&this->trace))
1200 		return 0;
1201 
1202 	depth = get_lock_depth(target);
1203 
1204 	print_circular_bug_header(target, depth, check_src, check_tgt);
1205 
1206 	parent = get_lock_parent(target);
1207 	first_parent = parent;
1208 
1209 	while (parent) {
1210 		print_circular_bug_entry(parent, --depth);
1211 		parent = get_lock_parent(parent);
1212 	}
1213 
1214 	printk("\nother info that might help us debug this:\n\n");
1215 	print_circular_lock_scenario(check_src, check_tgt,
1216 				     first_parent);
1217 
1218 	lockdep_print_held_locks(curr);
1219 
1220 	printk("\nstack backtrace:\n");
1221 	dump_stack();
1222 
1223 	return 0;
1224 }
1225 
1226 static noinline int print_bfs_bug(int ret)
1227 {
1228 	if (!debug_locks_off_graph_unlock())
1229 		return 0;
1230 
1231 	/*
1232 	 * Breadth-first-search failed, graph got corrupted?
1233 	 */
1234 	WARN(1, "lockdep bfs error:%d\n", ret);
1235 
1236 	return 0;
1237 }
1238 
1239 static int noop_count(struct lock_list *entry, void *data)
1240 {
1241 	(*(unsigned long *)data)++;
1242 	return 0;
1243 }
1244 
1245 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1246 {
1247 	unsigned long  count = 0;
1248 	struct lock_list *uninitialized_var(target_entry);
1249 
1250 	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1251 
1252 	return count;
1253 }
1254 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1255 {
1256 	unsigned long ret, flags;
1257 	struct lock_list this;
1258 
1259 	this.parent = NULL;
1260 	this.class = class;
1261 
1262 	raw_local_irq_save(flags);
1263 	arch_spin_lock(&lockdep_lock);
1264 	ret = __lockdep_count_forward_deps(&this);
1265 	arch_spin_unlock(&lockdep_lock);
1266 	raw_local_irq_restore(flags);
1267 
1268 	return ret;
1269 }
1270 
1271 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1272 {
1273 	unsigned long  count = 0;
1274 	struct lock_list *uninitialized_var(target_entry);
1275 
1276 	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1277 
1278 	return count;
1279 }
1280 
1281 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1282 {
1283 	unsigned long ret, flags;
1284 	struct lock_list this;
1285 
1286 	this.parent = NULL;
1287 	this.class = class;
1288 
1289 	raw_local_irq_save(flags);
1290 	arch_spin_lock(&lockdep_lock);
1291 	ret = __lockdep_count_backward_deps(&this);
1292 	arch_spin_unlock(&lockdep_lock);
1293 	raw_local_irq_restore(flags);
1294 
1295 	return ret;
1296 }
1297 
1298 /*
1299  * Prove that the dependency graph starting at <root> cannot
1300  * lead to <target>. Print an error and return 0 if it does.
1301  */
1302 static noinline int
1303 check_noncircular(struct lock_list *root, struct lock_class *target,
1304 		struct lock_list **target_entry)
1305 {
1306 	int result;
1307 
1308 	debug_atomic_inc(nr_cyclic_checks);
1309 
1310 	result = __bfs_forwards(root, target, class_equal, target_entry);
1311 
1312 	return result;
1313 }
1314 
1315 static noinline int
1316 check_redundant(struct lock_list *root, struct lock_class *target,
1317 		struct lock_list **target_entry)
1318 {
1319 	int result;
1320 
1321 	debug_atomic_inc(nr_redundant_checks);
1322 
1323 	result = __bfs_forwards(root, target, class_equal, target_entry);
1324 
1325 	return result;
1326 }
1327 
1328 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1329 /*
1330  * Forwards and backwards subgraph searching, for the purposes of
1331  * proving that two subgraphs can be connected by a new dependency
1332  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1333  */
1334 
1335 static inline int usage_match(struct lock_list *entry, void *bit)
1336 {
1337 	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1338 }
1339 
1340 
1341 
1342 /*
1343  * Find a node in the forwards-direction dependency sub-graph starting
1344  * at @root->class that matches @bit.
1345  *
1346  * Return 0 if such a node exists in the subgraph, and put that node
1347  * into *@target_entry.
1348  *
1349  * Return 1 otherwise and keep *@target_entry unchanged.
1350  * Return <0 on error.
1351  */
1352 static int
1353 find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1354 			struct lock_list **target_entry)
1355 {
1356 	int result;
1357 
1358 	debug_atomic_inc(nr_find_usage_forwards_checks);
1359 
1360 	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1361 
1362 	return result;
1363 }
1364 
1365 /*
1366  * Find a node in the backwards-direction dependency sub-graph starting
1367  * at @root->class that matches @bit.
1368  *
1369  * Return 0 if such a node exists in the subgraph, and put that node
1370  * into *@target_entry.
1371  *
1372  * Return 1 otherwise and keep *@target_entry unchanged.
1373  * Return <0 on error.
1374  */
1375 static int
1376 find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1377 			struct lock_list **target_entry)
1378 {
1379 	int result;
1380 
1381 	debug_atomic_inc(nr_find_usage_backwards_checks);
1382 
1383 	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1384 
1385 	return result;
1386 }
1387 
1388 static void print_lock_class_header(struct lock_class *class, int depth)
1389 {
1390 	int bit;
1391 
1392 	printk("%*s->", depth, "");
1393 	print_lock_name(class);
1394 #ifdef CONFIG_DEBUG_LOCKDEP
1395 	printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
1396 #endif
1397 	printk(KERN_CONT " {\n");
1398 
1399 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1400 		if (class->usage_mask & (1 << bit)) {
1401 			int len = depth;
1402 
1403 			len += printk("%*s   %s", depth, "", usage_str[bit]);
1404 			len += printk(KERN_CONT " at:\n");
1405 			print_stack_trace(class->usage_traces + bit, len);
1406 		}
1407 	}
1408 	printk("%*s }\n", depth, "");
1409 
1410 	printk("%*s ... key      at: [<%px>] %pS\n",
1411 		depth, "", class->key, class->key);
1412 }
1413 
1414 /*
1415  * printk the shortest lock dependencies from @leaf to @root in reverse order:
1416  */
1417 static void __used
1418 print_shortest_lock_dependencies(struct lock_list *leaf,
1419 				struct lock_list *root)
1420 {
1421 	struct lock_list *entry = leaf;
1422 	int depth;
1423 
1424 	/* compute the depth from the tree generated by the BFS */
1425 	depth = get_lock_depth(leaf);
1426 
1427 	do {
1428 		print_lock_class_header(entry->class, depth);
1429 		printk("%*s ... acquired at:\n", depth, "");
1430 		print_stack_trace(&entry->trace, 2);
1431 		printk("\n");
1432 
1433 		if (depth == 0 && (entry != root)) {
1434 			printk("lockdep:%s bad path found in chain graph\n", __func__);
1435 			break;
1436 		}
1437 
1438 		entry = get_lock_parent(entry);
1439 		depth--;
1440 	} while (entry && (depth >= 0));
1441 
1442 	return;
1443 }
1444 
1445 static void
1446 print_irq_lock_scenario(struct lock_list *safe_entry,
1447 			struct lock_list *unsafe_entry,
1448 			struct lock_class *prev_class,
1449 			struct lock_class *next_class)
1450 {
1451 	struct lock_class *safe_class = safe_entry->class;
1452 	struct lock_class *unsafe_class = unsafe_entry->class;
1453 	struct lock_class *middle_class = prev_class;
1454 
1455 	if (middle_class == safe_class)
1456 		middle_class = next_class;
1457 
1458 	/*
1459 	 * For a direct locking problem, where the unsafe_class lock is taken
1460 	 * directly under the safe_class lock, all we need to show
1461 	 * is the deadlock scenario, as it is obvious that the
1462 	 * unsafe lock is taken under the safe lock.
1463 	 *
1464 	 * But if there is a chain instead, where the safe lock takes
1465 	 * an intermediate lock (middle_class) where this lock is
1466 	 * not the same as the safe lock, then the lock chain is
1467 	 * used to describe the problem. Otherwise we would need
1468 	 * to show a different CPU case for each link in the chain
1469 	 * from the safe_class lock to the unsafe_class lock.
1470 	 */
1471 	if (middle_class != unsafe_class) {
1472 		printk("Chain exists of:\n  ");
1473 		__print_lock_name(safe_class);
1474 		printk(KERN_CONT " --> ");
1475 		__print_lock_name(middle_class);
1476 		printk(KERN_CONT " --> ");
1477 		__print_lock_name(unsafe_class);
1478 		printk(KERN_CONT "\n\n");
1479 	}
1480 
1481 	printk(" Possible interrupt unsafe locking scenario:\n\n");
1482 	printk("       CPU0                    CPU1\n");
1483 	printk("       ----                    ----\n");
1484 	printk("  lock(");
1485 	__print_lock_name(unsafe_class);
1486 	printk(KERN_CONT ");\n");
1487 	printk("                               local_irq_disable();\n");
1488 	printk("                               lock(");
1489 	__print_lock_name(safe_class);
1490 	printk(KERN_CONT ");\n");
1491 	printk("                               lock(");
1492 	__print_lock_name(middle_class);
1493 	printk(KERN_CONT ");\n");
1494 	printk("  <Interrupt>\n");
1495 	printk("    lock(");
1496 	__print_lock_name(safe_class);
1497 	printk(KERN_CONT ");\n");
1498 	printk("\n *** DEADLOCK ***\n\n");
1499 }
1500 
1501 static int
1502 print_bad_irq_dependency(struct task_struct *curr,
1503 			 struct lock_list *prev_root,
1504 			 struct lock_list *next_root,
1505 			 struct lock_list *backwards_entry,
1506 			 struct lock_list *forwards_entry,
1507 			 struct held_lock *prev,
1508 			 struct held_lock *next,
1509 			 enum lock_usage_bit bit1,
1510 			 enum lock_usage_bit bit2,
1511 			 const char *irqclass)
1512 {
1513 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1514 		return 0;
1515 
1516 	pr_warn("\n");
1517 	pr_warn("=====================================================\n");
1518 	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1519 		irqclass, irqclass);
1520 	print_kernel_ident();
1521 	pr_warn("-----------------------------------------------------\n");
1522 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1523 		curr->comm, task_pid_nr(curr),
1524 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1525 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1526 		curr->hardirqs_enabled,
1527 		curr->softirqs_enabled);
1528 	print_lock(next);
1529 
1530 	pr_warn("\nand this task is already holding:\n");
1531 	print_lock(prev);
1532 	pr_warn("which would create a new lock dependency:\n");
1533 	print_lock_name(hlock_class(prev));
1534 	pr_cont(" ->");
1535 	print_lock_name(hlock_class(next));
1536 	pr_cont("\n");
1537 
1538 	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1539 		irqclass);
1540 	print_lock_name(backwards_entry->class);
1541 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
1542 
1543 	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1544 
1545 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
1546 	print_lock_name(forwards_entry->class);
1547 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
1548 	pr_warn("...");
1549 
1550 	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1551 
1552 	pr_warn("\nother info that might help us debug this:\n\n");
1553 	print_irq_lock_scenario(backwards_entry, forwards_entry,
1554 				hlock_class(prev), hlock_class(next));
1555 
1556 	lockdep_print_held_locks(curr);
1557 
1558 	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1559 	if (!save_trace(&prev_root->trace))
1560 		return 0;
1561 	print_shortest_lock_dependencies(backwards_entry, prev_root);
1562 
1563 	pr_warn("\nthe dependencies between the lock to be acquired");
1564 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
1565 	if (!save_trace(&next_root->trace))
1566 		return 0;
1567 	print_shortest_lock_dependencies(forwards_entry, next_root);
1568 
1569 	pr_warn("\nstack backtrace:\n");
1570 	dump_stack();
1571 
1572 	return 0;
1573 }
1574 
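/*
 * Check that the new prev -> next dependency does not connect a lock that
 * has been observed with <bit_backwards> usage (searched backwards from
 * prev) to a lock that has been observed with <bit_forwards> usage
 * (searched forwards from next); if it does, report a bad irq dependency.
 */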
1575 static int
1576 check_usage(struct task_struct *curr, struct held_lock *prev,
1577 	    struct held_lock *next, enum lock_usage_bit bit_backwards,
1578 	    enum lock_usage_bit bit_forwards, const char *irqclass)
1579 {
1580 	int ret;
1581 	struct lock_list this, that;
1582 	struct lock_list *uninitialized_var(target_entry);
1583 	struct lock_list *uninitialized_var(target_entry1);
1584 
1585 	this.parent = NULL;
1586 
1587 	this.class = hlock_class(prev);
1588 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1589 	if (ret < 0)
1590 		return print_bfs_bug(ret);
1591 	if (ret == 1)
1592 		return ret;
1593 
1594 	that.parent = NULL;
1595 	that.class = hlock_class(next);
1596 	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1597 	if (ret < 0)
1598 		return print_bfs_bug(ret);
1599 	if (ret == 1)
1600 		return ret;
1601 
1602 	return print_bad_irq_dependency(curr, &this, &that,
1603 			target_entry, target_entry1,
1604 			prev, next,
1605 			bit_backwards, bit_forwards, irqclass);
1606 }
1607 
1608 static const char *state_names[] = {
1609 #define LOCKDEP_STATE(__STATE) \
1610 	__stringify(__STATE),
1611 #include "lockdep_states.h"
1612 #undef LOCKDEP_STATE
1613 };
1614 
1615 static const char *state_rnames[] = {
1616 #define LOCKDEP_STATE(__STATE) \
1617 	__stringify(__STATE)"-READ",
1618 #include "lockdep_states.h"
1619 #undef LOCKDEP_STATE
1620 };
1621 
1622 static inline const char *state_name(enum lock_usage_bit bit)
1623 {
1624 	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1625 }
1626 
1627 static int exclusive_bit(int new_bit)
1628 {
1629 	/*
1630 	 * USED_IN
1631 	 * USED_IN_READ
1632 	 * ENABLED
1633 	 * ENABLED_READ
1634 	 *
1635 	 * bit 0 - write/read
1636 	 * bit 1 - used_in/enabled
1637 	 * bit 2+  state
1638 	 */
1639 
1640 	int state = new_bit & ~3;
1641 	int dir = new_bit & 2;
1642 
1643 	/*
1644 	 * Keep the state, flip the direction bit and strip the read bit.
1645 	 */
1646 	return state | (dir ^ 2);
1647 }
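
/*
 * Worked example, following the bit layout described above: for
 * new_bit == LOCK_USED_IN_HARDIRQ_READ, exclusive_bit() keeps the HARDIRQ
 * state bits, flips the used_in/enabled direction and drops the read bit,
 * yielding LOCK_ENABLED_HARDIRQ, the exact usage that must not appear on
 * the other end of the new dependency.
 */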
1648 
1649 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1650 			   struct held_lock *next, enum lock_usage_bit bit)
1651 {
1652 	/*
1653 	 * Prove that the new dependency does not connect a hardirq-safe
1654 	 * lock with a hardirq-unsafe lock - to achieve this we search
1655 	 * the backwards-subgraph starting at <prev>, and the
1656 	 * forwards-subgraph starting at <next>:
1657 	 */
1658 	if (!check_usage(curr, prev, next, bit,
1659 			   exclusive_bit(bit), state_name(bit)))
1660 		return 0;
1661 
1662 	bit++; /* _READ */
1663 
1664 	/*
1665 	 * Prove that the new dependency does not connect a hardirq-safe-read
1666 	 * lock with a hardirq-unsafe lock - to achieve this we search
1667 	 * the backwards-subgraph starting at <prev>, and the
1668 	 * forwards-subgraph starting at <next>:
1669 	 */
1670 	if (!check_usage(curr, prev, next, bit,
1671 			   exclusive_bit(bit), state_name(bit)))
1672 		return 0;
1673 
1674 	return 1;
1675 }
1676 
1677 static int
1678 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1679 		struct held_lock *next)
1680 {
1681 #define LOCKDEP_STATE(__STATE)						\
1682 	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
1683 		return 0;
1684 #include "lockdep_states.h"
1685 #undef LOCKDEP_STATE
1686 
1687 	return 1;
1688 }
1689 
1690 static void inc_chains(void)
1691 {
1692 	if (current->hardirq_context)
1693 		nr_hardirq_chains++;
1694 	else {
1695 		if (current->softirq_context)
1696 			nr_softirq_chains++;
1697 		else
1698 			nr_process_chains++;
1699 	}
1700 }
1701 
1702 #else
1703 
1704 static inline int
1705 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1706 		struct held_lock *next)
1707 {
1708 	return 1;
1709 }
1710 
1711 static inline void inc_chains(void)
1712 {
1713 	nr_process_chains++;
1714 }
1715 
1716 #endif
1717 
1718 static void
1719 print_deadlock_scenario(struct held_lock *nxt,
1720 			     struct held_lock *prv)
1721 {
1722 	struct lock_class *next = hlock_class(nxt);
1723 	struct lock_class *prev = hlock_class(prv);
1724 
1725 	printk(" Possible unsafe locking scenario:\n\n");
1726 	printk("       CPU0\n");
1727 	printk("       ----\n");
1728 	printk("  lock(");
1729 	__print_lock_name(prev);
1730 	printk(KERN_CONT ");\n");
1731 	printk("  lock(");
1732 	__print_lock_name(next);
1733 	printk(KERN_CONT ");\n");
1734 	printk("\n *** DEADLOCK ***\n\n");
1735 	printk(" May be due to missing lock nesting notation\n\n");
1736 }
1737 
1738 static int
1739 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1740 		   struct held_lock *next)
1741 {
1742 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1743 		return 0;
1744 
1745 	pr_warn("\n");
1746 	pr_warn("============================================\n");
1747 	pr_warn("WARNING: possible recursive locking detected\n");
1748 	print_kernel_ident();
1749 	pr_warn("--------------------------------------------\n");
1750 	pr_warn("%s/%d is trying to acquire lock:\n",
1751 		curr->comm, task_pid_nr(curr));
1752 	print_lock(next);
1753 	pr_warn("\nbut task is already holding lock:\n");
1754 	print_lock(prev);
1755 
1756 	pr_warn("\nother info that might help us debug this:\n");
1757 	print_deadlock_scenario(next, prev);
1758 	lockdep_print_held_locks(curr);
1759 
1760 	pr_warn("\nstack backtrace:\n");
1761 	dump_stack();
1762 
1763 	return 0;
1764 }
1765 
1766 /*
1767  * Check whether we are holding such a class already.
1768  *
1769  * (Note that this has to be done separately, because the graph cannot
1770  * detect such classes of deadlocks.)
1771  *
1772  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1773  */
1774 static int
1775 check_deadlock(struct task_struct *curr, struct held_lock *next,
1776 	       struct lockdep_map *next_instance, int read)
1777 {
1778 	struct held_lock *prev;
1779 	struct held_lock *nest = NULL;
1780 	int i;
1781 
1782 	for (i = 0; i < curr->lockdep_depth; i++) {
1783 		prev = curr->held_locks + i;
1784 
1785 		if (prev->instance == next->nest_lock)
1786 			nest = prev;
1787 
1788 		if (hlock_class(prev) != hlock_class(next))
1789 			continue;
1790 
1791 		/*
1792 		 * Allow read-after-read recursion of the same
1793 		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1794 		 */
1795 		if ((read == 2) && prev->read)
1796 			return 2;
1797 
1798 		/*
1799 		 * We're holding the nest_lock, which serializes this lock's
1800 		 * nesting behaviour.
1801 		 */
1802 		if (nest)
1803 			return 2;
1804 
1805 		return print_deadlock_bug(curr, prev, next);
1806 	}
1807 	return 1;
1808 }
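
/*
 * Illustration of the nest_lock exception above (the struct names are
 * hypothetical): code such as
 *
 *	mutex_lock(&parent->mutex);
 *	mutex_lock_nest_lock(&child_a->mutex, &parent->mutex);
 *	mutex_lock_nest_lock(&child_b->mutex, &parent->mutex);
 *
 * may hold several locks of the same class because the parent "nest" lock
 * serializes their acquisition; check_deadlock() then returns 2 instead of
 * reporting bogus recursion.
 */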
1809 
1810 /*
1811  * There was a chain-cache miss, and we are about to add a new dependency
1812  * to a previous lock. We recursively validate the following rules:
1813  *
1814  *  - would the adding of the <prev> -> <next> dependency create a
1815  *    circular dependency in the graph? [== circular deadlock]
1816  *
1817  *  - does the new prev->next dependency connect any hardirq-safe lock
1818  *    (in the full backwards-subgraph starting at <prev>) with any
1819  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1820  *    <next>)? [== illegal lock inversion with hardirq contexts]
1821  *
1822  *  - does the new prev->next dependency connect any softirq-safe lock
1823  *    (in the full backwards-subgraph starting at <prev>) with any
1824  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1825  *    <next>)? [== illegal lock inversion with softirq contexts]
1826  *
1827  * Any of these scenarios could lead to a deadlock.
1828  *
1829  * Then if all the validations pass, we add the forwards and backwards
1830  * dependency.
1831  */
1832 static int
1833 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1834 	       struct held_lock *next, int distance, struct stack_trace *trace,
1835 	       int (*save)(struct stack_trace *trace))
1836 {
1837 	struct lock_list *uninitialized_var(target_entry);
1838 	struct lock_list *entry;
1839 	struct lock_list this;
1840 	int ret;
1841 
1842 	/*
1843 	 * Prove that the new <prev> -> <next> dependency would not
1844 	 * create a circular dependency in the graph. (We do this by
1845 	 * forward-recursing into the graph starting at <next>, and
1846 	 * checking whether we can reach <prev>.)
1847 	 *
1848 	 * We are using global variables to control the recursion, to
1849 	 * keep the stackframe size of the recursive functions low:
1850 	 */
1851 	this.class = hlock_class(next);
1852 	this.parent = NULL;
1853 	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1854 	if (unlikely(!ret)) {
1855 		if (!trace->entries) {
1856 			/*
1857 			 * If @save fails here, the printing might trigger
1858 			 * a WARN, but because nr_entries is 0 it should
1859 			 * not do bad things.
1860 			 */
1861 			save(trace);
1862 		}
1863 		return print_circular_bug(&this, target_entry, next, prev, trace);
1864 	}
1865 	else if (unlikely(ret < 0))
1866 		return print_bfs_bug(ret);
1867 
1868 	if (!check_prev_add_irq(curr, prev, next))
1869 		return 0;
1870 
1871 	/*
1872 	 * For recursive read-locks we do all the dependency checks,
1873 	 * but we don't store read-triggered dependencies (only
1874 	 * write-triggered dependencies). This ensures that only the
1875 	 * write-side dependencies matter, and that if for example a
1876 	 * write-lock never takes any other locks, then the reads are
1877 	 * equivalent to a NOP.
1878 	 */
1879 	if (next->read == 2 || prev->read == 2)
1880 		return 1;
1881 	/*
1882 	 * Is the <prev> -> <next> dependency already present?
1883 	 *
1884 	 * (this may occur even though this is a new chain: consider
1885 	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1886 	 *  chains - the second one will be new, but L1 already has
1887 	 *  L2 added to its dependency list, due to the first chain.)
1888 	 */
1889 	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1890 		if (entry->class == hlock_class(next)) {
1891 			if (distance == 1)
1892 				entry->distance = 1;
1893 			return 1;
1894 		}
1895 	}
1896 
1897 	/*
1898 	 * Is the <prev> -> <next> link redundant?
1899 	 */
1900 	this.class = hlock_class(prev);
1901 	this.parent = NULL;
1902 	ret = check_redundant(&this, hlock_class(next), &target_entry);
1903 	if (!ret) {
1904 		debug_atomic_inc(nr_redundant);
1905 		return 2;
1906 	}
1907 	if (ret < 0)
1908 		return print_bfs_bug(ret);
1909 
1910 
1911 	if (!trace->entries && !save(trace))
1912 		return 0;
1913 
1914 	/*
1915 	 * Ok, all validations passed, add the new lock
1916 	 * to the previous lock's dependency list:
1917 	 */
1918 	ret = add_lock_to_list(hlock_class(next),
1919 			       &hlock_class(prev)->locks_after,
1920 			       next->acquire_ip, distance, trace);
1921 
1922 	if (!ret)
1923 		return 0;
1924 
1925 	ret = add_lock_to_list(hlock_class(prev),
1926 			       &hlock_class(next)->locks_before,
1927 			       next->acquire_ip, distance, trace);
1928 	if (!ret)
1929 		return 0;
1930 
1931 	return 2;
1932 }
1933 
1934 /*
1935  * Add the dependency to all directly-previous locks that are 'relevant'.
1936  * The ones that are relevant are (in increasing distance from curr):
1937  * all consecutive trylock entries and the final non-trylock entry - or
1938  * the end of this context's lock-chain - whichever comes first.
1939  */
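/*
 * For example (lock names purely illustrative): with a held-lock stack
 * of A, B(trylock), C(trylock) - oldest first - acquiring D walks
 * backwards from C and adds the direct dependencies C -> D, B -> D and
 * A -> D, stopping at A because it is the first non-trylock entry.
 */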
1940 static int
1941 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1942 {
1943 	int depth = curr->lockdep_depth;
1944 	struct held_lock *hlock;
1945 	struct stack_trace trace = {
1946 		.nr_entries = 0,
1947 		.max_entries = 0,
1948 		.entries = NULL,
1949 		.skip = 0,
1950 	};
1951 
1952 	/*
1953 	 * Debugging checks.
1954 	 *
1955 	 * Depth must not be zero for a non-head lock:
1956 	 */
1957 	if (!depth)
1958 		goto out_bug;
1959 	/*
1960 	 * At least two relevant locks must exist for this
1961 	 * to be a head:
1962 	 */
1963 	if (curr->held_locks[depth].irq_context !=
1964 			curr->held_locks[depth-1].irq_context)
1965 		goto out_bug;
1966 
1967 	for (;;) {
1968 		int distance = curr->lockdep_depth - depth + 1;
1969 		hlock = curr->held_locks + depth - 1;
1970 
1971 		/*
1972 		 * Only non-recursive-read entries get new dependencies
1973 		 * added:
1974 		 */
1975 		if (hlock->read != 2 && hlock->check) {
1976 			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
1977 			if (!ret)
1978 				return 0;
1979 
1980 			/*
1981 			 * Stop after the first non-trylock entry,
1982 			 * as non-trylock entries have added their
1983 			 * own direct dependencies already, so this
1984 			 * lock is connected to them indirectly:
1985 			 */
1986 			if (!hlock->trylock)
1987 				break;
1988 		}
1989 
1990 		depth--;
1991 		/*
1992 		 * End of lock-stack?
1993 		 */
1994 		if (!depth)
1995 			break;
1996 		/*
1997 		 * Stop the search if we cross into another context:
1998 		 */
1999 		if (curr->held_locks[depth].irq_context !=
2000 				curr->held_locks[depth-1].irq_context)
2001 			break;
2002 	}
2003 	return 1;
2004 out_bug:
2005 	if (!debug_locks_off_graph_unlock())
2006 		return 0;
2007 
2008 	/*
2009 	 * Clearly we all shouldn't be here, but since we made it we
2010 	 * can reliably say we messed up our state. See the above two
2011 	 * gotos for reasons why we could possibly end up here.
2012 	 */
2013 	WARN_ON(1);
2014 
2015 	return 0;
2016 }
2017 
2018 unsigned long nr_lock_chains;
2019 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2020 int nr_chain_hlocks;
2021 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2022 
2023 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2024 {
2025 	return lock_classes + chain_hlocks[chain->base + i];
2026 }
2027 
2028 /*
2029  * Returns the index of the first held_lock of the current chain
2030  */
2031 static inline int get_first_held_lock(struct task_struct *curr,
2032 					struct held_lock *hlock)
2033 {
2034 	int i;
2035 	struct held_lock *hlock_curr;
2036 
2037 	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2038 		hlock_curr = curr->held_locks + i;
2039 		if (hlock_curr->irq_context != hlock->irq_context)
2040 			break;
2041 
2042 	}
2043 
2044 	return ++i;
2045 }
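/*
 * E.g. (illustrative): with held locks A, B | C, D - where '|' marks
 * the switch into hardirq context - and @hlock belonging to the hardirq
 * context, the backwards scan stops at B and the index of C, the first
 * lock of the current (interrupt) chain, is returned.
 */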
2046 
2047 #ifdef CONFIG_DEBUG_LOCKDEP
2048 /*
2049  * Returns the next chain_key iteration
2050  */
2051 static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2052 {
2053 	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2054 
2055 	printk(" class_idx:%d -> chain_key:%016Lx",
2056 		class_idx,
2057 		(unsigned long long)new_chain_key);
2058 	return new_chain_key;
2059 }
2060 
2061 static void
2062 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2063 {
2064 	struct held_lock *hlock;
2065 	u64 chain_key = 0;
2066 	int depth = curr->lockdep_depth;
2067 	int i;
2068 
2069 	printk("depth: %u\n", depth + 1);
2070 	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2071 		hlock = curr->held_locks + i;
2072 		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2073 
2074 		print_lock(hlock);
2075 	}
2076 
2077 	print_chain_key_iteration(hlock_next->class_idx, chain_key);
2078 	print_lock(hlock_next);
2079 }
2080 
2081 static void print_chain_keys_chain(struct lock_chain *chain)
2082 {
2083 	int i;
2084 	u64 chain_key = 0;
2085 	int class_id;
2086 
2087 	printk("depth: %u\n", chain->depth);
2088 	for (i = 0; i < chain->depth; i++) {
2089 		class_id = chain_hlocks[chain->base + i];
2090 		chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2091 
2092 		print_lock_name(lock_classes + class_id);
2093 		printk("\n");
2094 	}
2095 }
2096 
2097 static void print_collision(struct task_struct *curr,
2098 			struct held_lock *hlock_next,
2099 			struct lock_chain *chain)
2100 {
2101 	pr_warn("\n");
2102 	pr_warn("============================\n");
2103 	pr_warn("WARNING: chain_key collision\n");
2104 	print_kernel_ident();
2105 	pr_warn("----------------------------\n");
2106 	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2107 	pr_warn("Hash chain already cached but the contents don't match!\n");
2108 
2109 	pr_warn("Held locks:");
2110 	print_chain_keys_held_locks(curr, hlock_next);
2111 
2112 	pr_warn("Locks in cached chain:");
2113 	print_chain_keys_chain(chain);
2114 
2115 	pr_warn("\nstack backtrace:\n");
2116 	dump_stack();
2117 }
2118 #endif
2119 
2120 /*
2121  * Checks whether the chain and the current held locks are consistent
2122  * in depth and also in content. If they are not, it most likely means
2123  * that there was a collision during the calculation of the chain_key.
2124  * Returns: 0 if the check failed, 1 if it passed.
2125  */
2126 static int check_no_collision(struct task_struct *curr,
2127 			struct held_lock *hlock,
2128 			struct lock_chain *chain)
2129 {
2130 #ifdef CONFIG_DEBUG_LOCKDEP
2131 	int i, j, id;
2132 
2133 	i = get_first_held_lock(curr, hlock);
2134 
2135 	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2136 		print_collision(curr, hlock, chain);
2137 		return 0;
2138 	}
2139 
2140 	for (j = 0; j < chain->depth - 1; j++, i++) {
2141 		id = curr->held_locks[i].class_idx - 1;
2142 
2143 		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2144 			print_collision(curr, hlock, chain);
2145 			return 0;
2146 		}
2147 	}
2148 #endif
2149 	return 1;
2150 }
2151 
2152 /*
2153  * Adds a dependency chain into the chain hashtable. Must be called
2154  * with graph_lock held.
2155  *
2156  * Returns 0 on failure, with graph_lock released.
2157  * Returns 1 on success, with graph_lock still held.
2158  */
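/*
 * Storage layout (illustrative): the chain's held locks are recorded as
 * class indices in the shared chain_hlocks[] array - chain->depth
 * consecutive slots starting at chain->base, the last slot holding the
 * class of the lock that completed the chain.  E.g. a new chain
 * A -> B -> C occupies three slots with the class indices of A, B, C.
 */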
2159 static inline int add_chain_cache(struct task_struct *curr,
2160 				  struct held_lock *hlock,
2161 				  u64 chain_key)
2162 {
2163 	struct lock_class *class = hlock_class(hlock);
2164 	struct hlist_head *hash_head = chainhashentry(chain_key);
2165 	struct lock_chain *chain;
2166 	int i, j;
2167 
2168 	/*
2169 	 * Allocate a new chain entry from the static array, and add
2170 	 * it to the hash:
2171 	 */
2172 
2173 	/*
2174 	 * The graph lock must be taken with IRQs disabled to keep it
2175 	 * IRQ-safe; for recursion reasons lockdep won't complain about
2176 	 * its own locking errors, so check this explicitly.
2177 	 */
2178 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2179 		return 0;
2180 
2181 	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2182 		if (!debug_locks_off_graph_unlock())
2183 			return 0;
2184 
2185 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2186 		dump_stack();
2187 		return 0;
2188 	}
2189 	chain = lock_chains + nr_lock_chains++;
2190 	chain->chain_key = chain_key;
2191 	chain->irq_context = hlock->irq_context;
2192 	i = get_first_held_lock(curr, hlock);
2193 	chain->depth = curr->lockdep_depth + 1 - i;
2194 
2195 	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2196 	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
2197 	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2198 
2199 	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2200 		chain->base = nr_chain_hlocks;
2201 		for (j = 0; j < chain->depth - 1; j++, i++) {
2202 			int lock_id = curr->held_locks[i].class_idx - 1;
2203 			chain_hlocks[chain->base + j] = lock_id;
2204 		}
2205 		chain_hlocks[chain->base + j] = class - lock_classes;
2206 	}
2207 
2208 	if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
2209 		nr_chain_hlocks += chain->depth;
2210 
2211 #ifdef CONFIG_DEBUG_LOCKDEP
2212 	/*
2213 	 * Important for check_no_collision().
2214 	 */
2215 	if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
2216 		if (!debug_locks_off_graph_unlock())
2217 			return 0;
2218 
2219 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2220 		dump_stack();
2221 		return 0;
2222 	}
2223 #endif
2224 
2225 	hlist_add_head_rcu(&chain->entry, hash_head);
2226 	debug_atomic_inc(chain_lookup_misses);
2227 	inc_chains();
2228 
2229 	return 1;
2230 }
2231 
2232 /*
2233  * Look up a dependency chain.
2234  */
2235 static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2236 {
2237 	struct hlist_head *hash_head = chainhashentry(chain_key);
2238 	struct lock_chain *chain;
2239 
2240 	/*
2241 	 * We can walk it lock-free, because entries only get added
2242 	 * to the hash:
2243 	 */
2244 	hlist_for_each_entry_rcu(chain, hash_head, entry) {
2245 		if (chain->chain_key == chain_key) {
2246 			debug_atomic_inc(chain_lookup_hits);
2247 			return chain;
2248 		}
2249 	}
2250 	return NULL;
2251 }
2252 
2253 /*
2254  * If the key is not yet present in the dependency chain cache, add
2255  * it and return 1 - in this case the new dependency chain will be
2256  * validated. If the key is already hashed, return 0.
2257  * (On a return of 1, graph_lock is held.)
2258  */
2259 static inline int lookup_chain_cache_add(struct task_struct *curr,
2260 					 struct held_lock *hlock,
2261 					 u64 chain_key)
2262 {
2263 	struct lock_class *class = hlock_class(hlock);
2264 	struct lock_chain *chain = lookup_chain_cache(chain_key);
2265 
2266 	if (chain) {
2267 cache_hit:
2268 		if (!check_no_collision(curr, hlock, chain))
2269 			return 0;
2270 
2271 		if (very_verbose(class)) {
2272 			printk("\nhash chain already cached, key: "
2273 					"%016Lx tail class: [%px] %s\n",
2274 					(unsigned long long)chain_key,
2275 					class->key, class->name);
2276 		}
2277 
2278 		return 0;
2279 	}
2280 
2281 	if (very_verbose(class)) {
2282 		printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
2283 			(unsigned long long)chain_key, class->key, class->name);
2284 	}
2285 
2286 	if (!graph_lock())
2287 		return 0;
2288 
2289 	/*
2290 	 * We have to walk the chain again locked - to avoid duplicates:
2291 	 */
2292 	chain = lookup_chain_cache(chain_key);
2293 	if (chain) {
2294 		graph_unlock();
2295 		goto cache_hit;
2296 	}
2297 
2298 	if (!add_chain_cache(curr, hlock, chain_key))
2299 		return 0;
2300 
2301 	return 1;
2302 }
2303 
2304 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2305 		struct held_lock *hlock, int chain_head, u64 chain_key)
2306 {
2307 	/*
2308 	 * Trylock needs to maintain the stack of held locks, but it
2309 	 * does not add new dependencies, because trylock can be done
2310 	 * in any order.
2311 	 *
2312 	 * We look up the chain_key and do the O(N^2) check and update of
2313 	 * the dependencies only if this is a new dependency chain.
2314 	 * (If lookup_chain_cache_add() returns 1, it acquires
2315 	 * graph_lock for us.)
2316 	 */
2317 	if (!hlock->trylock && hlock->check &&
2318 	    lookup_chain_cache_add(curr, hlock, chain_key)) {
2319 		/*
2320 		 * Check whether last held lock:
2321 		 *
2322 		 * - is irq-safe, if this lock is irq-unsafe
2323 		 * - is softirq-safe, if this lock is hardirq-unsafe
2324 		 *
2325 		 * And check whether the new lock's dependency graph
2326 		 * could lead back to the previous lock.
2327 		 *
2328 		 * any of these scenarios could lead to a deadlock. If
2329 		 * Any of these scenarios could lead to a deadlock, so
2330 		 * all of these validations must pass.
2331 		int ret = check_deadlock(curr, hlock, lock, hlock->read);
2332 
2333 		if (!ret)
2334 			return 0;
2335 		/*
2336 		 * Mark recursive read, as we jump over it when
2337 		 * building dependencies (just like we jump over
2338 		 * trylock entries):
2339 		 */
2340 		if (ret == 2)
2341 			hlock->read = 2;
2342 		/*
2343 		 * Add dependency only if this lock is not the head
2344 		 * of the chain, and if it's not a secondary read-lock:
2345 		 */
2346 		if (!chain_head && ret != 2) {
2347 			if (!check_prevs_add(curr, hlock))
2348 				return 0;
2349 		}
2350 
2351 		graph_unlock();
2352 	} else {
2353 		/* after lookup_chain_cache_add(): */
2354 		if (unlikely(!debug_locks))
2355 			return 0;
2356 	}
2357 
2358 	return 1;
2359 }
2360 #else
2361 static inline int validate_chain(struct task_struct *curr,
2362 	       	struct lockdep_map *lock, struct held_lock *hlock,
2363 		int chain_head, u64 chain_key)
2364 {
2365 	return 1;
2366 }
2367 #endif
2368 
2369 /*
2370  * We are building curr_chain_key incrementally, so double-check
2371  * it from scratch, to make sure that it's done correctly:
2372  */
2373 static void check_chain_key(struct task_struct *curr)
2374 {
2375 #ifdef CONFIG_DEBUG_LOCKDEP
2376 	struct held_lock *hlock, *prev_hlock = NULL;
2377 	unsigned int i;
2378 	u64 chain_key = 0;
2379 
2380 	for (i = 0; i < curr->lockdep_depth; i++) {
2381 		hlock = curr->held_locks + i;
2382 		if (chain_key != hlock->prev_chain_key) {
2383 			debug_locks_off();
2384 			/*
2385 			 * We got mighty confused, our chain keys don't match
2386 			 * with what we expect; did someone trample on our task state?
2387 			 */
2388 			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2389 				curr->lockdep_depth, i,
2390 				(unsigned long long)chain_key,
2391 				(unsigned long long)hlock->prev_chain_key);
2392 			return;
2393 		}
2394 		/*
2395 		 * Whoops ran out of static storage again?
2396 		 */
2397 		if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2398 			return;
2399 
2400 		if (prev_hlock && (prev_hlock->irq_context !=
2401 							hlock->irq_context))
2402 			chain_key = 0;
2403 		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2404 		prev_hlock = hlock;
2405 	}
2406 	if (chain_key != curr->curr_chain_key) {
2407 		debug_locks_off();
2408 		/*
2409 		 * More smoking hash instead of calculating it, damn see these
2410 		 * numbers float.. I bet that a pink elephant stepped on my memory.
2411 		 */
2412 		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2413 			curr->lockdep_depth, i,
2414 			(unsigned long long)chain_key,
2415 			(unsigned long long)curr->curr_chain_key);
2416 	}
2417 #endif
2418 }
2419 
2420 static void
2421 print_usage_bug_scenario(struct held_lock *lock)
2422 {
2423 	struct lock_class *class = hlock_class(lock);
2424 
2425 	printk(" Possible unsafe locking scenario:\n\n");
2426 	printk("       CPU0\n");
2427 	printk("       ----\n");
2428 	printk("  lock(");
2429 	__print_lock_name(class);
2430 	printk(KERN_CONT ");\n");
2431 	printk("  <Interrupt>\n");
2432 	printk("    lock(");
2433 	__print_lock_name(class);
2434 	printk(KERN_CONT ");\n");
2435 	printk("\n *** DEADLOCK ***\n\n");
2436 }
2437 
2438 static int
2439 print_usage_bug(struct task_struct *curr, struct held_lock *this,
2440 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2441 {
2442 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2443 		return 0;
2444 
2445 	pr_warn("\n");
2446 	pr_warn("================================\n");
2447 	pr_warn("WARNING: inconsistent lock state\n");
2448 	print_kernel_ident();
2449 	pr_warn("--------------------------------\n");
2450 
2451 	pr_warn("inconsistent {%s} -> {%s} usage.\n",
2452 		usage_str[prev_bit], usage_str[new_bit]);
2453 
2454 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2455 		curr->comm, task_pid_nr(curr),
2456 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2457 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2458 		trace_hardirqs_enabled(curr),
2459 		trace_softirqs_enabled(curr));
2460 	print_lock(this);
2461 
2462 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
2463 	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2464 
2465 	print_irqtrace_events(curr);
2466 	pr_warn("\nother info that might help us debug this:\n");
2467 	print_usage_bug_scenario(this);
2468 
2469 	lockdep_print_held_locks(curr);
2470 
2471 	pr_warn("\nstack backtrace:\n");
2472 	dump_stack();
2473 
2474 	return 0;
2475 }
2476 
2477 /*
2478  * Print out an error if an invalid bit is set:
2479  */
2480 static inline int
2481 valid_state(struct task_struct *curr, struct held_lock *this,
2482 	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2483 {
2484 	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2485 		return print_usage_bug(curr, this, bad_bit, new_bit);
2486 	return 1;
2487 }
2488 
2489 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2490 		     enum lock_usage_bit new_bit);
2491 
2492 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2493 
2494 /*
2495  * print irq inversion bug:
2496  */
2497 static int
2498 print_irq_inversion_bug(struct task_struct *curr,
2499 			struct lock_list *root, struct lock_list *other,
2500 			struct held_lock *this, int forwards,
2501 			const char *irqclass)
2502 {
2503 	struct lock_list *entry = other;
2504 	struct lock_list *middle = NULL;
2505 	int depth;
2506 
2507 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2508 		return 0;
2509 
2510 	pr_warn("\n");
2511 	pr_warn("========================================================\n");
2512 	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
2513 	print_kernel_ident();
2514 	pr_warn("--------------------------------------------------------\n");
2515 	pr_warn("%s/%d just changed the state of lock:\n",
2516 		curr->comm, task_pid_nr(curr));
2517 	print_lock(this);
2518 	if (forwards)
2519 		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2520 	else
2521 		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2522 	print_lock_name(other->class);
2523 	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2524 
2525 	pr_warn("\nother info that might help us debug this:\n");
2526 
2527 	/* Find a middle lock (if one exists) */
2528 	depth = get_lock_depth(other);
2529 	do {
2530 		if (depth == 0 && (entry != root)) {
2531 			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
2532 			break;
2533 		}
2534 		middle = entry;
2535 		entry = get_lock_parent(entry);
2536 		depth--;
2537 	} while (entry && entry != root && (depth >= 0));
2538 	if (forwards)
2539 		print_irq_lock_scenario(root, other,
2540 			middle ? middle->class : root->class, other->class);
2541 	else
2542 		print_irq_lock_scenario(other, root,
2543 			middle ? middle->class : other->class, root->class);
2544 
2545 	lockdep_print_held_locks(curr);
2546 
2547 	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2548 	if (!save_trace(&root->trace))
2549 		return 0;
2550 	print_shortest_lock_dependencies(other, root);
2551 
2552 	pr_warn("\nstack backtrace:\n");
2553 	dump_stack();
2554 
2555 	return 0;
2556 }
2557 
2558 /*
2559  * Prove that in the forwards-direction subgraph starting at <this>
2560  * there is no lock matching <mask>:
2561  */
2562 static int
2563 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2564 		     enum lock_usage_bit bit, const char *irqclass)
2565 {
2566 	int ret;
2567 	struct lock_list root;
2568 	struct lock_list *uninitialized_var(target_entry);
2569 
2570 	root.parent = NULL;
2571 	root.class = hlock_class(this);
2572 	ret = find_usage_forwards(&root, bit, &target_entry);
2573 	if (ret < 0)
2574 		return print_bfs_bug(ret);
2575 	if (ret == 1)
2576 		return ret;
2577 
2578 	return print_irq_inversion_bug(curr, &root, target_entry,
2579 					this, 1, irqclass);
2580 }
2581 
2582 /*
2583  * Prove that in the backwards-direction subgraph starting at <this>
2584  * there is no lock matching <mask>:
2585  */
2586 static int
2587 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2588 		      enum lock_usage_bit bit, const char *irqclass)
2589 {
2590 	int ret;
2591 	struct lock_list root;
2592 	struct lock_list *uninitialized_var(target_entry);
2593 
2594 	root.parent = NULL;
2595 	root.class = hlock_class(this);
2596 	ret = find_usage_backwards(&root, bit, &target_entry);
2597 	if (ret < 0)
2598 		return print_bfs_bug(ret);
2599 	if (ret == 1)
2600 		return ret;
2601 
2602 	return print_irq_inversion_bug(curr, &root, target_entry,
2603 					this, 0, irqclass);
2604 }
2605 
2606 void print_irqtrace_events(struct task_struct *curr)
2607 {
2608 	printk("irq event stamp: %u\n", curr->irq_events);
2609 	printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
2610 		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2611 		(void *)curr->hardirq_enable_ip);
2612 	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
2613 		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2614 		(void *)curr->hardirq_disable_ip);
2615 	printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
2616 		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2617 		(void *)curr->softirq_enable_ip);
2618 	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
2619 		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2620 		(void *)curr->softirq_disable_ip);
2621 }
2622 
2623 static int HARDIRQ_verbose(struct lock_class *class)
2624 {
2625 #if HARDIRQ_VERBOSE
2626 	return class_filter(class);
2627 #endif
2628 	return 0;
2629 }
2630 
2631 static int SOFTIRQ_verbose(struct lock_class *class)
2632 {
2633 #if SOFTIRQ_VERBOSE
2634 	return class_filter(class);
2635 #endif
2636 	return 0;
2637 }
2638 
2639 #define STRICT_READ_CHECKS	1
2640 
2641 static int (*state_verbose_f[])(struct lock_class *class) = {
2642 #define LOCKDEP_STATE(__STATE) \
2643 	__STATE##_verbose,
2644 #include "lockdep_states.h"
2645 #undef LOCKDEP_STATE
2646 };
2647 
2648 static inline int state_verbose(enum lock_usage_bit bit,
2649 				struct lock_class *class)
2650 {
2651 	return state_verbose_f[bit >> 2](class);
2652 }
2653 
2654 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2655 			     enum lock_usage_bit bit, const char *name);
2656 
2657 static int
2658 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2659 		enum lock_usage_bit new_bit)
2660 {
2661 	int excl_bit = exclusive_bit(new_bit);
2662 	int read = new_bit & 1;
2663 	int dir = new_bit & 2;
2664 
2665 	/*
2666 	 * mark USED_IN has to look forwards -- to ensure no dependency
2667 	 * has ENABLED state, which would allow recursion deadlocks.
2668 	 *
2669 	 * mark ENABLED has to look backwards -- to ensure no dependee
2670 	 * has USED_IN state, which, again, would allow recursion deadlocks.
2671 	 */
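	/*
	 * Concretely: marking a lock USED_IN_HARDIRQ must verify that no
	 * lock reachable forwards from it is HARDIRQ-enabled, and marking
	 * it ENABLED_HARDIRQ must verify that no lock reaching it
	 * backwards is USED_IN_HARDIRQ - either combination would let a
	 * hardirq handler invert the existing lock order.
	 */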
2672 	check_usage_f usage = dir ?
2673 		check_usage_backwards : check_usage_forwards;
2674 
2675 	/*
2676 	 * Validate that this particular lock does not have conflicting
2677 	 * usage states.
2678 	 */
2679 	if (!valid_state(curr, this, new_bit, excl_bit))
2680 		return 0;
2681 
2682 	/*
2683 	 * Validate that the lock dependencies don't have conflicting usage
2684 	 * states.
2685 	 */
2686 	if ((!read || !dir || STRICT_READ_CHECKS) &&
2687 			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2688 		return 0;
2689 
2690 	/*
2691 	 * Check for read in write conflicts
2692 	 */
2693 	if (!read) {
2694 		if (!valid_state(curr, this, new_bit, excl_bit + 1))
2695 			return 0;
2696 
2697 		if (STRICT_READ_CHECKS &&
2698 			!usage(curr, this, excl_bit + 1,
2699 				state_name(new_bit + 1)))
2700 			return 0;
2701 	}
2702 
2703 	if (state_verbose(new_bit, hlock_class(this)))
2704 		return 2;
2705 
2706 	return 1;
2707 }
2708 
2709 enum mark_type {
2710 #define LOCKDEP_STATE(__STATE)	__STATE,
2711 #include "lockdep_states.h"
2712 #undef LOCKDEP_STATE
2713 };
2714 
2715 /*
2716  * Mark all held locks with a usage bit:
2717  */
2718 static int
2719 mark_held_locks(struct task_struct *curr, enum mark_type mark)
2720 {
2721 	enum lock_usage_bit usage_bit;
2722 	struct held_lock *hlock;
2723 	int i;
2724 
2725 	for (i = 0; i < curr->lockdep_depth; i++) {
2726 		hlock = curr->held_locks + i;
2727 
2728 		usage_bit = 2 + (mark << 2); /* ENABLED */
2729 		if (hlock->read)
2730 			usage_bit += 1; /* READ */
2731 
2732 		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2733 
2734 		if (!hlock->check)
2735 			continue;
2736 
2737 		if (!mark_lock(curr, hlock, usage_bit))
2738 			return 0;
2739 	}
2740 
2741 	return 1;
2742 }
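/*
 * The usage_bit arithmetic above relies on the lock_usage_bit layout:
 * bit 0 selects the _READ variant, bit 1 selects ENABLED vs. USED_IN,
 * and the higher bits select the state.  E.g. mark == SOFTIRQ gives
 * 2 + (SOFTIRQ << 2) == LOCK_ENABLED_SOFTIRQ, or
 * LOCK_ENABLED_SOFTIRQ_READ for a read-held lock.
 */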
2743 
2744 /*
2745  * Hardirqs will be enabled:
2746  */
2747 static void __trace_hardirqs_on_caller(unsigned long ip)
2748 {
2749 	struct task_struct *curr = current;
2750 
2751 	/* we'll do an OFF -> ON transition: */
2752 	curr->hardirqs_enabled = 1;
2753 
2754 	/*
2755 	 * We are going to turn hardirqs on, so set the
2756 	 * usage bit for all held locks:
2757 	 */
2758 	if (!mark_held_locks(curr, HARDIRQ))
2759 		return;
2760 	/*
2761 	 * If we have softirqs enabled, then set the usage
2762 	 * bit for all held locks. (disabled hardirqs prevented
2763 	 * this bit from being set before)
2764 	 */
2765 	if (curr->softirqs_enabled)
2766 		if (!mark_held_locks(curr, SOFTIRQ))
2767 			return;
2768 
2769 	curr->hardirq_enable_ip = ip;
2770 	curr->hardirq_enable_event = ++curr->irq_events;
2771 	debug_atomic_inc(hardirqs_on_events);
2772 }
2773 
2774 void lockdep_hardirqs_on(unsigned long ip)
2775 {
2776 	if (unlikely(!debug_locks || current->lockdep_recursion))
2777 		return;
2778 
2779 	if (unlikely(current->hardirqs_enabled)) {
2780 		/*
2781 		 * Neither irq nor preemption are disabled here
2782 		 * so this is racy by nature but losing one hit
2783 		 * in a stat is not a big deal.
2784 		 */
2785 		__debug_atomic_inc(redundant_hardirqs_on);
2786 		return;
2787 	}
2788 
2789 	/*
2790 	 * We're enabling irqs and according to our state above irqs weren't
2791 	 * already enabled, yet we find the hardware thinks they are in fact
2792 	 * enabled.. someone messed up their IRQ state tracing.
2793 	 */
2794 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2795 		return;
2796 
2797 	/*
2798 	 * See the fine text that goes along with this variable definition.
2799 	 */
2800 	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2801 		return;
2802 
2803 	/*
2804 	 * Can't allow enabling interrupts while in an interrupt handler,
2805 	 * that's general bad form and such. Recursion, limited stack etc..
2806 	 */
2807 	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2808 		return;
2809 
2810 	current->lockdep_recursion = 1;
2811 	__trace_hardirqs_on_caller(ip);
2812 	current->lockdep_recursion = 0;
2813 }
2814 
2815 /*
2816  * Hardirqs were disabled:
2817  */
2818 void lockdep_hardirqs_off(unsigned long ip)
2819 {
2820 	struct task_struct *curr = current;
2821 
2822 	if (unlikely(!debug_locks || current->lockdep_recursion))
2823 		return;
2824 
2825 	/*
2826 	 * So we're supposed to get called after you mask local IRQs, but for
2827 	 * some reason the hardware doesn't quite think you did a proper job.
2828 	 */
2829 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2830 		return;
2831 
2832 	if (curr->hardirqs_enabled) {
2833 		/*
2834 		 * We have done an ON -> OFF transition:
2835 		 */
2836 		curr->hardirqs_enabled = 0;
2837 		curr->hardirq_disable_ip = ip;
2838 		curr->hardirq_disable_event = ++curr->irq_events;
2839 		debug_atomic_inc(hardirqs_off_events);
2840 	} else
2841 		debug_atomic_inc(redundant_hardirqs_off);
2842 }
2843 
2844 /*
2845  * Softirqs will be enabled:
2846  */
2847 void trace_softirqs_on(unsigned long ip)
2848 {
2849 	struct task_struct *curr = current;
2850 
2851 	if (unlikely(!debug_locks || current->lockdep_recursion))
2852 		return;
2853 
2854 	/*
2855 	 * We fancy IRQs being disabled here, see softirq.c, avoids
2856 	 * funny state and nesting things.
2857 	 */
2858 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2859 		return;
2860 
2861 	if (curr->softirqs_enabled) {
2862 		debug_atomic_inc(redundant_softirqs_on);
2863 		return;
2864 	}
2865 
2866 	current->lockdep_recursion = 1;
2867 	/*
2868 	 * We'll do an OFF -> ON transition:
2869 	 */
2870 	curr->softirqs_enabled = 1;
2871 	curr->softirq_enable_ip = ip;
2872 	curr->softirq_enable_event = ++curr->irq_events;
2873 	debug_atomic_inc(softirqs_on_events);
2874 	/*
2875 	 * We are going to turn softirqs on, so set the
2876 	 * usage bit for all held locks, if hardirqs are
2877 	 * enabled too:
2878 	 */
2879 	if (curr->hardirqs_enabled)
2880 		mark_held_locks(curr, SOFTIRQ);
2881 	current->lockdep_recursion = 0;
2882 }
2883 
2884 /*
2885  * Softirqs were disabled:
2886  */
2887 void trace_softirqs_off(unsigned long ip)
2888 {
2889 	struct task_struct *curr = current;
2890 
2891 	if (unlikely(!debug_locks || current->lockdep_recursion))
2892 		return;
2893 
2894 	/*
2895 	 * We fancy IRQs being disabled here, see softirq.c
2896 	 */
2897 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2898 		return;
2899 
2900 	if (curr->softirqs_enabled) {
2901 		/*
2902 		 * We have done an ON -> OFF transition:
2903 		 */
2904 		curr->softirqs_enabled = 0;
2905 		curr->softirq_disable_ip = ip;
2906 		curr->softirq_disable_event = ++curr->irq_events;
2907 		debug_atomic_inc(softirqs_off_events);
2908 		/*
2909 		 * Whoops, we wanted softirqs off, so why aren't they?
2910 		 */
2911 		DEBUG_LOCKS_WARN_ON(!softirq_count());
2912 	} else
2913 		debug_atomic_inc(redundant_softirqs_off);
2914 }
2915 
2916 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2917 {
2918 	/*
2919 	 * If non-trylock use in a hardirq or softirq context, then
2920 	 * mark the lock as used in these contexts:
2921 	 */
2922 	if (!hlock->trylock) {
2923 		if (hlock->read) {
2924 			if (curr->hardirq_context)
2925 				if (!mark_lock(curr, hlock,
2926 						LOCK_USED_IN_HARDIRQ_READ))
2927 					return 0;
2928 			if (curr->softirq_context)
2929 				if (!mark_lock(curr, hlock,
2930 						LOCK_USED_IN_SOFTIRQ_READ))
2931 					return 0;
2932 		} else {
2933 			if (curr->hardirq_context)
2934 				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2935 					return 0;
2936 			if (curr->softirq_context)
2937 				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2938 					return 0;
2939 		}
2940 	}
2941 	if (!hlock->hardirqs_off) {
2942 		if (hlock->read) {
2943 			if (!mark_lock(curr, hlock,
2944 					LOCK_ENABLED_HARDIRQ_READ))
2945 				return 0;
2946 			if (curr->softirqs_enabled)
2947 				if (!mark_lock(curr, hlock,
2948 						LOCK_ENABLED_SOFTIRQ_READ))
2949 					return 0;
2950 		} else {
2951 			if (!mark_lock(curr, hlock,
2952 					LOCK_ENABLED_HARDIRQ))
2953 				return 0;
2954 			if (curr->softirqs_enabled)
2955 				if (!mark_lock(curr, hlock,
2956 						LOCK_ENABLED_SOFTIRQ))
2957 					return 0;
2958 		}
2959 	}
2960 
2961 	return 1;
2962 }
2963 
2964 static inline unsigned int task_irq_context(struct task_struct *task)
2965 {
2966 	return 2 * !!task->hardirq_context + !!task->softirq_context;
2967 }
2968 
2969 static int separate_irq_context(struct task_struct *curr,
2970 		struct held_lock *hlock)
2971 {
2972 	unsigned int depth = curr->lockdep_depth;
2973 
2974 	/*
2975 	 * Keep track of points where we cross into an interrupt context:
2976 	 */
2977 	if (depth) {
2978 		struct held_lock *prev_hlock;
2979 
2980 		prev_hlock = curr->held_locks + depth-1;
2981 		/*
2982 		 * If we cross into another context, reset the
2983 		 * hash key (this also prevents the checking and the
2984 		 * adding of the dependency to 'prev'):
2985 		 */
2986 		if (prev_hlock->irq_context != hlock->irq_context)
2987 			return 1;
2988 	}
2989 	return 0;
2990 }
2991 
2992 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
2993 
2994 static inline
2995 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2996 		enum lock_usage_bit new_bit)
2997 {
2998 	WARN_ON(1); /* Impossible, innit? We don't have TRACE_IRQFLAGS. */
2999 	return 1;
3000 }
3001 
3002 static inline int mark_irqflags(struct task_struct *curr,
3003 		struct held_lock *hlock)
3004 {
3005 	return 1;
3006 }
3007 
3008 static inline unsigned int task_irq_context(struct task_struct *task)
3009 {
3010 	return 0;
3011 }
3012 
3013 static inline int separate_irq_context(struct task_struct *curr,
3014 		struct held_lock *hlock)
3015 {
3016 	return 0;
3017 }
3018 
3019 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3020 
3021 /*
3022  * Mark a lock with a usage bit, and validate the state transition:
3023  */
3024 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3025 			     enum lock_usage_bit new_bit)
3026 {
3027 	unsigned int new_mask = 1 << new_bit, ret = 1;
3028 
3029 	/*
3030 	 * If already set then do not dirty the cacheline,
3031 	 * nor do any checks:
3032 	 */
3033 	if (likely(hlock_class(this)->usage_mask & new_mask))
3034 		return 1;
3035 
3036 	if (!graph_lock())
3037 		return 0;
3038 	/*
3039 	 * Make sure we didn't race:
3040 	 */
3041 	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3042 		graph_unlock();
3043 		return 1;
3044 	}
3045 
3046 	hlock_class(this)->usage_mask |= new_mask;
3047 
3048 	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3049 		return 0;
3050 
3051 	switch (new_bit) {
3052 #define LOCKDEP_STATE(__STATE)			\
3053 	case LOCK_USED_IN_##__STATE:		\
3054 	case LOCK_USED_IN_##__STATE##_READ:	\
3055 	case LOCK_ENABLED_##__STATE:		\
3056 	case LOCK_ENABLED_##__STATE##_READ:
3057 #include "lockdep_states.h"
3058 #undef LOCKDEP_STATE
3059 		ret = mark_lock_irq(curr, this, new_bit);
3060 		if (!ret)
3061 			return 0;
3062 		break;
3063 	case LOCK_USED:
3064 		debug_atomic_dec(nr_unused_locks);
3065 		break;
3066 	default:
3067 		if (!debug_locks_off_graph_unlock())
3068 			return 0;
3069 		WARN_ON(1);
3070 		return 0;
3071 	}
3072 
3073 	graph_unlock();
3074 
3075 	/*
3076 	 * We must printk outside of the graph_lock:
3077 	 */
3078 	if (ret == 2) {
3079 		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3080 		print_lock(this);
3081 		print_irqtrace_events(curr);
3082 		dump_stack();
3083 	}
3084 
3085 	return ret;
3086 }
3087 
3088 /*
3089  * Initialize a lock instance's lock-class mapping info:
3090  */
3091 static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
3092 		      struct lock_class_key *key, int subclass)
3093 {
3094 	int i;
3095 
3096 	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3097 		lock->class_cache[i] = NULL;
3098 
3099 #ifdef CONFIG_LOCK_STAT
3100 	lock->cpu = raw_smp_processor_id();
3101 #endif
3102 
3103 	/*
3104 	 * Can't be having no nameless bastards around this place!
3105 	 */
3106 	if (DEBUG_LOCKS_WARN_ON(!name)) {
3107 		lock->name = "NULL";
3108 		return;
3109 	}
3110 
3111 	lock->name = name;
3112 
3113 	/*
3114 	 * No key, no joy, we need to hash something.
3115 	 */
3116 	if (DEBUG_LOCKS_WARN_ON(!key))
3117 		return;
3118 	/*
3119 	 * Sanity check, the lock-class key must be persistent:
3120 	 */
3121 	if (!static_obj(key)) {
3122 		printk("BUG: key %px not in .data!\n", key);
3123 		/*
3124 		 * What it says above ^^^^^, I suggest you read it.
3125 		 */
3126 		DEBUG_LOCKS_WARN_ON(1);
3127 		return;
3128 	}
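	/*
	 * E.g. a key declared as
	 *
	 *	static struct lock_class_key mylock_key;
	 *
	 * lives in static storage and passes this check, while a key
	 * embedded in kmalloc()ed memory does not.  (mylock_key is a
	 * made-up name, used only for illustration.)
	 */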
3129 	lock->key = key;
3130 
3131 	if (unlikely(!debug_locks))
3132 		return;
3133 
3134 	if (subclass) {
3135 		unsigned long flags;
3136 
3137 		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3138 			return;
3139 
3140 		raw_local_irq_save(flags);
3141 		current->lockdep_recursion = 1;
3142 		register_lock_class(lock, subclass, 1);
3143 		current->lockdep_recursion = 0;
3144 		raw_local_irq_restore(flags);
3145 	}
3146 }
3147 
3148 void lockdep_init_map(struct lockdep_map *lock, const char *name,
3149 		      struct lock_class_key *key, int subclass)
3150 {
3151 	__lockdep_init_map(lock, name, key, subclass);
3152 }
3153 EXPORT_SYMBOL_GPL(lockdep_init_map);
3154 
3155 struct lock_class_key __lockdep_no_validate__;
3156 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3157 
3158 static int
3159 print_lock_nested_lock_not_held(struct task_struct *curr,
3160 				struct held_lock *hlock,
3161 				unsigned long ip)
3162 {
3163 	if (!debug_locks_off())
3164 		return 0;
3165 	if (debug_locks_silent)
3166 		return 0;
3167 
3168 	pr_warn("\n");
3169 	pr_warn("==================================\n");
3170 	pr_warn("WARNING: Nested lock was not taken\n");
3171 	print_kernel_ident();
3172 	pr_warn("----------------------------------\n");
3173 
3174 	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3175 	print_lock(hlock);
3176 
3177 	pr_warn("\nbut this task is not holding:\n");
3178 	pr_warn("%s\n", hlock->nest_lock->name);
3179 
3183 	pr_warn("\nother info that might help us debug this:\n");
3184 	lockdep_print_held_locks(curr);
3185 
3186 	pr_warn("\nstack backtrace:\n");
3187 	dump_stack();
3188 
3189 	return 0;
3190 }
3191 
3192 static int __lock_is_held(const struct lockdep_map *lock, int read);
3193 
3194 /*
3195  * This gets called for every mutex_lock*()/spin_lock*() operation.
3196  * We maintain the dependency maps and validate the locking attempt:
3197  *
3198  * The callers must make sure that IRQs are disabled before calling it,
3199  * otherwise we could get an interrupt which would want to take locks,
3200  * which would end up in lockdep again.
3201  */
3202 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3203 			  int trylock, int read, int check, int hardirqs_off,
3204 			  struct lockdep_map *nest_lock, unsigned long ip,
3205 			  int references, int pin_count)
3206 {
3207 	struct task_struct *curr = current;
3208 	struct lock_class *class = NULL;
3209 	struct held_lock *hlock;
3210 	unsigned int depth;
3211 	int chain_head = 0;
3212 	int class_idx;
3213 	u64 chain_key;
3214 
3215 	if (unlikely(!debug_locks))
3216 		return 0;
3217 
3218 	if (!prove_locking || lock->key == &__lockdep_no_validate__)
3219 		check = 0;
3220 
3221 	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3222 		class = lock->class_cache[subclass];
3223 	/*
3224 	 * Not cached?
3225 	 */
3226 	if (unlikely(!class)) {
3227 		class = register_lock_class(lock, subclass, 0);
3228 		if (!class)
3229 			return 0;
3230 	}
3231 
3232 	debug_class_ops_inc(class);
3233 
3234 	if (very_verbose(class)) {
3235 		printk("\nacquire class [%px] %s", class->key, class->name);
3236 		if (class->name_version > 1)
3237 			printk(KERN_CONT "#%d", class->name_version);
3238 		printk(KERN_CONT "\n");
3239 		dump_stack();
3240 	}
3241 
3242 	/*
3243 	 * Add the lock to the list of currently held locks.
3244 	 * (we dont increase the depth just yet, up until the
3245 	 * dependency checks are done)
3246 	 */
3247 	depth = curr->lockdep_depth;
3248 	/*
3249 	 * Ran out of static storage for our per-task lock stack again have we?
3250 	 */
3251 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3252 		return 0;
3253 
3254 	class_idx = class - lock_classes + 1;
3255 
3256 	if (depth) {
3257 		hlock = curr->held_locks + depth - 1;
3258 		if (hlock->class_idx == class_idx && nest_lock) {
3259 			if (hlock->references) {
3260 				/*
3261 				 * Check: unsigned int references:12, overflow.
3262 				 */
3263 				if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
3264 					return 0;
3265 
3266 				hlock->references++;
3267 			} else {
3268 				hlock->references = 2;
3269 			}
3270 
3271 			return 1;
3272 		}
3273 	}
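	/*
	 * (When many instances of one class are taken under a single
	 * nest_lock - e.g. via mutex_lock_nest_lock() - they share the
	 * held_lock entry above and only ->references is bumped, so the
	 * MAX_LOCK_DEPTH stack is not consumed by each instance.)
	 */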
3274 
3275 	hlock = curr->held_locks + depth;
3276 	/*
3277 	 * Plain impossible, we just registered it and checked it weren't no
3278 	 * NULL like.. I bet this mushroom I ate was good!
3279 	 */
3280 	if (DEBUG_LOCKS_WARN_ON(!class))
3281 		return 0;
3282 	hlock->class_idx = class_idx;
3283 	hlock->acquire_ip = ip;
3284 	hlock->instance = lock;
3285 	hlock->nest_lock = nest_lock;
3286 	hlock->irq_context = task_irq_context(curr);
3287 	hlock->trylock = trylock;
3288 	hlock->read = read;
3289 	hlock->check = check;
3290 	hlock->hardirqs_off = !!hardirqs_off;
3291 	hlock->references = references;
3292 #ifdef CONFIG_LOCK_STAT
3293 	hlock->waittime_stamp = 0;
3294 	hlock->holdtime_stamp = lockstat_clock();
3295 #endif
3296 	hlock->pin_count = pin_count;
3297 
3298 	if (check && !mark_irqflags(curr, hlock))
3299 		return 0;
3300 
3301 	/* mark it as used: */
3302 	if (!mark_lock(curr, hlock, LOCK_USED))
3303 		return 0;
3304 
3305 	/*
3306 	 * Calculate the chain hash: it's the combined hash of all the
3307 	 * lock keys along the dependency chain. We save the hash value
3308 	 * at every step so that we can get the current hash easily
3309 	 * after unlock. The chain hash is then used to cache dependency
3310 	 * results.
3311 	 *
3312 	 * The 'key ID' (the class index) is the most compact key value
3313 	 * to drive the hash; class->key itself is not used.
3314 	 */
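	/*
	 * In other words, for held locks with class indices i1, i2, i3
	 * the chain key is effectively
	 *
	 *	iterate_chain_key(iterate_chain_key(
	 *			iterate_chain_key(0, i1), i2), i3)
	 *
	 * with each hlock->prev_chain_key remembering the intermediate
	 * value so it can be restored on release.
	 */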
3315 	/*
3316 	 * Whoops, we did it again.. ran straight out of our static allocation.
3317 	 */
3318 	if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3319 		return 0;
3320 
3321 	chain_key = curr->curr_chain_key;
3322 	if (!depth) {
3323 		/*
3324 		 * How can we have a chain hash when we ain't got no keys?!
3325 		 */
3326 		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3327 			return 0;
3328 		chain_head = 1;
3329 	}
3330 
3331 	hlock->prev_chain_key = chain_key;
3332 	if (separate_irq_context(curr, hlock)) {
3333 		chain_key = 0;
3334 		chain_head = 1;
3335 	}
3336 	chain_key = iterate_chain_key(chain_key, class_idx);
3337 
3338 	if (nest_lock && !__lock_is_held(nest_lock, -1))
3339 		return print_lock_nested_lock_not_held(curr, hlock, ip);
3340 
3341 	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3342 		return 0;
3343 
3344 	curr->curr_chain_key = chain_key;
3345 	curr->lockdep_depth++;
3346 	check_chain_key(curr);
3347 #ifdef CONFIG_DEBUG_LOCKDEP
3348 	if (unlikely(!debug_locks))
3349 		return 0;
3350 #endif
3351 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3352 		debug_locks_off();
3353 		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3354 		printk(KERN_DEBUG "depth: %i  max: %lu!\n",
3355 		       curr->lockdep_depth, MAX_LOCK_DEPTH);
3356 
3357 		lockdep_print_held_locks(current);
3358 		debug_show_all_locks();
3359 		dump_stack();
3360 
3361 		return 0;
3362 	}
3363 
3364 	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3365 		max_lockdep_depth = curr->lockdep_depth;
3366 
3367 	return 1;
3368 }
3369 
3370 static int
3371 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3372 			   unsigned long ip)
3373 {
3374 	if (!debug_locks_off())
3375 		return 0;
3376 	if (debug_locks_silent)
3377 		return 0;
3378 
3379 	pr_warn("\n");
3380 	pr_warn("=====================================\n");
3381 	pr_warn("WARNING: bad unlock balance detected!\n");
3382 	print_kernel_ident();
3383 	pr_warn("-------------------------------------\n");
3384 	pr_warn("%s/%d is trying to release lock (",
3385 		curr->comm, task_pid_nr(curr));
3386 	print_lockdep_cache(lock);
3387 	pr_cont(") at:\n");
3388 	print_ip_sym(ip);
3389 	pr_warn("but there are no more locks to release!\n");
3390 	pr_warn("\nother info that might help us debug this:\n");
3391 	lockdep_print_held_locks(curr);
3392 
3393 	pr_warn("\nstack backtrace:\n");
3394 	dump_stack();
3395 
3396 	return 0;
3397 }
3398 
3399 static int match_held_lock(const struct held_lock *hlock,
3400 					const struct lockdep_map *lock)
3401 {
3402 	if (hlock->instance == lock)
3403 		return 1;
3404 
3405 	if (hlock->references) {
3406 		const struct lock_class *class = lock->class_cache[0];
3407 
3408 		if (!class)
3409 			class = look_up_lock_class(lock, 0);
3410 
3411 		/*
3412 		 * If look_up_lock_class() failed to find a class, we're trying
3413 		 * to test if we hold a lock that has never yet been acquired.
3414 		 * Clearly if the lock hasn't been acquired _ever_, we're not
3415 		 * holding it either, so report failure.
3416 		 */
3417 		if (!class)
3418 			return 0;
3419 
3420 		/*
3421 		 * References, but not a lock we're actually ref-counting?
3422 		 * State got messed up, follow the sites that change ->references
3423 		 * and try to make sense of it.
3424 		 */
3425 		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3426 			return 0;
3427 
3428 		if (hlock->class_idx == class - lock_classes + 1)
3429 			return 1;
3430 	}
3431 
3432 	return 0;
3433 }
3434 
3435 /* @depth must not be zero */
3436 static struct held_lock *find_held_lock(struct task_struct *curr,
3437 					struct lockdep_map *lock,
3438 					unsigned int depth, int *idx)
3439 {
3440 	struct held_lock *ret, *hlock, *prev_hlock;
3441 	int i;
3442 
3443 	i = depth - 1;
3444 	hlock = curr->held_locks + i;
3445 	ret = hlock;
3446 	if (match_held_lock(hlock, lock))
3447 		goto out;
3448 
3449 	ret = NULL;
3450 	for (i--, prev_hlock = hlock--;
3451 	     i >= 0;
3452 	     i--, prev_hlock = hlock--) {
3453 		/*
3454 		 * We must not cross into another context:
3455 		 */
3456 		if (prev_hlock->irq_context != hlock->irq_context) {
3457 			ret = NULL;
3458 			break;
3459 		}
3460 		if (match_held_lock(hlock, lock)) {
3461 			ret = hlock;
3462 			break;
3463 		}
3464 	}
3465 
3466 out:
3467 	*idx = i;
3468 	return ret;
3469 }
3470 
3471 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3472 			      int idx)
3473 {
3474 	struct held_lock *hlock;
3475 
3476 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3477 		return 0;
3478 
3479 	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3480 		if (!__lock_acquire(hlock->instance,
3481 				    hlock_class(hlock)->subclass,
3482 				    hlock->trylock,
3483 				    hlock->read, hlock->check,
3484 				    hlock->hardirqs_off,
3485 				    hlock->nest_lock, hlock->acquire_ip,
3486 				    hlock->references, hlock->pin_count))
3487 			return 1;
3488 	}
3489 	return 0;
3490 }
3491 
3492 static int
3493 __lock_set_class(struct lockdep_map *lock, const char *name,
3494 		 struct lock_class_key *key, unsigned int subclass,
3495 		 unsigned long ip)
3496 {
3497 	struct task_struct *curr = current;
3498 	struct held_lock *hlock;
3499 	struct lock_class *class;
3500 	unsigned int depth;
3501 	int i;
3502 
3503 	depth = curr->lockdep_depth;
3504 	/*
3505 	 * This function is about (re)setting the class of a held lock,
3506 	 * yet we're not actually holding any locks. Naughty user!
3507 	 */
3508 	if (DEBUG_LOCKS_WARN_ON(!depth))
3509 		return 0;
3510 
3511 	hlock = find_held_lock(curr, lock, depth, &i);
3512 	if (!hlock)
3513 		return print_unlock_imbalance_bug(curr, lock, ip);
3514 
3515 	lockdep_init_map(lock, name, key, 0);
3516 	class = register_lock_class(lock, subclass, 0);
3517 	hlock->class_idx = class - lock_classes + 1;
3518 
3519 	curr->lockdep_depth = i;
3520 	curr->curr_chain_key = hlock->prev_chain_key;
3521 
3522 	if (reacquire_held_locks(curr, depth, i))
3523 		return 0;
3524 
3525 	/*
3526 	 * I took it apart and put it back together again, except now I have
3527 	 * these 'spare' parts.. where shall I put them.
3528 	 */
3529 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3530 		return 0;
3531 	return 1;
3532 }
3533 
3534 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3535 {
3536 	struct task_struct *curr = current;
3537 	struct held_lock *hlock;
3538 	unsigned int depth;
3539 	int i;
3540 
3541 	depth = curr->lockdep_depth;
3542 	/*
3543 	 * This function is about (re)setting the class of a held lock,
3544 	 * yet we're not actually holding any locks. Naughty user!
3545 	 */
3546 	if (DEBUG_LOCKS_WARN_ON(!depth))
3547 		return 0;
3548 
3549 	hlock = find_held_lock(curr, lock, depth, &i);
3550 	if (!hlock)
3551 		return print_unlock_imbalance_bug(curr, lock, ip);
3552 
3553 	curr->lockdep_depth = i;
3554 	curr->curr_chain_key = hlock->prev_chain_key;
3555 
3556 	WARN(hlock->read, "downgrading a read lock");
3557 	hlock->read = 1;
3558 	hlock->acquire_ip = ip;
3559 
3560 	if (reacquire_held_locks(curr, depth, i))
3561 		return 0;
3562 
3563 	/*
3564 	 * I took it apart and put it back together again, except now I have
3565 	 * these 'spare' parts.. where shall I put them.
3566 	 */
3567 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3568 		return 0;
3569 	return 1;
3570 }
3571 
3572 /*
3573  * Remove the lock to the list of currently held locks - this gets
3574  * called on mutex_unlock()/spin_unlock*() (or on a failed
3575  * mutex_lock_interruptible()).
3576  *
3577  * @nested is an hysterical artifact, needs a tree wide cleanup.
3578  */
3579 static int
3580 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3581 {
3582 	struct task_struct *curr = current;
3583 	struct held_lock *hlock;
3584 	unsigned int depth;
3585 	int i;
3586 
3587 	if (unlikely(!debug_locks))
3588 		return 0;
3589 
3590 	depth = curr->lockdep_depth;
3591 	/*
3592 	 * So we're all set to release this lock.. wait what lock? We don't
3593 	 * own any locks, you've been drinking again?
3594 	 */
3595 	if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3596 		 return print_unlock_imbalance_bug(curr, lock, ip);
3597 
3598 	/*
3599 	 * Check whether the lock exists in the current stack
3600 	 * of held locks:
3601 	 */
3602 	hlock = find_held_lock(curr, lock, depth, &i);
3603 	if (!hlock)
3604 		return print_unlock_imbalance_bug(curr, lock, ip);
3605 
3606 	if (hlock->instance == lock)
3607 		lock_release_holdtime(hlock);
3608 
3609 	WARN(hlock->pin_count, "releasing a pinned lock\n");
3610 
3611 	if (hlock->references) {
3612 		hlock->references--;
3613 		if (hlock->references) {
3614 			/*
3615 			 * We had, and after removing one, still have
3616 			 * references, the current lock stack is still
3617 			 * valid. We're done!
3618 			 */
3619 			return 1;
3620 		}
3621 	}
3622 
3623 	/*
3624 	 * We have the right lock to unlock, 'hlock' points to it.
3625 	 * Now we remove it from the stack, and add back the other
3626 	 * entries (if any), recalculating the hash along the way:
3627 	 */
3628 
3629 	curr->lockdep_depth = i;
3630 	curr->curr_chain_key = hlock->prev_chain_key;
3631 
3632 	/*
3633 	 * The most likely case is when the unlock is on the innermost
3634 	 * lock. In this case, we are done!
3635 	 */
3636 	if (i == depth-1)
3637 		return 1;
3638 
3639 	if (reacquire_held_locks(curr, depth, i + 1))
3640 		return 0;
3641 
3642 	/*
3643 	 * We had N bottles of beer on the wall, we drank one, but now
3644 	 * there's not N-1 bottles of beer left on the wall...
3645 	 */
3646 	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
3647 
3648 	/*
3649 	 * Since reacquire_held_locks() would have called check_chain_key()
3650 	 * indirectly via __lock_acquire(), we don't need to do it again
3651 	 * on return.
3652 	 */
3653 	return 0;
3654 }
3655 
3656 static int __lock_is_held(const struct lockdep_map *lock, int read)
3657 {
3658 	struct task_struct *curr = current;
3659 	int i;
3660 
3661 	for (i = 0; i < curr->lockdep_depth; i++) {
3662 		struct held_lock *hlock = curr->held_locks + i;
3663 
3664 		if (match_held_lock(hlock, lock)) {
3665 			if (read == -1 || hlock->read == read)
3666 				return 1;
3667 
3668 			return 0;
3669 		}
3670 	}
3671 
3672 	return 0;
3673 }
3674 
3675 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
3676 {
3677 	struct pin_cookie cookie = NIL_COOKIE;
3678 	struct task_struct *curr = current;
3679 	int i;
3680 
3681 	if (unlikely(!debug_locks))
3682 		return cookie;
3683 
3684 	for (i = 0; i < curr->lockdep_depth; i++) {
3685 		struct held_lock *hlock = curr->held_locks + i;
3686 
3687 		if (match_held_lock(hlock, lock)) {
3688 			/*
3689 			 * Grab 16bits of randomness; this is sufficient to not
3690 			 * be guessable and still allows some pin nesting in
3691 			 * our u32 pin_count.
3692 			 */
3693 			cookie.val = 1 + (prandom_u32() >> 16);
3694 			hlock->pin_count += cookie.val;
3695 			return cookie;
3696 		}
3697 	}
3698 
3699 	WARN(1, "pinning an unheld lock\n");
3700 	return cookie;
3701 }
3702 
3703 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3704 {
3705 	struct task_struct *curr = current;
3706 	int i;
3707 
3708 	if (unlikely(!debug_locks))
3709 		return;
3710 
3711 	for (i = 0; i < curr->lockdep_depth; i++) {
3712 		struct held_lock *hlock = curr->held_locks + i;
3713 
3714 		if (match_held_lock(hlock, lock)) {
3715 			hlock->pin_count += cookie.val;
3716 			return;
3717 		}
3718 	}
3719 
3720 	WARN(1, "pinning an unheld lock\n");
3721 }
3722 
3723 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3724 {
3725 	struct task_struct *curr = current;
3726 	int i;
3727 
3728 	if (unlikely(!debug_locks))
3729 		return;
3730 
3731 	for (i = 0; i < curr->lockdep_depth; i++) {
3732 		struct held_lock *hlock = curr->held_locks + i;
3733 
3734 		if (match_held_lock(hlock, lock)) {
3735 			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
3736 				return;
3737 
3738 			hlock->pin_count -= cookie.val;
3739 
3740 			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
3741 				hlock->pin_count = 0;
3742 
3743 			return;
3744 		}
3745 	}
3746 
3747 	WARN(1, "unpinning an unheld lock\n");
3748 }
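/*
 * Typical (purely illustrative) use of the pin cookies above: code that
 * hands a held lock over to other code pins it and later unpins it with
 * the same cookie,
 *
 *	cookie = lock_pin_lock(&lock->dep_map);
 *	...	(the lock must remain held throughout)
 *	lock_unpin_lock(&lock->dep_map, cookie);
 *
 * so that an unexpected release in between trips the "releasing a
 * pinned lock" warning in __lock_release().
 */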
3749 
3750 /*
3751  * Check whether we follow the irq-flags state precisely:
3752  */
3753 static void check_flags(unsigned long flags)
3754 {
3755 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3756     defined(CONFIG_TRACE_IRQFLAGS)
3757 	if (!debug_locks)
3758 		return;
3759 
3760 	if (irqs_disabled_flags(flags)) {
3761 		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3762 			printk("possible reason: unannotated irqs-off.\n");
3763 		}
3764 	} else {
3765 		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3766 			printk("possible reason: unannotated irqs-on.\n");
3767 		}
3768 	}
3769 
3770 	/*
3771 	 * We dont accurately track softirq state in e.g.
3772 	 * hardirq contexts (such as on 4KSTACKS), so only
3773 	 * check if not in hardirq contexts:
3774 	 */
3775 	if (!hardirq_count()) {
3776 		if (softirq_count()) {
3777 			/* like the above, but with softirqs */
3778 			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3779 		} else {
3780 			/* lick the above, does it taste good? */
3781 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3782 		}
3783 	}
3784 
3785 	if (!debug_locks)
3786 		print_irqtrace_events(current);
3787 #endif
3788 }
3789 
3790 void lock_set_class(struct lockdep_map *lock, const char *name,
3791 		    struct lock_class_key *key, unsigned int subclass,
3792 		    unsigned long ip)
3793 {
3794 	unsigned long flags;
3795 
3796 	if (unlikely(current->lockdep_recursion))
3797 		return;
3798 
3799 	raw_local_irq_save(flags);
3800 	current->lockdep_recursion = 1;
3801 	check_flags(flags);
3802 	if (__lock_set_class(lock, name, key, subclass, ip))
3803 		check_chain_key(current);
3804 	current->lockdep_recursion = 0;
3805 	raw_local_irq_restore(flags);
3806 }
3807 EXPORT_SYMBOL_GPL(lock_set_class);
3808 
3809 void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3810 {
3811 	unsigned long flags;
3812 
3813 	if (unlikely(current->lockdep_recursion))
3814 		return;
3815 
3816 	raw_local_irq_save(flags);
3817 	current->lockdep_recursion = 1;
3818 	check_flags(flags);
3819 	if (__lock_downgrade(lock, ip))
3820 		check_chain_key(current);
3821 	current->lockdep_recursion = 0;
3822 	raw_local_irq_restore(flags);
3823 }
3824 EXPORT_SYMBOL_GPL(lock_downgrade);
3825 
3826 /*
3827  * We are not always called with irqs disabled - do that here,
3828  * and also avoid lockdep recursion:
3829  */
3830 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3831 			  int trylock, int read, int check,
3832 			  struct lockdep_map *nest_lock, unsigned long ip)
3833 {
3834 	unsigned long flags;
3835 
3836 	if (unlikely(current->lockdep_recursion))
3837 		return;
3838 
3839 	raw_local_irq_save(flags);
3840 	check_flags(flags);
3841 
3842 	current->lockdep_recursion = 1;
3843 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3844 	__lock_acquire(lock, subclass, trylock, read, check,
3845 		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
3846 	current->lockdep_recursion = 0;
3847 	raw_local_irq_restore(flags);
3848 }
3849 EXPORT_SYMBOL_GPL(lock_acquire);
3850 
3851 void lock_release(struct lockdep_map *lock, int nested,
3852 			  unsigned long ip)
3853 {
3854 	unsigned long flags;
3855 
3856 	if (unlikely(current->lockdep_recursion))
3857 		return;
3858 
3859 	raw_local_irq_save(flags);
3860 	check_flags(flags);
3861 	current->lockdep_recursion = 1;
3862 	trace_lock_release(lock, ip);
3863 	if (__lock_release(lock, nested, ip))
3864 		check_chain_key(current);
3865 	current->lockdep_recursion = 0;
3866 	raw_local_irq_restore(flags);
3867 }
3868 EXPORT_SYMBOL_GPL(lock_release);
3869 
3870 int lock_is_held_type(const struct lockdep_map *lock, int read)
3871 {
3872 	unsigned long flags;
3873 	int ret = 0;
3874 
3875 	if (unlikely(current->lockdep_recursion))
3876 		return 1; /* avoid false negative lockdep_assert_held() */
3877 
3878 	raw_local_irq_save(flags);
3879 	check_flags(flags);
3880 
3881 	current->lockdep_recursion = 1;
3882 	ret = __lock_is_held(lock, read);
3883 	current->lockdep_recursion = 0;
3884 	raw_local_irq_restore(flags);
3885 
3886 	return ret;
3887 }
3888 EXPORT_SYMBOL_GPL(lock_is_held_type);
3889 
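/*
 * The early "return 1" above keeps assertions from firing spuriously while
 * lockdep itself is recursing.  The usual consumer is lockdep_assert_held(),
 * which boils down to lock_is_held_type(lock, -1); a typical use (struct and
 * field names illustrative):
 *
 *	static void update_stats(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		f->nr_updates++;
 *	}
 */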
3890 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
3891 {
3892 	struct pin_cookie cookie = NIL_COOKIE;
3893 	unsigned long flags;
3894 
3895 	if (unlikely(current->lockdep_recursion))
3896 		return cookie;
3897 
3898 	raw_local_irq_save(flags);
3899 	check_flags(flags);
3900 
3901 	current->lockdep_recursion = 1;
3902 	cookie = __lock_pin_lock(lock);
3903 	current->lockdep_recursion = 0;
3904 	raw_local_irq_restore(flags);
3905 
3906 	return cookie;
3907 }
3908 EXPORT_SYMBOL_GPL(lock_pin_lock);
3909 
3910 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3911 {
3912 	unsigned long flags;
3913 
3914 	if (unlikely(current->lockdep_recursion))
3915 		return;
3916 
3917 	raw_local_irq_save(flags);
3918 	check_flags(flags);
3919 
3920 	current->lockdep_recursion = 1;
3921 	__lock_repin_lock(lock, cookie);
3922 	current->lockdep_recursion = 0;
3923 	raw_local_irq_restore(flags);
3924 }
3925 EXPORT_SYMBOL_GPL(lock_repin_lock);
3926 
3927 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3928 {
3929 	unsigned long flags;
3930 
3931 	if (unlikely(current->lockdep_recursion))
3932 		return;
3933 
3934 	raw_local_irq_save(flags);
3935 	check_flags(flags);
3936 
3937 	current->lockdep_recursion = 1;
3938 	__lock_unpin_lock(lock, cookie);
3939 	current->lockdep_recursion = 0;
3940 	raw_local_irq_restore(flags);
3941 }
3942 EXPORT_SYMBOL_GPL(lock_unpin_lock);
3943 
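/*
 * Pinning lets a subsystem assert that a lock it depends on is not dropped
 * behind its back.  A sketch of the usual pattern via the lockdep_pin_lock()
 * wrappers (the queue structure is illustrative):
 *
 *	struct pin_cookie cookie;
 *
 *	spin_lock(&q->lock);
 *	cookie = lockdep_pin_lock(&q->lock);
 *	helper_that_must_not_drop_the_lock(q);
 *	lockdep_unpin_lock(&q->lock, cookie);
 *	spin_unlock(&q->lock);
 *
 * A callee that legitimately needs to drop and re-take the lock brackets
 * that window with lockdep_unpin_lock()/lockdep_repin_lock() instead.
 */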
3944 #ifdef CONFIG_LOCK_STAT
3945 static int
3946 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3947 			   unsigned long ip)
3948 {
3949 	if (!debug_locks_off())
3950 		return 0;
3951 	if (debug_locks_silent)
3952 		return 0;
3953 
3954 	pr_warn("\n");
3955 	pr_warn("=================================\n");
3956 	pr_warn("WARNING: bad contention detected!\n");
3957 	print_kernel_ident();
3958 	pr_warn("---------------------------------\n");
3959 	pr_warn("%s/%d is trying to contend lock (",
3960 		curr->comm, task_pid_nr(curr));
3961 	print_lockdep_cache(lock);
3962 	pr_cont(") at:\n");
3963 	print_ip_sym(ip);
3964 	pr_warn("but there are no locks held!\n");
3965 	pr_warn("\nother info that might help us debug this:\n");
3966 	lockdep_print_held_locks(curr);
3967 
3968 	pr_warn("\nstack backtrace:\n");
3969 	dump_stack();
3970 
3971 	return 0;
3972 }
3973 
3974 static void
3975 __lock_contended(struct lockdep_map *lock, unsigned long ip)
3976 {
3977 	struct task_struct *curr = current;
3978 	struct held_lock *hlock;
3979 	struct lock_class_stats *stats;
3980 	unsigned int depth;
3981 	int i, contention_point, contending_point;
3982 
3983 	depth = curr->lockdep_depth;
3984 	/*
3985 	 * Whee, we contended on this lock, except it seems we're not
3986 	 * actually trying to acquire anything much at all..
3987 	 */
3988 	if (DEBUG_LOCKS_WARN_ON(!depth))
3989 		return;
3990 
3991 	hlock = find_held_lock(curr, lock, depth, &i);
3992 	if (!hlock) {
3993 		print_lock_contention_bug(curr, lock, ip);
3994 		return;
3995 	}
3996 
3997 	if (hlock->instance != lock)
3998 		return;
3999 
4000 	hlock->waittime_stamp = lockstat_clock();
4001 
4002 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4003 	contending_point = lock_point(hlock_class(hlock)->contending_point,
4004 				      lock->ip);
4005 
4006 	stats = get_lock_stats(hlock_class(hlock));
4007 	if (contention_point < LOCKSTAT_POINTS)
4008 		stats->contention_point[contention_point]++;
4009 	if (contending_point < LOCKSTAT_POINTS)
4010 		stats->contending_point[contending_point]++;
4011 	if (lock->cpu != smp_processor_id())
4012 		stats->bounces[bounce_contended + !!hlock->read]++;
4013 }
4014 
4015 static void
4016 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
4017 {
4018 	struct task_struct *curr = current;
4019 	struct held_lock *hlock;
4020 	struct lock_class_stats *stats;
4021 	unsigned int depth;
4022 	u64 now, waittime = 0;
4023 	int i, cpu;
4024 
4025 	depth = curr->lockdep_depth;
4026 	/*
4027 	 * Yay, we acquired ownership of this lock we didn't try to
4028 	 * acquire, how the heck did that happen?
4029 	 */
4030 	if (DEBUG_LOCKS_WARN_ON(!depth))
4031 		return;
4032 
4033 	hlock = find_held_lock(curr, lock, depth, &i);
4034 	if (!hlock) {
4035 		print_lock_contention_bug(curr, lock, _RET_IP_);
4036 		return;
4037 	}
4038 
4039 	if (hlock->instance != lock)
4040 		return;
4041 
4042 	cpu = smp_processor_id();
4043 	if (hlock->waittime_stamp) {
4044 		now = lockstat_clock();
4045 		waittime = now - hlock->waittime_stamp;
4046 		hlock->holdtime_stamp = now;
4047 	}
4048 
4049 	trace_lock_acquired(lock, ip);
4050 
4051 	stats = get_lock_stats(hlock_class(hlock));
4052 	if (waittime) {
4053 		if (hlock->read)
4054 			lock_time_inc(&stats->read_waittime, waittime);
4055 		else
4056 			lock_time_inc(&stats->write_waittime, waittime);
4057 	}
4058 	if (lock->cpu != cpu)
4059 		stats->bounces[bounce_acquired + !!hlock->read]++;
4060 
4061 	lock->cpu = cpu;
4062 	lock->ip = ip;
4063 }
4064 
4065 void lock_contended(struct lockdep_map *lock, unsigned long ip)
4066 {
4067 	unsigned long flags;
4068 
4069 	if (unlikely(!lock_stat || !debug_locks))
4070 		return;
4071 
4072 	if (unlikely(current->lockdep_recursion))
4073 		return;
4074 
4075 	raw_local_irq_save(flags);
4076 	check_flags(flags);
4077 	current->lockdep_recursion = 1;
4078 	trace_lock_contended(lock, ip);
4079 	__lock_contended(lock, ip);
4080 	current->lockdep_recursion = 0;
4081 	raw_local_irq_restore(flags);
4082 }
4083 EXPORT_SYMBOL_GPL(lock_contended);
4084 
4085 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4086 {
4087 	unsigned long flags;
4088 
4089 	if (unlikely(!lock_stat || !debug_locks))
4090 		return;
4091 
4092 	if (unlikely(current->lockdep_recursion))
4093 		return;
4094 
4095 	raw_local_irq_save(flags);
4096 	check_flags(flags);
4097 	current->lockdep_recursion = 1;
4098 	__lock_acquired(lock, ip);
4099 	current->lockdep_recursion = 0;
4100 	raw_local_irq_restore(flags);
4101 }
4102 EXPORT_SYMBOL_GPL(lock_acquired);
4103 #endif
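/*
 * Both hooks are driven from the LOCK_CONTENDED() wrapper that the lock
 * primitives use for their slow path: lock_contended() when the trylock
 * fast path fails, lock_acquired() once the lock is finally taken.
 * Roughly:
 *
 *	if (!do_raw_spin_trylock(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);
 *		do_raw_spin_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 *
 * which is what gives the wait-time and bounce statistics above their
 * meaning.
 */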
4104 
4105 /*
4106  * Used by the testsuite to sanitize the validator state
4107  * after a simulated failure:
4108  */
4109 
4110 void lockdep_reset(void)
4111 {
4112 	unsigned long flags;
4113 	int i;
4114 
4115 	raw_local_irq_save(flags);
4116 	current->curr_chain_key = 0;
4117 	current->lockdep_depth = 0;
4118 	current->lockdep_recursion = 0;
4119 	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4120 	nr_hardirq_chains = 0;
4121 	nr_softirq_chains = 0;
4122 	nr_process_chains = 0;
4123 	debug_locks = 1;
4124 	for (i = 0; i < CHAINHASH_SIZE; i++)
4125 		INIT_HLIST_HEAD(chainhash_table + i);
4126 	raw_local_irq_restore(flags);
4127 }
4128 
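/*
 * A sketch of how a self-test that deliberately triggers a (simulated)
 * locking error gets the validator back into a usable state afterwards
 * (the test body below is hypothetical; the in-tree user is the locking
 * selftest in lib/locking-selftest.c):
 *
 *	debug_locks_silent = 1;
 *	run_case_that_reports_a_deadlock();
 *	lockdep_reset();
 *	debug_locks_silent = 0;
 */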
4129 static void zap_class(struct lock_class *class)
4130 {
4131 	int i;
4132 
4133 	/*
4134 	 * Remove all dependencies this lock is
4135 	 * involved in:
4136 	 */
4137 	for (i = 0; i < nr_list_entries; i++) {
4138 		if (list_entries[i].class == class)
4139 			list_del_rcu(&list_entries[i].entry);
4140 	}
4141 	/*
4142 	 * Unhash the class and remove it from the all_lock_classes list:
4143 	 */
4144 	hlist_del_rcu(&class->hash_entry);
4145 	list_del_rcu(&class->lock_entry);
4146 
4147 	RCU_INIT_POINTER(class->key, NULL);
4148 	RCU_INIT_POINTER(class->name, NULL);
4149 }
4150 
4151 static inline int within(const void *addr, void *start, unsigned long size)
4152 {
4153 	return addr >= start && addr < start + size;
4154 }
4155 
4156 /*
4157  * Used in module.c to remove lock classes from memory that is going to be
4158  * freed, and possibly re-used by other modules.
4159  *
4160  * We will have had one sync_sched() before getting here, so we're guaranteed
4161  * nobody will look up these exact classes -- they're properly dead but still
4162  * allocated.
4163  */
4164 void lockdep_free_key_range(void *start, unsigned long size)
4165 {
4166 	struct lock_class *class;
4167 	struct hlist_head *head;
4168 	unsigned long flags;
4169 	int i;
4170 	int locked;
4171 
4172 	raw_local_irq_save(flags);
4173 	locked = graph_lock();
4174 
4175 	/*
4176 	 * Unhash all classes that were created by this module:
4177 	 */
4178 	for (i = 0; i < CLASSHASH_SIZE; i++) {
4179 		head = classhash_table + i;
4180 		hlist_for_each_entry_rcu(class, head, hash_entry) {
4181 			if (within(class->key, start, size))
4182 				zap_class(class);
4183 			else if (within(class->name, start, size))
4184 				zap_class(class);
4185 		}
4186 	}
4187 
4188 	if (locked)
4189 		graph_unlock();
4190 	raw_local_irq_restore(flags);
4191 
4192 	/*
4193 	 * Wait for any possible iterators from look_up_lock_class() to pass
4194 	 * before continuing to free the memory they refer to.
4195 	 *
4196 	 * sync_sched() is sufficient because the read-side runs with IRQs disabled.
4197 	 */
4198 	synchronize_sched();
4199 
4200 	/*
4201 	 * XXX at this point we could return the resources to the pool;
4202 	 * instead we leak them. We would need to change to bitmap allocators
4203 	 * instead of the linear allocators we have now.
4204 	 */
4205 }
4206 
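/*
 * Module unload is the in-tree caller: once the module has been unlinked
 * and a synchronize_sched() guarantees nobody can still be walking its
 * classes, kernel/module.c does, in essence:
 *
 *	lockdep_free_key_range(mod->core_layout.base,
 *			       mod->core_layout.size);
 *
 * before the module's memory is returned to the allocator.
 */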
4207 void lockdep_reset_lock(struct lockdep_map *lock)
4208 {
4209 	struct lock_class *class;
4210 	struct hlist_head *head;
4211 	unsigned long flags;
4212 	int i, j;
4213 	int locked;
4214 
4215 	raw_local_irq_save(flags);
4216 
4217 	/*
4218 	 * Remove all classes this lock might have:
4219 	 */
4220 	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4221 		/*
4222 		 * If the class exists we look it up and zap it:
4223 		 */
4224 		class = look_up_lock_class(lock, j);
4225 		if (class)
4226 			zap_class(class);
4227 	}
4228 	/*
4229 	 * Debug check: in the end all mapped classes should
4230 	 * be gone.
4231 	 */
4232 	locked = graph_lock();
4233 	for (i = 0; i < CLASSHASH_SIZE; i++) {
4234 		head = classhash_table + i;
4235 		hlist_for_each_entry_rcu(class, head, hash_entry) {
4236 			int match = 0;
4237 
4238 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4239 				match |= class == lock->class_cache[j];
4240 
4241 			if (unlikely(match)) {
4242 				if (debug_locks_off_graph_unlock()) {
4243 					/*
4244 					 * We just zapped all the classes for this lock, how can one still match?
4245 					 */
4246 					WARN_ON(1);
4247 				}
4248 				goto out_restore;
4249 			}
4250 		}
4251 	}
4252 	if (locked)
4253 		graph_unlock();
4254 
4255 out_restore:
4256 	raw_local_irq_restore(flags);
4257 }
4258 
4259 void __init lockdep_init(void)
4260 {
4261 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4262 
4263 	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
4264 	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
4265 	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
4266 	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
4267 	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
4268 	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
4269 	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
4270 
4271 	printk(" memory used by lock dependency info: %lu kB\n",
4272 		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
4273 		sizeof(struct list_head) * CLASSHASH_SIZE +
4274 		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
4275 		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
4276 		sizeof(struct list_head) * CHAINHASH_SIZE
4277 #ifdef CONFIG_PROVE_LOCKING
4278 		+ sizeof(struct circular_queue)
4279 #endif
4280 		) / 1024
4281 		);
4282 
4283 	printk(" per task-struct memory footprint: %lu bytes\n",
4284 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
4285 }
4286 
4287 static void
4288 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
4289 		     const void *mem_to, struct held_lock *hlock)
4290 {
4291 	if (!debug_locks_off())
4292 		return;
4293 	if (debug_locks_silent)
4294 		return;
4295 
4296 	pr_warn("\n");
4297 	pr_warn("=========================\n");
4298 	pr_warn("WARNING: held lock freed!\n");
4299 	print_kernel_ident();
4300 	pr_warn("-------------------------\n");
4301 	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
4302 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
4303 	print_lock(hlock);
4304 	lockdep_print_held_locks(curr);
4305 
4306 	pr_warn("\nstack backtrace:\n");
4307 	dump_stack();
4308 }
4309 
4310 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4311 				const void* lock_from, unsigned long lock_len)
4312 {
4313 	return lock_from + lock_len <= mem_from ||
4314 		mem_from + mem_len <= lock_from;
4315 }
4316 
4317 /*
4318  * Called when kernel memory is freed (or unmapped), or if a lock
4319  * is destroyed or reinitialized - this code checks whether there is
4320  * any held lock in the memory range of <from> to <to>:
4321  */
4322 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4323 {
4324 	struct task_struct *curr = current;
4325 	struct held_lock *hlock;
4326 	unsigned long flags;
4327 	int i;
4328 
4329 	if (unlikely(!debug_locks))
4330 		return;
4331 
4332 	raw_local_irq_save(flags);
4333 	for (i = 0; i < curr->lockdep_depth; i++) {
4334 		hlock = curr->held_locks + i;
4335 
4336 		if (not_in_range(mem_from, mem_len, hlock->instance,
4337 					sizeof(*hlock->instance)))
4338 			continue;
4339 
4340 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
4341 		break;
4342 	}
4343 	raw_local_irq_restore(flags);
4344 }
4345 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
4346 
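/*
 * Allocator free paths are the typical callers; the slab free hooks, for
 * instance, do roughly
 *
 *	debug_check_no_locks_freed(object, cache->object_size);
 *
 * (names illustrative) before an object goes back on the free lists, so a
 * still-held lock embedded in that object is reported immediately.
 */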
4347 static void print_held_locks_bug(void)
4348 {
4349 	if (!debug_locks_off())
4350 		return;
4351 	if (debug_locks_silent)
4352 		return;
4353 
4354 	pr_warn("\n");
4355 	pr_warn("====================================\n");
4356 	pr_warn("WARNING: %s/%d still has locks held!\n",
4357 	       current->comm, task_pid_nr(current));
4358 	print_kernel_ident();
4359 	pr_warn("------------------------------------\n");
4360 	lockdep_print_held_locks(current);
4361 	pr_warn("\nstack backtrace:\n");
4362 	dump_stack();
4363 }
4364 
4365 void debug_check_no_locks_held(void)
4366 {
4367 	if (unlikely(current->lockdep_depth > 0))
4368 		print_held_locks_bug();
4369 }
4370 EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
4371 
4372 #ifdef __KERNEL__
4373 void debug_show_all_locks(void)
4374 {
4375 	struct task_struct *g, *p;
4376 
4377 	if (unlikely(!debug_locks)) {
4378 		pr_warn("INFO: lockdep is turned off.\n");
4379 		return;
4380 	}
4381 	pr_warn("\nShowing all locks held in the system:\n");
4382 
4383 	rcu_read_lock();
4384 	for_each_process_thread(g, p) {
4385 		if (!p->lockdep_depth)
4386 			continue;
4387 		lockdep_print_held_locks(p);
4388 		touch_nmi_watchdog();
4389 		touch_all_softlockup_watchdogs();
4390 	}
4391 	rcu_read_unlock();
4392 
4393 	pr_warn("\n");
4394 	pr_warn("=============================================\n\n");
4395 }
4396 EXPORT_SYMBOL_GPL(debug_show_all_locks);
4397 #endif
4398 
4399 /*
4400  * Careful: only use this function if you are sure that
4401  * the task cannot run in parallel!
4402  */
4403 void debug_show_held_locks(struct task_struct *task)
4404 {
4405 	if (unlikely(!debug_locks)) {
4406 		printk("INFO: lockdep is turned off.\n");
4407 		return;
4408 	}
4409 	lockdep_print_held_locks(task);
4410 }
4411 EXPORT_SYMBOL_GPL(debug_show_held_locks);
4412 
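/*
 * A sketch of a safe caller: a diagnostic path that has already observed
 * the task blocked for a long time, such as a hung-task style report,
 * can do
 *
 *	pr_err("INFO: task %s:%d blocked too long\n", t->comm, t->pid);
 *	debug_show_held_locks(t);
 *
 * because a task stuck in TASK_UNINTERRUPTIBLE is not going to be
 * acquiring or releasing locks concurrently.
 */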
4413 asmlinkage __visible void lockdep_sys_exit(void)
4414 {
4415 	struct task_struct *curr = current;
4416 
4417 	if (unlikely(curr->lockdep_depth)) {
4418 		if (!debug_locks_off())
4419 			return;
4420 		pr_warn("\n");
4421 		pr_warn("================================================\n");
4422 		pr_warn("WARNING: lock held when returning to user space!\n");
4423 		print_kernel_ident();
4424 		pr_warn("------------------------------------------------\n");
4425 		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
4426 				curr->comm, curr->pid);
4427 		lockdep_print_held_locks(curr);
4428 	}
4429 
4430 	/*
4431 	 * The lock history for each syscall should be independent. So wipe the
4432 	 * slate clean on return to userspace.
4433 	 */
4434 	lockdep_invariant_state(false);
4435 }
4436 
4437 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4438 {
4439 	struct task_struct *curr = current;
4440 
4441 	/* Note: the following can be executed concurrently, so be careful. */
4442 	pr_warn("\n");
4443 	pr_warn("=============================\n");
4444 	pr_warn("WARNING: suspicious RCU usage\n");
4445 	print_kernel_ident();
4446 	pr_warn("-----------------------------\n");
4447 	pr_warn("%s:%d %s!\n", file, line, s);
4448 	pr_warn("\nother info that might help us debug this:\n\n");
4449 	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
4450 	       !rcu_lockdep_current_cpu_online()
4451 			? "RCU used illegally from offline CPU!\n"
4452 			: !rcu_is_watching()
4453 				? "RCU used illegally from idle CPU!\n"
4454 				: "",
4455 	       rcu_scheduler_active, debug_locks);
4456 
4457 	/*
4458 	 * If a CPU is in the RCU-free window in idle (i.e. in the section
4459 	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
4460 	 * considers that CPU to be in an "extended quiescent state",
4461 	 * which means that RCU will be completely ignoring that CPU.
4462 	 * Therefore, rcu_read_lock() and friends have absolutely no
4463 	 * effect on a CPU running in that state. In other words, even if
4464 	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
4465 	 * delete data structures out from under it.  RCU really has no
4466 	 * choice here: we need to keep an RCU-free window in idle where
4467 	 * the CPU may possibly enter into low power mode. This way, CPUs that
4468 	 * started a grace period can notice the extended quiescent state.
4469 	 * Otherwise we would delay any grace period for as long as we run in
4470 	 * the idle task.
4471 	 *
4472 	 * So complain bitterly if someone does call rcu_read_lock(),
4473 	 * rcu_read_lock_bh() and so on from extended quiescent states.
4474 	 */
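	/*
	 * Code that genuinely needs an RCU read-side critical section from
	 * the idle loop must make RCU watch again for the duration, e.g.
	 * (sketch, with illustrative names):
	 *
	 *	RCU_NONIDLE({
	 *		rcu_read_lock();
	 *		do_something_with(rcu_dereference(gp));
	 *		rcu_read_unlock();
	 *	});
	 *
	 * rather than calling rcu_read_lock() directly while RCU-idle.
	 */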
4475 	if (!rcu_is_watching())
4476 		pr_warn("RCU used illegally from extended quiescent state!\n");
4477 
4478 	lockdep_print_held_locks(curr);
4479 	pr_warn("\nstack backtrace:\n");
4480 	dump_stack();
4481 }
4482 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
4483