xref: /openbmc/linux/kernel/locking/lockdep.c (revision e0f6d1a5)
1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e. if anytime in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies runtime.
27  */
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/sched/clock.h>
32 #include <linux/sched/task.h>
33 #include <linux/sched/mm.h>
34 #include <linux/delay.h>
35 #include <linux/module.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/spinlock.h>
39 #include <linux/kallsyms.h>
40 #include <linux/interrupt.h>
41 #include <linux/stacktrace.h>
42 #include <linux/debug_locks.h>
43 #include <linux/irqflags.h>
44 #include <linux/utsname.h>
45 #include <linux/hash.h>
46 #include <linux/ftrace.h>
47 #include <linux/stringify.h>
48 #include <linux/bitops.h>
49 #include <linux/gfp.h>
50 #include <linux/random.h>
51 #include <linux/jhash.h>
52 #include <linux/nmi.h>
53 
54 #include <asm/sections.h>
55 
56 #include "lockdep_internals.h"
57 
58 #define CREATE_TRACE_POINTS
59 #include <trace/events/lock.h>
60 
61 #ifdef CONFIG_PROVE_LOCKING
62 int prove_locking = 1;
63 module_param(prove_locking, int, 0644);
64 #else
65 #define prove_locking 0
66 #endif
67 
68 #ifdef CONFIG_LOCK_STAT
69 int lock_stat = 1;
70 module_param(lock_stat, int, 0644);
71 #else
72 #define lock_stat 0
73 #endif
74 
75 /*
76  * lockdep_lock: protects the lockdep graph, the hashes and the
77  *               class/list/hash allocators.
78  *
79  * This is one of the rare exceptions where it's justified
80  * to use a raw spinlock - we really dont want the spinlock
81  * code to recurse back into the lockdep code...
82  */
83 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
84 
85 static int graph_lock(void)
86 {
87 	arch_spin_lock(&lockdep_lock);
88 	/*
89 	 * Make sure that if another CPU detected a bug while
90 	 * walking the graph we dont change it (while the other
91 	 * CPU is busy printing out stuff with the graph lock
92 	 * dropped already)
93 	 */
94 	if (!debug_locks) {
95 		arch_spin_unlock(&lockdep_lock);
96 		return 0;
97 	}
98 	/* prevent any recursions within lockdep from causing deadlocks */
99 	current->lockdep_recursion++;
100 	return 1;
101 }
102 
103 static inline int graph_unlock(void)
104 {
105 	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
106 		/*
107 		 * The lockdep graph lock isn't locked while we expect it to
108 		 * be, we're confused now, bye!
109 		 */
110 		return DEBUG_LOCKS_WARN_ON(1);
111 	}
112 
113 	current->lockdep_recursion--;
114 	arch_spin_unlock(&lockdep_lock);
115 	return 0;
116 }
117 
118 /*
119  * Turn lock debugging off and return with 0 if it was off already,
120  * and also release the graph lock:
121  */
122 static inline int debug_locks_off_graph_unlock(void)
123 {
124 	int ret = debug_locks_off();
125 
126 	arch_spin_unlock(&lockdep_lock);
127 
128 	return ret;
129 }
130 
131 unsigned long nr_list_entries;
132 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
133 
134 /*
135  * All data structures here are protected by the global debug_lock.
136  *
137  * Mutex key structs only get allocated, once during bootup, and never
138  * get freed - this significantly simplifies the debugging code.
139  */
140 unsigned long nr_lock_classes;
141 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
142 
143 static inline struct lock_class *hlock_class(struct held_lock *hlock)
144 {
145 	if (!hlock->class_idx) {
146 		/*
147 		 * Someone passed in garbage, we give up.
148 		 */
149 		DEBUG_LOCKS_WARN_ON(1);
150 		return NULL;
151 	}
152 	return lock_classes + hlock->class_idx - 1;
153 }
154 
155 #ifdef CONFIG_LOCK_STAT
156 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
157 
158 static inline u64 lockstat_clock(void)
159 {
160 	return local_clock();
161 }
162 
163 static int lock_point(unsigned long points[], unsigned long ip)
164 {
165 	int i;
166 
167 	for (i = 0; i < LOCKSTAT_POINTS; i++) {
168 		if (points[i] == 0) {
169 			points[i] = ip;
170 			break;
171 		}
172 		if (points[i] == ip)
173 			break;
174 	}
175 
176 	return i;
177 }
178 
179 static void lock_time_inc(struct lock_time *lt, u64 time)
180 {
181 	if (time > lt->max)
182 		lt->max = time;
183 
184 	if (time < lt->min || !lt->nr)
185 		lt->min = time;
186 
187 	lt->total += time;
188 	lt->nr++;
189 }
190 
191 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
192 {
193 	if (!src->nr)
194 		return;
195 
196 	if (src->max > dst->max)
197 		dst->max = src->max;
198 
199 	if (src->min < dst->min || !dst->nr)
200 		dst->min = src->min;
201 
202 	dst->total += src->total;
203 	dst->nr += src->nr;
204 }
205 
206 struct lock_class_stats lock_stats(struct lock_class *class)
207 {
208 	struct lock_class_stats stats;
209 	int cpu, i;
210 
211 	memset(&stats, 0, sizeof(struct lock_class_stats));
212 	for_each_possible_cpu(cpu) {
213 		struct lock_class_stats *pcs =
214 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
215 
216 		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
217 			stats.contention_point[i] += pcs->contention_point[i];
218 
219 		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
220 			stats.contending_point[i] += pcs->contending_point[i];
221 
222 		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
223 		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
224 
225 		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
226 		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
227 
228 		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
229 			stats.bounces[i] += pcs->bounces[i];
230 	}
231 
232 	return stats;
233 }
234 
235 void clear_lock_stats(struct lock_class *class)
236 {
237 	int cpu;
238 
239 	for_each_possible_cpu(cpu) {
240 		struct lock_class_stats *cpu_stats =
241 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
242 
243 		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
244 	}
245 	memset(class->contention_point, 0, sizeof(class->contention_point));
246 	memset(class->contending_point, 0, sizeof(class->contending_point));
247 }
248 
249 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
250 {
251 	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
252 }
253 
254 static void put_lock_stats(struct lock_class_stats *stats)
255 {
256 	put_cpu_var(cpu_lock_stats);
257 }
258 
259 static void lock_release_holdtime(struct held_lock *hlock)
260 {
261 	struct lock_class_stats *stats;
262 	u64 holdtime;
263 
264 	if (!lock_stat)
265 		return;
266 
267 	holdtime = lockstat_clock() - hlock->holdtime_stamp;
268 
269 	stats = get_lock_stats(hlock_class(hlock));
270 	if (hlock->read)
271 		lock_time_inc(&stats->read_holdtime, holdtime);
272 	else
273 		lock_time_inc(&stats->write_holdtime, holdtime);
274 	put_lock_stats(stats);
275 }
276 #else
277 static inline void lock_release_holdtime(struct held_lock *hlock)
278 {
279 }
280 #endif
281 
282 /*
283  * We keep a global list of all lock classes. The list only grows,
284  * never shrinks. The list is only accessed with the lockdep
285  * spinlock lock held.
286  */
287 LIST_HEAD(all_lock_classes);
288 
289 /*
290  * The lockdep classes are in a hash-table as well, for fast lookup:
291  */
292 #define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
293 #define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
294 #define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
295 #define classhashentry(key)	(classhash_table + __classhashfn((key)))
296 
297 static struct hlist_head classhash_table[CLASSHASH_SIZE];
298 
299 /*
300  * We put the lock dependency chains into a hash-table as well, to cache
301  * their existence:
302  */
303 #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
304 #define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
305 #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
306 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
307 
308 static struct hlist_head chainhash_table[CHAINHASH_SIZE];
309 
310 /*
311  * The hash key of the lock dependency chains is a hash itself too:
312  * it's a hash of all locks taken up to that lock, including that lock.
313  * It's a 64-bit hash, because it's important for the keys to be
314  * unique.
315  */
316 static inline u64 iterate_chain_key(u64 key, u32 idx)
317 {
318 	u32 k0 = key, k1 = key >> 32;
319 
320 	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
321 
322 	return k0 | (u64)k1 << 32;
323 }
324 
325 void lockdep_off(void)
326 {
327 	current->lockdep_recursion++;
328 }
329 EXPORT_SYMBOL(lockdep_off);
330 
331 void lockdep_on(void)
332 {
333 	current->lockdep_recursion--;
334 }
335 EXPORT_SYMBOL(lockdep_on);
336 
337 /*
338  * Debugging switches:
339  */
340 
341 #define VERBOSE			0
342 #define VERY_VERBOSE		0
343 
344 #if VERBOSE
345 # define HARDIRQ_VERBOSE	1
346 # define SOFTIRQ_VERBOSE	1
347 #else
348 # define HARDIRQ_VERBOSE	0
349 # define SOFTIRQ_VERBOSE	0
350 #endif
351 
352 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
353 /*
354  * Quick filtering for interesting events:
355  */
356 static int class_filter(struct lock_class *class)
357 {
358 #if 0
359 	/* Example */
360 	if (class->name_version == 1 &&
361 			!strcmp(class->name, "lockname"))
362 		return 1;
363 	if (class->name_version == 1 &&
364 			!strcmp(class->name, "&struct->lockfield"))
365 		return 1;
366 #endif
367 	/* Filter everything else. 1 would be to allow everything else */
368 	return 0;
369 }
370 #endif
371 
372 static int verbose(struct lock_class *class)
373 {
374 #if VERBOSE
375 	return class_filter(class);
376 #endif
377 	return 0;
378 }
379 
380 /*
381  * Stack-trace: tightly packed array of stack backtrace
382  * addresses. Protected by the graph_lock.
383  */
384 unsigned long nr_stack_trace_entries;
385 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
386 
387 static void print_lockdep_off(const char *bug_msg)
388 {
389 	printk(KERN_DEBUG "%s\n", bug_msg);
390 	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
391 #ifdef CONFIG_LOCK_STAT
392 	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
393 #endif
394 }
395 
396 static int save_trace(struct stack_trace *trace)
397 {
398 	trace->nr_entries = 0;
399 	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
400 	trace->entries = stack_trace + nr_stack_trace_entries;
401 
402 	trace->skip = 3;
403 
404 	save_stack_trace(trace);
405 
406 	/*
407 	 * Some daft arches put -1 at the end to indicate its a full trace.
408 	 *
409 	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
410 	 * complete trace that maxes out the entries provided will be reported
411 	 * as incomplete, friggin useless </rant>
412 	 */
413 	if (trace->nr_entries != 0 &&
414 	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
415 		trace->nr_entries--;
416 
417 	trace->max_entries = trace->nr_entries;
418 
419 	nr_stack_trace_entries += trace->nr_entries;
420 
421 	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
422 		if (!debug_locks_off_graph_unlock())
423 			return 0;
424 
425 		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
426 		dump_stack();
427 
428 		return 0;
429 	}
430 
431 	return 1;
432 }
433 
434 unsigned int nr_hardirq_chains;
435 unsigned int nr_softirq_chains;
436 unsigned int nr_process_chains;
437 unsigned int max_lockdep_depth;
438 
439 #ifdef CONFIG_DEBUG_LOCKDEP
440 /*
441  * Various lockdep statistics:
442  */
443 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
444 #endif
445 
446 /*
447  * Locking printouts:
448  */
449 
450 #define __USAGE(__STATE)						\
451 	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
452 	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
453 	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
454 	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
455 
456 static const char *usage_str[] =
457 {
458 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
459 #include "lockdep_states.h"
460 #undef LOCKDEP_STATE
461 	[LOCK_USED] = "INITIAL USE",
462 };
463 
464 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
465 {
466 	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
467 }
468 
469 static inline unsigned long lock_flag(enum lock_usage_bit bit)
470 {
471 	return 1UL << bit;
472 }
473 
474 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
475 {
476 	char c = '.';
477 
478 	if (class->usage_mask & lock_flag(bit + 2))
479 		c = '+';
480 	if (class->usage_mask & lock_flag(bit)) {
481 		c = '-';
482 		if (class->usage_mask & lock_flag(bit + 2))
483 			c = '?';
484 	}
485 
486 	return c;
487 }
488 
489 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
490 {
491 	int i = 0;
492 
493 #define LOCKDEP_STATE(__STATE) 						\
494 	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
495 	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
496 #include "lockdep_states.h"
497 #undef LOCKDEP_STATE
498 
499 	usage[i] = '\0';
500 }
501 
502 static void __print_lock_name(struct lock_class *class)
503 {
504 	char str[KSYM_NAME_LEN];
505 	const char *name;
506 
507 	name = class->name;
508 	if (!name) {
509 		name = __get_key_name(class->key, str);
510 		printk(KERN_CONT "%s", name);
511 	} else {
512 		printk(KERN_CONT "%s", name);
513 		if (class->name_version > 1)
514 			printk(KERN_CONT "#%d", class->name_version);
515 		if (class->subclass)
516 			printk(KERN_CONT "/%d", class->subclass);
517 	}
518 }
519 
520 static void print_lock_name(struct lock_class *class)
521 {
522 	char usage[LOCK_USAGE_CHARS];
523 
524 	get_usage_chars(class, usage);
525 
526 	printk(KERN_CONT " (");
527 	__print_lock_name(class);
528 	printk(KERN_CONT "){%s}", usage);
529 }
530 
531 static void print_lockdep_cache(struct lockdep_map *lock)
532 {
533 	const char *name;
534 	char str[KSYM_NAME_LEN];
535 
536 	name = lock->name;
537 	if (!name)
538 		name = __get_key_name(lock->key->subkeys, str);
539 
540 	printk(KERN_CONT "%s", name);
541 }
542 
543 static void print_lock(struct held_lock *hlock)
544 {
545 	/*
546 	 * We can be called locklessly through debug_show_all_locks() so be
547 	 * extra careful, the hlock might have been released and cleared.
548 	 */
549 	unsigned int class_idx = hlock->class_idx;
550 
551 	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
552 	barrier();
553 
554 	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
555 		printk(KERN_CONT "<RELEASED>\n");
556 		return;
557 	}
558 
559 	printk(KERN_CONT "%p", hlock->instance);
560 	print_lock_name(lock_classes + class_idx - 1);
561 	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
562 }
563 
564 static void lockdep_print_held_locks(struct task_struct *curr)
565 {
566 	int i, depth = curr->lockdep_depth;
567 
568 	if (!depth) {
569 		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
570 		return;
571 	}
572 	printk("%d lock%s held by %s/%d:\n",
573 		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
574 
575 	for (i = 0; i < depth; i++) {
576 		printk(" #%d: ", i);
577 		print_lock(curr->held_locks + i);
578 	}
579 }
580 
581 static void print_kernel_ident(void)
582 {
583 	printk("%s %.*s %s\n", init_utsname()->release,
584 		(int)strcspn(init_utsname()->version, " "),
585 		init_utsname()->version,
586 		print_tainted());
587 }
588 
589 static int very_verbose(struct lock_class *class)
590 {
591 #if VERY_VERBOSE
592 	return class_filter(class);
593 #endif
594 	return 0;
595 }
596 
597 /*
598  * Is this the address of a static object:
599  */
600 #ifdef __KERNEL__
601 static int static_obj(void *obj)
602 {
603 	unsigned long start = (unsigned long) &_stext,
604 		      end   = (unsigned long) &_end,
605 		      addr  = (unsigned long) obj;
606 
607 	/*
608 	 * static variable?
609 	 */
610 	if ((addr >= start) && (addr < end))
611 		return 1;
612 
613 	if (arch_is_kernel_data(addr))
614 		return 1;
615 
616 	/*
617 	 * in-kernel percpu var?
618 	 */
619 	if (is_kernel_percpu_address(addr))
620 		return 1;
621 
622 	/*
623 	 * module static or percpu var?
624 	 */
625 	return is_module_address(addr) || is_module_percpu_address(addr);
626 }
627 #endif
628 
629 /*
630  * To make lock name printouts unique, we calculate a unique
631  * class->name_version generation counter:
632  */
633 static int count_matching_names(struct lock_class *new_class)
634 {
635 	struct lock_class *class;
636 	int count = 0;
637 
638 	if (!new_class->name)
639 		return 0;
640 
641 	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
642 		if (new_class->key - new_class->subclass == class->key)
643 			return class->name_version;
644 		if (class->name && !strcmp(class->name, new_class->name))
645 			count = max(count, class->name_version);
646 	}
647 
648 	return count + 1;
649 }
650 
651 static inline struct lock_class *
652 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
653 {
654 	struct lockdep_subclass_key *key;
655 	struct hlist_head *hash_head;
656 	struct lock_class *class;
657 
658 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
659 		debug_locks_off();
660 		printk(KERN_ERR
661 			"BUG: looking up invalid subclass: %u\n", subclass);
662 		printk(KERN_ERR
663 			"turning off the locking correctness validator.\n");
664 		dump_stack();
665 		return NULL;
666 	}
667 
668 	/*
669 	 * If it is not initialised then it has never been locked,
670 	 * so it won't be present in the hash table.
671 	 */
672 	if (unlikely(!lock->key))
673 		return NULL;
674 
675 	/*
676 	 * NOTE: the class-key must be unique. For dynamic locks, a static
677 	 * lock_class_key variable is passed in through the mutex_init()
678 	 * (or spin_lock_init()) call - which acts as the key. For static
679 	 * locks we use the lock object itself as the key.
680 	 */
681 	BUILD_BUG_ON(sizeof(struct lock_class_key) >
682 			sizeof(struct lockdep_map));
683 
684 	key = lock->key->subkeys + subclass;
685 
686 	hash_head = classhashentry(key);
687 
688 	/*
689 	 * We do an RCU walk of the hash, see lockdep_free_key_range().
690 	 */
691 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
692 		return NULL;
693 
694 	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
695 		if (class->key == key) {
696 			/*
697 			 * Huh! same key, different name? Did someone trample
698 			 * on some memory? We're most confused.
699 			 */
700 			WARN_ON_ONCE(class->name != lock->name);
701 			return class;
702 		}
703 	}
704 
705 	return NULL;
706 }
707 
708 /*
709  * Static locks do not have their class-keys yet - for them the key is
710  * the lock object itself. If the lock is in the per cpu area, the
711  * canonical address of the lock (per cpu offset removed) is used.
712  */
713 static bool assign_lock_key(struct lockdep_map *lock)
714 {
715 	unsigned long can_addr, addr = (unsigned long)lock;
716 
717 	if (__is_kernel_percpu_address(addr, &can_addr))
718 		lock->key = (void *)can_addr;
719 	else if (__is_module_percpu_address(addr, &can_addr))
720 		lock->key = (void *)can_addr;
721 	else if (static_obj(lock))
722 		lock->key = (void *)lock;
723 	else {
724 		/* Debug-check: all keys must be persistent! */
725 		debug_locks_off();
726 		pr_err("INFO: trying to register non-static key.\n");
727 		pr_err("the code is fine but needs lockdep annotation.\n");
728 		pr_err("turning off the locking correctness validator.\n");
729 		dump_stack();
730 		return false;
731 	}
732 
733 	return true;
734 }
735 
736 /*
737  * Register a lock's class in the hash-table, if the class is not present
738  * yet. Otherwise we look it up. We cache the result in the lock object
739  * itself, so actual lookup of the hash should be once per lock object.
740  */
741 static struct lock_class *
742 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
743 {
744 	struct lockdep_subclass_key *key;
745 	struct hlist_head *hash_head;
746 	struct lock_class *class;
747 
748 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
749 
750 	class = look_up_lock_class(lock, subclass);
751 	if (likely(class))
752 		goto out_set_class_cache;
753 
754 	if (!lock->key) {
755 		if (!assign_lock_key(lock))
756 			return NULL;
757 	} else if (!static_obj(lock->key)) {
758 		return NULL;
759 	}
760 
761 	key = lock->key->subkeys + subclass;
762 	hash_head = classhashentry(key);
763 
764 	if (!graph_lock()) {
765 		return NULL;
766 	}
767 	/*
768 	 * We have to do the hash-walk again, to avoid races
769 	 * with another CPU:
770 	 */
771 	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
772 		if (class->key == key)
773 			goto out_unlock_set;
774 	}
775 
776 	/*
777 	 * Allocate a new key from the static array, and add it to
778 	 * the hash:
779 	 */
780 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
781 		if (!debug_locks_off_graph_unlock()) {
782 			return NULL;
783 		}
784 
785 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
786 		dump_stack();
787 		return NULL;
788 	}
789 	class = lock_classes + nr_lock_classes++;
790 	debug_atomic_inc(nr_unused_locks);
791 	class->key = key;
792 	class->name = lock->name;
793 	class->subclass = subclass;
794 	INIT_LIST_HEAD(&class->lock_entry);
795 	INIT_LIST_HEAD(&class->locks_before);
796 	INIT_LIST_HEAD(&class->locks_after);
797 	class->name_version = count_matching_names(class);
798 	/*
799 	 * We use RCU's safe list-add method to make
800 	 * parallel walking of the hash-list safe:
801 	 */
802 	hlist_add_head_rcu(&class->hash_entry, hash_head);
803 	/*
804 	 * Add it to the global list of classes:
805 	 */
806 	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
807 
808 	if (verbose(class)) {
809 		graph_unlock();
810 
811 		printk("\nnew class %px: %s", class->key, class->name);
812 		if (class->name_version > 1)
813 			printk(KERN_CONT "#%d", class->name_version);
814 		printk(KERN_CONT "\n");
815 		dump_stack();
816 
817 		if (!graph_lock()) {
818 			return NULL;
819 		}
820 	}
821 out_unlock_set:
822 	graph_unlock();
823 
824 out_set_class_cache:
825 	if (!subclass || force)
826 		lock->class_cache[0] = class;
827 	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
828 		lock->class_cache[subclass] = class;
829 
830 	/*
831 	 * Hash collision, did we smoke some? We found a class with a matching
832 	 * hash but the subclass -- which is hashed in -- didn't match.
833 	 */
834 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
835 		return NULL;
836 
837 	return class;
838 }
839 
840 #ifdef CONFIG_PROVE_LOCKING
841 /*
842  * Allocate a lockdep entry. (assumes the graph_lock held, returns
843  * with NULL on failure)
844  */
845 static struct lock_list *alloc_list_entry(void)
846 {
847 	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
848 		if (!debug_locks_off_graph_unlock())
849 			return NULL;
850 
851 		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
852 		dump_stack();
853 		return NULL;
854 	}
855 	return list_entries + nr_list_entries++;
856 }
857 
858 /*
859  * Add a new dependency to the head of the list:
860  */
861 static int add_lock_to_list(struct lock_class *this, struct list_head *head,
862 			    unsigned long ip, int distance,
863 			    struct stack_trace *trace)
864 {
865 	struct lock_list *entry;
866 	/*
867 	 * Lock not present yet - get a new dependency struct and
868 	 * add it to the list:
869 	 */
870 	entry = alloc_list_entry();
871 	if (!entry)
872 		return 0;
873 
874 	entry->class = this;
875 	entry->distance = distance;
876 	entry->trace = *trace;
877 	/*
878 	 * Both allocation and removal are done under the graph lock; but
879 	 * iteration is under RCU-sched; see look_up_lock_class() and
880 	 * lockdep_free_key_range().
881 	 */
882 	list_add_tail_rcu(&entry->entry, head);
883 
884 	return 1;
885 }
886 
887 /*
888  * For good efficiency of modular, we use power of 2
889  */
890 #define MAX_CIRCULAR_QUEUE_SIZE		4096UL
891 #define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
892 
893 /*
894  * The circular_queue and helpers is used to implement the
895  * breadth-first search(BFS)algorithem, by which we can build
896  * the shortest path from the next lock to be acquired to the
897  * previous held lock if there is a circular between them.
898  */
899 struct circular_queue {
900 	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
901 	unsigned int  front, rear;
902 };
903 
904 static struct circular_queue lock_cq;
905 
906 unsigned int max_bfs_queue_depth;
907 
908 static unsigned int lockdep_dependency_gen_id;
909 
910 static inline void __cq_init(struct circular_queue *cq)
911 {
912 	cq->front = cq->rear = 0;
913 	lockdep_dependency_gen_id++;
914 }
915 
916 static inline int __cq_empty(struct circular_queue *cq)
917 {
918 	return (cq->front == cq->rear);
919 }
920 
921 static inline int __cq_full(struct circular_queue *cq)
922 {
923 	return ((cq->rear + 1) & CQ_MASK) == cq->front;
924 }
925 
926 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
927 {
928 	if (__cq_full(cq))
929 		return -1;
930 
931 	cq->element[cq->rear] = elem;
932 	cq->rear = (cq->rear + 1) & CQ_MASK;
933 	return 0;
934 }
935 
936 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
937 {
938 	if (__cq_empty(cq))
939 		return -1;
940 
941 	*elem = cq->element[cq->front];
942 	cq->front = (cq->front + 1) & CQ_MASK;
943 	return 0;
944 }
945 
946 static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
947 {
948 	return (cq->rear - cq->front) & CQ_MASK;
949 }
950 
951 static inline void mark_lock_accessed(struct lock_list *lock,
952 					struct lock_list *parent)
953 {
954 	unsigned long nr;
955 
956 	nr = lock - list_entries;
957 	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
958 	lock->parent = parent;
959 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
960 }
961 
962 static inline unsigned long lock_accessed(struct lock_list *lock)
963 {
964 	unsigned long nr;
965 
966 	nr = lock - list_entries;
967 	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
968 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
969 }
970 
971 static inline struct lock_list *get_lock_parent(struct lock_list *child)
972 {
973 	return child->parent;
974 }
975 
976 static inline int get_lock_depth(struct lock_list *child)
977 {
978 	int depth = 0;
979 	struct lock_list *parent;
980 
981 	while ((parent = get_lock_parent(child))) {
982 		child = parent;
983 		depth++;
984 	}
985 	return depth;
986 }
987 
988 static int __bfs(struct lock_list *source_entry,
989 		 void *data,
990 		 int (*match)(struct lock_list *entry, void *data),
991 		 struct lock_list **target_entry,
992 		 int forward)
993 {
994 	struct lock_list *entry;
995 	struct list_head *head;
996 	struct circular_queue *cq = &lock_cq;
997 	int ret = 1;
998 
999 	if (match(source_entry, data)) {
1000 		*target_entry = source_entry;
1001 		ret = 0;
1002 		goto exit;
1003 	}
1004 
1005 	if (forward)
1006 		head = &source_entry->class->locks_after;
1007 	else
1008 		head = &source_entry->class->locks_before;
1009 
1010 	if (list_empty(head))
1011 		goto exit;
1012 
1013 	__cq_init(cq);
1014 	__cq_enqueue(cq, (unsigned long)source_entry);
1015 
1016 	while (!__cq_empty(cq)) {
1017 		struct lock_list *lock;
1018 
1019 		__cq_dequeue(cq, (unsigned long *)&lock);
1020 
1021 		if (!lock->class) {
1022 			ret = -2;
1023 			goto exit;
1024 		}
1025 
1026 		if (forward)
1027 			head = &lock->class->locks_after;
1028 		else
1029 			head = &lock->class->locks_before;
1030 
1031 		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1032 
1033 		list_for_each_entry_rcu(entry, head, entry) {
1034 			if (!lock_accessed(entry)) {
1035 				unsigned int cq_depth;
1036 				mark_lock_accessed(entry, lock);
1037 				if (match(entry, data)) {
1038 					*target_entry = entry;
1039 					ret = 0;
1040 					goto exit;
1041 				}
1042 
1043 				if (__cq_enqueue(cq, (unsigned long)entry)) {
1044 					ret = -1;
1045 					goto exit;
1046 				}
1047 				cq_depth = __cq_get_elem_count(cq);
1048 				if (max_bfs_queue_depth < cq_depth)
1049 					max_bfs_queue_depth = cq_depth;
1050 			}
1051 		}
1052 	}
1053 exit:
1054 	return ret;
1055 }
1056 
1057 static inline int __bfs_forwards(struct lock_list *src_entry,
1058 			void *data,
1059 			int (*match)(struct lock_list *entry, void *data),
1060 			struct lock_list **target_entry)
1061 {
1062 	return __bfs(src_entry, data, match, target_entry, 1);
1063 
1064 }
1065 
1066 static inline int __bfs_backwards(struct lock_list *src_entry,
1067 			void *data,
1068 			int (*match)(struct lock_list *entry, void *data),
1069 			struct lock_list **target_entry)
1070 {
1071 	return __bfs(src_entry, data, match, target_entry, 0);
1072 
1073 }
1074 
1075 /*
1076  * Recursive, forwards-direction lock-dependency checking, used for
1077  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1078  * checking.
1079  */
1080 
1081 /*
1082  * Print a dependency chain entry (this is only done when a deadlock
1083  * has been detected):
1084  */
1085 static noinline int
1086 print_circular_bug_entry(struct lock_list *target, int depth)
1087 {
1088 	if (debug_locks_silent)
1089 		return 0;
1090 	printk("\n-> #%u", depth);
1091 	print_lock_name(target->class);
1092 	printk(KERN_CONT ":\n");
1093 	print_stack_trace(&target->trace, 6);
1094 
1095 	return 0;
1096 }
1097 
1098 static void
1099 print_circular_lock_scenario(struct held_lock *src,
1100 			     struct held_lock *tgt,
1101 			     struct lock_list *prt)
1102 {
1103 	struct lock_class *source = hlock_class(src);
1104 	struct lock_class *target = hlock_class(tgt);
1105 	struct lock_class *parent = prt->class;
1106 
1107 	/*
1108 	 * A direct locking problem where unsafe_class lock is taken
1109 	 * directly by safe_class lock, then all we need to show
1110 	 * is the deadlock scenario, as it is obvious that the
1111 	 * unsafe lock is taken under the safe lock.
1112 	 *
1113 	 * But if there is a chain instead, where the safe lock takes
1114 	 * an intermediate lock (middle_class) where this lock is
1115 	 * not the same as the safe lock, then the lock chain is
1116 	 * used to describe the problem. Otherwise we would need
1117 	 * to show a different CPU case for each link in the chain
1118 	 * from the safe_class lock to the unsafe_class lock.
1119 	 */
1120 	if (parent != source) {
1121 		printk("Chain exists of:\n  ");
1122 		__print_lock_name(source);
1123 		printk(KERN_CONT " --> ");
1124 		__print_lock_name(parent);
1125 		printk(KERN_CONT " --> ");
1126 		__print_lock_name(target);
1127 		printk(KERN_CONT "\n\n");
1128 	}
1129 
1130 	printk(" Possible unsafe locking scenario:\n\n");
1131 	printk("       CPU0                    CPU1\n");
1132 	printk("       ----                    ----\n");
1133 	printk("  lock(");
1134 	__print_lock_name(target);
1135 	printk(KERN_CONT ");\n");
1136 	printk("                               lock(");
1137 	__print_lock_name(parent);
1138 	printk(KERN_CONT ");\n");
1139 	printk("                               lock(");
1140 	__print_lock_name(target);
1141 	printk(KERN_CONT ");\n");
1142 	printk("  lock(");
1143 	__print_lock_name(source);
1144 	printk(KERN_CONT ");\n");
1145 	printk("\n *** DEADLOCK ***\n\n");
1146 }
1147 
1148 /*
1149  * When a circular dependency is detected, print the
1150  * header first:
1151  */
1152 static noinline int
1153 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1154 			struct held_lock *check_src,
1155 			struct held_lock *check_tgt)
1156 {
1157 	struct task_struct *curr = current;
1158 
1159 	if (debug_locks_silent)
1160 		return 0;
1161 
1162 	pr_warn("\n");
1163 	pr_warn("======================================================\n");
1164 	pr_warn("WARNING: possible circular locking dependency detected\n");
1165 	print_kernel_ident();
1166 	pr_warn("------------------------------------------------------\n");
1167 	pr_warn("%s/%d is trying to acquire lock:\n",
1168 		curr->comm, task_pid_nr(curr));
1169 	print_lock(check_src);
1170 
1171 	pr_warn("\nbut task is already holding lock:\n");
1172 
1173 	print_lock(check_tgt);
1174 	pr_warn("\nwhich lock already depends on the new lock.\n\n");
1175 	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1176 
1177 	print_circular_bug_entry(entry, depth);
1178 
1179 	return 0;
1180 }
1181 
1182 static inline int class_equal(struct lock_list *entry, void *data)
1183 {
1184 	return entry->class == data;
1185 }
1186 
1187 static noinline int print_circular_bug(struct lock_list *this,
1188 				struct lock_list *target,
1189 				struct held_lock *check_src,
1190 				struct held_lock *check_tgt,
1191 				struct stack_trace *trace)
1192 {
1193 	struct task_struct *curr = current;
1194 	struct lock_list *parent;
1195 	struct lock_list *first_parent;
1196 	int depth;
1197 
1198 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1199 		return 0;
1200 
1201 	if (!save_trace(&this->trace))
1202 		return 0;
1203 
1204 	depth = get_lock_depth(target);
1205 
1206 	print_circular_bug_header(target, depth, check_src, check_tgt);
1207 
1208 	parent = get_lock_parent(target);
1209 	first_parent = parent;
1210 
1211 	while (parent) {
1212 		print_circular_bug_entry(parent, --depth);
1213 		parent = get_lock_parent(parent);
1214 	}
1215 
1216 	printk("\nother info that might help us debug this:\n\n");
1217 	print_circular_lock_scenario(check_src, check_tgt,
1218 				     first_parent);
1219 
1220 	lockdep_print_held_locks(curr);
1221 
1222 	printk("\nstack backtrace:\n");
1223 	dump_stack();
1224 
1225 	return 0;
1226 }
1227 
1228 static noinline int print_bfs_bug(int ret)
1229 {
1230 	if (!debug_locks_off_graph_unlock())
1231 		return 0;
1232 
1233 	/*
1234 	 * Breadth-first-search failed, graph got corrupted?
1235 	 */
1236 	WARN(1, "lockdep bfs error:%d\n", ret);
1237 
1238 	return 0;
1239 }
1240 
1241 static int noop_count(struct lock_list *entry, void *data)
1242 {
1243 	(*(unsigned long *)data)++;
1244 	return 0;
1245 }
1246 
1247 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1248 {
1249 	unsigned long  count = 0;
1250 	struct lock_list *uninitialized_var(target_entry);
1251 
1252 	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1253 
1254 	return count;
1255 }
1256 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1257 {
1258 	unsigned long ret, flags;
1259 	struct lock_list this;
1260 
1261 	this.parent = NULL;
1262 	this.class = class;
1263 
1264 	local_irq_save(flags);
1265 	arch_spin_lock(&lockdep_lock);
1266 	ret = __lockdep_count_forward_deps(&this);
1267 	arch_spin_unlock(&lockdep_lock);
1268 	local_irq_restore(flags);
1269 
1270 	return ret;
1271 }
1272 
1273 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1274 {
1275 	unsigned long  count = 0;
1276 	struct lock_list *uninitialized_var(target_entry);
1277 
1278 	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1279 
1280 	return count;
1281 }
1282 
1283 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1284 {
1285 	unsigned long ret, flags;
1286 	struct lock_list this;
1287 
1288 	this.parent = NULL;
1289 	this.class = class;
1290 
1291 	local_irq_save(flags);
1292 	arch_spin_lock(&lockdep_lock);
1293 	ret = __lockdep_count_backward_deps(&this);
1294 	arch_spin_unlock(&lockdep_lock);
1295 	local_irq_restore(flags);
1296 
1297 	return ret;
1298 }
1299 
1300 /*
1301  * Prove that the dependency graph starting at <entry> can not
1302  * lead to <target>. Print an error and return 0 if it does.
1303  */
1304 static noinline int
1305 check_noncircular(struct lock_list *root, struct lock_class *target,
1306 		struct lock_list **target_entry)
1307 {
1308 	int result;
1309 
1310 	debug_atomic_inc(nr_cyclic_checks);
1311 
1312 	result = __bfs_forwards(root, target, class_equal, target_entry);
1313 
1314 	return result;
1315 }
1316 
1317 static noinline int
1318 check_redundant(struct lock_list *root, struct lock_class *target,
1319 		struct lock_list **target_entry)
1320 {
1321 	int result;
1322 
1323 	debug_atomic_inc(nr_redundant_checks);
1324 
1325 	result = __bfs_forwards(root, target, class_equal, target_entry);
1326 
1327 	return result;
1328 }
1329 
1330 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1331 /*
1332  * Forwards and backwards subgraph searching, for the purposes of
1333  * proving that two subgraphs can be connected by a new dependency
1334  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1335  */
1336 
1337 static inline int usage_match(struct lock_list *entry, void *bit)
1338 {
1339 	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1340 }
1341 
1342 
1343 
1344 /*
1345  * Find a node in the forwards-direction dependency sub-graph starting
1346  * at @root->class that matches @bit.
1347  *
1348  * Return 0 if such a node exists in the subgraph, and put that node
1349  * into *@target_entry.
1350  *
1351  * Return 1 otherwise and keep *@target_entry unchanged.
1352  * Return <0 on error.
1353  */
1354 static int
1355 find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1356 			struct lock_list **target_entry)
1357 {
1358 	int result;
1359 
1360 	debug_atomic_inc(nr_find_usage_forwards_checks);
1361 
1362 	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1363 
1364 	return result;
1365 }
1366 
1367 /*
1368  * Find a node in the backwards-direction dependency sub-graph starting
1369  * at @root->class that matches @bit.
1370  *
1371  * Return 0 if such a node exists in the subgraph, and put that node
1372  * into *@target_entry.
1373  *
1374  * Return 1 otherwise and keep *@target_entry unchanged.
1375  * Return <0 on error.
1376  */
1377 static int
1378 find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1379 			struct lock_list **target_entry)
1380 {
1381 	int result;
1382 
1383 	debug_atomic_inc(nr_find_usage_backwards_checks);
1384 
1385 	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1386 
1387 	return result;
1388 }
1389 
1390 static void print_lock_class_header(struct lock_class *class, int depth)
1391 {
1392 	int bit;
1393 
1394 	printk("%*s->", depth, "");
1395 	print_lock_name(class);
1396 	printk(KERN_CONT " ops: %lu", class->ops);
1397 	printk(KERN_CONT " {\n");
1398 
1399 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1400 		if (class->usage_mask & (1 << bit)) {
1401 			int len = depth;
1402 
1403 			len += printk("%*s   %s", depth, "", usage_str[bit]);
1404 			len += printk(KERN_CONT " at:\n");
1405 			print_stack_trace(class->usage_traces + bit, len);
1406 		}
1407 	}
1408 	printk("%*s }\n", depth, "");
1409 
1410 	printk("%*s ... key      at: [<%px>] %pS\n",
1411 		depth, "", class->key, class->key);
1412 }
1413 
1414 /*
1415  * printk the shortest lock dependencies from @start to @end in reverse order:
1416  */
1417 static void __used
1418 print_shortest_lock_dependencies(struct lock_list *leaf,
1419 				struct lock_list *root)
1420 {
1421 	struct lock_list *entry = leaf;
1422 	int depth;
1423 
1424 	/*compute depth from generated tree by BFS*/
1425 	depth = get_lock_depth(leaf);
1426 
1427 	do {
1428 		print_lock_class_header(entry->class, depth);
1429 		printk("%*s ... acquired at:\n", depth, "");
1430 		print_stack_trace(&entry->trace, 2);
1431 		printk("\n");
1432 
1433 		if (depth == 0 && (entry != root)) {
1434 			printk("lockdep:%s bad path found in chain graph\n", __func__);
1435 			break;
1436 		}
1437 
1438 		entry = get_lock_parent(entry);
1439 		depth--;
1440 	} while (entry && (depth >= 0));
1441 
1442 	return;
1443 }
1444 
1445 static void
1446 print_irq_lock_scenario(struct lock_list *safe_entry,
1447 			struct lock_list *unsafe_entry,
1448 			struct lock_class *prev_class,
1449 			struct lock_class *next_class)
1450 {
1451 	struct lock_class *safe_class = safe_entry->class;
1452 	struct lock_class *unsafe_class = unsafe_entry->class;
1453 	struct lock_class *middle_class = prev_class;
1454 
1455 	if (middle_class == safe_class)
1456 		middle_class = next_class;
1457 
1458 	/*
1459 	 * A direct locking problem where unsafe_class lock is taken
1460 	 * directly by safe_class lock, then all we need to show
1461 	 * is the deadlock scenario, as it is obvious that the
1462 	 * unsafe lock is taken under the safe lock.
1463 	 *
1464 	 * But if there is a chain instead, where the safe lock takes
1465 	 * an intermediate lock (middle_class) where this lock is
1466 	 * not the same as the safe lock, then the lock chain is
1467 	 * used to describe the problem. Otherwise we would need
1468 	 * to show a different CPU case for each link in the chain
1469 	 * from the safe_class lock to the unsafe_class lock.
1470 	 */
1471 	if (middle_class != unsafe_class) {
1472 		printk("Chain exists of:\n  ");
1473 		__print_lock_name(safe_class);
1474 		printk(KERN_CONT " --> ");
1475 		__print_lock_name(middle_class);
1476 		printk(KERN_CONT " --> ");
1477 		__print_lock_name(unsafe_class);
1478 		printk(KERN_CONT "\n\n");
1479 	}
1480 
1481 	printk(" Possible interrupt unsafe locking scenario:\n\n");
1482 	printk("       CPU0                    CPU1\n");
1483 	printk("       ----                    ----\n");
1484 	printk("  lock(");
1485 	__print_lock_name(unsafe_class);
1486 	printk(KERN_CONT ");\n");
1487 	printk("                               local_irq_disable();\n");
1488 	printk("                               lock(");
1489 	__print_lock_name(safe_class);
1490 	printk(KERN_CONT ");\n");
1491 	printk("                               lock(");
1492 	__print_lock_name(middle_class);
1493 	printk(KERN_CONT ");\n");
1494 	printk("  <Interrupt>\n");
1495 	printk("    lock(");
1496 	__print_lock_name(safe_class);
1497 	printk(KERN_CONT ");\n");
1498 	printk("\n *** DEADLOCK ***\n\n");
1499 }
1500 
1501 static int
1502 print_bad_irq_dependency(struct task_struct *curr,
1503 			 struct lock_list *prev_root,
1504 			 struct lock_list *next_root,
1505 			 struct lock_list *backwards_entry,
1506 			 struct lock_list *forwards_entry,
1507 			 struct held_lock *prev,
1508 			 struct held_lock *next,
1509 			 enum lock_usage_bit bit1,
1510 			 enum lock_usage_bit bit2,
1511 			 const char *irqclass)
1512 {
1513 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1514 		return 0;
1515 
1516 	pr_warn("\n");
1517 	pr_warn("=====================================================\n");
1518 	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1519 		irqclass, irqclass);
1520 	print_kernel_ident();
1521 	pr_warn("-----------------------------------------------------\n");
1522 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1523 		curr->comm, task_pid_nr(curr),
1524 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1525 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1526 		curr->hardirqs_enabled,
1527 		curr->softirqs_enabled);
1528 	print_lock(next);
1529 
1530 	pr_warn("\nand this task is already holding:\n");
1531 	print_lock(prev);
1532 	pr_warn("which would create a new lock dependency:\n");
1533 	print_lock_name(hlock_class(prev));
1534 	pr_cont(" ->");
1535 	print_lock_name(hlock_class(next));
1536 	pr_cont("\n");
1537 
1538 	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1539 		irqclass);
1540 	print_lock_name(backwards_entry->class);
1541 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
1542 
1543 	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1544 
1545 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
1546 	print_lock_name(forwards_entry->class);
1547 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
1548 	pr_warn("...");
1549 
1550 	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1551 
1552 	pr_warn("\nother info that might help us debug this:\n\n");
1553 	print_irq_lock_scenario(backwards_entry, forwards_entry,
1554 				hlock_class(prev), hlock_class(next));
1555 
1556 	lockdep_print_held_locks(curr);
1557 
1558 	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1559 	if (!save_trace(&prev_root->trace))
1560 		return 0;
1561 	print_shortest_lock_dependencies(backwards_entry, prev_root);
1562 
1563 	pr_warn("\nthe dependencies between the lock to be acquired");
1564 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
1565 	if (!save_trace(&next_root->trace))
1566 		return 0;
1567 	print_shortest_lock_dependencies(forwards_entry, next_root);
1568 
1569 	pr_warn("\nstack backtrace:\n");
1570 	dump_stack();
1571 
1572 	return 0;
1573 }
1574 
1575 static int
1576 check_usage(struct task_struct *curr, struct held_lock *prev,
1577 	    struct held_lock *next, enum lock_usage_bit bit_backwards,
1578 	    enum lock_usage_bit bit_forwards, const char *irqclass)
1579 {
1580 	int ret;
1581 	struct lock_list this, that;
1582 	struct lock_list *uninitialized_var(target_entry);
1583 	struct lock_list *uninitialized_var(target_entry1);
1584 
1585 	this.parent = NULL;
1586 
1587 	this.class = hlock_class(prev);
1588 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1589 	if (ret < 0)
1590 		return print_bfs_bug(ret);
1591 	if (ret == 1)
1592 		return ret;
1593 
1594 	that.parent = NULL;
1595 	that.class = hlock_class(next);
1596 	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1597 	if (ret < 0)
1598 		return print_bfs_bug(ret);
1599 	if (ret == 1)
1600 		return ret;
1601 
1602 	return print_bad_irq_dependency(curr, &this, &that,
1603 			target_entry, target_entry1,
1604 			prev, next,
1605 			bit_backwards, bit_forwards, irqclass);
1606 }
1607 
1608 static const char *state_names[] = {
1609 #define LOCKDEP_STATE(__STATE) \
1610 	__stringify(__STATE),
1611 #include "lockdep_states.h"
1612 #undef LOCKDEP_STATE
1613 };
1614 
1615 static const char *state_rnames[] = {
1616 #define LOCKDEP_STATE(__STATE) \
1617 	__stringify(__STATE)"-READ",
1618 #include "lockdep_states.h"
1619 #undef LOCKDEP_STATE
1620 };
1621 
1622 static inline const char *state_name(enum lock_usage_bit bit)
1623 {
1624 	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1625 }
1626 
1627 static int exclusive_bit(int new_bit)
1628 {
1629 	/*
1630 	 * USED_IN
1631 	 * USED_IN_READ
1632 	 * ENABLED
1633 	 * ENABLED_READ
1634 	 *
1635 	 * bit 0 - write/read
1636 	 * bit 1 - used_in/enabled
1637 	 * bit 2+  state
1638 	 */
1639 
1640 	int state = new_bit & ~3;
1641 	int dir = new_bit & 2;
1642 
1643 	/*
1644 	 * keep state, bit flip the direction and strip read.
1645 	 */
1646 	return state | (dir ^ 2);
1647 }
1648 
1649 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1650 			   struct held_lock *next, enum lock_usage_bit bit)
1651 {
1652 	/*
1653 	 * Prove that the new dependency does not connect a hardirq-safe
1654 	 * lock with a hardirq-unsafe lock - to achieve this we search
1655 	 * the backwards-subgraph starting at <prev>, and the
1656 	 * forwards-subgraph starting at <next>:
1657 	 */
1658 	if (!check_usage(curr, prev, next, bit,
1659 			   exclusive_bit(bit), state_name(bit)))
1660 		return 0;
1661 
1662 	bit++; /* _READ */
1663 
1664 	/*
1665 	 * Prove that the new dependency does not connect a hardirq-safe-read
1666 	 * lock with a hardirq-unsafe lock - to achieve this we search
1667 	 * the backwards-subgraph starting at <prev>, and the
1668 	 * forwards-subgraph starting at <next>:
1669 	 */
1670 	if (!check_usage(curr, prev, next, bit,
1671 			   exclusive_bit(bit), state_name(bit)))
1672 		return 0;
1673 
1674 	return 1;
1675 }
1676 
1677 static int
1678 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1679 		struct held_lock *next)
1680 {
1681 #define LOCKDEP_STATE(__STATE)						\
1682 	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
1683 		return 0;
1684 #include "lockdep_states.h"
1685 #undef LOCKDEP_STATE
1686 
1687 	return 1;
1688 }
1689 
1690 static void inc_chains(void)
1691 {
1692 	if (current->hardirq_context)
1693 		nr_hardirq_chains++;
1694 	else {
1695 		if (current->softirq_context)
1696 			nr_softirq_chains++;
1697 		else
1698 			nr_process_chains++;
1699 	}
1700 }
1701 
1702 #else
1703 
1704 static inline int
1705 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1706 		struct held_lock *next)
1707 {
1708 	return 1;
1709 }
1710 
1711 static inline void inc_chains(void)
1712 {
1713 	nr_process_chains++;
1714 }
1715 
1716 #endif
1717 
1718 static void
1719 print_deadlock_scenario(struct held_lock *nxt,
1720 			     struct held_lock *prv)
1721 {
1722 	struct lock_class *next = hlock_class(nxt);
1723 	struct lock_class *prev = hlock_class(prv);
1724 
1725 	printk(" Possible unsafe locking scenario:\n\n");
1726 	printk("       CPU0\n");
1727 	printk("       ----\n");
1728 	printk("  lock(");
1729 	__print_lock_name(prev);
1730 	printk(KERN_CONT ");\n");
1731 	printk("  lock(");
1732 	__print_lock_name(next);
1733 	printk(KERN_CONT ");\n");
1734 	printk("\n *** DEADLOCK ***\n\n");
1735 	printk(" May be due to missing lock nesting notation\n\n");
1736 }
1737 
1738 static int
1739 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1740 		   struct held_lock *next)
1741 {
1742 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1743 		return 0;
1744 
1745 	pr_warn("\n");
1746 	pr_warn("============================================\n");
1747 	pr_warn("WARNING: possible recursive locking detected\n");
1748 	print_kernel_ident();
1749 	pr_warn("--------------------------------------------\n");
1750 	pr_warn("%s/%d is trying to acquire lock:\n",
1751 		curr->comm, task_pid_nr(curr));
1752 	print_lock(next);
1753 	pr_warn("\nbut task is already holding lock:\n");
1754 	print_lock(prev);
1755 
1756 	pr_warn("\nother info that might help us debug this:\n");
1757 	print_deadlock_scenario(next, prev);
1758 	lockdep_print_held_locks(curr);
1759 
1760 	pr_warn("\nstack backtrace:\n");
1761 	dump_stack();
1762 
1763 	return 0;
1764 }
1765 
1766 /*
1767  * Check whether we are holding such a class already.
1768  *
1769  * (Note that this has to be done separately, because the graph cannot
1770  * detect such classes of deadlocks.)
1771  *
1772  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1773  */
1774 static int
1775 check_deadlock(struct task_struct *curr, struct held_lock *next,
1776 	       struct lockdep_map *next_instance, int read)
1777 {
1778 	struct held_lock *prev;
1779 	struct held_lock *nest = NULL;
1780 	int i;
1781 
1782 	for (i = 0; i < curr->lockdep_depth; i++) {
1783 		prev = curr->held_locks + i;
1784 
1785 		if (prev->instance == next->nest_lock)
1786 			nest = prev;
1787 
1788 		if (hlock_class(prev) != hlock_class(next))
1789 			continue;
1790 
1791 		/*
1792 		 * Allow read-after-read recursion of the same
1793 		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1794 		 */
1795 		if ((read == 2) && prev->read)
1796 			return 2;
1797 
1798 		/*
1799 		 * We're holding the nest_lock, which serializes this lock's
1800 		 * nesting behaviour.
1801 		 */
1802 		if (nest)
1803 			return 2;
1804 
1805 		return print_deadlock_bug(curr, prev, next);
1806 	}
1807 	return 1;
1808 }
1809 
1810 /*
1811  * There was a chain-cache miss, and we are about to add a new dependency
1812  * to a previous lock. We recursively validate the following rules:
1813  *
1814  *  - would the adding of the <prev> -> <next> dependency create a
1815  *    circular dependency in the graph? [== circular deadlock]
1816  *
1817  *  - does the new prev->next dependency connect any hardirq-safe lock
1818  *    (in the full backwards-subgraph starting at <prev>) with any
1819  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1820  *    <next>)? [== illegal lock inversion with hardirq contexts]
1821  *
1822  *  - does the new prev->next dependency connect any softirq-safe lock
1823  *    (in the full backwards-subgraph starting at <prev>) with any
1824  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1825  *    <next>)? [== illegal lock inversion with softirq contexts]
1826  *
1827  * any of these scenarios could lead to a deadlock.
1828  *
1829  * Then if all the validations pass, we add the forwards and backwards
1830  * dependency.
1831  */
1832 static int
1833 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1834 	       struct held_lock *next, int distance, struct stack_trace *trace,
1835 	       int (*save)(struct stack_trace *trace))
1836 {
1837 	struct lock_list *uninitialized_var(target_entry);
1838 	struct lock_list *entry;
1839 	struct lock_list this;
1840 	int ret;
1841 
1842 	/*
1843 	 * Prove that the new <prev> -> <next> dependency would not
1844 	 * create a circular dependency in the graph. (We do this by
1845 	 * forward-recursing into the graph starting at <next>, and
1846 	 * checking whether we can reach <prev>.)
1847 	 *
1848 	 * We are using global variables to control the recursion, to
1849 	 * keep the stackframe size of the recursive functions low:
1850 	 */
1851 	this.class = hlock_class(next);
1852 	this.parent = NULL;
1853 	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1854 	if (unlikely(!ret)) {
1855 		if (!trace->entries) {
1856 			/*
1857 			 * If @save fails here, the printing might trigger
1858 			 * a WARN but because of the !nr_entries it should
1859 			 * not do bad things.
1860 			 */
1861 			save(trace);
1862 		}
1863 		return print_circular_bug(&this, target_entry, next, prev, trace);
1864 	}
1865 	else if (unlikely(ret < 0))
1866 		return print_bfs_bug(ret);
1867 
1868 	if (!check_prev_add_irq(curr, prev, next))
1869 		return 0;
1870 
1871 	/*
1872 	 * For recursive read-locks we do all the dependency checks,
1873 	 * but we dont store read-triggered dependencies (only
1874 	 * write-triggered dependencies). This ensures that only the
1875 	 * write-side dependencies matter, and that if for example a
1876 	 * write-lock never takes any other locks, then the reads are
1877 	 * equivalent to a NOP.
1878 	 */
1879 	if (next->read == 2 || prev->read == 2)
1880 		return 1;
1881 	/*
1882 	 * Is the <prev> -> <next> dependency already present?
1883 	 *
1884 	 * (this may occur even though this is a new chain: consider
1885 	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1886 	 *  chains - the second one will be new, but L1 already has
1887 	 *  L2 added to its dependency list, due to the first chain.)
1888 	 */
1889 	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1890 		if (entry->class == hlock_class(next)) {
1891 			if (distance == 1)
1892 				entry->distance = 1;
1893 			return 1;
1894 		}
1895 	}
1896 
1897 	/*
1898 	 * Is the <prev> -> <next> link redundant?
1899 	 */
1900 	this.class = hlock_class(prev);
1901 	this.parent = NULL;
1902 	ret = check_redundant(&this, hlock_class(next), &target_entry);
1903 	if (!ret) {
1904 		debug_atomic_inc(nr_redundant);
1905 		return 2;
1906 	}
1907 	if (ret < 0)
1908 		return print_bfs_bug(ret);
1909 
1910 
1911 	if (!trace->entries && !save(trace))
1912 		return 0;
1913 
1914 	/*
1915 	 * Ok, all validations passed, add the new lock
1916 	 * to the previous lock's dependency list:
1917 	 */
1918 	ret = add_lock_to_list(hlock_class(next),
1919 			       &hlock_class(prev)->locks_after,
1920 			       next->acquire_ip, distance, trace);
1921 
1922 	if (!ret)
1923 		return 0;
1924 
1925 	ret = add_lock_to_list(hlock_class(prev),
1926 			       &hlock_class(next)->locks_before,
1927 			       next->acquire_ip, distance, trace);
1928 	if (!ret)
1929 		return 0;
1930 
1931 	return 2;
1932 }
1933 
1934 /*
1935  * Add the dependency to all directly-previous locks that are 'relevant'.
1936  * The ones that are relevant are (in increasing distance from curr):
1937  * all consecutive trylock entries and the final non-trylock entry - or
1938  * the end of this context's lock-chain - whichever comes first.
1939  */
1940 static int
1941 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1942 {
1943 	int depth = curr->lockdep_depth;
1944 	struct held_lock *hlock;
1945 	struct stack_trace trace = {
1946 		.nr_entries = 0,
1947 		.max_entries = 0,
1948 		.entries = NULL,
1949 		.skip = 0,
1950 	};
1951 
1952 	/*
1953 	 * Debugging checks.
1954 	 *
1955 	 * Depth must not be zero for a non-head lock:
1956 	 */
1957 	if (!depth)
1958 		goto out_bug;
1959 	/*
1960 	 * At least two relevant locks must exist for this
1961 	 * to be a head:
1962 	 */
1963 	if (curr->held_locks[depth].irq_context !=
1964 			curr->held_locks[depth-1].irq_context)
1965 		goto out_bug;
1966 
1967 	for (;;) {
1968 		int distance = curr->lockdep_depth - depth + 1;
1969 		hlock = curr->held_locks + depth - 1;
1970 
1971 		/*
1972 		 * Only non-recursive-read entries get new dependencies
1973 		 * added:
1974 		 */
1975 		if (hlock->read != 2 && hlock->check) {
1976 			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
1977 			if (!ret)
1978 				return 0;
1979 
1980 			/*
1981 			 * Stop after the first non-trylock entry,
1982 			 * as non-trylock entries have added their
1983 			 * own direct dependencies already, so this
1984 			 * lock is connected to them indirectly:
1985 			 */
1986 			if (!hlock->trylock)
1987 				break;
1988 		}
1989 
1990 		depth--;
1991 		/*
1992 		 * End of lock-stack?
1993 		 */
1994 		if (!depth)
1995 			break;
1996 		/*
1997 		 * Stop the search if we cross into another context:
1998 		 */
1999 		if (curr->held_locks[depth].irq_context !=
2000 				curr->held_locks[depth-1].irq_context)
2001 			break;
2002 	}
2003 	return 1;
2004 out_bug:
2005 	if (!debug_locks_off_graph_unlock())
2006 		return 0;
2007 
2008 	/*
2009 	 * Clearly we all shouldn't be here, but since we made it we
2010 	 * can reliable say we messed up our state. See the above two
2011 	 * gotos for reasons why we could possibly end up here.
2012 	 */
2013 	WARN_ON(1);
2014 
2015 	return 0;
2016 }
2017 
2018 unsigned long nr_lock_chains;
2019 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2020 int nr_chain_hlocks;
2021 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2022 
2023 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2024 {
2025 	return lock_classes + chain_hlocks[chain->base + i];
2026 }
2027 
2028 /*
2029  * Returns the index of the first held_lock of the current chain
2030  */
2031 static inline int get_first_held_lock(struct task_struct *curr,
2032 					struct held_lock *hlock)
2033 {
2034 	int i;
2035 	struct held_lock *hlock_curr;
2036 
2037 	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2038 		hlock_curr = curr->held_locks + i;
2039 		if (hlock_curr->irq_context != hlock->irq_context)
2040 			break;
2041 
2042 	}
2043 
2044 	return ++i;
2045 }
2046 
2047 #ifdef CONFIG_DEBUG_LOCKDEP
2048 /*
2049  * Returns the next chain_key iteration
2050  */
2051 static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2052 {
2053 	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2054 
2055 	printk(" class_idx:%d -> chain_key:%016Lx",
2056 		class_idx,
2057 		(unsigned long long)new_chain_key);
2058 	return new_chain_key;
2059 }
2060 
2061 static void
2062 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2063 {
2064 	struct held_lock *hlock;
2065 	u64 chain_key = 0;
2066 	int depth = curr->lockdep_depth;
2067 	int i;
2068 
2069 	printk("depth: %u\n", depth + 1);
2070 	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2071 		hlock = curr->held_locks + i;
2072 		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2073 
2074 		print_lock(hlock);
2075 	}
2076 
2077 	print_chain_key_iteration(hlock_next->class_idx, chain_key);
2078 	print_lock(hlock_next);
2079 }
2080 
2081 static void print_chain_keys_chain(struct lock_chain *chain)
2082 {
2083 	int i;
2084 	u64 chain_key = 0;
2085 	int class_id;
2086 
2087 	printk("depth: %u\n", chain->depth);
2088 	for (i = 0; i < chain->depth; i++) {
2089 		class_id = chain_hlocks[chain->base + i];
2090 		chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2091 
2092 		print_lock_name(lock_classes + class_id);
2093 		printk("\n");
2094 	}
2095 }
2096 
2097 static void print_collision(struct task_struct *curr,
2098 			struct held_lock *hlock_next,
2099 			struct lock_chain *chain)
2100 {
2101 	pr_warn("\n");
2102 	pr_warn("============================\n");
2103 	pr_warn("WARNING: chain_key collision\n");
2104 	print_kernel_ident();
2105 	pr_warn("----------------------------\n");
2106 	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2107 	pr_warn("Hash chain already cached but the contents don't match!\n");
2108 
2109 	pr_warn("Held locks:");
2110 	print_chain_keys_held_locks(curr, hlock_next);
2111 
2112 	pr_warn("Locks in cached chain:");
2113 	print_chain_keys_chain(chain);
2114 
2115 	pr_warn("\nstack backtrace:\n");
2116 	dump_stack();
2117 }
2118 #endif
2119 
2120 /*
2121  * Checks whether the chain and the current held locks are consistent
2122  * in depth and also in content. If they are not it most likely means
2123  * that there was a collision during the calculation of the chain_key.
2124  * Returns: 0 not passed, 1 passed
2125  */
2126 static int check_no_collision(struct task_struct *curr,
2127 			struct held_lock *hlock,
2128 			struct lock_chain *chain)
2129 {
2130 #ifdef CONFIG_DEBUG_LOCKDEP
2131 	int i, j, id;
2132 
2133 	i = get_first_held_lock(curr, hlock);
2134 
2135 	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2136 		print_collision(curr, hlock, chain);
2137 		return 0;
2138 	}
2139 
2140 	for (j = 0; j < chain->depth - 1; j++, i++) {
2141 		id = curr->held_locks[i].class_idx - 1;
2142 
2143 		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2144 			print_collision(curr, hlock, chain);
2145 			return 0;
2146 		}
2147 	}
2148 #endif
2149 	return 1;
2150 }
2151 
2152 /*
2153  * This is for building a chain between just two different classes,
2154  * instead of adding a new hlock upon current, which is done by
2155  * add_chain_cache().
2156  *
2157  * This can be called in any context with two classes, while
2158  * add_chain_cache() must be done within the lock owener's context
2159  * since it uses hlock which might be racy in another context.
2160  */
2161 static inline int add_chain_cache_classes(unsigned int prev,
2162 					  unsigned int next,
2163 					  unsigned int irq_context,
2164 					  u64 chain_key)
2165 {
2166 	struct hlist_head *hash_head = chainhashentry(chain_key);
2167 	struct lock_chain *chain;
2168 
2169 	/*
2170 	 * Allocate a new chain entry from the static array, and add
2171 	 * it to the hash:
2172 	 */
2173 
2174 	/*
2175 	 * We might need to take the graph lock, ensure we've got IRQs
2176 	 * disabled to make this an IRQ-safe lock.. for recursion reasons
2177 	 * lockdep won't complain about its own locking errors.
2178 	 */
2179 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2180 		return 0;
2181 
2182 	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2183 		if (!debug_locks_off_graph_unlock())
2184 			return 0;
2185 
2186 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2187 		dump_stack();
2188 		return 0;
2189 	}
2190 
2191 	chain = lock_chains + nr_lock_chains++;
2192 	chain->chain_key = chain_key;
2193 	chain->irq_context = irq_context;
2194 	chain->depth = 2;
2195 	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2196 		chain->base = nr_chain_hlocks;
2197 		nr_chain_hlocks += chain->depth;
2198 		chain_hlocks[chain->base] = prev - 1;
2199 		chain_hlocks[chain->base + 1] = next -1;
2200 	}
2201 #ifdef CONFIG_DEBUG_LOCKDEP
2202 	/*
2203 	 * Important for check_no_collision().
2204 	 */
2205 	else {
2206 		if (!debug_locks_off_graph_unlock())
2207 			return 0;
2208 
2209 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2210 		dump_stack();
2211 		return 0;
2212 	}
2213 #endif
2214 
2215 	hlist_add_head_rcu(&chain->entry, hash_head);
2216 	debug_atomic_inc(chain_lookup_misses);
2217 	inc_chains();
2218 
2219 	return 1;
2220 }
2221 
2222 /*
2223  * Adds a dependency chain into chain hashtable. And must be called with
2224  * graph_lock held.
2225  *
2226  * Return 0 if fail, and graph_lock is released.
2227  * Return 1 if succeed, with graph_lock held.
2228  */
2229 static inline int add_chain_cache(struct task_struct *curr,
2230 				  struct held_lock *hlock,
2231 				  u64 chain_key)
2232 {
2233 	struct lock_class *class = hlock_class(hlock);
2234 	struct hlist_head *hash_head = chainhashentry(chain_key);
2235 	struct lock_chain *chain;
2236 	int i, j;
2237 
2238 	/*
2239 	 * Allocate a new chain entry from the static array, and add
2240 	 * it to the hash:
2241 	 */
2242 
2243 	/*
2244 	 * We might need to take the graph lock, ensure we've got IRQs
2245 	 * disabled to make this an IRQ-safe lock.. for recursion reasons
2246 	 * lockdep won't complain about its own locking errors.
2247 	 */
2248 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2249 		return 0;
2250 
2251 	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2252 		if (!debug_locks_off_graph_unlock())
2253 			return 0;
2254 
2255 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2256 		dump_stack();
2257 		return 0;
2258 	}
2259 	chain = lock_chains + nr_lock_chains++;
2260 	chain->chain_key = chain_key;
2261 	chain->irq_context = hlock->irq_context;
2262 	i = get_first_held_lock(curr, hlock);
2263 	chain->depth = curr->lockdep_depth + 1 - i;
2264 
2265 	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2266 	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
2267 	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2268 
2269 	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2270 		chain->base = nr_chain_hlocks;
2271 		for (j = 0; j < chain->depth - 1; j++, i++) {
2272 			int lock_id = curr->held_locks[i].class_idx - 1;
2273 			chain_hlocks[chain->base + j] = lock_id;
2274 		}
2275 		chain_hlocks[chain->base + j] = class - lock_classes;
2276 	}
2277 
2278 	if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
2279 		nr_chain_hlocks += chain->depth;
2280 
2281 #ifdef CONFIG_DEBUG_LOCKDEP
2282 	/*
2283 	 * Important for check_no_collision().
2284 	 */
2285 	if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
2286 		if (!debug_locks_off_graph_unlock())
2287 			return 0;
2288 
2289 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2290 		dump_stack();
2291 		return 0;
2292 	}
2293 #endif
2294 
2295 	hlist_add_head_rcu(&chain->entry, hash_head);
2296 	debug_atomic_inc(chain_lookup_misses);
2297 	inc_chains();
2298 
2299 	return 1;
2300 }
2301 
2302 /*
2303  * Look up a dependency chain.
2304  */
2305 static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2306 {
2307 	struct hlist_head *hash_head = chainhashentry(chain_key);
2308 	struct lock_chain *chain;
2309 
2310 	/*
2311 	 * We can walk it lock-free, because entries only get added
2312 	 * to the hash:
2313 	 */
2314 	hlist_for_each_entry_rcu(chain, hash_head, entry) {
2315 		if (chain->chain_key == chain_key) {
2316 			debug_atomic_inc(chain_lookup_hits);
2317 			return chain;
2318 		}
2319 	}
2320 	return NULL;
2321 }
2322 
2323 /*
2324  * If the key is not present yet in dependency chain cache then
2325  * add it and return 1 - in this case the new dependency chain is
2326  * validated. If the key is already hashed, return 0.
2327  * (On return with 1 graph_lock is held.)
2328  */
2329 static inline int lookup_chain_cache_add(struct task_struct *curr,
2330 					 struct held_lock *hlock,
2331 					 u64 chain_key)
2332 {
2333 	struct lock_class *class = hlock_class(hlock);
2334 	struct lock_chain *chain = lookup_chain_cache(chain_key);
2335 
2336 	if (chain) {
2337 cache_hit:
2338 		if (!check_no_collision(curr, hlock, chain))
2339 			return 0;
2340 
2341 		if (very_verbose(class)) {
2342 			printk("\nhash chain already cached, key: "
2343 					"%016Lx tail class: [%px] %s\n",
2344 					(unsigned long long)chain_key,
2345 					class->key, class->name);
2346 		}
2347 
2348 		return 0;
2349 	}
2350 
2351 	if (very_verbose(class)) {
2352 		printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
2353 			(unsigned long long)chain_key, class->key, class->name);
2354 	}
2355 
2356 	if (!graph_lock())
2357 		return 0;
2358 
2359 	/*
2360 	 * We have to walk the chain again locked - to avoid duplicates:
2361 	 */
2362 	chain = lookup_chain_cache(chain_key);
2363 	if (chain) {
2364 		graph_unlock();
2365 		goto cache_hit;
2366 	}
2367 
2368 	if (!add_chain_cache(curr, hlock, chain_key))
2369 		return 0;
2370 
2371 	return 1;
2372 }
2373 
2374 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2375 		struct held_lock *hlock, int chain_head, u64 chain_key)
2376 {
2377 	/*
2378 	 * Trylock needs to maintain the stack of held locks, but it
2379 	 * does not add new dependencies, because trylock can be done
2380 	 * in any order.
2381 	 *
2382 	 * We look up the chain_key and do the O(N^2) check and update of
2383 	 * the dependencies only if this is a new dependency chain.
2384 	 * (If lookup_chain_cache_add() return with 1 it acquires
2385 	 * graph_lock for us)
2386 	 */
2387 	if (!hlock->trylock && hlock->check &&
2388 	    lookup_chain_cache_add(curr, hlock, chain_key)) {
2389 		/*
2390 		 * Check whether last held lock:
2391 		 *
2392 		 * - is irq-safe, if this lock is irq-unsafe
2393 		 * - is softirq-safe, if this lock is hardirq-unsafe
2394 		 *
2395 		 * And check whether the new lock's dependency graph
2396 		 * could lead back to the previous lock.
2397 		 *
2398 		 * any of these scenarios could lead to a deadlock. If
2399 		 * All validations
2400 		 */
2401 		int ret = check_deadlock(curr, hlock, lock, hlock->read);
2402 
2403 		if (!ret)
2404 			return 0;
2405 		/*
2406 		 * Mark recursive read, as we jump over it when
2407 		 * building dependencies (just like we jump over
2408 		 * trylock entries):
2409 		 */
2410 		if (ret == 2)
2411 			hlock->read = 2;
2412 		/*
2413 		 * Add dependency only if this lock is not the head
2414 		 * of the chain, and if it's not a secondary read-lock:
2415 		 */
2416 		if (!chain_head && ret != 2) {
2417 			if (!check_prevs_add(curr, hlock))
2418 				return 0;
2419 		}
2420 
2421 		graph_unlock();
2422 	} else {
2423 		/* after lookup_chain_cache_add(): */
2424 		if (unlikely(!debug_locks))
2425 			return 0;
2426 	}
2427 
2428 	return 1;
2429 }
2430 #else
2431 static inline int validate_chain(struct task_struct *curr,
2432 	       	struct lockdep_map *lock, struct held_lock *hlock,
2433 		int chain_head, u64 chain_key)
2434 {
2435 	return 1;
2436 }
2437 #endif
2438 
2439 /*
2440  * We are building curr_chain_key incrementally, so double-check
2441  * it from scratch, to make sure that it's done correctly:
2442  */
2443 static void check_chain_key(struct task_struct *curr)
2444 {
2445 #ifdef CONFIG_DEBUG_LOCKDEP
2446 	struct held_lock *hlock, *prev_hlock = NULL;
2447 	unsigned int i;
2448 	u64 chain_key = 0;
2449 
2450 	for (i = 0; i < curr->lockdep_depth; i++) {
2451 		hlock = curr->held_locks + i;
2452 		if (chain_key != hlock->prev_chain_key) {
2453 			debug_locks_off();
2454 			/*
2455 			 * We got mighty confused, our chain keys don't match
2456 			 * with what we expect, someone trample on our task state?
2457 			 */
2458 			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2459 				curr->lockdep_depth, i,
2460 				(unsigned long long)chain_key,
2461 				(unsigned long long)hlock->prev_chain_key);
2462 			return;
2463 		}
2464 		/*
2465 		 * Whoops ran out of static storage again?
2466 		 */
2467 		if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2468 			return;
2469 
2470 		if (prev_hlock && (prev_hlock->irq_context !=
2471 							hlock->irq_context))
2472 			chain_key = 0;
2473 		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2474 		prev_hlock = hlock;
2475 	}
2476 	if (chain_key != curr->curr_chain_key) {
2477 		debug_locks_off();
2478 		/*
2479 		 * More smoking hash instead of calculating it, damn see these
2480 		 * numbers float.. I bet that a pink elephant stepped on my memory.
2481 		 */
2482 		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2483 			curr->lockdep_depth, i,
2484 			(unsigned long long)chain_key,
2485 			(unsigned long long)curr->curr_chain_key);
2486 	}
2487 #endif
2488 }
2489 
2490 static void
2491 print_usage_bug_scenario(struct held_lock *lock)
2492 {
2493 	struct lock_class *class = hlock_class(lock);
2494 
2495 	printk(" Possible unsafe locking scenario:\n\n");
2496 	printk("       CPU0\n");
2497 	printk("       ----\n");
2498 	printk("  lock(");
2499 	__print_lock_name(class);
2500 	printk(KERN_CONT ");\n");
2501 	printk("  <Interrupt>\n");
2502 	printk("    lock(");
2503 	__print_lock_name(class);
2504 	printk(KERN_CONT ");\n");
2505 	printk("\n *** DEADLOCK ***\n\n");
2506 }
2507 
2508 static int
2509 print_usage_bug(struct task_struct *curr, struct held_lock *this,
2510 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2511 {
2512 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2513 		return 0;
2514 
2515 	pr_warn("\n");
2516 	pr_warn("================================\n");
2517 	pr_warn("WARNING: inconsistent lock state\n");
2518 	print_kernel_ident();
2519 	pr_warn("--------------------------------\n");
2520 
2521 	pr_warn("inconsistent {%s} -> {%s} usage.\n",
2522 		usage_str[prev_bit], usage_str[new_bit]);
2523 
2524 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2525 		curr->comm, task_pid_nr(curr),
2526 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2527 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2528 		trace_hardirqs_enabled(curr),
2529 		trace_softirqs_enabled(curr));
2530 	print_lock(this);
2531 
2532 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
2533 	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2534 
2535 	print_irqtrace_events(curr);
2536 	pr_warn("\nother info that might help us debug this:\n");
2537 	print_usage_bug_scenario(this);
2538 
2539 	lockdep_print_held_locks(curr);
2540 
2541 	pr_warn("\nstack backtrace:\n");
2542 	dump_stack();
2543 
2544 	return 0;
2545 }
2546 
2547 /*
2548  * Print out an error if an invalid bit is set:
2549  */
2550 static inline int
2551 valid_state(struct task_struct *curr, struct held_lock *this,
2552 	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2553 {
2554 	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2555 		return print_usage_bug(curr, this, bad_bit, new_bit);
2556 	return 1;
2557 }
2558 
2559 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2560 		     enum lock_usage_bit new_bit);
2561 
2562 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2563 
2564 /*
2565  * print irq inversion bug:
2566  */
2567 static int
2568 print_irq_inversion_bug(struct task_struct *curr,
2569 			struct lock_list *root, struct lock_list *other,
2570 			struct held_lock *this, int forwards,
2571 			const char *irqclass)
2572 {
2573 	struct lock_list *entry = other;
2574 	struct lock_list *middle = NULL;
2575 	int depth;
2576 
2577 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2578 		return 0;
2579 
2580 	pr_warn("\n");
2581 	pr_warn("========================================================\n");
2582 	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
2583 	print_kernel_ident();
2584 	pr_warn("--------------------------------------------------------\n");
2585 	pr_warn("%s/%d just changed the state of lock:\n",
2586 		curr->comm, task_pid_nr(curr));
2587 	print_lock(this);
2588 	if (forwards)
2589 		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2590 	else
2591 		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2592 	print_lock_name(other->class);
2593 	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2594 
2595 	pr_warn("\nother info that might help us debug this:\n");
2596 
2597 	/* Find a middle lock (if one exists) */
2598 	depth = get_lock_depth(other);
2599 	do {
2600 		if (depth == 0 && (entry != root)) {
2601 			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
2602 			break;
2603 		}
2604 		middle = entry;
2605 		entry = get_lock_parent(entry);
2606 		depth--;
2607 	} while (entry && entry != root && (depth >= 0));
2608 	if (forwards)
2609 		print_irq_lock_scenario(root, other,
2610 			middle ? middle->class : root->class, other->class);
2611 	else
2612 		print_irq_lock_scenario(other, root,
2613 			middle ? middle->class : other->class, root->class);
2614 
2615 	lockdep_print_held_locks(curr);
2616 
2617 	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2618 	if (!save_trace(&root->trace))
2619 		return 0;
2620 	print_shortest_lock_dependencies(other, root);
2621 
2622 	pr_warn("\nstack backtrace:\n");
2623 	dump_stack();
2624 
2625 	return 0;
2626 }
2627 
2628 /*
2629  * Prove that in the forwards-direction subgraph starting at <this>
2630  * there is no lock matching <mask>:
2631  */
2632 static int
2633 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2634 		     enum lock_usage_bit bit, const char *irqclass)
2635 {
2636 	int ret;
2637 	struct lock_list root;
2638 	struct lock_list *uninitialized_var(target_entry);
2639 
2640 	root.parent = NULL;
2641 	root.class = hlock_class(this);
2642 	ret = find_usage_forwards(&root, bit, &target_entry);
2643 	if (ret < 0)
2644 		return print_bfs_bug(ret);
2645 	if (ret == 1)
2646 		return ret;
2647 
2648 	return print_irq_inversion_bug(curr, &root, target_entry,
2649 					this, 1, irqclass);
2650 }
2651 
2652 /*
2653  * Prove that in the backwards-direction subgraph starting at <this>
2654  * there is no lock matching <mask>:
2655  */
2656 static int
2657 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2658 		      enum lock_usage_bit bit, const char *irqclass)
2659 {
2660 	int ret;
2661 	struct lock_list root;
2662 	struct lock_list *uninitialized_var(target_entry);
2663 
2664 	root.parent = NULL;
2665 	root.class = hlock_class(this);
2666 	ret = find_usage_backwards(&root, bit, &target_entry);
2667 	if (ret < 0)
2668 		return print_bfs_bug(ret);
2669 	if (ret == 1)
2670 		return ret;
2671 
2672 	return print_irq_inversion_bug(curr, &root, target_entry,
2673 					this, 0, irqclass);
2674 }
2675 
2676 void print_irqtrace_events(struct task_struct *curr)
2677 {
2678 	printk("irq event stamp: %u\n", curr->irq_events);
2679 	printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
2680 		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2681 		(void *)curr->hardirq_enable_ip);
2682 	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
2683 		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2684 		(void *)curr->hardirq_disable_ip);
2685 	printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
2686 		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2687 		(void *)curr->softirq_enable_ip);
2688 	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
2689 		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2690 		(void *)curr->softirq_disable_ip);
2691 }
2692 
2693 static int HARDIRQ_verbose(struct lock_class *class)
2694 {
2695 #if HARDIRQ_VERBOSE
2696 	return class_filter(class);
2697 #endif
2698 	return 0;
2699 }
2700 
2701 static int SOFTIRQ_verbose(struct lock_class *class)
2702 {
2703 #if SOFTIRQ_VERBOSE
2704 	return class_filter(class);
2705 #endif
2706 	return 0;
2707 }
2708 
2709 #define STRICT_READ_CHECKS	1
2710 
2711 static int (*state_verbose_f[])(struct lock_class *class) = {
2712 #define LOCKDEP_STATE(__STATE) \
2713 	__STATE##_verbose,
2714 #include "lockdep_states.h"
2715 #undef LOCKDEP_STATE
2716 };
2717 
2718 static inline int state_verbose(enum lock_usage_bit bit,
2719 				struct lock_class *class)
2720 {
2721 	return state_verbose_f[bit >> 2](class);
2722 }
2723 
2724 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2725 			     enum lock_usage_bit bit, const char *name);
2726 
2727 static int
2728 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2729 		enum lock_usage_bit new_bit)
2730 {
2731 	int excl_bit = exclusive_bit(new_bit);
2732 	int read = new_bit & 1;
2733 	int dir = new_bit & 2;
2734 
2735 	/*
2736 	 * mark USED_IN has to look forwards -- to ensure no dependency
2737 	 * has ENABLED state, which would allow recursion deadlocks.
2738 	 *
2739 	 * mark ENABLED has to look backwards -- to ensure no dependee
2740 	 * has USED_IN state, which, again, would allow  recursion deadlocks.
2741 	 */
2742 	check_usage_f usage = dir ?
2743 		check_usage_backwards : check_usage_forwards;
2744 
2745 	/*
2746 	 * Validate that this particular lock does not have conflicting
2747 	 * usage states.
2748 	 */
2749 	if (!valid_state(curr, this, new_bit, excl_bit))
2750 		return 0;
2751 
2752 	/*
2753 	 * Validate that the lock dependencies don't have conflicting usage
2754 	 * states.
2755 	 */
2756 	if ((!read || !dir || STRICT_READ_CHECKS) &&
2757 			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2758 		return 0;
2759 
2760 	/*
2761 	 * Check for read in write conflicts
2762 	 */
2763 	if (!read) {
2764 		if (!valid_state(curr, this, new_bit, excl_bit + 1))
2765 			return 0;
2766 
2767 		if (STRICT_READ_CHECKS &&
2768 			!usage(curr, this, excl_bit + 1,
2769 				state_name(new_bit + 1)))
2770 			return 0;
2771 	}
2772 
2773 	if (state_verbose(new_bit, hlock_class(this)))
2774 		return 2;
2775 
2776 	return 1;
2777 }
2778 
2779 enum mark_type {
2780 #define LOCKDEP_STATE(__STATE)	__STATE,
2781 #include "lockdep_states.h"
2782 #undef LOCKDEP_STATE
2783 };
2784 
2785 /*
2786  * Mark all held locks with a usage bit:
2787  */
2788 static int
2789 mark_held_locks(struct task_struct *curr, enum mark_type mark)
2790 {
2791 	enum lock_usage_bit usage_bit;
2792 	struct held_lock *hlock;
2793 	int i;
2794 
2795 	for (i = 0; i < curr->lockdep_depth; i++) {
2796 		hlock = curr->held_locks + i;
2797 
2798 		usage_bit = 2 + (mark << 2); /* ENABLED */
2799 		if (hlock->read)
2800 			usage_bit += 1; /* READ */
2801 
2802 		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2803 
2804 		if (!hlock->check)
2805 			continue;
2806 
2807 		if (!mark_lock(curr, hlock, usage_bit))
2808 			return 0;
2809 	}
2810 
2811 	return 1;
2812 }
2813 
2814 /*
2815  * Hardirqs will be enabled:
2816  */
2817 static void __trace_hardirqs_on_caller(unsigned long ip)
2818 {
2819 	struct task_struct *curr = current;
2820 
2821 	/* we'll do an OFF -> ON transition: */
2822 	curr->hardirqs_enabled = 1;
2823 
2824 	/*
2825 	 * We are going to turn hardirqs on, so set the
2826 	 * usage bit for all held locks:
2827 	 */
2828 	if (!mark_held_locks(curr, HARDIRQ))
2829 		return;
2830 	/*
2831 	 * If we have softirqs enabled, then set the usage
2832 	 * bit for all held locks. (disabled hardirqs prevented
2833 	 * this bit from being set before)
2834 	 */
2835 	if (curr->softirqs_enabled)
2836 		if (!mark_held_locks(curr, SOFTIRQ))
2837 			return;
2838 
2839 	curr->hardirq_enable_ip = ip;
2840 	curr->hardirq_enable_event = ++curr->irq_events;
2841 	debug_atomic_inc(hardirqs_on_events);
2842 }
2843 
2844 __visible void trace_hardirqs_on_caller(unsigned long ip)
2845 {
2846 	time_hardirqs_on(CALLER_ADDR0, ip);
2847 
2848 	if (unlikely(!debug_locks || current->lockdep_recursion))
2849 		return;
2850 
2851 	if (unlikely(current->hardirqs_enabled)) {
2852 		/*
2853 		 * Neither irq nor preemption are disabled here
2854 		 * so this is racy by nature but losing one hit
2855 		 * in a stat is not a big deal.
2856 		 */
2857 		__debug_atomic_inc(redundant_hardirqs_on);
2858 		return;
2859 	}
2860 
2861 	/*
2862 	 * We're enabling irqs and according to our state above irqs weren't
2863 	 * already enabled, yet we find the hardware thinks they are in fact
2864 	 * enabled.. someone messed up their IRQ state tracing.
2865 	 */
2866 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2867 		return;
2868 
2869 	/*
2870 	 * See the fine text that goes along with this variable definition.
2871 	 */
2872 	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2873 		return;
2874 
2875 	/*
2876 	 * Can't allow enabling interrupts while in an interrupt handler,
2877 	 * that's general bad form and such. Recursion, limited stack etc..
2878 	 */
2879 	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2880 		return;
2881 
2882 	current->lockdep_recursion = 1;
2883 	__trace_hardirqs_on_caller(ip);
2884 	current->lockdep_recursion = 0;
2885 }
2886 EXPORT_SYMBOL(trace_hardirqs_on_caller);
2887 
2888 void trace_hardirqs_on(void)
2889 {
2890 	trace_hardirqs_on_caller(CALLER_ADDR0);
2891 }
2892 EXPORT_SYMBOL(trace_hardirqs_on);
2893 
2894 /*
2895  * Hardirqs were disabled:
2896  */
2897 __visible void trace_hardirqs_off_caller(unsigned long ip)
2898 {
2899 	struct task_struct *curr = current;
2900 
2901 	time_hardirqs_off(CALLER_ADDR0, ip);
2902 
2903 	if (unlikely(!debug_locks || current->lockdep_recursion))
2904 		return;
2905 
2906 	/*
2907 	 * So we're supposed to get called after you mask local IRQs, but for
2908 	 * some reason the hardware doesn't quite think you did a proper job.
2909 	 */
2910 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2911 		return;
2912 
2913 	if (curr->hardirqs_enabled) {
2914 		/*
2915 		 * We have done an ON -> OFF transition:
2916 		 */
2917 		curr->hardirqs_enabled = 0;
2918 		curr->hardirq_disable_ip = ip;
2919 		curr->hardirq_disable_event = ++curr->irq_events;
2920 		debug_atomic_inc(hardirqs_off_events);
2921 	} else
2922 		debug_atomic_inc(redundant_hardirqs_off);
2923 }
2924 EXPORT_SYMBOL(trace_hardirqs_off_caller);
2925 
2926 void trace_hardirqs_off(void)
2927 {
2928 	trace_hardirqs_off_caller(CALLER_ADDR0);
2929 }
2930 EXPORT_SYMBOL(trace_hardirqs_off);
2931 
2932 /*
2933  * Softirqs will be enabled:
2934  */
2935 void trace_softirqs_on(unsigned long ip)
2936 {
2937 	struct task_struct *curr = current;
2938 
2939 	if (unlikely(!debug_locks || current->lockdep_recursion))
2940 		return;
2941 
2942 	/*
2943 	 * We fancy IRQs being disabled here, see softirq.c, avoids
2944 	 * funny state and nesting things.
2945 	 */
2946 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2947 		return;
2948 
2949 	if (curr->softirqs_enabled) {
2950 		debug_atomic_inc(redundant_softirqs_on);
2951 		return;
2952 	}
2953 
2954 	current->lockdep_recursion = 1;
2955 	/*
2956 	 * We'll do an OFF -> ON transition:
2957 	 */
2958 	curr->softirqs_enabled = 1;
2959 	curr->softirq_enable_ip = ip;
2960 	curr->softirq_enable_event = ++curr->irq_events;
2961 	debug_atomic_inc(softirqs_on_events);
2962 	/*
2963 	 * We are going to turn softirqs on, so set the
2964 	 * usage bit for all held locks, if hardirqs are
2965 	 * enabled too:
2966 	 */
2967 	if (curr->hardirqs_enabled)
2968 		mark_held_locks(curr, SOFTIRQ);
2969 	current->lockdep_recursion = 0;
2970 }
2971 
2972 /*
2973  * Softirqs were disabled:
2974  */
2975 void trace_softirqs_off(unsigned long ip)
2976 {
2977 	struct task_struct *curr = current;
2978 
2979 	if (unlikely(!debug_locks || current->lockdep_recursion))
2980 		return;
2981 
2982 	/*
2983 	 * We fancy IRQs being disabled here, see softirq.c
2984 	 */
2985 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2986 		return;
2987 
2988 	if (curr->softirqs_enabled) {
2989 		/*
2990 		 * We have done an ON -> OFF transition:
2991 		 */
2992 		curr->softirqs_enabled = 0;
2993 		curr->softirq_disable_ip = ip;
2994 		curr->softirq_disable_event = ++curr->irq_events;
2995 		debug_atomic_inc(softirqs_off_events);
2996 		/*
2997 		 * Whoops, we wanted softirqs off, so why aren't they?
2998 		 */
2999 		DEBUG_LOCKS_WARN_ON(!softirq_count());
3000 	} else
3001 		debug_atomic_inc(redundant_softirqs_off);
3002 }
3003 
3004 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
3005 {
3006 	/*
3007 	 * If non-trylock use in a hardirq or softirq context, then
3008 	 * mark the lock as used in these contexts:
3009 	 */
3010 	if (!hlock->trylock) {
3011 		if (hlock->read) {
3012 			if (curr->hardirq_context)
3013 				if (!mark_lock(curr, hlock,
3014 						LOCK_USED_IN_HARDIRQ_READ))
3015 					return 0;
3016 			if (curr->softirq_context)
3017 				if (!mark_lock(curr, hlock,
3018 						LOCK_USED_IN_SOFTIRQ_READ))
3019 					return 0;
3020 		} else {
3021 			if (curr->hardirq_context)
3022 				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
3023 					return 0;
3024 			if (curr->softirq_context)
3025 				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
3026 					return 0;
3027 		}
3028 	}
3029 	if (!hlock->hardirqs_off) {
3030 		if (hlock->read) {
3031 			if (!mark_lock(curr, hlock,
3032 					LOCK_ENABLED_HARDIRQ_READ))
3033 				return 0;
3034 			if (curr->softirqs_enabled)
3035 				if (!mark_lock(curr, hlock,
3036 						LOCK_ENABLED_SOFTIRQ_READ))
3037 					return 0;
3038 		} else {
3039 			if (!mark_lock(curr, hlock,
3040 					LOCK_ENABLED_HARDIRQ))
3041 				return 0;
3042 			if (curr->softirqs_enabled)
3043 				if (!mark_lock(curr, hlock,
3044 						LOCK_ENABLED_SOFTIRQ))
3045 					return 0;
3046 		}
3047 	}
3048 
3049 	return 1;
3050 }
3051 
3052 static inline unsigned int task_irq_context(struct task_struct *task)
3053 {
3054 	return 2 * !!task->hardirq_context + !!task->softirq_context;
3055 }
3056 
3057 static int separate_irq_context(struct task_struct *curr,
3058 		struct held_lock *hlock)
3059 {
3060 	unsigned int depth = curr->lockdep_depth;
3061 
3062 	/*
3063 	 * Keep track of points where we cross into an interrupt context:
3064 	 */
3065 	if (depth) {
3066 		struct held_lock *prev_hlock;
3067 
3068 		prev_hlock = curr->held_locks + depth-1;
3069 		/*
3070 		 * If we cross into another context, reset the
3071 		 * hash key (this also prevents the checking and the
3072 		 * adding of the dependency to 'prev'):
3073 		 */
3074 		if (prev_hlock->irq_context != hlock->irq_context)
3075 			return 1;
3076 	}
3077 	return 0;
3078 }
3079 
3080 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3081 
3082 static inline
3083 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3084 		enum lock_usage_bit new_bit)
3085 {
3086 	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
3087 	return 1;
3088 }
3089 
3090 static inline int mark_irqflags(struct task_struct *curr,
3091 		struct held_lock *hlock)
3092 {
3093 	return 1;
3094 }
3095 
3096 static inline unsigned int task_irq_context(struct task_struct *task)
3097 {
3098 	return 0;
3099 }
3100 
3101 static inline int separate_irq_context(struct task_struct *curr,
3102 		struct held_lock *hlock)
3103 {
3104 	return 0;
3105 }
3106 
3107 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3108 
3109 /*
3110  * Mark a lock with a usage bit, and validate the state transition:
3111  */
3112 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3113 			     enum lock_usage_bit new_bit)
3114 {
3115 	unsigned int new_mask = 1 << new_bit, ret = 1;
3116 
3117 	/*
3118 	 * If already set then do not dirty the cacheline,
3119 	 * nor do any checks:
3120 	 */
3121 	if (likely(hlock_class(this)->usage_mask & new_mask))
3122 		return 1;
3123 
3124 	if (!graph_lock())
3125 		return 0;
3126 	/*
3127 	 * Make sure we didn't race:
3128 	 */
3129 	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3130 		graph_unlock();
3131 		return 1;
3132 	}
3133 
3134 	hlock_class(this)->usage_mask |= new_mask;
3135 
3136 	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3137 		return 0;
3138 
3139 	switch (new_bit) {
3140 #define LOCKDEP_STATE(__STATE)			\
3141 	case LOCK_USED_IN_##__STATE:		\
3142 	case LOCK_USED_IN_##__STATE##_READ:	\
3143 	case LOCK_ENABLED_##__STATE:		\
3144 	case LOCK_ENABLED_##__STATE##_READ:
3145 #include "lockdep_states.h"
3146 #undef LOCKDEP_STATE
3147 		ret = mark_lock_irq(curr, this, new_bit);
3148 		if (!ret)
3149 			return 0;
3150 		break;
3151 	case LOCK_USED:
3152 		debug_atomic_dec(nr_unused_locks);
3153 		break;
3154 	default:
3155 		if (!debug_locks_off_graph_unlock())
3156 			return 0;
3157 		WARN_ON(1);
3158 		return 0;
3159 	}
3160 
3161 	graph_unlock();
3162 
3163 	/*
3164 	 * We must printk outside of the graph_lock:
3165 	 */
3166 	if (ret == 2) {
3167 		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3168 		print_lock(this);
3169 		print_irqtrace_events(curr);
3170 		dump_stack();
3171 	}
3172 
3173 	return ret;
3174 }
3175 
3176 /*
3177  * Initialize a lock instance's lock-class mapping info:
3178  */
3179 static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
3180 		      struct lock_class_key *key, int subclass)
3181 {
3182 	int i;
3183 
3184 	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3185 		lock->class_cache[i] = NULL;
3186 
3187 #ifdef CONFIG_LOCK_STAT
3188 	lock->cpu = raw_smp_processor_id();
3189 #endif
3190 
3191 	/*
3192 	 * Can't be having no nameless bastards around this place!
3193 	 */
3194 	if (DEBUG_LOCKS_WARN_ON(!name)) {
3195 		lock->name = "NULL";
3196 		return;
3197 	}
3198 
3199 	lock->name = name;
3200 
3201 	/*
3202 	 * No key, no joy, we need to hash something.
3203 	 */
3204 	if (DEBUG_LOCKS_WARN_ON(!key))
3205 		return;
3206 	/*
3207 	 * Sanity check, the lock-class key must be persistent:
3208 	 */
3209 	if (!static_obj(key)) {
3210 		printk("BUG: key %px not in .data!\n", key);
3211 		/*
3212 		 * What it says above ^^^^^, I suggest you read it.
3213 		 */
3214 		DEBUG_LOCKS_WARN_ON(1);
3215 		return;
3216 	}
3217 	lock->key = key;
3218 
3219 	if (unlikely(!debug_locks))
3220 		return;
3221 
3222 	if (subclass) {
3223 		unsigned long flags;
3224 
3225 		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3226 			return;
3227 
3228 		raw_local_irq_save(flags);
3229 		current->lockdep_recursion = 1;
3230 		register_lock_class(lock, subclass, 1);
3231 		current->lockdep_recursion = 0;
3232 		raw_local_irq_restore(flags);
3233 	}
3234 }
3235 
3236 void lockdep_init_map(struct lockdep_map *lock, const char *name,
3237 		      struct lock_class_key *key, int subclass)
3238 {
3239 	__lockdep_init_map(lock, name, key, subclass);
3240 }
3241 EXPORT_SYMBOL_GPL(lockdep_init_map);
3242 
3243 struct lock_class_key __lockdep_no_validate__;
3244 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3245 
3246 static int
3247 print_lock_nested_lock_not_held(struct task_struct *curr,
3248 				struct held_lock *hlock,
3249 				unsigned long ip)
3250 {
3251 	if (!debug_locks_off())
3252 		return 0;
3253 	if (debug_locks_silent)
3254 		return 0;
3255 
3256 	pr_warn("\n");
3257 	pr_warn("==================================\n");
3258 	pr_warn("WARNING: Nested lock was not taken\n");
3259 	print_kernel_ident();
3260 	pr_warn("----------------------------------\n");
3261 
3262 	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3263 	print_lock(hlock);
3264 
3265 	pr_warn("\nbut this task is not holding:\n");
3266 	pr_warn("%s\n", hlock->nest_lock->name);
3267 
3268 	pr_warn("\nstack backtrace:\n");
3269 	dump_stack();
3270 
3271 	pr_warn("\nother info that might help us debug this:\n");
3272 	lockdep_print_held_locks(curr);
3273 
3274 	pr_warn("\nstack backtrace:\n");
3275 	dump_stack();
3276 
3277 	return 0;
3278 }
3279 
3280 static int __lock_is_held(const struct lockdep_map *lock, int read);
3281 
3282 /*
3283  * This gets called for every mutex_lock*()/spin_lock*() operation.
3284  * We maintain the dependency maps and validate the locking attempt:
3285  */
3286 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3287 			  int trylock, int read, int check, int hardirqs_off,
3288 			  struct lockdep_map *nest_lock, unsigned long ip,
3289 			  int references, int pin_count)
3290 {
3291 	struct task_struct *curr = current;
3292 	struct lock_class *class = NULL;
3293 	struct held_lock *hlock;
3294 	unsigned int depth;
3295 	int chain_head = 0;
3296 	int class_idx;
3297 	u64 chain_key;
3298 
3299 	if (unlikely(!debug_locks))
3300 		return 0;
3301 
3302 	/*
3303 	 * Lockdep should run with IRQs disabled, otherwise we could
3304 	 * get an interrupt which would want to take locks, which would
3305 	 * end up in lockdep and have you got a head-ache already?
3306 	 */
3307 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3308 		return 0;
3309 
3310 	if (!prove_locking || lock->key == &__lockdep_no_validate__)
3311 		check = 0;
3312 
3313 	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3314 		class = lock->class_cache[subclass];
3315 	/*
3316 	 * Not cached?
3317 	 */
3318 	if (unlikely(!class)) {
3319 		class = register_lock_class(lock, subclass, 0);
3320 		if (!class)
3321 			return 0;
3322 	}
3323 	atomic_inc((atomic_t *)&class->ops);
3324 	if (very_verbose(class)) {
3325 		printk("\nacquire class [%px] %s", class->key, class->name);
3326 		if (class->name_version > 1)
3327 			printk(KERN_CONT "#%d", class->name_version);
3328 		printk(KERN_CONT "\n");
3329 		dump_stack();
3330 	}
3331 
3332 	/*
3333 	 * Add the lock to the list of currently held locks.
3334 	 * (we dont increase the depth just yet, up until the
3335 	 * dependency checks are done)
3336 	 */
3337 	depth = curr->lockdep_depth;
3338 	/*
3339 	 * Ran out of static storage for our per-task lock stack again have we?
3340 	 */
3341 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3342 		return 0;
3343 
3344 	class_idx = class - lock_classes + 1;
3345 
3346 	if (depth) {
3347 		hlock = curr->held_locks + depth - 1;
3348 		if (hlock->class_idx == class_idx && nest_lock) {
3349 			if (hlock->references) {
3350 				/*
3351 				 * Check: unsigned int references:12, overflow.
3352 				 */
3353 				if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
3354 					return 0;
3355 
3356 				hlock->references++;
3357 			} else {
3358 				hlock->references = 2;
3359 			}
3360 
3361 			return 1;
3362 		}
3363 	}
3364 
3365 	hlock = curr->held_locks + depth;
3366 	/*
3367 	 * Plain impossible, we just registered it and checked it weren't no
3368 	 * NULL like.. I bet this mushroom I ate was good!
3369 	 */
3370 	if (DEBUG_LOCKS_WARN_ON(!class))
3371 		return 0;
3372 	hlock->class_idx = class_idx;
3373 	hlock->acquire_ip = ip;
3374 	hlock->instance = lock;
3375 	hlock->nest_lock = nest_lock;
3376 	hlock->irq_context = task_irq_context(curr);
3377 	hlock->trylock = trylock;
3378 	hlock->read = read;
3379 	hlock->check = check;
3380 	hlock->hardirqs_off = !!hardirqs_off;
3381 	hlock->references = references;
3382 #ifdef CONFIG_LOCK_STAT
3383 	hlock->waittime_stamp = 0;
3384 	hlock->holdtime_stamp = lockstat_clock();
3385 #endif
3386 	hlock->pin_count = pin_count;
3387 
3388 	if (check && !mark_irqflags(curr, hlock))
3389 		return 0;
3390 
3391 	/* mark it as used: */
3392 	if (!mark_lock(curr, hlock, LOCK_USED))
3393 		return 0;
3394 
3395 	/*
3396 	 * Calculate the chain hash: it's the combined hash of all the
3397 	 * lock keys along the dependency chain. We save the hash value
3398 	 * at every step so that we can get the current hash easily
3399 	 * after unlock. The chain hash is then used to cache dependency
3400 	 * results.
3401 	 *
3402 	 * The 'key ID' is what is the most compact key value to drive
3403 	 * the hash, not class->key.
3404 	 */
3405 	/*
3406 	 * Whoops, we did it again.. ran straight out of our static allocation.
3407 	 */
3408 	if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3409 		return 0;
3410 
3411 	chain_key = curr->curr_chain_key;
3412 	if (!depth) {
3413 		/*
3414 		 * How can we have a chain hash when we ain't got no keys?!
3415 		 */
3416 		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3417 			return 0;
3418 		chain_head = 1;
3419 	}
3420 
3421 	hlock->prev_chain_key = chain_key;
3422 	if (separate_irq_context(curr, hlock)) {
3423 		chain_key = 0;
3424 		chain_head = 1;
3425 	}
3426 	chain_key = iterate_chain_key(chain_key, class_idx);
3427 
3428 	if (nest_lock && !__lock_is_held(nest_lock, -1))
3429 		return print_lock_nested_lock_not_held(curr, hlock, ip);
3430 
3431 	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3432 		return 0;
3433 
3434 	curr->curr_chain_key = chain_key;
3435 	curr->lockdep_depth++;
3436 	check_chain_key(curr);
3437 #ifdef CONFIG_DEBUG_LOCKDEP
3438 	if (unlikely(!debug_locks))
3439 		return 0;
3440 #endif
3441 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3442 		debug_locks_off();
3443 		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3444 		printk(KERN_DEBUG "depth: %i  max: %lu!\n",
3445 		       curr->lockdep_depth, MAX_LOCK_DEPTH);
3446 
3447 		lockdep_print_held_locks(current);
3448 		debug_show_all_locks();
3449 		dump_stack();
3450 
3451 		return 0;
3452 	}
3453 
3454 	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3455 		max_lockdep_depth = curr->lockdep_depth;
3456 
3457 	return 1;
3458 }
3459 
3460 static int
3461 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3462 			   unsigned long ip)
3463 {
3464 	if (!debug_locks_off())
3465 		return 0;
3466 	if (debug_locks_silent)
3467 		return 0;
3468 
3469 	pr_warn("\n");
3470 	pr_warn("=====================================\n");
3471 	pr_warn("WARNING: bad unlock balance detected!\n");
3472 	print_kernel_ident();
3473 	pr_warn("-------------------------------------\n");
3474 	pr_warn("%s/%d is trying to release lock (",
3475 		curr->comm, task_pid_nr(curr));
3476 	print_lockdep_cache(lock);
3477 	pr_cont(") at:\n");
3478 	print_ip_sym(ip);
3479 	pr_warn("but there are no more locks to release!\n");
3480 	pr_warn("\nother info that might help us debug this:\n");
3481 	lockdep_print_held_locks(curr);
3482 
3483 	pr_warn("\nstack backtrace:\n");
3484 	dump_stack();
3485 
3486 	return 0;
3487 }
3488 
3489 static int match_held_lock(const struct held_lock *hlock,
3490 					const struct lockdep_map *lock)
3491 {
3492 	if (hlock->instance == lock)
3493 		return 1;
3494 
3495 	if (hlock->references) {
3496 		const struct lock_class *class = lock->class_cache[0];
3497 
3498 		if (!class)
3499 			class = look_up_lock_class(lock, 0);
3500 
3501 		/*
3502 		 * If look_up_lock_class() failed to find a class, we're trying
3503 		 * to test if we hold a lock that has never yet been acquired.
3504 		 * Clearly if the lock hasn't been acquired _ever_, we're not
3505 		 * holding it either, so report failure.
3506 		 */
3507 		if (!class)
3508 			return 0;
3509 
3510 		/*
3511 		 * References, but not a lock we're actually ref-counting?
3512 		 * State got messed up, follow the sites that change ->references
3513 		 * and try to make sense of it.
3514 		 */
3515 		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3516 			return 0;
3517 
3518 		if (hlock->class_idx == class - lock_classes + 1)
3519 			return 1;
3520 	}
3521 
3522 	return 0;
3523 }
3524 
3525 /* @depth must not be zero */
3526 static struct held_lock *find_held_lock(struct task_struct *curr,
3527 					struct lockdep_map *lock,
3528 					unsigned int depth, int *idx)
3529 {
3530 	struct held_lock *ret, *hlock, *prev_hlock;
3531 	int i;
3532 
3533 	i = depth - 1;
3534 	hlock = curr->held_locks + i;
3535 	ret = hlock;
3536 	if (match_held_lock(hlock, lock))
3537 		goto out;
3538 
3539 	ret = NULL;
3540 	for (i--, prev_hlock = hlock--;
3541 	     i >= 0;
3542 	     i--, prev_hlock = hlock--) {
3543 		/*
3544 		 * We must not cross into another context:
3545 		 */
3546 		if (prev_hlock->irq_context != hlock->irq_context) {
3547 			ret = NULL;
3548 			break;
3549 		}
3550 		if (match_held_lock(hlock, lock)) {
3551 			ret = hlock;
3552 			break;
3553 		}
3554 	}
3555 
3556 out:
3557 	*idx = i;
3558 	return ret;
3559 }
3560 
3561 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3562 			      int idx)
3563 {
3564 	struct held_lock *hlock;
3565 
3566 	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3567 		if (!__lock_acquire(hlock->instance,
3568 				    hlock_class(hlock)->subclass,
3569 				    hlock->trylock,
3570 				    hlock->read, hlock->check,
3571 				    hlock->hardirqs_off,
3572 				    hlock->nest_lock, hlock->acquire_ip,
3573 				    hlock->references, hlock->pin_count))
3574 			return 1;
3575 	}
3576 	return 0;
3577 }
3578 
3579 static int
3580 __lock_set_class(struct lockdep_map *lock, const char *name,
3581 		 struct lock_class_key *key, unsigned int subclass,
3582 		 unsigned long ip)
3583 {
3584 	struct task_struct *curr = current;
3585 	struct held_lock *hlock;
3586 	struct lock_class *class;
3587 	unsigned int depth;
3588 	int i;
3589 
3590 	depth = curr->lockdep_depth;
3591 	/*
3592 	 * This function is about (re)setting the class of a held lock,
3593 	 * yet we're not actually holding any locks. Naughty user!
3594 	 */
3595 	if (DEBUG_LOCKS_WARN_ON(!depth))
3596 		return 0;
3597 
3598 	hlock = find_held_lock(curr, lock, depth, &i);
3599 	if (!hlock)
3600 		return print_unlock_imbalance_bug(curr, lock, ip);
3601 
3602 	lockdep_init_map(lock, name, key, 0);
3603 	class = register_lock_class(lock, subclass, 0);
3604 	hlock->class_idx = class - lock_classes + 1;
3605 
3606 	curr->lockdep_depth = i;
3607 	curr->curr_chain_key = hlock->prev_chain_key;
3608 
3609 	if (reacquire_held_locks(curr, depth, i))
3610 		return 0;
3611 
3612 	/*
3613 	 * I took it apart and put it back together again, except now I have
3614 	 * these 'spare' parts.. where shall I put them.
3615 	 */
3616 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3617 		return 0;
3618 	return 1;
3619 }
3620 
3621 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3622 {
3623 	struct task_struct *curr = current;
3624 	struct held_lock *hlock;
3625 	unsigned int depth;
3626 	int i;
3627 
3628 	depth = curr->lockdep_depth;
3629 	/*
3630 	 * This function is about (re)setting the class of a held lock,
3631 	 * yet we're not actually holding any locks. Naughty user!
3632 	 */
3633 	if (DEBUG_LOCKS_WARN_ON(!depth))
3634 		return 0;
3635 
3636 	hlock = find_held_lock(curr, lock, depth, &i);
3637 	if (!hlock)
3638 		return print_unlock_imbalance_bug(curr, lock, ip);
3639 
3640 	curr->lockdep_depth = i;
3641 	curr->curr_chain_key = hlock->prev_chain_key;
3642 
3643 	WARN(hlock->read, "downgrading a read lock");
3644 	hlock->read = 1;
3645 	hlock->acquire_ip = ip;
3646 
3647 	if (reacquire_held_locks(curr, depth, i))
3648 		return 0;
3649 
3650 	/*
3651 	 * I took it apart and put it back together again, except now I have
3652 	 * these 'spare' parts.. where shall I put them.
3653 	 */
3654 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3655 		return 0;
3656 	return 1;
3657 }
3658 
3659 /*
3660  * Remove the lock to the list of currently held locks - this gets
3661  * called on mutex_unlock()/spin_unlock*() (or on a failed
3662  * mutex_lock_interruptible()).
3663  *
3664  * @nested is an hysterical artifact, needs a tree wide cleanup.
3665  */
3666 static int
3667 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3668 {
3669 	struct task_struct *curr = current;
3670 	struct held_lock *hlock;
3671 	unsigned int depth;
3672 	int i;
3673 
3674 	if (unlikely(!debug_locks))
3675 		return 0;
3676 
3677 	depth = curr->lockdep_depth;
3678 	/*
3679 	 * So we're all set to release this lock.. wait what lock? We don't
3680 	 * own any locks, you've been drinking again?
3681 	 */
3682 	if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3683 		 return print_unlock_imbalance_bug(curr, lock, ip);
3684 
3685 	/*
3686 	 * Check whether the lock exists in the current stack
3687 	 * of held locks:
3688 	 */
3689 	hlock = find_held_lock(curr, lock, depth, &i);
3690 	if (!hlock)
3691 		return print_unlock_imbalance_bug(curr, lock, ip);
3692 
3693 	if (hlock->instance == lock)
3694 		lock_release_holdtime(hlock);
3695 
3696 	WARN(hlock->pin_count, "releasing a pinned lock\n");
3697 
3698 	if (hlock->references) {
3699 		hlock->references--;
3700 		if (hlock->references) {
3701 			/*
3702 			 * We had, and after removing one, still have
3703 			 * references, the current lock stack is still
3704 			 * valid. We're done!
3705 			 */
3706 			return 1;
3707 		}
3708 	}
3709 
3710 	/*
3711 	 * We have the right lock to unlock, 'hlock' points to it.
3712 	 * Now we remove it from the stack, and add back the other
3713 	 * entries (if any), recalculating the hash along the way:
3714 	 */
3715 
3716 	curr->lockdep_depth = i;
3717 	curr->curr_chain_key = hlock->prev_chain_key;
3718 
3719 	if (reacquire_held_locks(curr, depth, i + 1))
3720 		return 0;
3721 
3722 	/*
3723 	 * We had N bottles of beer on the wall, we drank one, but now
3724 	 * there's not N-1 bottles of beer left on the wall...
3725 	 */
3726 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3727 		return 0;
3728 
3729 	return 1;
3730 }
3731 
3732 static int __lock_is_held(const struct lockdep_map *lock, int read)
3733 {
3734 	struct task_struct *curr = current;
3735 	int i;
3736 
3737 	for (i = 0; i < curr->lockdep_depth; i++) {
3738 		struct held_lock *hlock = curr->held_locks + i;
3739 
3740 		if (match_held_lock(hlock, lock)) {
3741 			if (read == -1 || hlock->read == read)
3742 				return 1;
3743 
3744 			return 0;
3745 		}
3746 	}
3747 
3748 	return 0;
3749 }
3750 
3751 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
3752 {
3753 	struct pin_cookie cookie = NIL_COOKIE;
3754 	struct task_struct *curr = current;
3755 	int i;
3756 
3757 	if (unlikely(!debug_locks))
3758 		return cookie;
3759 
3760 	for (i = 0; i < curr->lockdep_depth; i++) {
3761 		struct held_lock *hlock = curr->held_locks + i;
3762 
3763 		if (match_held_lock(hlock, lock)) {
3764 			/*
3765 			 * Grab 16bits of randomness; this is sufficient to not
3766 			 * be guessable and still allows some pin nesting in
3767 			 * our u32 pin_count.
3768 			 */
3769 			cookie.val = 1 + (prandom_u32() >> 16);
3770 			hlock->pin_count += cookie.val;
3771 			return cookie;
3772 		}
3773 	}
3774 
3775 	WARN(1, "pinning an unheld lock\n");
3776 	return cookie;
3777 }
3778 
3779 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3780 {
3781 	struct task_struct *curr = current;
3782 	int i;
3783 
3784 	if (unlikely(!debug_locks))
3785 		return;
3786 
3787 	for (i = 0; i < curr->lockdep_depth; i++) {
3788 		struct held_lock *hlock = curr->held_locks + i;
3789 
3790 		if (match_held_lock(hlock, lock)) {
3791 			hlock->pin_count += cookie.val;
3792 			return;
3793 		}
3794 	}
3795 
3796 	WARN(1, "pinning an unheld lock\n");
3797 }
3798 
3799 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3800 {
3801 	struct task_struct *curr = current;
3802 	int i;
3803 
3804 	if (unlikely(!debug_locks))
3805 		return;
3806 
3807 	for (i = 0; i < curr->lockdep_depth; i++) {
3808 		struct held_lock *hlock = curr->held_locks + i;
3809 
3810 		if (match_held_lock(hlock, lock)) {
3811 			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
3812 				return;
3813 
3814 			hlock->pin_count -= cookie.val;
3815 
3816 			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
3817 				hlock->pin_count = 0;
3818 
3819 			return;
3820 		}
3821 	}
3822 
3823 	WARN(1, "unpinning an unheld lock\n");
3824 }
3825 
3826 /*
3827  * Check whether we follow the irq-flags state precisely:
3828  */
3829 static void check_flags(unsigned long flags)
3830 {
3831 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3832     defined(CONFIG_TRACE_IRQFLAGS)
3833 	if (!debug_locks)
3834 		return;
3835 
3836 	if (irqs_disabled_flags(flags)) {
3837 		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3838 			printk("possible reason: unannotated irqs-off.\n");
3839 		}
3840 	} else {
3841 		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3842 			printk("possible reason: unannotated irqs-on.\n");
3843 		}
3844 	}
3845 
3846 	/*
3847 	 * We dont accurately track softirq state in e.g.
3848 	 * hardirq contexts (such as on 4KSTACKS), so only
3849 	 * check if not in hardirq contexts:
3850 	 */
3851 	if (!hardirq_count()) {
3852 		if (softirq_count()) {
3853 			/* like the above, but with softirqs */
3854 			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3855 		} else {
3856 			/* lick the above, does it taste good? */
3857 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3858 		}
3859 	}
3860 
3861 	if (!debug_locks)
3862 		print_irqtrace_events(current);
3863 #endif
3864 }
3865 
3866 void lock_set_class(struct lockdep_map *lock, const char *name,
3867 		    struct lock_class_key *key, unsigned int subclass,
3868 		    unsigned long ip)
3869 {
3870 	unsigned long flags;
3871 
3872 	if (unlikely(current->lockdep_recursion))
3873 		return;
3874 
3875 	raw_local_irq_save(flags);
3876 	current->lockdep_recursion = 1;
3877 	check_flags(flags);
3878 	if (__lock_set_class(lock, name, key, subclass, ip))
3879 		check_chain_key(current);
3880 	current->lockdep_recursion = 0;
3881 	raw_local_irq_restore(flags);
3882 }
3883 EXPORT_SYMBOL_GPL(lock_set_class);
3884 
3885 void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3886 {
3887 	unsigned long flags;
3888 
3889 	if (unlikely(current->lockdep_recursion))
3890 		return;
3891 
3892 	raw_local_irq_save(flags);
3893 	current->lockdep_recursion = 1;
3894 	check_flags(flags);
3895 	if (__lock_downgrade(lock, ip))
3896 		check_chain_key(current);
3897 	current->lockdep_recursion = 0;
3898 	raw_local_irq_restore(flags);
3899 }
3900 EXPORT_SYMBOL_GPL(lock_downgrade);
3901 
3902 /*
3903  * We are not always called with irqs disabled - do that here,
3904  * and also avoid lockdep recursion:
3905  */
3906 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3907 			  int trylock, int read, int check,
3908 			  struct lockdep_map *nest_lock, unsigned long ip)
3909 {
3910 	unsigned long flags;
3911 
3912 	if (unlikely(current->lockdep_recursion))
3913 		return;
3914 
3915 	raw_local_irq_save(flags);
3916 	check_flags(flags);
3917 
3918 	current->lockdep_recursion = 1;
3919 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3920 	__lock_acquire(lock, subclass, trylock, read, check,
3921 		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
3922 	current->lockdep_recursion = 0;
3923 	raw_local_irq_restore(flags);
3924 }
3925 EXPORT_SYMBOL_GPL(lock_acquire);
3926 
3927 void lock_release(struct lockdep_map *lock, int nested,
3928 			  unsigned long ip)
3929 {
3930 	unsigned long flags;
3931 
3932 	if (unlikely(current->lockdep_recursion))
3933 		return;
3934 
3935 	raw_local_irq_save(flags);
3936 	check_flags(flags);
3937 	current->lockdep_recursion = 1;
3938 	trace_lock_release(lock, ip);
3939 	if (__lock_release(lock, nested, ip))
3940 		check_chain_key(current);
3941 	current->lockdep_recursion = 0;
3942 	raw_local_irq_restore(flags);
3943 }
3944 EXPORT_SYMBOL_GPL(lock_release);
3945 
3946 int lock_is_held_type(const struct lockdep_map *lock, int read)
3947 {
3948 	unsigned long flags;
3949 	int ret = 0;
3950 
3951 	if (unlikely(current->lockdep_recursion))
3952 		return 1; /* avoid false negative lockdep_assert_held() */
3953 
3954 	raw_local_irq_save(flags);
3955 	check_flags(flags);
3956 
3957 	current->lockdep_recursion = 1;
3958 	ret = __lock_is_held(lock, read);
3959 	current->lockdep_recursion = 0;
3960 	raw_local_irq_restore(flags);
3961 
3962 	return ret;
3963 }
3964 EXPORT_SYMBOL_GPL(lock_is_held_type);
3965 
3966 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
3967 {
3968 	struct pin_cookie cookie = NIL_COOKIE;
3969 	unsigned long flags;
3970 
3971 	if (unlikely(current->lockdep_recursion))
3972 		return cookie;
3973 
3974 	raw_local_irq_save(flags);
3975 	check_flags(flags);
3976 
3977 	current->lockdep_recursion = 1;
3978 	cookie = __lock_pin_lock(lock);
3979 	current->lockdep_recursion = 0;
3980 	raw_local_irq_restore(flags);
3981 
3982 	return cookie;
3983 }
3984 EXPORT_SYMBOL_GPL(lock_pin_lock);
3985 
3986 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3987 {
3988 	unsigned long flags;
3989 
3990 	if (unlikely(current->lockdep_recursion))
3991 		return;
3992 
3993 	raw_local_irq_save(flags);
3994 	check_flags(flags);
3995 
3996 	current->lockdep_recursion = 1;
3997 	__lock_repin_lock(lock, cookie);
3998 	current->lockdep_recursion = 0;
3999 	raw_local_irq_restore(flags);
4000 }
4001 EXPORT_SYMBOL_GPL(lock_repin_lock);
4002 
4003 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4004 {
4005 	unsigned long flags;
4006 
4007 	if (unlikely(current->lockdep_recursion))
4008 		return;
4009 
4010 	raw_local_irq_save(flags);
4011 	check_flags(flags);
4012 
4013 	current->lockdep_recursion = 1;
4014 	__lock_unpin_lock(lock, cookie);
4015 	current->lockdep_recursion = 0;
4016 	raw_local_irq_restore(flags);
4017 }
4018 EXPORT_SYMBOL_GPL(lock_unpin_lock);
4019 
4020 #ifdef CONFIG_LOCK_STAT
4021 static int
4022 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
4023 			   unsigned long ip)
4024 {
4025 	if (!debug_locks_off())
4026 		return 0;
4027 	if (debug_locks_silent)
4028 		return 0;
4029 
4030 	pr_warn("\n");
4031 	pr_warn("=================================\n");
4032 	pr_warn("WARNING: bad contention detected!\n");
4033 	print_kernel_ident();
4034 	pr_warn("---------------------------------\n");
4035 	pr_warn("%s/%d is trying to contend lock (",
4036 		curr->comm, task_pid_nr(curr));
4037 	print_lockdep_cache(lock);
4038 	pr_cont(") at:\n");
4039 	print_ip_sym(ip);
4040 	pr_warn("but there are no locks held!\n");
4041 	pr_warn("\nother info that might help us debug this:\n");
4042 	lockdep_print_held_locks(curr);
4043 
4044 	pr_warn("\nstack backtrace:\n");
4045 	dump_stack();
4046 
4047 	return 0;
4048 }
4049 
4050 static void
4051 __lock_contended(struct lockdep_map *lock, unsigned long ip)
4052 {
4053 	struct task_struct *curr = current;
4054 	struct held_lock *hlock;
4055 	struct lock_class_stats *stats;
4056 	unsigned int depth;
4057 	int i, contention_point, contending_point;
4058 
4059 	depth = curr->lockdep_depth;
4060 	/*
4061 	 * Whee, we contended on this lock, except it seems we're not
4062 	 * actually trying to acquire anything much at all..
4063 	 */
4064 	if (DEBUG_LOCKS_WARN_ON(!depth))
4065 		return;
4066 
4067 	hlock = find_held_lock(curr, lock, depth, &i);
4068 	if (!hlock) {
4069 		print_lock_contention_bug(curr, lock, ip);
4070 		return;
4071 	}
4072 
4073 	if (hlock->instance != lock)
4074 		return;
4075 
4076 	hlock->waittime_stamp = lockstat_clock();
4077 
4078 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4079 	contending_point = lock_point(hlock_class(hlock)->contending_point,
4080 				      lock->ip);
4081 
4082 	stats = get_lock_stats(hlock_class(hlock));
4083 	if (contention_point < LOCKSTAT_POINTS)
4084 		stats->contention_point[contention_point]++;
4085 	if (contending_point < LOCKSTAT_POINTS)
4086 		stats->contending_point[contending_point]++;
4087 	if (lock->cpu != smp_processor_id())
4088 		stats->bounces[bounce_contended + !!hlock->read]++;
4089 	put_lock_stats(stats);
4090 }
4091 
4092 static void
4093 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
4094 {
4095 	struct task_struct *curr = current;
4096 	struct held_lock *hlock;
4097 	struct lock_class_stats *stats;
4098 	unsigned int depth;
4099 	u64 now, waittime = 0;
4100 	int i, cpu;
4101 
4102 	depth = curr->lockdep_depth;
4103 	/*
4104 	 * Yay, we acquired ownership of this lock we didn't try to
4105 	 * acquire, how the heck did that happen?
4106 	 */
4107 	if (DEBUG_LOCKS_WARN_ON(!depth))
4108 		return;
4109 
4110 	hlock = find_held_lock(curr, lock, depth, &i);
4111 	if (!hlock) {
4112 		print_lock_contention_bug(curr, lock, _RET_IP_);
4113 		return;
4114 	}
4115 
4116 	if (hlock->instance != lock)
4117 		return;
4118 
4119 	cpu = smp_processor_id();
4120 	if (hlock->waittime_stamp) {
4121 		now = lockstat_clock();
4122 		waittime = now - hlock->waittime_stamp;
4123 		hlock->holdtime_stamp = now;
4124 	}
4125 
4126 	trace_lock_acquired(lock, ip);
4127 
4128 	stats = get_lock_stats(hlock_class(hlock));
4129 	if (waittime) {
4130 		if (hlock->read)
4131 			lock_time_inc(&stats->read_waittime, waittime);
4132 		else
4133 			lock_time_inc(&stats->write_waittime, waittime);
4134 	}
4135 	if (lock->cpu != cpu)
4136 		stats->bounces[bounce_acquired + !!hlock->read]++;
4137 	put_lock_stats(stats);
4138 
4139 	lock->cpu = cpu;
4140 	lock->ip = ip;
4141 }
4142 
4143 void lock_contended(struct lockdep_map *lock, unsigned long ip)
4144 {
4145 	unsigned long flags;
4146 
4147 	if (unlikely(!lock_stat))
4148 		return;
4149 
4150 	if (unlikely(current->lockdep_recursion))
4151 		return;
4152 
4153 	raw_local_irq_save(flags);
4154 	check_flags(flags);
4155 	current->lockdep_recursion = 1;
4156 	trace_lock_contended(lock, ip);
4157 	__lock_contended(lock, ip);
4158 	current->lockdep_recursion = 0;
4159 	raw_local_irq_restore(flags);
4160 }
4161 EXPORT_SYMBOL_GPL(lock_contended);
4162 
4163 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4164 {
4165 	unsigned long flags;
4166 
4167 	if (unlikely(!lock_stat))
4168 		return;
4169 
4170 	if (unlikely(current->lockdep_recursion))
4171 		return;
4172 
4173 	raw_local_irq_save(flags);
4174 	check_flags(flags);
4175 	current->lockdep_recursion = 1;
4176 	__lock_acquired(lock, ip);
4177 	current->lockdep_recursion = 0;
4178 	raw_local_irq_restore(flags);
4179 }
4180 EXPORT_SYMBOL_GPL(lock_acquired);
4181 #endif
4182 
4183 /*
4184  * Used by the testsuite, sanitize the validator state
4185  * after a simulated failure:
4186  */
4187 
4188 void lockdep_reset(void)
4189 {
4190 	unsigned long flags;
4191 	int i;
4192 
4193 	raw_local_irq_save(flags);
4194 	current->curr_chain_key = 0;
4195 	current->lockdep_depth = 0;
4196 	current->lockdep_recursion = 0;
4197 	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4198 	nr_hardirq_chains = 0;
4199 	nr_softirq_chains = 0;
4200 	nr_process_chains = 0;
4201 	debug_locks = 1;
4202 	for (i = 0; i < CHAINHASH_SIZE; i++)
4203 		INIT_HLIST_HEAD(chainhash_table + i);
4204 	raw_local_irq_restore(flags);
4205 }
4206 
4207 static void zap_class(struct lock_class *class)
4208 {
4209 	int i;
4210 
4211 	/*
4212 	 * Remove all dependencies this lock is
4213 	 * involved in:
4214 	 */
4215 	for (i = 0; i < nr_list_entries; i++) {
4216 		if (list_entries[i].class == class)
4217 			list_del_rcu(&list_entries[i].entry);
4218 	}
4219 	/*
4220 	 * Unhash the class and remove it from the all_lock_classes list:
4221 	 */
4222 	hlist_del_rcu(&class->hash_entry);
4223 	list_del_rcu(&class->lock_entry);
4224 
4225 	RCU_INIT_POINTER(class->key, NULL);
4226 	RCU_INIT_POINTER(class->name, NULL);
4227 }
4228 
4229 static inline int within(const void *addr, void *start, unsigned long size)
4230 {
4231 	return addr >= start && addr < start + size;
4232 }
4233 
4234 /*
4235  * Used in module.c to remove lock classes from memory that is going to be
4236  * freed; and possibly re-used by other modules.
4237  *
4238  * We will have had one sync_sched() before getting here, so we're guaranteed
4239  * nobody will look up these exact classes -- they're properly dead but still
4240  * allocated.
4241  */
4242 void lockdep_free_key_range(void *start, unsigned long size)
4243 {
4244 	struct lock_class *class;
4245 	struct hlist_head *head;
4246 	unsigned long flags;
4247 	int i;
4248 	int locked;
4249 
4250 	raw_local_irq_save(flags);
4251 	locked = graph_lock();
4252 
4253 	/*
4254 	 * Unhash all classes that were created by this module:
4255 	 */
4256 	for (i = 0; i < CLASSHASH_SIZE; i++) {
4257 		head = classhash_table + i;
4258 		hlist_for_each_entry_rcu(class, head, hash_entry) {
4259 			if (within(class->key, start, size))
4260 				zap_class(class);
4261 			else if (within(class->name, start, size))
4262 				zap_class(class);
4263 		}
4264 	}
4265 
4266 	if (locked)
4267 		graph_unlock();
4268 	raw_local_irq_restore(flags);
4269 
4270 	/*
4271 	 * Wait for any possible iterators from look_up_lock_class() to pass
4272 	 * before continuing to free the memory they refer to.
4273 	 *
4274 	 * sync_sched() is sufficient because the read-side is IRQ disable.
4275 	 */
4276 	synchronize_sched();
4277 
4278 	/*
4279 	 * XXX at this point we could return the resources to the pool;
4280 	 * instead we leak them. We would need to change to bitmap allocators
4281 	 * instead of the linear allocators we have now.
4282 	 */
4283 }
4284 
4285 void lockdep_reset_lock(struct lockdep_map *lock)
4286 {
4287 	struct lock_class *class;
4288 	struct hlist_head *head;
4289 	unsigned long flags;
4290 	int i, j;
4291 	int locked;
4292 
4293 	raw_local_irq_save(flags);
4294 
4295 	/*
4296 	 * Remove all classes this lock might have:
4297 	 */
4298 	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4299 		/*
4300 		 * If the class exists we look it up and zap it:
4301 		 */
4302 		class = look_up_lock_class(lock, j);
4303 		if (class)
4304 			zap_class(class);
4305 	}
4306 	/*
4307 	 * Debug check: in the end all mapped classes should
4308 	 * be gone.
4309 	 */
4310 	locked = graph_lock();
4311 	for (i = 0; i < CLASSHASH_SIZE; i++) {
4312 		head = classhash_table + i;
4313 		hlist_for_each_entry_rcu(class, head, hash_entry) {
4314 			int match = 0;
4315 
4316 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4317 				match |= class == lock->class_cache[j];
4318 
4319 			if (unlikely(match)) {
4320 				if (debug_locks_off_graph_unlock()) {
4321 					/*
4322 					 * We all just reset everything, how did it match?
4323 					 */
4324 					WARN_ON(1);
4325 				}
4326 				goto out_restore;
4327 			}
4328 		}
4329 	}
4330 	if (locked)
4331 		graph_unlock();
4332 
4333 out_restore:
4334 	raw_local_irq_restore(flags);
4335 }
4336 
4337 void __init lockdep_info(void)
4338 {
4339 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4340 
4341 	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
4342 	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
4343 	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
4344 	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
4345 	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
4346 	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
4347 	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
4348 
4349 	printk(" memory used by lock dependency info: %lu kB\n",
4350 		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
4351 		sizeof(struct list_head) * CLASSHASH_SIZE +
4352 		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
4353 		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
4354 		sizeof(struct list_head) * CHAINHASH_SIZE
4355 #ifdef CONFIG_PROVE_LOCKING
4356 		+ sizeof(struct circular_queue)
4357 #endif
4358 		) / 1024
4359 		);
4360 
4361 	printk(" per task-struct memory footprint: %lu bytes\n",
4362 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
4363 }
4364 
4365 static void
4366 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
4367 		     const void *mem_to, struct held_lock *hlock)
4368 {
4369 	if (!debug_locks_off())
4370 		return;
4371 	if (debug_locks_silent)
4372 		return;
4373 
4374 	pr_warn("\n");
4375 	pr_warn("=========================\n");
4376 	pr_warn("WARNING: held lock freed!\n");
4377 	print_kernel_ident();
4378 	pr_warn("-------------------------\n");
4379 	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
4380 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
4381 	print_lock(hlock);
4382 	lockdep_print_held_locks(curr);
4383 
4384 	pr_warn("\nstack backtrace:\n");
4385 	dump_stack();
4386 }
4387 
4388 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4389 				const void* lock_from, unsigned long lock_len)
4390 {
4391 	return lock_from + lock_len <= mem_from ||
4392 		mem_from + mem_len <= lock_from;
4393 }
4394 
4395 /*
4396  * Called when kernel memory is freed (or unmapped), or if a lock
4397  * is destroyed or reinitialized - this code checks whether there is
4398  * any held lock in the memory range of <from> to <to>:
4399  */
4400 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4401 {
4402 	struct task_struct *curr = current;
4403 	struct held_lock *hlock;
4404 	unsigned long flags;
4405 	int i;
4406 
4407 	if (unlikely(!debug_locks))
4408 		return;
4409 
4410 	local_irq_save(flags);
4411 	for (i = 0; i < curr->lockdep_depth; i++) {
4412 		hlock = curr->held_locks + i;
4413 
4414 		if (not_in_range(mem_from, mem_len, hlock->instance,
4415 					sizeof(*hlock->instance)))
4416 			continue;
4417 
4418 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
4419 		break;
4420 	}
4421 	local_irq_restore(flags);
4422 }
4423 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
4424 
4425 static void print_held_locks_bug(void)
4426 {
4427 	if (!debug_locks_off())
4428 		return;
4429 	if (debug_locks_silent)
4430 		return;
4431 
4432 	pr_warn("\n");
4433 	pr_warn("====================================\n");
4434 	pr_warn("WARNING: %s/%d still has locks held!\n",
4435 	       current->comm, task_pid_nr(current));
4436 	print_kernel_ident();
4437 	pr_warn("------------------------------------\n");
4438 	lockdep_print_held_locks(current);
4439 	pr_warn("\nstack backtrace:\n");
4440 	dump_stack();
4441 }
4442 
4443 void debug_check_no_locks_held(void)
4444 {
4445 	if (unlikely(current->lockdep_depth > 0))
4446 		print_held_locks_bug();
4447 }
4448 EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
4449 
4450 #ifdef __KERNEL__
4451 void debug_show_all_locks(void)
4452 {
4453 	struct task_struct *g, *p;
4454 	int count = 10;
4455 	int unlock = 1;
4456 
4457 	if (unlikely(!debug_locks)) {
4458 		pr_warn("INFO: lockdep is turned off.\n");
4459 		return;
4460 	}
4461 	pr_warn("\nShowing all locks held in the system:\n");
4462 
4463 	/*
4464 	 * Here we try to get the tasklist_lock as hard as possible,
4465 	 * if not successful after 2 seconds we ignore it (but keep
4466 	 * trying). This is to enable a debug printout even if a
4467 	 * tasklist_lock-holding task deadlocks or crashes.
4468 	 */
4469 retry:
4470 	if (!read_trylock(&tasklist_lock)) {
4471 		if (count == 10)
4472 			pr_warn("hm, tasklist_lock locked, retrying... ");
4473 		if (count) {
4474 			count--;
4475 			pr_cont(" #%d", 10-count);
4476 			mdelay(200);
4477 			goto retry;
4478 		}
4479 		pr_cont(" ignoring it.\n");
4480 		unlock = 0;
4481 	} else {
4482 		if (count != 10)
4483 			pr_cont(" locked it.\n");
4484 	}
4485 
4486 	do_each_thread(g, p) {
4487 		/*
4488 		 * It's not reliable to print a task's held locks
4489 		 * if it's not sleeping (or if it's not the current
4490 		 * task):
4491 		 */
4492 		if (p->state == TASK_RUNNING && p != current)
4493 			continue;
4494 		if (p->lockdep_depth)
4495 			lockdep_print_held_locks(p);
4496 		if (!unlock)
4497 			if (read_trylock(&tasklist_lock))
4498 				unlock = 1;
4499 		touch_nmi_watchdog();
4500 	} while_each_thread(g, p);
4501 
4502 	pr_warn("\n");
4503 	pr_warn("=============================================\n\n");
4504 
4505 	if (unlock)
4506 		read_unlock(&tasklist_lock);
4507 }
4508 EXPORT_SYMBOL_GPL(debug_show_all_locks);
4509 #endif
4510 
4511 /*
4512  * Careful: only use this function if you are sure that
4513  * the task cannot run in parallel!
4514  */
4515 void debug_show_held_locks(struct task_struct *task)
4516 {
4517 	if (unlikely(!debug_locks)) {
4518 		printk("INFO: lockdep is turned off.\n");
4519 		return;
4520 	}
4521 	lockdep_print_held_locks(task);
4522 }
4523 EXPORT_SYMBOL_GPL(debug_show_held_locks);
4524 
4525 asmlinkage __visible void lockdep_sys_exit(void)
4526 {
4527 	struct task_struct *curr = current;
4528 
4529 	if (unlikely(curr->lockdep_depth)) {
4530 		if (!debug_locks_off())
4531 			return;
4532 		pr_warn("\n");
4533 		pr_warn("================================================\n");
4534 		pr_warn("WARNING: lock held when returning to user space!\n");
4535 		print_kernel_ident();
4536 		pr_warn("------------------------------------------------\n");
4537 		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
4538 				curr->comm, curr->pid);
4539 		lockdep_print_held_locks(curr);
4540 	}
4541 
4542 	/*
4543 	 * The lock history for each syscall should be independent. So wipe the
4544 	 * slate clean on return to userspace.
4545 	 */
4546 	lockdep_invariant_state(false);
4547 }
4548 
4549 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4550 {
4551 	struct task_struct *curr = current;
4552 
4553 	/* Note: the following can be executed concurrently, so be careful. */
4554 	pr_warn("\n");
4555 	pr_warn("=============================\n");
4556 	pr_warn("WARNING: suspicious RCU usage\n");
4557 	print_kernel_ident();
4558 	pr_warn("-----------------------------\n");
4559 	pr_warn("%s:%d %s!\n", file, line, s);
4560 	pr_warn("\nother info that might help us debug this:\n\n");
4561 	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
4562 	       !rcu_lockdep_current_cpu_online()
4563 			? "RCU used illegally from offline CPU!\n"
4564 			: !rcu_is_watching()
4565 				? "RCU used illegally from idle CPU!\n"
4566 				: "",
4567 	       rcu_scheduler_active, debug_locks);
4568 
4569 	/*
4570 	 * If a CPU is in the RCU-free window in idle (ie: in the section
4571 	 * between rcu_idle_enter() and rcu_idle_exit(), then RCU
4572 	 * considers that CPU to be in an "extended quiescent state",
4573 	 * which means that RCU will be completely ignoring that CPU.
4574 	 * Therefore, rcu_read_lock() and friends have absolutely no
4575 	 * effect on a CPU running in that state. In other words, even if
4576 	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
4577 	 * delete data structures out from under it.  RCU really has no
4578 	 * choice here: we need to keep an RCU-free window in idle where
4579 	 * the CPU may possibly enter into low power mode. This way we can
4580 	 * notice an extended quiescent state to other CPUs that started a grace
4581 	 * period. Otherwise we would delay any grace period as long as we run
4582 	 * in the idle task.
4583 	 *
4584 	 * So complain bitterly if someone does call rcu_read_lock(),
4585 	 * rcu_read_lock_bh() and so on from extended quiescent states.
4586 	 */
4587 	if (!rcu_is_watching())
4588 		pr_warn("RCU used illegally from extended quiescent state!\n");
4589 
4590 	lockdep_print_held_locks(curr);
4591 	pr_warn("\nstack backtrace:\n");
4592 	dump_stack();
4593 }
4594 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
4595