xref: /openbmc/linux/kernel/locking/lockdep.c (revision b11ed18e)
1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e., if at any time in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies at runtime.
27  */
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/sched/clock.h>
32 #include <linux/sched/task.h>
33 #include <linux/sched/mm.h>
34 #include <linux/delay.h>
35 #include <linux/module.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/spinlock.h>
39 #include <linux/kallsyms.h>
40 #include <linux/interrupt.h>
41 #include <linux/stacktrace.h>
42 #include <linux/debug_locks.h>
43 #include <linux/irqflags.h>
44 #include <linux/utsname.h>
45 #include <linux/hash.h>
46 #include <linux/ftrace.h>
47 #include <linux/stringify.h>
48 #include <linux/bitmap.h>
49 #include <linux/bitops.h>
50 #include <linux/gfp.h>
51 #include <linux/random.h>
52 #include <linux/jhash.h>
53 #include <linux/nmi.h>
54 #include <linux/rcupdate.h>
55 #include <linux/kprobes.h>
56 
57 #include <asm/sections.h>
58 
59 #include "lockdep_internals.h"
60 
61 #define CREATE_TRACE_POINTS
62 #include <trace/events/lock.h>
63 
64 #ifdef CONFIG_PROVE_LOCKING
65 int prove_locking = 1;
66 module_param(prove_locking, int, 0644);
67 #else
68 #define prove_locking 0
69 #endif
70 
71 #ifdef CONFIG_LOCK_STAT
72 int lock_stat = 1;
73 module_param(lock_stat, int, 0644);
74 #else
75 #define lock_stat 0
76 #endif
77 
78 /*
79  * lockdep_lock: protects the lockdep graph, the hashes and the
80  *               class/list/hash allocators.
81  *
82  * This is one of the rare exceptions where it's justified
83  * to use a raw spinlock - we really don't want the spinlock
84  * code to recurse back into the lockdep code...
85  */
86 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
87 static struct task_struct *lockdep_selftest_task_struct;
88 
89 static int graph_lock(void)
90 {
91 	arch_spin_lock(&lockdep_lock);
92 	/*
93 	 * Make sure that if another CPU detected a bug while
94 	 * walking the graph we dont change it (while the other
95 	 * CPU is busy printing out stuff with the graph lock
96 	 * dropped already)
97 	 */
98 	if (!debug_locks) {
99 		arch_spin_unlock(&lockdep_lock);
100 		return 0;
101 	}
102 	/* prevent any recursion within lockdep from causing deadlocks */
103 	current->lockdep_recursion++;
104 	return 1;
105 }
106 
107 static inline int graph_unlock(void)
108 {
109 	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
110 		/*
111 		 * The lockdep graph lock isn't locked even though we expect it
112 		 * to be; we're confused now, bye!
113 		 */
114 		return DEBUG_LOCKS_WARN_ON(1);
115 	}
116 
117 	current->lockdep_recursion--;
118 	arch_spin_unlock(&lockdep_lock);
119 	return 0;
120 }
121 
122 /*
123  * Turn lock debugging off and return with 0 if it was off already,
124  * and also release the graph lock:
125  */
126 static inline int debug_locks_off_graph_unlock(void)
127 {
128 	int ret = debug_locks_off();
129 
130 	arch_spin_unlock(&lockdep_lock);
131 
132 	return ret;
133 }
134 
135 unsigned long nr_list_entries;
136 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
137 static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
138 
139 /*
140  * All data structures here are protected by the global lockdep_lock.
141  *
142  * nr_lock_classes is the number of elements of lock_classes[] that are
143  * in use.
144  */
145 #define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
146 #define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
147 static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
148 unsigned long nr_lock_classes;
149 #ifndef CONFIG_DEBUG_LOCKDEP
150 static
151 #endif
152 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
153 
154 static inline struct lock_class *hlock_class(struct held_lock *hlock)
155 {
156 	if (!hlock->class_idx) {
157 		/*
158 		 * Someone passed in garbage, we give up.
159 		 */
160 		DEBUG_LOCKS_WARN_ON(1);
161 		return NULL;
162 	}
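	/* class_idx is 1-based (0 means "no class"), hence the -1 below. */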
163 	return lock_classes + hlock->class_idx - 1;
164 }
165 
166 #ifdef CONFIG_LOCK_STAT
167 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
168 
169 static inline u64 lockstat_clock(void)
170 {
171 	return local_clock();
172 }
173 
174 static int lock_point(unsigned long points[], unsigned long ip)
175 {
176 	int i;
177 
178 	for (i = 0; i < LOCKSTAT_POINTS; i++) {
179 		if (points[i] == 0) {
180 			points[i] = ip;
181 			break;
182 		}
183 		if (points[i] == ip)
184 			break;
185 	}
186 
187 	return i;
188 }
189 
190 static void lock_time_inc(struct lock_time *lt, u64 time)
191 {
192 	if (time > lt->max)
193 		lt->max = time;
194 
195 	if (time < lt->min || !lt->nr)
196 		lt->min = time;
197 
198 	lt->total += time;
199 	lt->nr++;
200 }
201 
202 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
203 {
204 	if (!src->nr)
205 		return;
206 
207 	if (src->max > dst->max)
208 		dst->max = src->max;
209 
210 	if (src->min < dst->min || !dst->nr)
211 		dst->min = src->min;
212 
213 	dst->total += src->total;
214 	dst->nr += src->nr;
215 }
216 
217 struct lock_class_stats lock_stats(struct lock_class *class)
218 {
219 	struct lock_class_stats stats;
220 	int cpu, i;
221 
222 	memset(&stats, 0, sizeof(struct lock_class_stats));
223 	for_each_possible_cpu(cpu) {
224 		struct lock_class_stats *pcs =
225 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
226 
227 		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
228 			stats.contention_point[i] += pcs->contention_point[i];
229 
230 		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
231 			stats.contending_point[i] += pcs->contending_point[i];
232 
233 		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
234 		lock_time_add(&pcs->write_waittime, &stats.write_waittime);
235 
236 		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
237 		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
238 
239 		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
240 			stats.bounces[i] += pcs->bounces[i];
241 	}
242 
243 	return stats;
244 }
245 
246 void clear_lock_stats(struct lock_class *class)
247 {
248 	int cpu;
249 
250 	for_each_possible_cpu(cpu) {
251 		struct lock_class_stats *cpu_stats =
252 			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
253 
254 		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
255 	}
256 	memset(class->contention_point, 0, sizeof(class->contention_point));
257 	memset(class->contending_point, 0, sizeof(class->contending_point));
258 }
259 
260 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
261 {
262 	return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
263 }
264 
265 static void lock_release_holdtime(struct held_lock *hlock)
266 {
267 	struct lock_class_stats *stats;
268 	u64 holdtime;
269 
270 	if (!lock_stat)
271 		return;
272 
273 	holdtime = lockstat_clock() - hlock->holdtime_stamp;
274 
275 	stats = get_lock_stats(hlock_class(hlock));
276 	if (hlock->read)
277 		lock_time_inc(&stats->read_holdtime, holdtime);
278 	else
279 		lock_time_inc(&stats->write_holdtime, holdtime);
280 }
281 #else
282 static inline void lock_release_holdtime(struct held_lock *hlock)
283 {
284 }
285 #endif
286 
287 /*
288  * We keep a global list of all lock classes. The list is only accessed with
289  * the lockdep spinlock held. free_lock_classes is a list with free
290  * elements. These elements are linked together by the lock_entry member in
291  * struct lock_class.
292  */
293 LIST_HEAD(all_lock_classes);
294 static LIST_HEAD(free_lock_classes);
295 
296 /**
297  * struct pending_free - information about data structures about to be freed
298  * @zapped: Head of a list with struct lock_class elements.
299  * @lock_chains_being_freed: Bitmap that indicates which lock_chains[] elements
300  *	are about to be freed.
301  */
302 struct pending_free {
303 	struct list_head zapped;
304 	DECLARE_BITMAP(lock_chains_being_freed, MAX_LOCKDEP_CHAINS);
305 };
306 
307 /**
308  * struct delayed_free - data structures used for delayed freeing
309  *
310  * A data structure for delayed freeing of data structures that may be
311  * accessed by RCU readers at the time these were freed.
312  *
313  * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
314  * @index:     Index of @pf to which freed data structures are added.
315  * @scheduled: Whether or not an RCU callback has been scheduled.
316  * @pf:        Array with information about data structures about to be freed.
317  */
318 static struct delayed_free {
319 	struct rcu_head		rcu_head;
320 	int			index;
321 	int			scheduled;
322 	struct pending_free	pf[2];
323 } delayed_free;
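
/*
 * In outline: the two pending_free slots alternate, so that one batch of
 * zapped classes can sit out an RCU grace period while the other slot keeps
 * collecting newly zapped entries; delayed_free.index selects the slot that
 * is currently being filled.
 */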
324 
325 /*
326  * The lockdep classes are in a hash-table as well, for fast lookup:
327  */
328 #define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
329 #define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
330 #define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
331 #define classhashentry(key)	(classhash_table + __classhashfn((key)))
332 
333 static struct hlist_head classhash_table[CLASSHASH_SIZE];
334 
335 /*
336  * We put the lock dependency chains into a hash-table as well, to cache
337  * their existence:
338  */
339 #define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
340 #define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
341 #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
342 #define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
343 
344 static struct hlist_head chainhash_table[CHAINHASH_SIZE];
345 
346 /*
347  * The hash key of the lock dependency chains is a hash itself too:
348  * it's a hash of all locks taken up to that lock, including that lock.
349  * It's a 64-bit hash, because it's important for the keys to be
350  * unique.
351  */
352 static inline u64 iterate_chain_key(u64 key, u32 idx)
353 {
354 	u32 k0 = key, k1 = key >> 32;
355 
356 	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
357 
358 	return k0 | (u64)k1 << 32;
359 }
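
/*
 * For illustration, a chain's key is built by folding the class index of
 * each held lock into the running hash, in acquisition order:
 *
 *	chain_key = 0;
 *	for each held lock, oldest first:
 *		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
 *
 * check_lock_chain_key() below recomputes a chain's key the same way when
 * verifying the lock_chains[] table.
 */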
360 
361 void lockdep_off(void)
362 {
363 	current->lockdep_recursion++;
364 }
365 EXPORT_SYMBOL(lockdep_off);
366 
367 void lockdep_on(void)
368 {
369 	current->lockdep_recursion--;
370 }
371 EXPORT_SYMBOL(lockdep_on);
372 
373 void lockdep_set_selftest_task(struct task_struct *task)
374 {
375 	lockdep_selftest_task_struct = task;
376 }
377 
378 /*
379  * Debugging switches:
380  */
381 
382 #define VERBOSE			0
383 #define VERY_VERBOSE		0
384 
385 #if VERBOSE
386 # define HARDIRQ_VERBOSE	1
387 # define SOFTIRQ_VERBOSE	1
388 #else
389 # define HARDIRQ_VERBOSE	0
390 # define SOFTIRQ_VERBOSE	0
391 #endif
392 
393 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
394 /*
395  * Quick filtering for interesting events:
396  */
397 static int class_filter(struct lock_class *class)
398 {
399 #if 0
400 	/* Example */
401 	if (class->name_version == 1 &&
402 			!strcmp(class->name, "lockname"))
403 		return 1;
404 	if (class->name_version == 1 &&
405 			!strcmp(class->name, "&struct->lockfield"))
406 		return 1;
407 #endif
408 	/* Filter everything else. Returning 1 would allow everything else. */
409 	return 0;
410 }
411 #endif
412 
413 static int verbose(struct lock_class *class)
414 {
415 #if VERBOSE
416 	return class_filter(class);
417 #endif
418 	return 0;
419 }
420 
421 /*
422  * Stack-trace: tightly packed array of stack backtrace
423  * addresses. Protected by the graph_lock.
424  */
425 unsigned long nr_stack_trace_entries;
426 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
427 
428 static void print_lockdep_off(const char *bug_msg)
429 {
430 	printk(KERN_DEBUG "%s\n", bug_msg);
431 	printk(KERN_DEBUG "turning off the locking correctness validator.\n");
432 #ifdef CONFIG_LOCK_STAT
433 	printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
434 #endif
435 }
436 
437 static int save_trace(struct stack_trace *trace)
438 {
439 	trace->nr_entries = 0;
440 	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
441 	trace->entries = stack_trace + nr_stack_trace_entries;
442 
443 	trace->skip = 3;
444 
445 	save_stack_trace(trace);
446 
447 	/*
448 	 * Some daft arches put -1 at the end to indicate it's a full trace.
449 	 *
450 	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
451 	 * complete trace that maxes out the entries provided will be reported
452 	 * as incomplete, friggin useless </rant>
453 	 */
454 	if (trace->nr_entries != 0 &&
455 	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
456 		trace->nr_entries--;
457 
458 	trace->max_entries = trace->nr_entries;
459 
460 	nr_stack_trace_entries += trace->nr_entries;
461 
462 	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
463 		if (!debug_locks_off_graph_unlock())
464 			return 0;
465 
466 		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
467 		dump_stack();
468 
469 		return 0;
470 	}
471 
472 	return 1;
473 }
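
/*
 * Note that saved traces are packed back to back into the global
 * stack_trace[] array and nr_stack_trace_entries only ever grows, so trace
 * storage is effectively never reclaimed.
 */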
474 
475 unsigned int nr_hardirq_chains;
476 unsigned int nr_softirq_chains;
477 unsigned int nr_process_chains;
478 unsigned int max_lockdep_depth;
479 
480 #ifdef CONFIG_DEBUG_LOCKDEP
481 /*
482  * Various lockdep statistics:
483  */
484 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
485 #endif
486 
487 /*
488  * Locking printouts:
489  */
490 
491 #define __USAGE(__STATE)						\
492 	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
493 	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
494 	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
495 	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
496 
497 static const char *usage_str[] =
498 {
499 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
500 #include "lockdep_states.h"
501 #undef LOCKDEP_STATE
502 	[LOCK_USED] = "INITIAL USE",
503 };
504 
505 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
506 {
507 	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
508 }
509 
510 static inline unsigned long lock_flag(enum lock_usage_bit bit)
511 {
512 	return 1UL << bit;
513 }
514 
515 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
516 {
517 	char c = '.';
518 
519 	if (class->usage_mask & lock_flag(bit + 2))
520 		c = '+';
521 	if (class->usage_mask & lock_flag(bit)) {
522 		c = '-';
523 		if (class->usage_mask & lock_flag(bit + 2))
524 			c = '?';
525 	}
526 
527 	return c;
528 }
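
/*
 * The resulting character summarizes a class's usage for one irq state:
 *
 *   '.'  never used in that irq context and never taken with it enabled
 *   '-'  used in that irq context (irq-safe)
 *   '+'  taken with that irq state enabled (irq-unsafe)
 *   '?'  both of the above
 */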
529 
530 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
531 {
532 	int i = 0;
533 
534 #define LOCKDEP_STATE(__STATE) 						\
535 	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
536 	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
537 #include "lockdep_states.h"
538 #undef LOCKDEP_STATE
539 
540 	usage[i] = '\0';
541 }
542 
543 static void __print_lock_name(struct lock_class *class)
544 {
545 	char str[KSYM_NAME_LEN];
546 	const char *name;
547 
548 	name = class->name;
549 	if (!name) {
550 		name = __get_key_name(class->key, str);
551 		printk(KERN_CONT "%s", name);
552 	} else {
553 		printk(KERN_CONT "%s", name);
554 		if (class->name_version > 1)
555 			printk(KERN_CONT "#%d", class->name_version);
556 		if (class->subclass)
557 			printk(KERN_CONT "/%d", class->subclass);
558 	}
559 }
560 
561 static void print_lock_name(struct lock_class *class)
562 {
563 	char usage[LOCK_USAGE_CHARS];
564 
565 	get_usage_chars(class, usage);
566 
567 	printk(KERN_CONT " (");
568 	__print_lock_name(class);
569 	printk(KERN_CONT "){%s}", usage);
570 }
571 
572 static void print_lockdep_cache(struct lockdep_map *lock)
573 {
574 	const char *name;
575 	char str[KSYM_NAME_LEN];
576 
577 	name = lock->name;
578 	if (!name)
579 		name = __get_key_name(lock->key->subkeys, str);
580 
581 	printk(KERN_CONT "%s", name);
582 }
583 
584 static void print_lock(struct held_lock *hlock)
585 {
586 	/*
587 	 * We can be called locklessly through debug_show_all_locks() so be
588 	 * extra careful, the hlock might have been released and cleared.
589 	 */
590 	unsigned int class_idx = hlock->class_idx;
591 
592 	/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
593 	barrier();
594 
595 	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
596 		printk(KERN_CONT "<RELEASED>\n");
597 		return;
598 	}
599 
600 	printk(KERN_CONT "%p", hlock->instance);
601 	print_lock_name(lock_classes + class_idx - 1);
602 	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
603 }
604 
605 static void lockdep_print_held_locks(struct task_struct *p)
606 {
607 	int i, depth = READ_ONCE(p->lockdep_depth);
608 
609 	if (!depth)
610 		printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
611 	else
612 		printk("%d lock%s held by %s/%d:\n", depth,
613 		       depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
614 	/*
615 	 * It's not reliable to print a task's held locks if it's not sleeping
616 	 * and it's not the current task.
617 	 */
618 	if (p->state == TASK_RUNNING && p != current)
619 		return;
620 	for (i = 0; i < depth; i++) {
621 		printk(" #%d: ", i);
622 		print_lock(p->held_locks + i);
623 	}
624 }
625 
626 static void print_kernel_ident(void)
627 {
628 	printk("%s %.*s %s\n", init_utsname()->release,
629 		(int)strcspn(init_utsname()->version, " "),
630 		init_utsname()->version,
631 		print_tainted());
632 }
633 
634 static int very_verbose(struct lock_class *class)
635 {
636 #if VERY_VERBOSE
637 	return class_filter(class);
638 #endif
639 	return 0;
640 }
641 
642 /*
643  * Is this the address of a static object:
644  */
645 #ifdef __KERNEL__
646 static int static_obj(const void *obj)
647 {
648 	unsigned long start = (unsigned long) &_stext,
649 		      end   = (unsigned long) &_end,
650 		      addr  = (unsigned long) obj;
651 
652 	/*
653 	 * static variable?
654 	 */
655 	if ((addr >= start) && (addr < end))
656 		return 1;
657 
658 	if (arch_is_kernel_data(addr))
659 		return 1;
660 
661 	/*
662 	 * in-kernel percpu var?
663 	 */
664 	if (is_kernel_percpu_address(addr))
665 		return 1;
666 
667 	/*
668 	 * module static or percpu var?
669 	 */
670 	return is_module_address(addr) || is_module_percpu_address(addr);
671 }
672 #endif
673 
674 /*
675  * To make lock name printouts unique, we calculate a unique
676  * class->name_version generation counter. The caller must hold the graph
677  * lock.
678  */
679 static int count_matching_names(struct lock_class *new_class)
680 {
681 	struct lock_class *class;
682 	int count = 0;
683 
684 	if (!new_class->name)
685 		return 0;
686 
687 	list_for_each_entry(class, &all_lock_classes, lock_entry) {
688 		if (new_class->key - new_class->subclass == class->key)
689 			return class->name_version;
690 		if (class->name && !strcmp(class->name, new_class->name))
691 			count = max(count, class->name_version);
692 	}
693 
694 	return count + 1;
695 }
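
/*
 * Classes that share a name are told apart in printouts by a
 * "#<name_version>" suffix (see __print_lock_name()).
 */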
696 
697 static inline struct lock_class *
698 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
699 {
700 	struct lockdep_subclass_key *key;
701 	struct hlist_head *hash_head;
702 	struct lock_class *class;
703 
704 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
705 		debug_locks_off();
706 		printk(KERN_ERR
707 			"BUG: looking up invalid subclass: %u\n", subclass);
708 		printk(KERN_ERR
709 			"turning off the locking correctness validator.\n");
710 		dump_stack();
711 		return NULL;
712 	}
713 
714 	/*
715 	 * If it is not initialised then it has never been locked,
716 	 * so it won't be present in the hash table.
717 	 */
718 	if (unlikely(!lock->key))
719 		return NULL;
720 
721 	/*
722 	 * NOTE: the class-key must be unique. For dynamic locks, a static
723 	 * lock_class_key variable is passed in through the mutex_init()
724 	 * (or spin_lock_init()) call - which acts as the key. For static
725 	 * locks we use the lock object itself as the key.
726 	 */
727 	BUILD_BUG_ON(sizeof(struct lock_class_key) >
728 			sizeof(struct lockdep_map));
729 
730 	key = lock->key->subkeys + subclass;
731 
732 	hash_head = classhashentry(key);
733 
734 	/*
735 	 * We do an RCU walk of the hash, see lockdep_free_key_range().
736 	 */
737 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
738 		return NULL;
739 
740 	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
741 		if (class->key == key) {
742 			/*
743 			 * Huh! same key, different name? Did someone trample
744 			 * on some memory? We're most confused.
745 			 */
746 			WARN_ON_ONCE(class->name != lock->name);
747 			return class;
748 		}
749 	}
750 
751 	return NULL;
752 }
753 
754 /*
755  * Static locks do not have their class-keys yet - for them the key is
756  * the lock object itself. If the lock is in the per cpu area, the
757  * canonical address of the lock (per cpu offset removed) is used.
758  */
759 static bool assign_lock_key(struct lockdep_map *lock)
760 {
761 	unsigned long can_addr, addr = (unsigned long)lock;
762 
763 #ifdef __KERNEL__
764 	/*
765 	 * lockdep_free_key_range() assumes that struct lock_class_key
766 	 * objects do not overlap. Since we use the address of lock
767 	 * objects as class key for static objects, check that the
768 	 * size of lock_class_key objects does not exceed the size of
769 	 * the smallest lock object.
770 	 */
771 	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(raw_spinlock_t));
772 #endif
773 
774 	if (__is_kernel_percpu_address(addr, &can_addr))
775 		lock->key = (void *)can_addr;
776 	else if (__is_module_percpu_address(addr, &can_addr))
777 		lock->key = (void *)can_addr;
778 	else if (static_obj(lock))
779 		lock->key = (void *)lock;
780 	else {
781 		/* Debug-check: all keys must be persistent! */
782 		debug_locks_off();
783 		pr_err("INFO: trying to register non-static key.\n");
784 		pr_err("the code is fine but needs lockdep annotation.\n");
785 		pr_err("turning off the locking correctness validator.\n");
786 		dump_stack();
787 		return false;
788 	}
789 
790 	return true;
791 }
792 
793 #ifdef CONFIG_DEBUG_LOCKDEP
794 
795 /* Check whether element @e occurs in list @h */
796 static bool in_list(struct list_head *e, struct list_head *h)
797 {
798 	struct list_head *f;
799 
800 	list_for_each(f, h) {
801 		if (e == f)
802 			return true;
803 	}
804 
805 	return false;
806 }
807 
808 /*
809  * Check whether entry @e occurs in any of the locks_after or locks_before
810  * lists.
811  */
812 static bool in_any_class_list(struct list_head *e)
813 {
814 	struct lock_class *class;
815 	int i;
816 
817 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
818 		class = &lock_classes[i];
819 		if (in_list(e, &class->locks_after) ||
820 		    in_list(e, &class->locks_before))
821 			return true;
822 	}
823 	return false;
824 }
825 
826 static bool class_lock_list_valid(struct lock_class *c, struct list_head *h)
827 {
828 	struct lock_list *e;
829 
830 	list_for_each_entry(e, h, entry) {
831 		if (e->links_to != c) {
832 			printk(KERN_INFO "class %s: mismatch for lock entry %ld; class %s <> %s",
833 			       c->name ? : "(?)",
834 			       (unsigned long)(e - list_entries),
835 			       e->links_to && e->links_to->name ?
836 			       e->links_to->name : "(?)",
837 			       e->class && e->class->name ? e->class->name :
838 			       "(?)");
839 			return false;
840 		}
841 	}
842 	return true;
843 }
844 
845 #ifdef CONFIG_PROVE_LOCKING
846 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
847 #endif
848 
849 static bool check_lock_chain_key(struct lock_chain *chain)
850 {
851 #ifdef CONFIG_PROVE_LOCKING
852 	u64 chain_key = 0;
853 	int i;
854 
855 	for (i = chain->base; i < chain->base + chain->depth; i++)
856 		chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
857 	/*
858 	 * The 'unsigned long long' casts avoid a compiler warning that would
859 	 * otherwise be reported when building tools/lib/lockdep.
860 	 */
861 	if (chain->chain_key != chain_key) {
862 		printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
863 		       (unsigned long long)(chain - lock_chains),
864 		       (unsigned long long)chain->chain_key,
865 		       (unsigned long long)chain_key);
866 		return false;
867 	}
868 #endif
869 	return true;
870 }
871 
872 static bool in_any_zapped_class_list(struct lock_class *class)
873 {
874 	struct pending_free *pf;
875 	int i;
876 
877 	for (i = 0, pf = delayed_free.pf; i < ARRAY_SIZE(delayed_free.pf); i++, pf++) {
878 		if (in_list(&class->lock_entry, &pf->zapped))
879 			return true;
880 	}
881 
882 	return false;
883 }
884 
885 static bool __check_data_structures(void)
886 {
887 	struct lock_class *class;
888 	struct lock_chain *chain;
889 	struct hlist_head *head;
890 	struct lock_list *e;
891 	int i;
892 
893 	/* Check whether all classes occur in a lock list. */
894 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
895 		class = &lock_classes[i];
896 		if (!in_list(&class->lock_entry, &all_lock_classes) &&
897 		    !in_list(&class->lock_entry, &free_lock_classes) &&
898 		    !in_any_zapped_class_list(class)) {
899 			printk(KERN_INFO "class %px/%s is not in any class list\n",
900 			       class, class->name ? : "(?)");
901 			return false;
902 		}
903 	}
904 
905 	/* Check whether all classes have valid lock lists. */
906 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
907 		class = &lock_classes[i];
908 		if (!class_lock_list_valid(class, &class->locks_before))
909 			return false;
910 		if (!class_lock_list_valid(class, &class->locks_after))
911 			return false;
912 	}
913 
914 	/* Check the chain_key of all lock chains. */
915 	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
916 		head = chainhash_table + i;
917 		hlist_for_each_entry_rcu(chain, head, entry) {
918 			if (!check_lock_chain_key(chain))
919 				return false;
920 		}
921 	}
922 
923 	/*
924 	 * Check whether all list entries that are in use occur in a class
925 	 * lock list.
926 	 */
927 	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
928 		e = list_entries + i;
929 		if (!in_any_class_list(&e->entry)) {
930 			printk(KERN_INFO "list entry %d is not in any class list; class %s <> %s\n",
931 			       (unsigned int)(e - list_entries),
932 			       e->class->name ? : "(?)",
933 			       e->links_to->name ? : "(?)");
934 			return false;
935 		}
936 	}
937 
938 	/*
939 	 * Check whether all list entries that are not in use do not occur in
940 	 * a class lock list.
941 	 */
942 	for_each_clear_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
943 		e = list_entries + i;
944 		if (in_any_class_list(&e->entry)) {
945 			printk(KERN_INFO "list entry %d occurs in a class list; class %s <> %s\n",
946 			       (unsigned int)(e - list_entries),
947 			       e->class && e->class->name ? e->class->name :
948 			       "(?)",
949 			       e->links_to && e->links_to->name ?
950 			       e->links_to->name : "(?)");
951 			return false;
952 		}
953 	}
954 
955 	return true;
956 }
957 
958 int check_consistency = 0;
959 module_param(check_consistency, int, 0644);
960 
961 static void check_data_structures(void)
962 {
963 	static bool once = false;
964 
965 	if (check_consistency && !once) {
966 		if (!__check_data_structures()) {
967 			once = true;
968 			WARN_ON(once);
969 		}
970 	}
971 }
972 
973 #else /* CONFIG_DEBUG_LOCKDEP */
974 
975 static inline void check_data_structures(void) { }
976 
977 #endif /* CONFIG_DEBUG_LOCKDEP */
978 
979 /*
980  * Initialize the lock_classes[] array elements, the free_lock_classes list
981  * and also the delayed_free structure.
982  */
983 static void init_data_structures_once(void)
984 {
985 	static bool ds_initialized, rcu_head_initialized;
986 	int i;
987 
988 	if (likely(rcu_head_initialized))
989 		return;
990 
991 	if (system_state >= SYSTEM_SCHEDULING) {
992 		init_rcu_head(&delayed_free.rcu_head);
993 		rcu_head_initialized = true;
994 	}
995 
996 	if (ds_initialized)
997 		return;
998 
999 	ds_initialized = true;
1000 
1001 	INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
1002 	INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
1003 
1004 	for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
1005 		list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
1006 		INIT_LIST_HEAD(&lock_classes[i].locks_after);
1007 		INIT_LIST_HEAD(&lock_classes[i].locks_before);
1008 	}
1009 }
1010 
1011 static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
1012 {
1013 	unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
1014 
1015 	return lock_keys_hash + hash;
1016 }
1017 
1018 /* Register a dynamically allocated key. */
1019 void lockdep_register_key(struct lock_class_key *key)
1020 {
1021 	struct hlist_head *hash_head;
1022 	struct lock_class_key *k;
1023 	unsigned long flags;
1024 
1025 	if (WARN_ON_ONCE(static_obj(key)))
1026 		return;
1027 	hash_head = keyhashentry(key);
1028 
1029 	raw_local_irq_save(flags);
1030 	if (!graph_lock())
1031 		goto restore_irqs;
1032 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1033 		if (WARN_ON_ONCE(k == key))
1034 			goto out_unlock;
1035 	}
1036 	hlist_add_head_rcu(&key->hash_entry, hash_head);
1037 out_unlock:
1038 	graph_unlock();
1039 restore_irqs:
1040 	raw_local_irq_restore(flags);
1041 }
1042 EXPORT_SYMBOL_GPL(lockdep_register_key);
1043 
1044 /* Check whether a key has been registered as a dynamic key. */
1045 static bool is_dynamic_key(const struct lock_class_key *key)
1046 {
1047 	struct hlist_head *hash_head;
1048 	struct lock_class_key *k;
1049 	bool found = false;
1050 
1051 	if (WARN_ON_ONCE(static_obj(key)))
1052 		return false;
1053 
1054 	/*
1055 	 * If lock debugging is disabled lock_keys_hash[] may contain
1056 	 * pointers to memory that has already been freed. Avoid triggering
1057 	 * a use-after-free in that case by returning early.
1058 	 */
1059 	if (!debug_locks)
1060 		return true;
1061 
1062 	hash_head = keyhashentry(key);
1063 
1064 	rcu_read_lock();
1065 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
1066 		if (k == key) {
1067 			found = true;
1068 			break;
1069 		}
1070 	}
1071 	rcu_read_unlock();
1072 
1073 	return found;
1074 }
1075 
1076 /*
1077  * Register a lock's class in the hash-table, if the class is not present
1078  * yet. Otherwise we look it up. We cache the result in the lock object
1079  * itself, so the actual hash lookup should happen only once per lock object.
1080  */
1081 static struct lock_class *
1082 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1083 {
1084 	struct lockdep_subclass_key *key;
1085 	struct hlist_head *hash_head;
1086 	struct lock_class *class;
1087 
1088 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1089 
1090 	class = look_up_lock_class(lock, subclass);
1091 	if (likely(class))
1092 		goto out_set_class_cache;
1093 
1094 	if (!lock->key) {
1095 		if (!assign_lock_key(lock))
1096 			return NULL;
1097 	} else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
1098 		return NULL;
1099 	}
1100 
1101 	key = lock->key->subkeys + subclass;
1102 	hash_head = classhashentry(key);
1103 
1104 	if (!graph_lock()) {
1105 		return NULL;
1106 	}
1107 	/*
1108 	 * We have to do the hash-walk again, to avoid races
1109 	 * with another CPU:
1110 	 */
1111 	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
1112 		if (class->key == key)
1113 			goto out_unlock_set;
1114 	}
1115 
1116 	init_data_structures_once();
1117 
1118 	/* Allocate a new lock class and add it to the hash. */
1119 	class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
1120 					 lock_entry);
1121 	if (!class) {
1122 		if (!debug_locks_off_graph_unlock()) {
1123 			return NULL;
1124 		}
1125 
1126 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
1127 		dump_stack();
1128 		return NULL;
1129 	}
1130 	nr_lock_classes++;
1131 	debug_atomic_inc(nr_unused_locks);
1132 	class->key = key;
1133 	class->name = lock->name;
1134 	class->subclass = subclass;
1135 	WARN_ON_ONCE(!list_empty(&class->locks_before));
1136 	WARN_ON_ONCE(!list_empty(&class->locks_after));
1137 	class->name_version = count_matching_names(class);
1138 	/*
1139 	 * We use RCU's safe list-add method to make
1140 	 * parallel walking of the hash-list safe:
1141 	 */
1142 	hlist_add_head_rcu(&class->hash_entry, hash_head);
1143 	/*
1144 	 * Remove the class from the free list and add it to the global list
1145 	 * of classes.
1146 	 */
1147 	list_move_tail(&class->lock_entry, &all_lock_classes);
1148 
1149 	if (verbose(class)) {
1150 		graph_unlock();
1151 
1152 		printk("\nnew class %px: %s", class->key, class->name);
1153 		if (class->name_version > 1)
1154 			printk(KERN_CONT "#%d", class->name_version);
1155 		printk(KERN_CONT "\n");
1156 		dump_stack();
1157 
1158 		if (!graph_lock()) {
1159 			return NULL;
1160 		}
1161 	}
1162 out_unlock_set:
1163 	graph_unlock();
1164 
1165 out_set_class_cache:
1166 	if (!subclass || force)
1167 		lock->class_cache[0] = class;
1168 	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
1169 		lock->class_cache[subclass] = class;
1170 
1171 	/*
1172 	 * Hash collision, did we smoke some? We found a class with a matching
1173 	 * hash but the subclass -- which is hashed in -- didn't match.
1174 	 */
1175 	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1176 		return NULL;
1177 
1178 	return class;
1179 }
1180 
1181 #ifdef CONFIG_PROVE_LOCKING
1182 /*
1183  * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
1184  * NULL on failure.)
1185  */
1186 static struct lock_list *alloc_list_entry(void)
1187 {
1188 	int idx = find_first_zero_bit(list_entries_in_use,
1189 				      ARRAY_SIZE(list_entries));
1190 
1191 	if (idx >= ARRAY_SIZE(list_entries)) {
1192 		if (!debug_locks_off_graph_unlock())
1193 			return NULL;
1194 
1195 		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
1196 		dump_stack();
1197 		return NULL;
1198 	}
1199 	nr_list_entries++;
1200 	__set_bit(idx, list_entries_in_use);
1201 	return list_entries + idx;
1202 }
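
/*
 * Entries are tracked by the list_entries_in_use bitmap so that they can be
 * handed out again once the classes they link together have been zapped.
 */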
1203 
1204 /*
1205  * Add a new dependency to the tail of the list:
1206  */
1207 static int add_lock_to_list(struct lock_class *this,
1208 			    struct lock_class *links_to, struct list_head *head,
1209 			    unsigned long ip, int distance,
1210 			    struct stack_trace *trace)
1211 {
1212 	struct lock_list *entry;
1213 	/*
1214 	 * Lock not present yet - get a new dependency struct and
1215 	 * add it to the list:
1216 	 */
1217 	entry = alloc_list_entry();
1218 	if (!entry)
1219 		return 0;
1220 
1221 	entry->class = this;
1222 	entry->links_to = links_to;
1223 	entry->distance = distance;
1224 	entry->trace = *trace;
1225 	/*
1226 	 * Both allocation and removal are done under the graph lock; but
1227 	 * iteration is under RCU-sched; see look_up_lock_class() and
1228 	 * lockdep_free_key_range().
1229 	 */
1230 	list_add_tail_rcu(&entry->entry, head);
1231 
1232 	return 1;
1233 }
1234 
1235 /*
1236  * For efficient modulo arithmetic, we use a power-of-2 queue size.
1237  */
1238 #define MAX_CIRCULAR_QUEUE_SIZE		4096UL
1239 #define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
1240 
1241 /*
1242  * The circular_queue and its helpers are used to implement the
1243  * breadth-first search (BFS) algorithm, by which we can build
1244  * the shortest path from the next lock to be acquired to a
1245  * previously held lock, if there is a circular dependency between them.
1246  */
1247 struct circular_queue {
1248 	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
1249 	unsigned int  front, rear;
1250 };
1251 
1252 static struct circular_queue lock_cq;
1253 
1254 unsigned int max_bfs_queue_depth;
1255 
1256 static unsigned int lockdep_dependency_gen_id;
1257 
1258 static inline void __cq_init(struct circular_queue *cq)
1259 {
1260 	cq->front = cq->rear = 0;
1261 	lockdep_dependency_gen_id++;
1262 }
1263 
1264 static inline int __cq_empty(struct circular_queue *cq)
1265 {
1266 	return (cq->front == cq->rear);
1267 }
1268 
1269 static inline int __cq_full(struct circular_queue *cq)
1270 {
1271 	return ((cq->rear + 1) & CQ_MASK) == cq->front;
1272 }
1273 
1274 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
1275 {
1276 	if (__cq_full(cq))
1277 		return -1;
1278 
1279 	cq->element[cq->rear] = elem;
1280 	cq->rear = (cq->rear + 1) & CQ_MASK;
1281 	return 0;
1282 }
1283 
1284 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
1285 {
1286 	if (__cq_empty(cq))
1287 		return -1;
1288 
1289 	*elem = cq->element[cq->front];
1290 	cq->front = (cq->front + 1) & CQ_MASK;
1291 	return 0;
1292 }
1293 
1294 static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
1295 {
1296 	return (cq->rear - cq->front) & CQ_MASK;
1297 }
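
/*
 * Since MAX_CIRCULAR_QUEUE_SIZE is a power of 2, wrap-around is a simple
 * mask: e.g. with a queue size of 4096, front == 4094 and rear == 2 gives
 * an element count of (2 - 4094) & 4095 == 4.
 */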
1298 
1299 static inline void mark_lock_accessed(struct lock_list *lock,
1300 					struct lock_list *parent)
1301 {
1302 	unsigned long nr;
1303 
1304 	nr = lock - list_entries;
1305 	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
1306 	lock->parent = parent;
1307 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
1308 }
1309 
1310 static inline unsigned long lock_accessed(struct lock_list *lock)
1311 {
1312 	unsigned long nr;
1313 
1314 	nr = lock - list_entries;
1315 	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
1316 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
1317 }
1318 
1319 static inline struct lock_list *get_lock_parent(struct lock_list *child)
1320 {
1321 	return child->parent;
1322 }
1323 
1324 static inline int get_lock_depth(struct lock_list *child)
1325 {
1326 	int depth = 0;
1327 	struct lock_list *parent;
1328 
1329 	while ((parent = get_lock_parent(child))) {
1330 		child = parent;
1331 		depth++;
1332 	}
1333 	return depth;
1334 }
1335 
1336 static int __bfs(struct lock_list *source_entry,
1337 		 void *data,
1338 		 int (*match)(struct lock_list *entry, void *data),
1339 		 struct lock_list **target_entry,
1340 		 int forward)
1341 {
1342 	struct lock_list *entry;
1343 	struct list_head *head;
1344 	struct circular_queue *cq = &lock_cq;
1345 	int ret = 1;
1346 
1347 	if (match(source_entry, data)) {
1348 		*target_entry = source_entry;
1349 		ret = 0;
1350 		goto exit;
1351 	}
1352 
1353 	if (forward)
1354 		head = &source_entry->class->locks_after;
1355 	else
1356 		head = &source_entry->class->locks_before;
1357 
1358 	if (list_empty(head))
1359 		goto exit;
1360 
1361 	__cq_init(cq);
1362 	__cq_enqueue(cq, (unsigned long)source_entry);
1363 
1364 	while (!__cq_empty(cq)) {
1365 		struct lock_list *lock;
1366 
1367 		__cq_dequeue(cq, (unsigned long *)&lock);
1368 
1369 		if (!lock->class) {
1370 			ret = -2;
1371 			goto exit;
1372 		}
1373 
1374 		if (forward)
1375 			head = &lock->class->locks_after;
1376 		else
1377 			head = &lock->class->locks_before;
1378 
1379 		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1380 
1381 		list_for_each_entry_rcu(entry, head, entry) {
1382 			if (!lock_accessed(entry)) {
1383 				unsigned int cq_depth;
1384 				mark_lock_accessed(entry, lock);
1385 				if (match(entry, data)) {
1386 					*target_entry = entry;
1387 					ret = 0;
1388 					goto exit;
1389 				}
1390 
1391 				if (__cq_enqueue(cq, (unsigned long)entry)) {
1392 					ret = -1;
1393 					goto exit;
1394 				}
1395 				cq_depth = __cq_get_elem_count(cq);
1396 				if (max_bfs_queue_depth < cq_depth)
1397 					max_bfs_queue_depth = cq_depth;
1398 			}
1399 		}
1400 	}
1401 exit:
1402 	return ret;
1403 }
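
/*
 * __bfs() return convention, relied upon by the wrappers below: 0 means a
 * match was found and *target_entry points at it, 1 means the search
 * completed without a match, -1 means the BFS queue overflowed and -2 means
 * a corrupted graph entry (NULL class) was encountered.
 */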
1404 
1405 static inline int __bfs_forwards(struct lock_list *src_entry,
1406 			void *data,
1407 			int (*match)(struct lock_list *entry, void *data),
1408 			struct lock_list **target_entry)
1409 {
1410 	return __bfs(src_entry, data, match, target_entry, 1);
1411 
1412 }
1413 
1414 static inline int __bfs_backwards(struct lock_list *src_entry,
1415 			void *data,
1416 			int (*match)(struct lock_list *entry, void *data),
1417 			struct lock_list **target_entry)
1418 {
1419 	return __bfs(src_entry, data, match, target_entry, 0);
1420 
1421 }
1422 
1423 /*
1424  * Forwards-direction (BFS-based) lock-dependency checking, used for
1425  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1426  * checking.
1427  */
1428 
1429 /*
1430  * Print a dependency chain entry (this is only done when a deadlock
1431  * has been detected):
1432  */
1433 static noinline int
1434 print_circular_bug_entry(struct lock_list *target, int depth)
1435 {
1436 	if (debug_locks_silent)
1437 		return 0;
1438 	printk("\n-> #%u", depth);
1439 	print_lock_name(target->class);
1440 	printk(KERN_CONT ":\n");
1441 	print_stack_trace(&target->trace, 6);
1442 
1443 	return 0;
1444 }
1445 
1446 static void
1447 print_circular_lock_scenario(struct held_lock *src,
1448 			     struct held_lock *tgt,
1449 			     struct lock_list *prt)
1450 {
1451 	struct lock_class *source = hlock_class(src);
1452 	struct lock_class *target = hlock_class(tgt);
1453 	struct lock_class *parent = prt->class;
1454 
1455 	/*
1456 	 * For a direct locking problem, where the unsafe_class lock is taken
1457 	 * directly under the safe_class lock, all we need to show
1458 	 * is the deadlock scenario, as it is obvious that the
1459 	 * unsafe lock is taken under the safe lock.
1460 	 *
1461 	 * But if there is a chain instead, where the safe lock takes
1462 	 * an intermediate lock (middle_class) that is
1463 	 * not the same as the safe lock, then the lock chain is
1464 	 * used to describe the problem. Otherwise we would need
1465 	 * to show a different CPU case for each link in the chain
1466 	 * from the safe_class lock to the unsafe_class lock.
1467 	 */
1468 	if (parent != source) {
1469 		printk("Chain exists of:\n  ");
1470 		__print_lock_name(source);
1471 		printk(KERN_CONT " --> ");
1472 		__print_lock_name(parent);
1473 		printk(KERN_CONT " --> ");
1474 		__print_lock_name(target);
1475 		printk(KERN_CONT "\n\n");
1476 	}
1477 
1478 	printk(" Possible unsafe locking scenario:\n\n");
1479 	printk("       CPU0                    CPU1\n");
1480 	printk("       ----                    ----\n");
1481 	printk("  lock(");
1482 	__print_lock_name(target);
1483 	printk(KERN_CONT ");\n");
1484 	printk("                               lock(");
1485 	__print_lock_name(parent);
1486 	printk(KERN_CONT ");\n");
1487 	printk("                               lock(");
1488 	__print_lock_name(target);
1489 	printk(KERN_CONT ");\n");
1490 	printk("  lock(");
1491 	__print_lock_name(source);
1492 	printk(KERN_CONT ");\n");
1493 	printk("\n *** DEADLOCK ***\n\n");
1494 }
1495 
1496 /*
1497  * When a circular dependency is detected, print the
1498  * header first:
1499  */
1500 static noinline int
1501 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1502 			struct held_lock *check_src,
1503 			struct held_lock *check_tgt)
1504 {
1505 	struct task_struct *curr = current;
1506 
1507 	if (debug_locks_silent)
1508 		return 0;
1509 
1510 	pr_warn("\n");
1511 	pr_warn("======================================================\n");
1512 	pr_warn("WARNING: possible circular locking dependency detected\n");
1513 	print_kernel_ident();
1514 	pr_warn("------------------------------------------------------\n");
1515 	pr_warn("%s/%d is trying to acquire lock:\n",
1516 		curr->comm, task_pid_nr(curr));
1517 	print_lock(check_src);
1518 
1519 	pr_warn("\nbut task is already holding lock:\n");
1520 
1521 	print_lock(check_tgt);
1522 	pr_warn("\nwhich lock already depends on the new lock.\n\n");
1523 	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1524 
1525 	print_circular_bug_entry(entry, depth);
1526 
1527 	return 0;
1528 }
1529 
1530 static inline int class_equal(struct lock_list *entry, void *data)
1531 {
1532 	return entry->class == data;
1533 }
1534 
1535 static noinline int print_circular_bug(struct lock_list *this,
1536 				struct lock_list *target,
1537 				struct held_lock *check_src,
1538 				struct held_lock *check_tgt,
1539 				struct stack_trace *trace)
1540 {
1541 	struct task_struct *curr = current;
1542 	struct lock_list *parent;
1543 	struct lock_list *first_parent;
1544 	int depth;
1545 
1546 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1547 		return 0;
1548 
1549 	if (!save_trace(&this->trace))
1550 		return 0;
1551 
1552 	depth = get_lock_depth(target);
1553 
1554 	print_circular_bug_header(target, depth, check_src, check_tgt);
1555 
1556 	parent = get_lock_parent(target);
1557 	first_parent = parent;
1558 
1559 	while (parent) {
1560 		print_circular_bug_entry(parent, --depth);
1561 		parent = get_lock_parent(parent);
1562 	}
1563 
1564 	printk("\nother info that might help us debug this:\n\n");
1565 	print_circular_lock_scenario(check_src, check_tgt,
1566 				     first_parent);
1567 
1568 	lockdep_print_held_locks(curr);
1569 
1570 	printk("\nstack backtrace:\n");
1571 	dump_stack();
1572 
1573 	return 0;
1574 }
1575 
1576 static noinline int print_bfs_bug(int ret)
1577 {
1578 	if (!debug_locks_off_graph_unlock())
1579 		return 0;
1580 
1581 	/*
1582 	 * Breadth-first-search failed, graph got corrupted?
1583 	 */
1584 	WARN(1, "lockdep bfs error:%d\n", ret);
1585 
1586 	return 0;
1587 }
1588 
1589 static int noop_count(struct lock_list *entry, void *data)
1590 {
1591 	(*(unsigned long *)data)++;
1592 	return 0;
1593 }
1594 
1595 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1596 {
1597 	unsigned long  count = 0;
1598 	struct lock_list *uninitialized_var(target_entry);
1599 
1600 	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1601 
1602 	return count;
1603 }
1604 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1605 {
1606 	unsigned long ret, flags;
1607 	struct lock_list this;
1608 
1609 	this.parent = NULL;
1610 	this.class = class;
1611 
1612 	raw_local_irq_save(flags);
1613 	arch_spin_lock(&lockdep_lock);
1614 	ret = __lockdep_count_forward_deps(&this);
1615 	arch_spin_unlock(&lockdep_lock);
1616 	raw_local_irq_restore(flags);
1617 
1618 	return ret;
1619 }
1620 
1621 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1622 {
1623 	unsigned long  count = 0;
1624 	struct lock_list *uninitialized_var(target_entry);
1625 
1626 	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1627 
1628 	return count;
1629 }
1630 
1631 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1632 {
1633 	unsigned long ret, flags;
1634 	struct lock_list this;
1635 
1636 	this.parent = NULL;
1637 	this.class = class;
1638 
1639 	raw_local_irq_save(flags);
1640 	arch_spin_lock(&lockdep_lock);
1641 	ret = __lockdep_count_backward_deps(&this);
1642 	arch_spin_unlock(&lockdep_lock);
1643 	raw_local_irq_restore(flags);
1644 
1645 	return ret;
1646 }
1647 
1648 /*
1649  * Prove that the dependency graph starting at <root> cannot
1650  * lead to <target>. Print an error and return 0 if it does.
1651  */
1652 static noinline int
1653 check_noncircular(struct lock_list *root, struct lock_class *target,
1654 		struct lock_list **target_entry)
1655 {
1656 	int result;
1657 
1658 	debug_atomic_inc(nr_cyclic_checks);
1659 
1660 	result = __bfs_forwards(root, target, class_equal, target_entry);
1661 
1662 	return result;
1663 }
1664 
1665 static noinline int
1666 check_redundant(struct lock_list *root, struct lock_class *target,
1667 		struct lock_list **target_entry)
1668 {
1669 	int result;
1670 
1671 	debug_atomic_inc(nr_redundant_checks);
1672 
1673 	result = __bfs_forwards(root, target, class_equal, target_entry);
1674 
1675 	return result;
1676 }
1677 
1678 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1679 /*
1680  * Forwards and backwards subgraph searching, for the purposes of
1681  * proving that two subgraphs can be connected by a new dependency
1682  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1683  */
1684 
1685 static inline int usage_match(struct lock_list *entry, void *bit)
1686 {
1687 	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1688 }
1689 
1690 
1691 
1692 /*
1693  * Find a node in the forwards-direction dependency sub-graph starting
1694  * at @root->class that matches @bit.
1695  *
1696  * Return 0 if such a node exists in the subgraph, and put that node
1697  * into *@target_entry.
1698  *
1699  * Return 1 otherwise and keep *@target_entry unchanged.
1700  * Return <0 on error.
1701  */
1702 static int
1703 find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1704 			struct lock_list **target_entry)
1705 {
1706 	int result;
1707 
1708 	debug_atomic_inc(nr_find_usage_forwards_checks);
1709 
1710 	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1711 
1712 	return result;
1713 }
1714 
1715 /*
1716  * Find a node in the backwards-direction dependency sub-graph starting
1717  * at @root->class that matches @bit.
1718  *
1719  * Return 0 if such a node exists in the subgraph, and put that node
1720  * into *@target_entry.
1721  *
1722  * Return 1 otherwise and keep *@target_entry unchanged.
1723  * Return <0 on error.
1724  */
1725 static int
1726 find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1727 			struct lock_list **target_entry)
1728 {
1729 	int result;
1730 
1731 	debug_atomic_inc(nr_find_usage_backwards_checks);
1732 
1733 	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1734 
1735 	return result;
1736 }
1737 
1738 static void print_lock_class_header(struct lock_class *class, int depth)
1739 {
1740 	int bit;
1741 
1742 	printk("%*s->", depth, "");
1743 	print_lock_name(class);
1744 #ifdef CONFIG_DEBUG_LOCKDEP
1745 	printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
1746 #endif
1747 	printk(KERN_CONT " {\n");
1748 
1749 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1750 		if (class->usage_mask & (1 << bit)) {
1751 			int len = depth;
1752 
1753 			len += printk("%*s   %s", depth, "", usage_str[bit]);
1754 			len += printk(KERN_CONT " at:\n");
1755 			print_stack_trace(class->usage_traces + bit, len);
1756 		}
1757 	}
1758 	printk("%*s }\n", depth, "");
1759 
1760 	printk("%*s ... key      at: [<%px>] %pS\n",
1761 		depth, "", class->key, class->key);
1762 }
1763 
1764 /*
1765  * printk the shortest lock dependencies from @leaf to @root in reverse order:
1766  */
1767 static void __used
1768 print_shortest_lock_dependencies(struct lock_list *leaf,
1769 				struct lock_list *root)
1770 {
1771 	struct lock_list *entry = leaf;
1772 	int depth;
1773 
1774 	/* compute depth from the tree generated by BFS */
1775 	depth = get_lock_depth(leaf);
1776 
1777 	do {
1778 		print_lock_class_header(entry->class, depth);
1779 		printk("%*s ... acquired at:\n", depth, "");
1780 		print_stack_trace(&entry->trace, 2);
1781 		printk("\n");
1782 
1783 		if (depth == 0 && (entry != root)) {
1784 			printk("lockdep:%s bad path found in chain graph\n", __func__);
1785 			break;
1786 		}
1787 
1788 		entry = get_lock_parent(entry);
1789 		depth--;
1790 	} while (entry && (depth >= 0));
1791 
1792 	return;
1793 }
1794 
1795 static void
1796 print_irq_lock_scenario(struct lock_list *safe_entry,
1797 			struct lock_list *unsafe_entry,
1798 			struct lock_class *prev_class,
1799 			struct lock_class *next_class)
1800 {
1801 	struct lock_class *safe_class = safe_entry->class;
1802 	struct lock_class *unsafe_class = unsafe_entry->class;
1803 	struct lock_class *middle_class = prev_class;
1804 
1805 	if (middle_class == safe_class)
1806 		middle_class = next_class;
1807 
1808 	/*
1809 	 * For a direct locking problem, where the unsafe_class lock is taken
1810 	 * directly under the safe_class lock, all we need to show
1811 	 * is the deadlock scenario, as it is obvious that the
1812 	 * unsafe lock is taken under the safe lock.
1813 	 *
1814 	 * But if there is a chain instead, where the safe lock takes
1815 	 * an intermediate lock (middle_class) that is
1816 	 * not the same as the safe lock, then the lock chain is
1817 	 * used to describe the problem. Otherwise we would need
1818 	 * to show a different CPU case for each link in the chain
1819 	 * from the safe_class lock to the unsafe_class lock.
1820 	 */
1821 	if (middle_class != unsafe_class) {
1822 		printk("Chain exists of:\n  ");
1823 		__print_lock_name(safe_class);
1824 		printk(KERN_CONT " --> ");
1825 		__print_lock_name(middle_class);
1826 		printk(KERN_CONT " --> ");
1827 		__print_lock_name(unsafe_class);
1828 		printk(KERN_CONT "\n\n");
1829 	}
1830 
1831 	printk(" Possible interrupt unsafe locking scenario:\n\n");
1832 	printk("       CPU0                    CPU1\n");
1833 	printk("       ----                    ----\n");
1834 	printk("  lock(");
1835 	__print_lock_name(unsafe_class);
1836 	printk(KERN_CONT ");\n");
1837 	printk("                               local_irq_disable();\n");
1838 	printk("                               lock(");
1839 	__print_lock_name(safe_class);
1840 	printk(KERN_CONT ");\n");
1841 	printk("                               lock(");
1842 	__print_lock_name(middle_class);
1843 	printk(KERN_CONT ");\n");
1844 	printk("  <Interrupt>\n");
1845 	printk("    lock(");
1846 	__print_lock_name(safe_class);
1847 	printk(KERN_CONT ");\n");
1848 	printk("\n *** DEADLOCK ***\n\n");
1849 }
1850 
1851 static int
1852 print_bad_irq_dependency(struct task_struct *curr,
1853 			 struct lock_list *prev_root,
1854 			 struct lock_list *next_root,
1855 			 struct lock_list *backwards_entry,
1856 			 struct lock_list *forwards_entry,
1857 			 struct held_lock *prev,
1858 			 struct held_lock *next,
1859 			 enum lock_usage_bit bit1,
1860 			 enum lock_usage_bit bit2,
1861 			 const char *irqclass)
1862 {
1863 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1864 		return 0;
1865 
1866 	pr_warn("\n");
1867 	pr_warn("=====================================================\n");
1868 	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1869 		irqclass, irqclass);
1870 	print_kernel_ident();
1871 	pr_warn("-----------------------------------------------------\n");
1872 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1873 		curr->comm, task_pid_nr(curr),
1874 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1875 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1876 		curr->hardirqs_enabled,
1877 		curr->softirqs_enabled);
1878 	print_lock(next);
1879 
1880 	pr_warn("\nand this task is already holding:\n");
1881 	print_lock(prev);
1882 	pr_warn("which would create a new lock dependency:\n");
1883 	print_lock_name(hlock_class(prev));
1884 	pr_cont(" ->");
1885 	print_lock_name(hlock_class(next));
1886 	pr_cont("\n");
1887 
1888 	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1889 		irqclass);
1890 	print_lock_name(backwards_entry->class);
1891 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
1892 
1893 	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1894 
1895 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
1896 	print_lock_name(forwards_entry->class);
1897 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
1898 	pr_warn("...");
1899 
1900 	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1901 
1902 	pr_warn("\nother info that might help us debug this:\n\n");
1903 	print_irq_lock_scenario(backwards_entry, forwards_entry,
1904 				hlock_class(prev), hlock_class(next));
1905 
1906 	lockdep_print_held_locks(curr);
1907 
1908 	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1909 	if (!save_trace(&prev_root->trace))
1910 		return 0;
1911 	print_shortest_lock_dependencies(backwards_entry, prev_root);
1912 
1913 	pr_warn("\nthe dependencies between the lock to be acquired");
1914 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
1915 	if (!save_trace(&next_root->trace))
1916 		return 0;
1917 	print_shortest_lock_dependencies(forwards_entry, next_root);
1918 
1919 	pr_warn("\nstack backtrace:\n");
1920 	dump_stack();
1921 
1922 	return 0;
1923 }
1924 
1925 static int
1926 check_usage(struct task_struct *curr, struct held_lock *prev,
1927 	    struct held_lock *next, enum lock_usage_bit bit_backwards,
1928 	    enum lock_usage_bit bit_forwards, const char *irqclass)
1929 {
1930 	int ret;
1931 	struct lock_list this, that;
1932 	struct lock_list *uninitialized_var(target_entry);
1933 	struct lock_list *uninitialized_var(target_entry1);
1934 
1935 	this.parent = NULL;
1936 
1937 	this.class = hlock_class(prev);
1938 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1939 	if (ret < 0)
1940 		return print_bfs_bug(ret);
1941 	if (ret == 1)
1942 		return ret;
1943 
1944 	that.parent = NULL;
1945 	that.class = hlock_class(next);
1946 	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1947 	if (ret < 0)
1948 		return print_bfs_bug(ret);
1949 	if (ret == 1)
1950 		return ret;
1951 
1952 	return print_bad_irq_dependency(curr, &this, &that,
1953 			target_entry, target_entry1,
1954 			prev, next,
1955 			bit_backwards, bit_forwards, irqclass);
1956 }
1957 
1958 static const char *state_names[] = {
1959 #define LOCKDEP_STATE(__STATE) \
1960 	__stringify(__STATE),
1961 #include "lockdep_states.h"
1962 #undef LOCKDEP_STATE
1963 };
1964 
1965 static const char *state_rnames[] = {
1966 #define LOCKDEP_STATE(__STATE) \
1967 	__stringify(__STATE)"-READ",
1968 #include "lockdep_states.h"
1969 #undef LOCKDEP_STATE
1970 };
1971 
1972 static inline const char *state_name(enum lock_usage_bit bit)
1973 {
1974 	return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1975 }
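
/*
 * Illustrative note: with lockdep_states.h currently listing HARDIRQ and
 * SOFTIRQ, the two arrays above expand to { "HARDIRQ", "SOFTIRQ" } and
 * { "HARDIRQ-READ", "SOFTIRQ-READ" }.  Each state owns four usage bits
 * (USED_IN, USED_IN_READ, ENABLED, ENABLED_READ), hence the "bit >> 2"
 * indexing, e.g. state_name(LOCK_ENABLED_SOFTIRQ_READ) == "SOFTIRQ-READ".
 */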
1976 
1977 static int exclusive_bit(int new_bit)
1978 {
1979 	int state = new_bit & LOCK_USAGE_STATE_MASK;
1980 	int dir = new_bit & LOCK_USAGE_DIR_MASK;
1981 
1982 	/*
1983 	 * keep state, bit flip the direction and strip read.
1984 	 */
1985 	return state | (dir ^ LOCK_USAGE_DIR_MASK);
1986 }
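
/*
 * Worked example of the arithmetic above (illustrative only, assuming the
 * usual LOCK_USAGE_READ_MASK == 1 and LOCK_USAGE_DIR_MASK == 2 layout):
 *
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_ENABLED_HARDIRQ_READ) == LOCK_USED_IN_HARDIRQ
 *
 * i.e. a USED_IN bit conflicts with the corresponding ENABLED bit of the
 * same state, and the READ variants map onto the plain (write) bits.
 */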
1987 
1988 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1989 			   struct held_lock *next, enum lock_usage_bit bit)
1990 {
1991 	/*
1992 	 * Prove that the new dependency does not connect a hardirq-safe
1993 	 * lock with a hardirq-unsafe lock - to achieve this we search
1994 	 * the backwards-subgraph starting at <prev>, and the
1995 	 * forwards-subgraph starting at <next>:
1996 	 */
1997 	if (!check_usage(curr, prev, next, bit,
1998 			   exclusive_bit(bit), state_name(bit)))
1999 		return 0;
2000 
2001 	bit++; /* _READ */
2002 
2003 	/*
2004 	 * Prove that the new dependency does not connect a hardirq-safe-read
2005 	 * lock with a hardirq-unsafe lock - to achieve this we search
2006 	 * the backwards-subgraph starting at <prev>, and the
2007 	 * forwards-subgraph starting at <next>:
2008 	 */
2009 	if (!check_usage(curr, prev, next, bit,
2010 			   exclusive_bit(bit), state_name(bit)))
2011 		return 0;
2012 
2013 	return 1;
2014 }
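
/*
 * Illustrative example: with bit == LOCK_USED_IN_HARDIRQ, the first
 * check_usage() call above rejects connecting anything that was ever used
 * in hardirq context (searched backwards from <prev>) to anything that is
 * ever taken with hardirqs enabled (searched forwards from <next>); the
 * second call repeats the check for the USED_IN_*_READ variant of the
 * safe side.
 */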
2015 
2016 static int
2017 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
2018 		struct held_lock *next)
2019 {
2020 #define LOCKDEP_STATE(__STATE)						\
2021 	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
2022 		return 0;
2023 #include "lockdep_states.h"
2024 #undef LOCKDEP_STATE
2025 
2026 	return 1;
2027 }
2028 
2029 static void inc_chains(void)
2030 {
2031 	if (current->hardirq_context)
2032 		nr_hardirq_chains++;
2033 	else {
2034 		if (current->softirq_context)
2035 			nr_softirq_chains++;
2036 		else
2037 			nr_process_chains++;
2038 	}
2039 }
2040 
2041 #else
2042 
2043 static inline int
2044 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
2045 		struct held_lock *next)
2046 {
2047 	return 1;
2048 }
2049 
2050 static inline void inc_chains(void)
2051 {
2052 	nr_process_chains++;
2053 }
2054 
2055 #endif
2056 
2057 static void
2058 print_deadlock_scenario(struct held_lock *nxt,
2059 			     struct held_lock *prv)
2060 {
2061 	struct lock_class *next = hlock_class(nxt);
2062 	struct lock_class *prev = hlock_class(prv);
2063 
2064 	printk(" Possible unsafe locking scenario:\n\n");
2065 	printk("       CPU0\n");
2066 	printk("       ----\n");
2067 	printk("  lock(");
2068 	__print_lock_name(prev);
2069 	printk(KERN_CONT ");\n");
2070 	printk("  lock(");
2071 	__print_lock_name(next);
2072 	printk(KERN_CONT ");\n");
2073 	printk("\n *** DEADLOCK ***\n\n");
2074 	printk(" May be due to missing lock nesting notation\n\n");
2075 }
2076 
2077 static int
2078 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
2079 		   struct held_lock *next)
2080 {
2081 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2082 		return 0;
2083 
2084 	pr_warn("\n");
2085 	pr_warn("============================================\n");
2086 	pr_warn("WARNING: possible recursive locking detected\n");
2087 	print_kernel_ident();
2088 	pr_warn("--------------------------------------------\n");
2089 	pr_warn("%s/%d is trying to acquire lock:\n",
2090 		curr->comm, task_pid_nr(curr));
2091 	print_lock(next);
2092 	pr_warn("\nbut task is already holding lock:\n");
2093 	print_lock(prev);
2094 
2095 	pr_warn("\nother info that might help us debug this:\n");
2096 	print_deadlock_scenario(next, prev);
2097 	lockdep_print_held_locks(curr);
2098 
2099 	pr_warn("\nstack backtrace:\n");
2100 	dump_stack();
2101 
2102 	return 0;
2103 }
2104 
2105 /*
2106  * Check whether we are holding such a class already.
2107  *
2108  * (Note that this has to be done separately, because the graph cannot
2109  * detect such classes of deadlocks.)
2110  *
2111  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
2112  */
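/*
 * For example (illustrative only), with two objects of the same lock class:
 *
 *	spin_lock(&obj_a->lock);
 *	spin_lock(&obj_b->lock);	<-- reported as possible recursive locking
 *
 * unless the second acquisition is a recursive read, or carries a nest_lock
 * annotation (e.g. mutex_lock_nest_lock()) for a lock the task already
 * holds; using a different subclass (e.g. spin_lock_nested()) avoids the
 * report by giving the second acquisition a different lock class.
 */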
2113 static int
2114 check_deadlock(struct task_struct *curr, struct held_lock *next,
2115 	       struct lockdep_map *next_instance, int read)
2116 {
2117 	struct held_lock *prev;
2118 	struct held_lock *nest = NULL;
2119 	int i;
2120 
2121 	for (i = 0; i < curr->lockdep_depth; i++) {
2122 		prev = curr->held_locks + i;
2123 
2124 		if (prev->instance == next->nest_lock)
2125 			nest = prev;
2126 
2127 		if (hlock_class(prev) != hlock_class(next))
2128 			continue;
2129 
2130 		/*
2131 		 * Allow read-after-read recursion of the same
2132 		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
2133 		 */
2134 		if ((read == 2) && prev->read)
2135 			return 2;
2136 
2137 		/*
2138 		 * We're holding the nest_lock, which serializes this lock's
2139 		 * nesting behaviour.
2140 		 */
2141 		if (nest)
2142 			return 2;
2143 
2144 		return print_deadlock_bug(curr, prev, next);
2145 	}
2146 	return 1;
2147 }
2148 
2149 /*
2150  * There was a chain-cache miss, and we are about to add a new dependency
2151  * to a previous lock. We recursively validate the following rules:
2152  *
2153  *  - would the adding of the <prev> -> <next> dependency create a
2154  *    circular dependency in the graph? [== circular deadlock]
2155  *
2156  *  - does the new prev->next dependency connect any hardirq-safe lock
2157  *    (in the full backwards-subgraph starting at <prev>) with any
2158  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
2159  *    <next>)? [== illegal lock inversion with hardirq contexts]
2160  *
2161  *  - does the new prev->next dependency connect any softirq-safe lock
2162  *    (in the full backwards-subgraph starting at <prev>) with any
2163  *    softirq-unsafe lock (in the full forwards-subgraph starting at
2164  *    <next>)? [== illegal lock inversion with softirq contexts]
2165  *
2166  * any of these scenarios could lead to a deadlock.
2167  *
2168  * Then if all the validations pass, we add the forwards and backwards
2169  * dependency.
2170  */
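/*
 * Classic instance of the first rule (illustrative only):
 *
 *	task 1: lock(A); lock(B);
 *	task 2: lock(B); lock(A);
 *
 * Task 1 records the A -> B dependency; when task 2 tries to add B -> A,
 * the forward search from <next> (== A) below reaches <prev> (== B) via
 * the existing A -> B edge and the circular dependency is reported, even
 * if the two tasks never actually race.
 */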
2171 static int
2172 check_prev_add(struct task_struct *curr, struct held_lock *prev,
2173 	       struct held_lock *next, int distance, struct stack_trace *trace,
2174 	       int (*save)(struct stack_trace *trace))
2175 {
2176 	struct lock_list *uninitialized_var(target_entry);
2177 	struct lock_list *entry;
2178 	struct lock_list this;
2179 	int ret;
2180 
2181 	if (!hlock_class(prev)->key || !hlock_class(next)->key) {
2182 		/*
2183 		 * The warning statements below may trigger a use-after-free
2184 		 * of the class name. It is better to trigger a use-after-free
2185 		 * and have the class name available most of the time than not
2186 		 * to have it at all.
2187 		 */
2188 		WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
2189 			  "Detected use-after-free of lock class %px/%s\n",
2190 			  hlock_class(prev),
2191 			  hlock_class(prev)->name);
2192 		WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
2193 			  "Detected use-after-free of lock class %px/%s\n",
2194 			  hlock_class(next),
2195 			  hlock_class(next)->name);
2196 		return 2;
2197 	}
2198 
2199 	/*
2200 	 * Prove that the new <prev> -> <next> dependency would not
2201 	 * create a circular dependency in the graph. (We do this by
2202 	 * forward-recursing into the graph starting at <next>, and
2203 	 * checking whether we can reach <prev>.)
2204 	 *
2205 	 * We are using global variables to control the recursion, to
2206 	 * keep the stackframe size of the recursive functions low:
2207 	 */
2208 	this.class = hlock_class(next);
2209 	this.parent = NULL;
2210 	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
2211 	if (unlikely(!ret)) {
2212 		if (!trace->entries) {
2213 			/*
2214 			 * If @save fails here, the printing might trigger
2215 			 * a WARN but because of the !nr_entries it should
2216 			 * not do bad things.
2217 			 */
2218 			save(trace);
2219 		}
2220 		return print_circular_bug(&this, target_entry, next, prev, trace);
2221 	}
2222 	else if (unlikely(ret < 0))
2223 		return print_bfs_bug(ret);
2224 
2225 	if (!check_prev_add_irq(curr, prev, next))
2226 		return 0;
2227 
2228 	/*
2229 	 * For recursive read-locks we do all the dependency checks,
2230 	 * but we don't store read-triggered dependencies (only
2231 	 * write-triggered dependencies). This ensures that only the
2232 	 * write-side dependencies matter, and that if for example a
2233 	 * write-lock never takes any other locks, then the reads are
2234 	 * equivalent to a NOP.
2235 	 */
2236 	if (next->read == 2 || prev->read == 2)
2237 		return 1;
2238 	/*
2239 	 * Is the <prev> -> <next> dependency already present?
2240 	 *
2241 	 * (this may occur even though this is a new chain: consider
2242 	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
2243 	 *  chains - the second one will be new, but L1 already has
2244 	 *  L2 added to its dependency list, due to the first chain.)
2245 	 */
2246 	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
2247 		if (entry->class == hlock_class(next)) {
2248 			if (distance == 1)
2249 				entry->distance = 1;
2250 			return 1;
2251 		}
2252 	}
2253 
2254 	/*
2255 	 * Is the <prev> -> <next> link redundant?
2256 	 */
2257 	this.class = hlock_class(prev);
2258 	this.parent = NULL;
2259 	ret = check_redundant(&this, hlock_class(next), &target_entry);
2260 	if (!ret) {
2261 		debug_atomic_inc(nr_redundant);
2262 		return 2;
2263 	}
2264 	if (ret < 0)
2265 		return print_bfs_bug(ret);
2266 
2268 	if (!trace->entries && !save(trace))
2269 		return 0;
2270 
2271 	/*
2272 	 * Ok, all validations passed, add the new lock
2273 	 * to the previous lock's dependency list:
2274 	 */
2275 	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
2276 			       &hlock_class(prev)->locks_after,
2277 			       next->acquire_ip, distance, trace);
2278 
2279 	if (!ret)
2280 		return 0;
2281 
2282 	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
2283 			       &hlock_class(next)->locks_before,
2284 			       next->acquire_ip, distance, trace);
2285 	if (!ret)
2286 		return 0;
2287 
2288 	return 2;
2289 }
2290 
2291 /*
2292  * Add the dependency to all directly-previous locks that are 'relevant'.
2293  * The ones that are relevant are (in increasing distance from curr):
2294  * all consecutive trylock entries and the final non-trylock entry - or
2295  * the end of this context's lock-chain - whichever comes first.
2296  */
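/*
 * For instance (illustrative only), with the held stack
 *
 *	mutex_lock(&M); spin_lock(&S); spin_trylock(&T1); spin_trylock(&T2);
 *
 * acquiring <next> adds T2 -> next, T1 -> next and S -> next, then stops:
 * S is the most recent non-trylock entry, and M -> S was already recorded
 * when S was acquired, so M reaches <next> transitively.
 */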
2297 static int
2298 check_prevs_add(struct task_struct *curr, struct held_lock *next)
2299 {
2300 	int depth = curr->lockdep_depth;
2301 	struct held_lock *hlock;
2302 	struct stack_trace trace = {
2303 		.nr_entries = 0,
2304 		.max_entries = 0,
2305 		.entries = NULL,
2306 		.skip = 0,
2307 	};
2308 
2309 	/*
2310 	 * Debugging checks.
2311 	 *
2312 	 * Depth must not be zero for a non-head lock:
2313 	 */
2314 	if (!depth)
2315 		goto out_bug;
2316 	/*
2317 	 * At least two relevant locks must exist for this
2318 	 * to be a head:
2319 	 */
2320 	if (curr->held_locks[depth].irq_context !=
2321 			curr->held_locks[depth-1].irq_context)
2322 		goto out_bug;
2323 
2324 	for (;;) {
2325 		int distance = curr->lockdep_depth - depth + 1;
2326 		hlock = curr->held_locks + depth - 1;
2327 
2328 		/*
2329 		 * Only non-recursive-read entries get new dependencies
2330 		 * added:
2331 		 */
2332 		if (hlock->read != 2 && hlock->check) {
2333 			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
2334 			if (!ret)
2335 				return 0;
2336 
2337 			/*
2338 			 * Stop after the first non-trylock entry,
2339 			 * as non-trylock entries have added their
2340 			 * own direct dependencies already, so this
2341 			 * lock is connected to them indirectly:
2342 			 */
2343 			if (!hlock->trylock)
2344 				break;
2345 		}
2346 
2347 		depth--;
2348 		/*
2349 		 * End of lock-stack?
2350 		 */
2351 		if (!depth)
2352 			break;
2353 		/*
2354 		 * Stop the search if we cross into another context:
2355 		 */
2356 		if (curr->held_locks[depth].irq_context !=
2357 				curr->held_locks[depth-1].irq_context)
2358 			break;
2359 	}
2360 	return 1;
2361 out_bug:
2362 	if (!debug_locks_off_graph_unlock())
2363 		return 0;
2364 
2365 	/*
2366 	 * Clearly we all shouldn't be here, but since we made it we
2367 	 * can reliable say we messed up our state. See the above two
2368 	 * gotos for reasons why we could possibly end up here.
2369 	 */
2370 	WARN_ON(1);
2371 
2372 	return 0;
2373 }
2374 
2375 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2376 static DECLARE_BITMAP(lock_chains_in_use, MAX_LOCKDEP_CHAINS);
2377 int nr_chain_hlocks;
2378 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2379 
2380 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2381 {
2382 	return lock_classes + chain_hlocks[chain->base + i];
2383 }
2384 
2385 /*
2386  * Returns the index of the first held_lock of the current chain
2387  */
2388 static inline int get_first_held_lock(struct task_struct *curr,
2389 					struct held_lock *hlock)
2390 {
2391 	int i;
2392 	struct held_lock *hlock_curr;
2393 
2394 	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2395 		hlock_curr = curr->held_locks + i;
2396 		if (hlock_curr->irq_context != hlock->irq_context)
2397 			break;
2398 
2399 	}
2400 
2401 	return ++i;
2402 }
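
/*
 * Illustrative example: with a held stack [A, B, C, D] where C and D were
 * taken from a different irq_context than A and B, a lookup for a new lock
 * in C/D's context returns the index of C -- the chain that gets hashed
 * and cached covers only the current context's held locks.
 */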
2403 
2404 #ifdef CONFIG_DEBUG_LOCKDEP
2405 /*
2406  * Returns the next chain_key iteration
2407  */
2408 static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2409 {
2410 	u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2411 
2412 	printk(" class_idx:%d -> chain_key:%016Lx",
2413 		class_idx,
2414 		(unsigned long long)new_chain_key);
2415 	return new_chain_key;
2416 }
2417 
2418 static void
2419 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2420 {
2421 	struct held_lock *hlock;
2422 	u64 chain_key = 0;
2423 	int depth = curr->lockdep_depth;
2424 	int i;
2425 
2426 	printk("depth: %u\n", depth + 1);
2427 	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2428 		hlock = curr->held_locks + i;
2429 		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2430 
2431 		print_lock(hlock);
2432 	}
2433 
2434 	print_chain_key_iteration(hlock_next->class_idx, chain_key);
2435 	print_lock(hlock_next);
2436 }
2437 
2438 static void print_chain_keys_chain(struct lock_chain *chain)
2439 {
2440 	int i;
2441 	u64 chain_key = 0;
2442 	int class_id;
2443 
2444 	printk("depth: %u\n", chain->depth);
2445 	for (i = 0; i < chain->depth; i++) {
2446 		class_id = chain_hlocks[chain->base + i];
2447 		chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2448 
2449 		print_lock_name(lock_classes + class_id);
2450 		printk("\n");
2451 	}
2452 }
2453 
2454 static void print_collision(struct task_struct *curr,
2455 			struct held_lock *hlock_next,
2456 			struct lock_chain *chain)
2457 {
2458 	pr_warn("\n");
2459 	pr_warn("============================\n");
2460 	pr_warn("WARNING: chain_key collision\n");
2461 	print_kernel_ident();
2462 	pr_warn("----------------------------\n");
2463 	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2464 	pr_warn("Hash chain already cached but the contents don't match!\n");
2465 
2466 	pr_warn("Held locks:");
2467 	print_chain_keys_held_locks(curr, hlock_next);
2468 
2469 	pr_warn("Locks in cached chain:");
2470 	print_chain_keys_chain(chain);
2471 
2472 	pr_warn("\nstack backtrace:\n");
2473 	dump_stack();
2474 }
2475 #endif
2476 
2477 /*
2478  * Checks whether the chain and the current held locks are consistent
2479  * in depth and also in content. If they are not, it most likely means
2480  * that there was a collision during the calculation of the chain_key.
2481  * Returns: 0 not passed, 1 passed
2482  */
2483 static int check_no_collision(struct task_struct *curr,
2484 			struct held_lock *hlock,
2485 			struct lock_chain *chain)
2486 {
2487 #ifdef CONFIG_DEBUG_LOCKDEP
2488 	int i, j, id;
2489 
2490 	i = get_first_held_lock(curr, hlock);
2491 
2492 	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2493 		print_collision(curr, hlock, chain);
2494 		return 0;
2495 	}
2496 
2497 	for (j = 0; j < chain->depth - 1; j++, i++) {
2498 		id = curr->held_locks[i].class_idx - 1;
2499 
2500 		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2501 			print_collision(curr, hlock, chain);
2502 			return 0;
2503 		}
2504 	}
2505 #endif
2506 	return 1;
2507 }
2508 
2509 /*
2510  * Given an index that is >= -1, return the index of the next lock chain.
2511  * Return -2 if there is no next lock chain.
2512  */
2513 long lockdep_next_lockchain(long i)
2514 {
2515 	i = find_next_bit(lock_chains_in_use, ARRAY_SIZE(lock_chains), i + 1);
2516 	return i < ARRAY_SIZE(lock_chains) ? i : -2;
2517 }
2518 
2519 unsigned long lock_chain_count(void)
2520 {
2521 	return bitmap_weight(lock_chains_in_use, ARRAY_SIZE(lock_chains));
2522 }
2523 
2524 /* Must be called with the graph lock held. */
2525 static struct lock_chain *alloc_lock_chain(void)
2526 {
2527 	int idx = find_first_zero_bit(lock_chains_in_use,
2528 				      ARRAY_SIZE(lock_chains));
2529 
2530 	if (unlikely(idx >= ARRAY_SIZE(lock_chains)))
2531 		return NULL;
2532 	__set_bit(idx, lock_chains_in_use);
2533 	return lock_chains + idx;
2534 }
2535 
2536 /*
2537  * Adds a dependency chain into the chain hashtable. Must be called with
2538  * graph_lock held.
2539  *
2540  * Return 0 on failure, with graph_lock released.
2541  * Return 1 on success, with graph_lock still held.
2542  */
2543 static inline int add_chain_cache(struct task_struct *curr,
2544 				  struct held_lock *hlock,
2545 				  u64 chain_key)
2546 {
2547 	struct lock_class *class = hlock_class(hlock);
2548 	struct hlist_head *hash_head = chainhashentry(chain_key);
2549 	struct lock_chain *chain;
2550 	int i, j;
2551 
2552 	/*
2553 	 * The caller must hold the graph lock and have IRQs disabled to
2554 	 * make this an IRQ-safe lock; for recursion reasons lockdep won't
2555 	 * complain about its own locking errors, hence the check below.
2556 	 */
2557 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2558 		return 0;
2559 
2560 	chain = alloc_lock_chain();
2561 	if (!chain) {
2562 		if (!debug_locks_off_graph_unlock())
2563 			return 0;
2564 
2565 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2566 		dump_stack();
2567 		return 0;
2568 	}
2569 	chain->chain_key = chain_key;
2570 	chain->irq_context = hlock->irq_context;
2571 	i = get_first_held_lock(curr, hlock);
2572 	chain->depth = curr->lockdep_depth + 1 - i;
2573 
2574 	BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2575 	BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
2576 	BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
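	/*
	 * Note (illustrative, based on the current struct lock_chain layout):
	 * ::base and ::depth are narrow bitfields (24 and 6 bits wide), and
	 * each chain_hlocks[] slot is a u16 holding a class index, so the
	 * checks above make sure chain_hlocks[], held_locks[] and
	 * lock_classes[] stay addressable with those widths.
	 */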
2577 
2578 	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2579 		chain->base = nr_chain_hlocks;
2580 		for (j = 0; j < chain->depth - 1; j++, i++) {
2581 			int lock_id = curr->held_locks[i].class_idx - 1;
2582 			chain_hlocks[chain->base + j] = lock_id;
2583 		}
2584 		chain_hlocks[chain->base + j] = class - lock_classes;
2585 		nr_chain_hlocks += chain->depth;
2586 	} else {
2587 		if (!debug_locks_off_graph_unlock())
2588 			return 0;
2589 
2590 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2591 		dump_stack();
2592 		return 0;
2593 	}
2594 
2595 	hlist_add_head_rcu(&chain->entry, hash_head);
2596 	debug_atomic_inc(chain_lookup_misses);
2597 	inc_chains();
2598 
2599 	return 1;
2600 }
2601 
2602 /*
2603  * Look up a dependency chain. Must be called with either the graph lock or
2604  * the RCU read lock held.
2605  */
2606 static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2607 {
2608 	struct hlist_head *hash_head = chainhashentry(chain_key);
2609 	struct lock_chain *chain;
2610 
2611 	hlist_for_each_entry_rcu(chain, hash_head, entry) {
2612 		if (READ_ONCE(chain->chain_key) == chain_key) {
2613 			debug_atomic_inc(chain_lookup_hits);
2614 			return chain;
2615 		}
2616 	}
2617 	return NULL;
2618 }
2619 
2620 /*
2621  * If the key is not yet present in the dependency chain cache, then
2622  * add it and return 1 - in this case the new dependency chain is
2623  * validated. If the key is already hashed, return 0.
2624  * (On a return value of 1, graph_lock is held.)
2625  */
2626 static inline int lookup_chain_cache_add(struct task_struct *curr,
2627 					 struct held_lock *hlock,
2628 					 u64 chain_key)
2629 {
2630 	struct lock_class *class = hlock_class(hlock);
2631 	struct lock_chain *chain = lookup_chain_cache(chain_key);
2632 
2633 	if (chain) {
2634 cache_hit:
2635 		if (!check_no_collision(curr, hlock, chain))
2636 			return 0;
2637 
2638 		if (very_verbose(class)) {
2639 			printk("\nhash chain already cached, key: "
2640 					"%016Lx tail class: [%px] %s\n",
2641 					(unsigned long long)chain_key,
2642 					class->key, class->name);
2643 		}
2644 
2645 		return 0;
2646 	}
2647 
2648 	if (very_verbose(class)) {
2649 		printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
2650 			(unsigned long long)chain_key, class->key, class->name);
2651 	}
2652 
2653 	if (!graph_lock())
2654 		return 0;
2655 
2656 	/*
2657 	 * We have to walk the chain again locked - to avoid duplicates:
2658 	 */
2659 	chain = lookup_chain_cache(chain_key);
2660 	if (chain) {
2661 		graph_unlock();
2662 		goto cache_hit;
2663 	}
2664 
2665 	if (!add_chain_cache(curr, hlock, chain_key))
2666 		return 0;
2667 
2668 	return 1;
2669 }
2670 
2671 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2672 		struct held_lock *hlock, int chain_head, u64 chain_key)
2673 {
2674 	/*
2675 	 * Trylock needs to maintain the stack of held locks, but it
2676 	 * does not add new dependencies, because trylock can be done
2677 	 * in any order.
2678 	 *
2679 	 * We look up the chain_key and do the O(N^2) check and update of
2680 	 * the dependencies only if this is a new dependency chain.
2681 	 * (If lookup_chain_cache_add() returns 1, it acquires
2682 	 * graph_lock for us.)
2683 	 */
2684 	if (!hlock->trylock && hlock->check &&
2685 	    lookup_chain_cache_add(curr, hlock, chain_key)) {
2686 		/*
2687 		 * Check whether last held lock:
2688 		 *
2689 		 * - is irq-safe, if this lock is irq-unsafe
2690 		 * - is softirq-safe, if this lock is hardirq-unsafe
2691 		 *
2692 		 * And check whether the new lock's dependency graph
2693 		 * could lead back to the previous lock.
2694 		 *
2695 		 * Any of these scenarios could lead to a deadlock. If all
2696 		 * validations pass, the new dependencies are added below.
2697 		 */
2698 		int ret = check_deadlock(curr, hlock, lock, hlock->read);
2699 
2700 		if (!ret)
2701 			return 0;
2702 		/*
2703 		 * Mark recursive read, as we jump over it when
2704 		 * building dependencies (just like we jump over
2705 		 * trylock entries):
2706 		 */
2707 		if (ret == 2)
2708 			hlock->read = 2;
2709 		/*
2710 		 * Add dependency only if this lock is not the head
2711 		 * of the chain, and if it's not a secondary read-lock:
2712 		 */
2713 		if (!chain_head && ret != 2) {
2714 			if (!check_prevs_add(curr, hlock))
2715 				return 0;
2716 		}
2717 
2718 		graph_unlock();
2719 	} else {
2720 		/* after lookup_chain_cache_add(): */
2721 		if (unlikely(!debug_locks))
2722 			return 0;
2723 	}
2724 
2725 	return 1;
2726 }
2727 #else
2728 static inline int validate_chain(struct task_struct *curr,
2729 	       	struct lockdep_map *lock, struct held_lock *hlock,
2730 		int chain_head, u64 chain_key)
2731 {
2732 	return 1;
2733 }
2734 #endif
2735 
2736 /*
2737  * We are building curr_chain_key incrementally, so double-check
2738  * it from scratch, to make sure that it's done correctly:
2739  */
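/*
 * Illustrative example: for a held stack [A, B, C] within one irq context
 * the expected value is
 *
 *	iterate_chain_key(iterate_chain_key(iterate_chain_key(0, A.class_idx),
 *			  B.class_idx), C.class_idx)
 *
 * with the running key reset to 0 whenever the stack crosses into another
 * irq context, mirroring how __lock_acquire() builds curr_chain_key.
 */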
2740 static void check_chain_key(struct task_struct *curr)
2741 {
2742 #ifdef CONFIG_DEBUG_LOCKDEP
2743 	struct held_lock *hlock, *prev_hlock = NULL;
2744 	unsigned int i;
2745 	u64 chain_key = 0;
2746 
2747 	for (i = 0; i < curr->lockdep_depth; i++) {
2748 		hlock = curr->held_locks + i;
2749 		if (chain_key != hlock->prev_chain_key) {
2750 			debug_locks_off();
2751 			/*
2752 			 * We got mighty confused, our chain keys don't match
2753 			 * what we expect; did someone trample on our task state?
2754 			 */
2755 			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2756 				curr->lockdep_depth, i,
2757 				(unsigned long long)chain_key,
2758 				(unsigned long long)hlock->prev_chain_key);
2759 			return;
2760 		}
2761 		/*
2762 		 * Whoops ran out of static storage again?
2763 		 */
2764 		if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2765 			return;
2766 
2767 		if (prev_hlock && (prev_hlock->irq_context !=
2768 							hlock->irq_context))
2769 			chain_key = 0;
2770 		chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2771 		prev_hlock = hlock;
2772 	}
2773 	if (chain_key != curr->curr_chain_key) {
2774 		debug_locks_off();
2775 		/*
2776 		 * More smoking hash instead of calculating it, damn see these
2777 		 * numbers float.. I bet that a pink elephant stepped on my memory.
2778 		 */
2779 		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2780 			curr->lockdep_depth, i,
2781 			(unsigned long long)chain_key,
2782 			(unsigned long long)curr->curr_chain_key);
2783 	}
2784 #endif
2785 }
2786 
2787 static void
2788 print_usage_bug_scenario(struct held_lock *lock)
2789 {
2790 	struct lock_class *class = hlock_class(lock);
2791 
2792 	printk(" Possible unsafe locking scenario:\n\n");
2793 	printk("       CPU0\n");
2794 	printk("       ----\n");
2795 	printk("  lock(");
2796 	__print_lock_name(class);
2797 	printk(KERN_CONT ");\n");
2798 	printk("  <Interrupt>\n");
2799 	printk("    lock(");
2800 	__print_lock_name(class);
2801 	printk(KERN_CONT ");\n");
2802 	printk("\n *** DEADLOCK ***\n\n");
2803 }
2804 
2805 static int
2806 print_usage_bug(struct task_struct *curr, struct held_lock *this,
2807 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2808 {
2809 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2810 		return 0;
2811 
2812 	pr_warn("\n");
2813 	pr_warn("================================\n");
2814 	pr_warn("WARNING: inconsistent lock state\n");
2815 	print_kernel_ident();
2816 	pr_warn("--------------------------------\n");
2817 
2818 	pr_warn("inconsistent {%s} -> {%s} usage.\n",
2819 		usage_str[prev_bit], usage_str[new_bit]);
2820 
2821 	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2822 		curr->comm, task_pid_nr(curr),
2823 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2824 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2825 		trace_hardirqs_enabled(curr),
2826 		trace_softirqs_enabled(curr));
2827 	print_lock(this);
2828 
2829 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
2830 	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2831 
2832 	print_irqtrace_events(curr);
2833 	pr_warn("\nother info that might help us debug this:\n");
2834 	print_usage_bug_scenario(this);
2835 
2836 	lockdep_print_held_locks(curr);
2837 
2838 	pr_warn("\nstack backtrace:\n");
2839 	dump_stack();
2840 
2841 	return 0;
2842 }
2843 
2844 /*
2845  * Print out an error if an invalid bit is set:
2846  */
2847 static inline int
2848 valid_state(struct task_struct *curr, struct held_lock *this,
2849 	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2850 {
2851 	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2852 		return print_usage_bug(curr, this, bad_bit, new_bit);
2853 	return 1;
2854 }
2855 
2856 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2857 		     enum lock_usage_bit new_bit);
2858 
2859 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2860 
2861 /*
2862  * print irq inversion bug:
2863  */
2864 static int
2865 print_irq_inversion_bug(struct task_struct *curr,
2866 			struct lock_list *root, struct lock_list *other,
2867 			struct held_lock *this, int forwards,
2868 			const char *irqclass)
2869 {
2870 	struct lock_list *entry = other;
2871 	struct lock_list *middle = NULL;
2872 	int depth;
2873 
2874 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2875 		return 0;
2876 
2877 	pr_warn("\n");
2878 	pr_warn("========================================================\n");
2879 	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
2880 	print_kernel_ident();
2881 	pr_warn("--------------------------------------------------------\n");
2882 	pr_warn("%s/%d just changed the state of lock:\n",
2883 		curr->comm, task_pid_nr(curr));
2884 	print_lock(this);
2885 	if (forwards)
2886 		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2887 	else
2888 		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2889 	print_lock_name(other->class);
2890 	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2891 
2892 	pr_warn("\nother info that might help us debug this:\n");
2893 
2894 	/* Find a middle lock (if one exists) */
2895 	depth = get_lock_depth(other);
2896 	do {
2897 		if (depth == 0 && (entry != root)) {
2898 			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
2899 			break;
2900 		}
2901 		middle = entry;
2902 		entry = get_lock_parent(entry);
2903 		depth--;
2904 	} while (entry && entry != root && (depth >= 0));
2905 	if (forwards)
2906 		print_irq_lock_scenario(root, other,
2907 			middle ? middle->class : root->class, other->class);
2908 	else
2909 		print_irq_lock_scenario(other, root,
2910 			middle ? middle->class : other->class, root->class);
2911 
2912 	lockdep_print_held_locks(curr);
2913 
2914 	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2915 	if (!save_trace(&root->trace))
2916 		return 0;
2917 	print_shortest_lock_dependencies(other, root);
2918 
2919 	pr_warn("\nstack backtrace:\n");
2920 	dump_stack();
2921 
2922 	return 0;
2923 }
2924 
2925 /*
2926  * Prove that in the forwards-direction subgraph starting at <this>
2927  * there is no lock matching <mask>:
2928  */
2929 static int
2930 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2931 		     enum lock_usage_bit bit, const char *irqclass)
2932 {
2933 	int ret;
2934 	struct lock_list root;
2935 	struct lock_list *uninitialized_var(target_entry);
2936 
2937 	root.parent = NULL;
2938 	root.class = hlock_class(this);
2939 	ret = find_usage_forwards(&root, bit, &target_entry);
2940 	if (ret < 0)
2941 		return print_bfs_bug(ret);
2942 	if (ret == 1)
2943 		return ret;
2944 
2945 	return print_irq_inversion_bug(curr, &root, target_entry,
2946 					this, 1, irqclass);
2947 }
2948 
2949 /*
2950  * Prove that in the backwards-direction subgraph starting at <this>
2951  * there is no lock matching <mask>:
2952  */
2953 static int
2954 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2955 		      enum lock_usage_bit bit, const char *irqclass)
2956 {
2957 	int ret;
2958 	struct lock_list root;
2959 	struct lock_list *uninitialized_var(target_entry);
2960 
2961 	root.parent = NULL;
2962 	root.class = hlock_class(this);
2963 	ret = find_usage_backwards(&root, bit, &target_entry);
2964 	if (ret < 0)
2965 		return print_bfs_bug(ret);
2966 	if (ret == 1)
2967 		return ret;
2968 
2969 	return print_irq_inversion_bug(curr, &root, target_entry,
2970 					this, 0, irqclass);
2971 }
2972 
2973 void print_irqtrace_events(struct task_struct *curr)
2974 {
2975 	printk("irq event stamp: %u\n", curr->irq_events);
2976 	printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
2977 		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2978 		(void *)curr->hardirq_enable_ip);
2979 	printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
2980 		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2981 		(void *)curr->hardirq_disable_ip);
2982 	printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
2983 		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2984 		(void *)curr->softirq_enable_ip);
2985 	printk("softirqs last disabled at (%u): [<%px>] %pS\n",
2986 		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2987 		(void *)curr->softirq_disable_ip);
2988 }
2989 
2990 static int HARDIRQ_verbose(struct lock_class *class)
2991 {
2992 #if HARDIRQ_VERBOSE
2993 	return class_filter(class);
2994 #endif
2995 	return 0;
2996 }
2997 
2998 static int SOFTIRQ_verbose(struct lock_class *class)
2999 {
3000 #if SOFTIRQ_VERBOSE
3001 	return class_filter(class);
3002 #endif
3003 	return 0;
3004 }
3005 
3006 #define STRICT_READ_CHECKS	1
3007 
3008 static int (*state_verbose_f[])(struct lock_class *class) = {
3009 #define LOCKDEP_STATE(__STATE) \
3010 	__STATE##_verbose,
3011 #include "lockdep_states.h"
3012 #undef LOCKDEP_STATE
3013 };
3014 
3015 static inline int state_verbose(enum lock_usage_bit bit,
3016 				struct lock_class *class)
3017 {
3018 	return state_verbose_f[bit >> 2](class);
3019 }
3020 
3021 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
3022 			     enum lock_usage_bit bit, const char *name);
3023 
3024 static int
3025 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3026 		enum lock_usage_bit new_bit)
3027 {
3028 	int excl_bit = exclusive_bit(new_bit);
3029 	int read = new_bit & LOCK_USAGE_READ_MASK;
3030 	int dir = new_bit & LOCK_USAGE_DIR_MASK;
3031 
3032 	/*
3033 	 * mark USED_IN has to look forwards -- to ensure no dependency
3034 	 * has ENABLED state, which would allow recursion deadlocks.
3035 	 *
3036 	 * mark ENABLED has to look backwards -- to ensure no dependee
3037 	 * has USED_IN state, which, again, would allow  recursion deadlocks.
3038 	 */
3039 	check_usage_f usage = dir ?
3040 		check_usage_backwards : check_usage_forwards;
3041 
3042 	/*
3043 	 * Validate that this particular lock does not have conflicting
3044 	 * usage states.
3045 	 */
3046 	if (!valid_state(curr, this, new_bit, excl_bit))
3047 		return 0;
3048 
3049 	/*
3050 	 * Validate that the lock dependencies don't have conflicting usage
3051 	 * states.
3052 	 */
3053 	if ((!read || !dir || STRICT_READ_CHECKS) &&
3054 			!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
3055 		return 0;
3056 
3057 	/*
3058 	 * Check for read in write conflicts
3059 	 */
3060 	if (!read) {
3061 		if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
3062 			return 0;
3063 
3064 		if (STRICT_READ_CHECKS &&
3065 			!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
3066 				state_name(new_bit + LOCK_USAGE_READ_MASK)))
3067 			return 0;
3068 	}
3069 
3070 	if (state_verbose(new_bit, hlock_class(this)))
3071 		return 2;
3072 
3073 	return 1;
3074 }
3075 
3076 /*
3077  * Mark all held locks with a usage bit:
3078  */
3079 static int
3080 mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
3081 {
3082 	struct held_lock *hlock;
3083 	int i;
3084 
3085 	for (i = 0; i < curr->lockdep_depth; i++) {
3086 		enum lock_usage_bit hlock_bit = base_bit;
3087 		hlock = curr->held_locks + i;
3088 
3089 		if (hlock->read)
3090 			hlock_bit += LOCK_USAGE_READ_MASK;
3091 
3092 		BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
3093 
3094 		if (!hlock->check)
3095 			continue;
3096 
3097 		if (!mark_lock(curr, hlock, hlock_bit))
3098 			return 0;
3099 	}
3100 
3101 	return 1;
3102 }
3103 
3104 /*
3105  * Hardirqs will be enabled:
3106  */
3107 static void __trace_hardirqs_on_caller(unsigned long ip)
3108 {
3109 	struct task_struct *curr = current;
3110 
3111 	/* we'll do an OFF -> ON transition: */
3112 	curr->hardirqs_enabled = 1;
3113 
3114 	/*
3115 	 * We are going to turn hardirqs on, so set the
3116 	 * usage bit for all held locks:
3117 	 */
3118 	if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
3119 		return;
3120 	/*
3121 	 * If we have softirqs enabled, then set the usage
3122 	 * bit for all held locks. (disabled hardirqs prevented
3123 	 * this bit from being set before)
3124 	 */
3125 	if (curr->softirqs_enabled)
3126 		if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
3127 			return;
3128 
3129 	curr->hardirq_enable_ip = ip;
3130 	curr->hardirq_enable_event = ++curr->irq_events;
3131 	debug_atomic_inc(hardirqs_on_events);
3132 }
3133 
3134 void lockdep_hardirqs_on(unsigned long ip)
3135 {
3136 	if (unlikely(!debug_locks || current->lockdep_recursion))
3137 		return;
3138 
3139 	if (unlikely(current->hardirqs_enabled)) {
3140 		/*
3141 		 * Neither irqs nor preemption are disabled here,
3142 		 * so this is racy by nature, but losing one hit
3143 		 * in a stat is not a big deal.
3144 		 */
3145 		__debug_atomic_inc(redundant_hardirqs_on);
3146 		return;
3147 	}
3148 
3149 	/*
3150 	 * We're enabling irqs and according to our state above irqs weren't
3151 	 * already enabled, yet we find the hardware thinks they are in fact
3152 	 * enabled.. someone messed up their IRQ state tracing.
3153 	 */
3154 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3155 		return;
3156 
3157 	/*
3158 	 * See the fine text that goes along with this variable definition.
3159 	 */
3160 	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
3161 		return;
3162 
3163 	/*
3164 	 * Can't allow enabling interrupts while in an interrupt handler,
3165 	 * that's general bad form and such. Recursion, limited stack etc..
3166 	 */
3167 	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
3168 		return;
3169 
3170 	current->lockdep_recursion = 1;
3171 	__trace_hardirqs_on_caller(ip);
3172 	current->lockdep_recursion = 0;
3173 }
3174 NOKPROBE_SYMBOL(lockdep_hardirqs_on);
3175 
3176 /*
3177  * Hardirqs were disabled:
3178  */
3179 void lockdep_hardirqs_off(unsigned long ip)
3180 {
3181 	struct task_struct *curr = current;
3182 
3183 	if (unlikely(!debug_locks || current->lockdep_recursion))
3184 		return;
3185 
3186 	/*
3187 	 * So we're supposed to get called after you mask local IRQs, but for
3188 	 * some reason the hardware doesn't quite think you did a proper job.
3189 	 */
3190 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3191 		return;
3192 
3193 	if (curr->hardirqs_enabled) {
3194 		/*
3195 		 * We have done an ON -> OFF transition:
3196 		 */
3197 		curr->hardirqs_enabled = 0;
3198 		curr->hardirq_disable_ip = ip;
3199 		curr->hardirq_disable_event = ++curr->irq_events;
3200 		debug_atomic_inc(hardirqs_off_events);
3201 	} else
3202 		debug_atomic_inc(redundant_hardirqs_off);
3203 }
3204 NOKPROBE_SYMBOL(lockdep_hardirqs_off);
3205 
3206 /*
3207  * Softirqs will be enabled:
3208  */
3209 void trace_softirqs_on(unsigned long ip)
3210 {
3211 	struct task_struct *curr = current;
3212 
3213 	if (unlikely(!debug_locks || current->lockdep_recursion))
3214 		return;
3215 
3216 	/*
3217 	 * We fancy IRQs being disabled here, see softirq.c, avoids
3218 	 * funny state and nesting things.
3219 	 */
3220 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3221 		return;
3222 
3223 	if (curr->softirqs_enabled) {
3224 		debug_atomic_inc(redundant_softirqs_on);
3225 		return;
3226 	}
3227 
3228 	current->lockdep_recursion = 1;
3229 	/*
3230 	 * We'll do an OFF -> ON transition:
3231 	 */
3232 	curr->softirqs_enabled = 1;
3233 	curr->softirq_enable_ip = ip;
3234 	curr->softirq_enable_event = ++curr->irq_events;
3235 	debug_atomic_inc(softirqs_on_events);
3236 	/*
3237 	 * We are going to turn softirqs on, so set the
3238 	 * usage bit for all held locks, if hardirqs are
3239 	 * enabled too:
3240 	 */
3241 	if (curr->hardirqs_enabled)
3242 		mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
3243 	current->lockdep_recursion = 0;
3244 }
3245 
3246 /*
3247  * Softirqs were disabled:
3248  */
3249 void trace_softirqs_off(unsigned long ip)
3250 {
3251 	struct task_struct *curr = current;
3252 
3253 	if (unlikely(!debug_locks || current->lockdep_recursion))
3254 		return;
3255 
3256 	/*
3257 	 * We fancy IRQs being disabled here, see softirq.c
3258 	 */
3259 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3260 		return;
3261 
3262 	if (curr->softirqs_enabled) {
3263 		/*
3264 		 * We have done an ON -> OFF transition:
3265 		 */
3266 		curr->softirqs_enabled = 0;
3267 		curr->softirq_disable_ip = ip;
3268 		curr->softirq_disable_event = ++curr->irq_events;
3269 		debug_atomic_inc(softirqs_off_events);
3270 		/*
3271 		 * Whoops, we wanted softirqs off, so why aren't they?
3272 		 */
3273 		DEBUG_LOCKS_WARN_ON(!softirq_count());
3274 	} else
3275 		debug_atomic_inc(redundant_softirqs_off);
3276 }
3277 
3278 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
3279 {
3280 	/*
3281 	 * If non-trylock use in a hardirq or softirq context, then
3282 	 * mark the lock as used in these contexts:
3283 	 */
3284 	if (!hlock->trylock) {
3285 		if (hlock->read) {
3286 			if (curr->hardirq_context)
3287 				if (!mark_lock(curr, hlock,
3288 						LOCK_USED_IN_HARDIRQ_READ))
3289 					return 0;
3290 			if (curr->softirq_context)
3291 				if (!mark_lock(curr, hlock,
3292 						LOCK_USED_IN_SOFTIRQ_READ))
3293 					return 0;
3294 		} else {
3295 			if (curr->hardirq_context)
3296 				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
3297 					return 0;
3298 			if (curr->softirq_context)
3299 				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
3300 					return 0;
3301 		}
3302 	}
3303 	if (!hlock->hardirqs_off) {
3304 		if (hlock->read) {
3305 			if (!mark_lock(curr, hlock,
3306 					LOCK_ENABLED_HARDIRQ_READ))
3307 				return 0;
3308 			if (curr->softirqs_enabled)
3309 				if (!mark_lock(curr, hlock,
3310 						LOCK_ENABLED_SOFTIRQ_READ))
3311 					return 0;
3312 		} else {
3313 			if (!mark_lock(curr, hlock,
3314 					LOCK_ENABLED_HARDIRQ))
3315 				return 0;
3316 			if (curr->softirqs_enabled)
3317 				if (!mark_lock(curr, hlock,
3318 						LOCK_ENABLED_SOFTIRQ))
3319 					return 0;
3320 		}
3321 	}
3322 
3323 	return 1;
3324 }
3325 
3326 static inline unsigned int task_irq_context(struct task_struct *task)
3327 {
3328 	return 2 * !!task->hardirq_context + !!task->softirq_context;
3329 }
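
/*
 * Illustrative mapping of the encoding above: 0 == process context,
 * 1 == softirq, 2 == hardirq, 3 == a hardirq that interrupted a softirq.
 * Held locks whose irq_context differs are chained separately.
 */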
3330 
3331 static int separate_irq_context(struct task_struct *curr,
3332 		struct held_lock *hlock)
3333 {
3334 	unsigned int depth = curr->lockdep_depth;
3335 
3336 	/*
3337 	 * Keep track of points where we cross into an interrupt context:
3338 	 */
3339 	if (depth) {
3340 		struct held_lock *prev_hlock;
3341 
3342 		prev_hlock = curr->held_locks + depth-1;
3343 		/*
3344 		 * If we cross into another context, reset the
3345 		 * hash key (this also prevents the checking and the
3346 		 * adding of the dependency to 'prev'):
3347 		 */
3348 		if (prev_hlock->irq_context != hlock->irq_context)
3349 			return 1;
3350 	}
3351 	return 0;
3352 }
3353 
3354 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3355 
3356 static inline
3357 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3358 		enum lock_usage_bit new_bit)
3359 {
3360 	WARN_ON(1); /* Impossible, innit? We don't have TRACE_IRQFLAGS */
3361 	return 1;
3362 }
3363 
3364 static inline int mark_irqflags(struct task_struct *curr,
3365 		struct held_lock *hlock)
3366 {
3367 	return 1;
3368 }
3369 
3370 static inline unsigned int task_irq_context(struct task_struct *task)
3371 {
3372 	return 0;
3373 }
3374 
3375 static inline int separate_irq_context(struct task_struct *curr,
3376 		struct held_lock *hlock)
3377 {
3378 	return 0;
3379 }
3380 
3381 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3382 
3383 /*
3384  * Mark a lock with a usage bit, and validate the state transition:
3385  */
3386 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3387 			     enum lock_usage_bit new_bit)
3388 {
3389 	unsigned int new_mask = 1 << new_bit, ret = 1;
3390 
3391 	/*
3392 	 * If already set then do not dirty the cacheline,
3393 	 * nor do any checks:
3394 	 */
3395 	if (likely(hlock_class(this)->usage_mask & new_mask))
3396 		return 1;
3397 
3398 	if (!graph_lock())
3399 		return 0;
3400 	/*
3401 	 * Make sure we didn't race:
3402 	 */
3403 	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3404 		graph_unlock();
3405 		return 1;
3406 	}
3407 
3408 	hlock_class(this)->usage_mask |= new_mask;
3409 
3410 	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3411 		return 0;
3412 
3413 	switch (new_bit) {
3414 #define LOCKDEP_STATE(__STATE)			\
3415 	case LOCK_USED_IN_##__STATE:		\
3416 	case LOCK_USED_IN_##__STATE##_READ:	\
3417 	case LOCK_ENABLED_##__STATE:		\
3418 	case LOCK_ENABLED_##__STATE##_READ:
3419 #include "lockdep_states.h"
3420 #undef LOCKDEP_STATE
3421 		ret = mark_lock_irq(curr, this, new_bit);
3422 		if (!ret)
3423 			return 0;
3424 		break;
3425 	case LOCK_USED:
3426 		debug_atomic_dec(nr_unused_locks);
3427 		break;
3428 	default:
3429 		if (!debug_locks_off_graph_unlock())
3430 			return 0;
3431 		WARN_ON(1);
3432 		return 0;
3433 	}
3434 
3435 	graph_unlock();
3436 
3437 	/*
3438 	 * We must printk outside of the graph_lock:
3439 	 */
3440 	if (ret == 2) {
3441 		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3442 		print_lock(this);
3443 		print_irqtrace_events(curr);
3444 		dump_stack();
3445 	}
3446 
3447 	return ret;
3448 }
3449 
3450 /*
3451  * Initialize a lock instance's lock-class mapping info:
3452  */
3453 void lockdep_init_map(struct lockdep_map *lock, const char *name,
3454 		      struct lock_class_key *key, int subclass)
3455 {
3456 	int i;
3457 
3458 	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3459 		lock->class_cache[i] = NULL;
3460 
3461 #ifdef CONFIG_LOCK_STAT
3462 	lock->cpu = raw_smp_processor_id();
3463 #endif
3464 
3465 	/*
3466 	 * Can't be having no nameless bastards around this place!
3467 	 */
3468 	if (DEBUG_LOCKS_WARN_ON(!name)) {
3469 		lock->name = "NULL";
3470 		return;
3471 	}
3472 
3473 	lock->name = name;
3474 
3475 	/*
3476 	 * No key, no joy, we need to hash something.
3477 	 */
3478 	if (DEBUG_LOCKS_WARN_ON(!key))
3479 		return;
3480 	/*
3481 	 * Sanity check, the lock-class key must either have been allocated
3482 	 * statically or must have been registered as a dynamic key.
3483 	 */
3484 	if (!static_obj(key) && !is_dynamic_key(key)) {
3485 		if (debug_locks)
3486 			printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
3487 		DEBUG_LOCKS_WARN_ON(1);
3488 		return;
3489 	}
3490 	lock->key = key;
3491 
3492 	if (unlikely(!debug_locks))
3493 		return;
3494 
3495 	if (subclass) {
3496 		unsigned long flags;
3497 
3498 		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3499 			return;
3500 
3501 		raw_local_irq_save(flags);
3502 		current->lockdep_recursion = 1;
3503 		register_lock_class(lock, subclass, 1);
3504 		current->lockdep_recursion = 0;
3505 		raw_local_irq_restore(flags);
3506 	}
3507 }
3508 EXPORT_SYMBOL_GPL(lockdep_init_map);
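
/*
 * Minimal usage sketch (illustrative only, not a real call site): lock
 * initializers such as spin_lock_init() typically expand to something like
 *
 *	static struct lock_class_key __key;
 *	lockdep_init_map(&lock->dep_map, "lock_name", &__key, 0);
 *
 * so every lock initialized at the same source location shares one
 * lock_class_key and hence one lock class.
 */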
3509 
3510 struct lock_class_key __lockdep_no_validate__;
3511 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3512 
3513 static int
3514 print_lock_nested_lock_not_held(struct task_struct *curr,
3515 				struct held_lock *hlock,
3516 				unsigned long ip)
3517 {
3518 	if (!debug_locks_off())
3519 		return 0;
3520 	if (debug_locks_silent)
3521 		return 0;
3522 
3523 	pr_warn("\n");
3524 	pr_warn("==================================\n");
3525 	pr_warn("WARNING: Nested lock was not taken\n");
3526 	print_kernel_ident();
3527 	pr_warn("----------------------------------\n");
3528 
3529 	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3530 	print_lock(hlock);
3531 
3532 	pr_warn("\nbut this task is not holding:\n");
3533 	pr_warn("%s\n", hlock->nest_lock->name);
3534 
3535 	pr_warn("\nstack backtrace:\n");
3536 	dump_stack();
3537 
3538 	pr_warn("\nother info that might help us debug this:\n");
3539 	lockdep_print_held_locks(curr);
3540 
3541 	pr_warn("\nstack backtrace:\n");
3542 	dump_stack();
3543 
3544 	return 0;
3545 }
3546 
3547 static int __lock_is_held(const struct lockdep_map *lock, int read);
3548 
3549 /*
3550  * This gets called for every mutex_lock*()/spin_lock*() operation.
3551  * We maintain the dependency maps and validate the locking attempt:
3552  *
3553  * The callers must make sure that IRQs are disabled before calling it,
3554  * otherwise we could get an interrupt which would want to take locks,
3555  * which would end up in lockdep again.
3556  */
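/*
 * Rough call path for the common case (illustrative only):
 *
 *	spin_lock(&lock)
 *	  -> spin_acquire(&lock->dep_map, 0, 0, _RET_IP_)
 *	    -> lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_)
 *	      -> __lock_acquire(..., trylock=0, read=0, check=1, ...)
 */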
3557 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3558 			  int trylock, int read, int check, int hardirqs_off,
3559 			  struct lockdep_map *nest_lock, unsigned long ip,
3560 			  int references, int pin_count)
3561 {
3562 	struct task_struct *curr = current;
3563 	struct lock_class *class = NULL;
3564 	struct held_lock *hlock;
3565 	unsigned int depth;
3566 	int chain_head = 0;
3567 	int class_idx;
3568 	u64 chain_key;
3569 
3570 	if (unlikely(!debug_locks))
3571 		return 0;
3572 
3573 	if (!prove_locking || lock->key == &__lockdep_no_validate__)
3574 		check = 0;
3575 
3576 	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3577 		class = lock->class_cache[subclass];
3578 	/*
3579 	 * Not cached?
3580 	 */
3581 	if (unlikely(!class)) {
3582 		class = register_lock_class(lock, subclass, 0);
3583 		if (!class)
3584 			return 0;
3585 	}
3586 
3587 	debug_class_ops_inc(class);
3588 
3589 	if (very_verbose(class)) {
3590 		printk("\nacquire class [%px] %s", class->key, class->name);
3591 		if (class->name_version > 1)
3592 			printk(KERN_CONT "#%d", class->name_version);
3593 		printk(KERN_CONT "\n");
3594 		dump_stack();
3595 	}
3596 
3597 	/*
3598 	 * Add the lock to the list of currently held locks.
3599 	 * (we dont increase the depth just yet, up until the
3600 	 * dependency checks are done)
3601 	 */
3602 	depth = curr->lockdep_depth;
3603 	/*
3604 	 * Ran out of static storage for our per-task lock stack again have we?
3605 	 */
3606 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3607 		return 0;
3608 
3609 	class_idx = class - lock_classes + 1;
3610 
3611 	if (depth) {
3612 		hlock = curr->held_locks + depth - 1;
3613 		if (hlock->class_idx == class_idx && nest_lock) {
3614 			if (hlock->references) {
3615 				/*
3616 				 * Check: unsigned int references:12, overflow.
3617 				 */
3618 				if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
3619 					return 0;
3620 
3621 				hlock->references++;
3622 			} else {
3623 				hlock->references = 2;
3624 			}
3625 
3626 			return 1;
3627 		}
3628 	}
3629 
3630 	hlock = curr->held_locks + depth;
3631 	/*
3632 	 * Plain impossible, we just registered it and checked it weren't no
3633 	 * NULL like.. I bet this mushroom I ate was good!
3634 	 */
3635 	if (DEBUG_LOCKS_WARN_ON(!class))
3636 		return 0;
3637 	hlock->class_idx = class_idx;
3638 	hlock->acquire_ip = ip;
3639 	hlock->instance = lock;
3640 	hlock->nest_lock = nest_lock;
3641 	hlock->irq_context = task_irq_context(curr);
3642 	hlock->trylock = trylock;
3643 	hlock->read = read;
3644 	hlock->check = check;
3645 	hlock->hardirqs_off = !!hardirqs_off;
3646 	hlock->references = references;
3647 #ifdef CONFIG_LOCK_STAT
3648 	hlock->waittime_stamp = 0;
3649 	hlock->holdtime_stamp = lockstat_clock();
3650 #endif
3651 	hlock->pin_count = pin_count;
3652 
3653 	if (check && !mark_irqflags(curr, hlock))
3654 		return 0;
3655 
3656 	/* mark it as used: */
3657 	if (!mark_lock(curr, hlock, LOCK_USED))
3658 		return 0;
3659 
3660 	/*
3661 	 * Calculate the chain hash: it's the combined hash of all the
3662 	 * lock keys along the dependency chain. We save the hash value
3663 	 * at every step so that we can get the current hash easily
3664 	 * after unlock. The chain hash is then used to cache dependency
3665 	 * results.
3666 	 *
3667 	 * The 'key ID' (the class index) is the most compact key value
3668 	 * we can use to drive the hash, not class->key.
3669 	 */
3670 	/*
3671 	 * Whoops, we did it again.. ran straight out of our static allocation.
3672 	 */
3673 	if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3674 		return 0;
3675 
3676 	chain_key = curr->curr_chain_key;
3677 	if (!depth) {
3678 		/*
3679 		 * How can we have a chain hash when we ain't got no keys?!
3680 		 */
3681 		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3682 			return 0;
3683 		chain_head = 1;
3684 	}
3685 
3686 	hlock->prev_chain_key = chain_key;
3687 	if (separate_irq_context(curr, hlock)) {
3688 		chain_key = 0;
3689 		chain_head = 1;
3690 	}
3691 	chain_key = iterate_chain_key(chain_key, class_idx);
3692 
3693 	if (nest_lock && !__lock_is_held(nest_lock, -1))
3694 		return print_lock_nested_lock_not_held(curr, hlock, ip);
3695 
3696 	if (!debug_locks_silent) {
3697 		WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
3698 		WARN_ON_ONCE(!hlock_class(hlock)->key);
3699 	}
3700 
3701 	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3702 		return 0;
3703 
3704 	curr->curr_chain_key = chain_key;
3705 	curr->lockdep_depth++;
3706 	check_chain_key(curr);
3707 #ifdef CONFIG_DEBUG_LOCKDEP
3708 	if (unlikely(!debug_locks))
3709 		return 0;
3710 #endif
3711 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3712 		debug_locks_off();
3713 		print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3714 		printk(KERN_DEBUG "depth: %i  max: %lu!\n",
3715 		       curr->lockdep_depth, MAX_LOCK_DEPTH);
3716 
3717 		lockdep_print_held_locks(current);
3718 		debug_show_all_locks();
3719 		dump_stack();
3720 
3721 		return 0;
3722 	}
3723 
3724 	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3725 		max_lockdep_depth = curr->lockdep_depth;
3726 
3727 	return 1;
3728 }
3729 
3730 static int
3731 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3732 			   unsigned long ip)
3733 {
3734 	if (!debug_locks_off())
3735 		return 0;
3736 	if (debug_locks_silent)
3737 		return 0;
3738 
3739 	pr_warn("\n");
3740 	pr_warn("=====================================\n");
3741 	pr_warn("WARNING: bad unlock balance detected!\n");
3742 	print_kernel_ident();
3743 	pr_warn("-------------------------------------\n");
3744 	pr_warn("%s/%d is trying to release lock (",
3745 		curr->comm, task_pid_nr(curr));
3746 	print_lockdep_cache(lock);
3747 	pr_cont(") at:\n");
3748 	print_ip_sym(ip);
3749 	pr_warn("but there are no more locks to release!\n");
3750 	pr_warn("\nother info that might help us debug this:\n");
3751 	lockdep_print_held_locks(curr);
3752 
3753 	pr_warn("\nstack backtrace:\n");
3754 	dump_stack();
3755 
3756 	return 0;
3757 }
3758 
3759 static int match_held_lock(const struct held_lock *hlock,
3760 					const struct lockdep_map *lock)
3761 {
3762 	if (hlock->instance == lock)
3763 		return 1;
3764 
3765 	if (hlock->references) {
3766 		const struct lock_class *class = lock->class_cache[0];
3767 
3768 		if (!class)
3769 			class = look_up_lock_class(lock, 0);
3770 
3771 		/*
3772 		 * If look_up_lock_class() failed to find a class, we're trying
3773 		 * to test if we hold a lock that has never yet been acquired.
3774 		 * Clearly if the lock hasn't been acquired _ever_, we're not
3775 		 * holding it either, so report failure.
3776 		 */
3777 		if (!class)
3778 			return 0;
3779 
3780 		/*
3781 		 * References, but not a lock we're actually ref-counting?
3782 		 * State got messed up, follow the sites that change ->references
3783 		 * and try to make sense of it.
3784 		 */
3785 		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3786 			return 0;
3787 
3788 		if (hlock->class_idx == class - lock_classes + 1)
3789 			return 1;
3790 	}
3791 
3792 	return 0;
3793 }
3794 
3795 /* @depth must not be zero */
3796 static struct held_lock *find_held_lock(struct task_struct *curr,
3797 					struct lockdep_map *lock,
3798 					unsigned int depth, int *idx)
3799 {
3800 	struct held_lock *ret, *hlock, *prev_hlock;
3801 	int i;
3802 
3803 	i = depth - 1;
3804 	hlock = curr->held_locks + i;
3805 	ret = hlock;
3806 	if (match_held_lock(hlock, lock))
3807 		goto out;
3808 
3809 	ret = NULL;
3810 	for (i--, prev_hlock = hlock--;
3811 	     i >= 0;
3812 	     i--, prev_hlock = hlock--) {
3813 		/*
3814 		 * We must not cross into another context:
3815 		 */
3816 		if (prev_hlock->irq_context != hlock->irq_context) {
3817 			ret = NULL;
3818 			break;
3819 		}
3820 		if (match_held_lock(hlock, lock)) {
3821 			ret = hlock;
3822 			break;
3823 		}
3824 	}
3825 
3826 out:
3827 	*idx = i;
3828 	return ret;
3829 }
3830 
3831 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3832 			      int idx)
3833 {
3834 	struct held_lock *hlock;
3835 
3836 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3837 		return 0;
3838 
3839 	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3840 		if (!__lock_acquire(hlock->instance,
3841 				    hlock_class(hlock)->subclass,
3842 				    hlock->trylock,
3843 				    hlock->read, hlock->check,
3844 				    hlock->hardirqs_off,
3845 				    hlock->nest_lock, hlock->acquire_ip,
3846 				    hlock->references, hlock->pin_count))
3847 			return 1;
3848 	}
3849 	return 0;
3850 }
3851 
3852 static int
3853 __lock_set_class(struct lockdep_map *lock, const char *name,
3854 		 struct lock_class_key *key, unsigned int subclass,
3855 		 unsigned long ip)
3856 {
3857 	struct task_struct *curr = current;
3858 	struct held_lock *hlock;
3859 	struct lock_class *class;
3860 	unsigned int depth;
3861 	int i;
3862 
3863 	if (unlikely(!debug_locks))
3864 		return 0;
3865 
3866 	depth = curr->lockdep_depth;
3867 	/*
3868 	 * This function is about (re)setting the class of a held lock,
3869 	 * yet we're not actually holding any locks. Naughty user!
3870 	 */
3871 	if (DEBUG_LOCKS_WARN_ON(!depth))
3872 		return 0;
3873 
3874 	hlock = find_held_lock(curr, lock, depth, &i);
3875 	if (!hlock)
3876 		return print_unlock_imbalance_bug(curr, lock, ip);
3877 
3878 	lockdep_init_map(lock, name, key, 0);
3879 	class = register_lock_class(lock, subclass, 0);
3880 	hlock->class_idx = class - lock_classes + 1;
3881 
3882 	curr->lockdep_depth = i;
3883 	curr->curr_chain_key = hlock->prev_chain_key;
3884 
3885 	if (reacquire_held_locks(curr, depth, i))
3886 		return 0;
3887 
3888 	/*
3889 	 * We took the held-lock stack apart and put it back together; the
3890 	 * resulting depth must be exactly what it was before.
3891 	 */
3892 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3893 		return 0;
3894 	return 1;
3895 }
3896 
3897 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3898 {
3899 	struct task_struct *curr = current;
3900 	struct held_lock *hlock;
3901 	unsigned int depth;
3902 	int i;
3903 
3904 	if (unlikely(!debug_locks))
3905 		return 0;
3906 
3907 	depth = curr->lockdep_depth;
3908 	/*
3909 	 * This function is about downgrading a held write lock to a read
3910 	 * lock, yet we're not actually holding any locks. Naughty user!
3911 	 */
3912 	if (DEBUG_LOCKS_WARN_ON(!depth))
3913 		return 0;
3914 
3915 	hlock = find_held_lock(curr, lock, depth, &i);
3916 	if (!hlock)
3917 		return print_unlock_imbalance_bug(curr, lock, ip);
3918 
3919 	curr->lockdep_depth = i;
3920 	curr->curr_chain_key = hlock->prev_chain_key;
3921 
3922 	WARN(hlock->read, "downgrading a read lock");
3923 	hlock->read = 1;
3924 	hlock->acquire_ip = ip;
3925 
3926 	if (reacquire_held_locks(curr, depth, i))
3927 		return 0;
3928 
3929 	/*
3930 	 * We took the held-lock stack apart and put it back together; the
3931 	 * resulting depth must be exactly what it was before.
3932 	 */
3933 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3934 		return 0;
3935 	return 1;
3936 }
3937 
3938 /*
3939  * Remove the lock from the list of currently held locks - this gets
3940  * called on mutex_unlock()/spin_unlock*() (or on a failed
3941  * mutex_lock_interruptible()).
3942  *
3943  * @nested is a historical artifact and needs a tree-wide cleanup.
3944  */
3945 static int
3946 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3947 {
3948 	struct task_struct *curr = current;
3949 	struct held_lock *hlock;
3950 	unsigned int depth;
3951 	int i;
3952 
3953 	if (unlikely(!debug_locks))
3954 		return 0;
3955 
3956 	depth = curr->lockdep_depth;
3957 	/*
3958 	 * We're being asked to release a lock, yet this task doesn't hold
3959 	 * any locks at all - that is an unlock imbalance.
3960 	 */
3961 	if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3962 		return print_unlock_imbalance_bug(curr, lock, ip);
3963 
3964 	/*
3965 	 * Check whether the lock exists in the current stack
3966 	 * of held locks:
3967 	 */
3968 	hlock = find_held_lock(curr, lock, depth, &i);
3969 	if (!hlock)
3970 		return print_unlock_imbalance_bug(curr, lock, ip);
3971 
3972 	if (hlock->instance == lock)
3973 		lock_release_holdtime(hlock);
3974 
3975 	WARN(hlock->pin_count, "releasing a pinned lock\n");
3976 
3977 	if (hlock->references) {
3978 		hlock->references--;
3979 		if (hlock->references) {
3980 			/*
3981 			 * We had, and after removing one, still have
3982 			 * references, the current lock stack is still
3983 			 * valid. We're done!
3984 			 */
3985 			return 1;
3986 		}
3987 	}
3988 
3989 	/*
3990 	 * We have the right lock to unlock, 'hlock' points to it.
3991 	 * Now we remove it from the stack, and add back the other
3992 	 * entries (if any), recalculating the hash along the way:
3993 	 */
3994 
3995 	curr->lockdep_depth = i;
3996 	curr->curr_chain_key = hlock->prev_chain_key;
3997 
3998 	/*
3999 	 * The most likely case is when the unlock is on the innermost
4000 	 * lock. In this case, we are done!
4001 	 */
4002 	if (i == depth-1)
4003 		return 1;
4004 
4005 	if (reacquire_held_locks(curr, depth, i + 1))
4006 		return 0;
4007 
4008 	/*
4009 	 * After releasing one lock and re-acquiring the rest, the depth
4010 	 * must be exactly one less than before - warn if it isn't.
4011 	 */
4012 	DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
4013 
4014 	/*
4015 	 * Since reacquire_held_locks() would have called check_chain_key()
4016 	 * indirectly via __lock_acquire(), we don't need to do it again
4017 	 * on return.
4018 	 */
4019 	return 0;
4020 }
4021 
4022 static nokprobe_inline
4023 int __lock_is_held(const struct lockdep_map *lock, int read)
4024 {
4025 	struct task_struct *curr = current;
4026 	int i;
4027 
4028 	for (i = 0; i < curr->lockdep_depth; i++) {
4029 		struct held_lock *hlock = curr->held_locks + i;
4030 
4031 		if (match_held_lock(hlock, lock)) {
4032 			if (read == -1 || hlock->read == read)
4033 				return 1;
4034 
4035 			return 0;
4036 		}
4037 	}
4038 
4039 	return 0;
4040 }
4041 
4042 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
4043 {
4044 	struct pin_cookie cookie = NIL_COOKIE;
4045 	struct task_struct *curr = current;
4046 	int i;
4047 
4048 	if (unlikely(!debug_locks))
4049 		return cookie;
4050 
4051 	for (i = 0; i < curr->lockdep_depth; i++) {
4052 		struct held_lock *hlock = curr->held_locks + i;
4053 
4054 		if (match_held_lock(hlock, lock)) {
4055 			/*
4056 			 * Grab 16 bits of randomness; this is sufficient to not
4057 			 * be guessable and still allows some pin nesting in
4058 			 * our u32 pin_count.
4059 			 */
4060 			cookie.val = 1 + (prandom_u32() >> 16);
4061 			hlock->pin_count += cookie.val;
4062 			return cookie;
4063 		}
4064 	}
4065 
4066 	WARN(1, "pinning an unheld lock\n");
4067 	return cookie;
4068 }
4069 
4070 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4071 {
4072 	struct task_struct *curr = current;
4073 	int i;
4074 
4075 	if (unlikely(!debug_locks))
4076 		return;
4077 
4078 	for (i = 0; i < curr->lockdep_depth; i++) {
4079 		struct held_lock *hlock = curr->held_locks + i;
4080 
4081 		if (match_held_lock(hlock, lock)) {
4082 			hlock->pin_count += cookie.val;
4083 			return;
4084 		}
4085 	}
4086 
4087 	WARN(1, "pinning an unheld lock\n");
4088 }
4089 
4090 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4091 {
4092 	struct task_struct *curr = current;
4093 	int i;
4094 
4095 	if (unlikely(!debug_locks))
4096 		return;
4097 
4098 	for (i = 0; i < curr->lockdep_depth; i++) {
4099 		struct held_lock *hlock = curr->held_locks + i;
4100 
4101 		if (match_held_lock(hlock, lock)) {
4102 			if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
4103 				return;
4104 
4105 			hlock->pin_count -= cookie.val;
4106 
4107 			if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
4108 				hlock->pin_count = 0;
4109 
4110 			return;
4111 		}
4112 	}
4113 
4114 	WARN(1, "unpinning an unheld lock\n");
4115 }
4116 
4117 /*
4118  * Check whether we follow the irq-flags state precisely:
4119  */
4120 static void check_flags(unsigned long flags)
4121 {
4122 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
4123     defined(CONFIG_TRACE_IRQFLAGS)
4124 	if (!debug_locks)
4125 		return;
4126 
4127 	if (irqs_disabled_flags(flags)) {
4128 		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
4129 			printk("possible reason: unannotated irqs-off.\n");
4130 		}
4131 	} else {
4132 		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
4133 			printk("possible reason: unannotated irqs-on.\n");
4134 		}
4135 	}
4136 
4137 	/*
4138 	 * We don't accurately track softirq state in e.g.
4139 	 * hardirq contexts (such as on 4KSTACKS), so only
4140 	 * check if not in hardirq contexts:
4141 	 */
4142 	if (!hardirq_count()) {
4143 		if (softirq_count()) {
4144 			/* like the above, but with softirqs */
4145 			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
4146 		} else {
4147 			/* like the above, but softirqs should be enabled here */
4148 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
4149 		}
4150 	}
4151 
4152 	if (!debug_locks)
4153 		print_irqtrace_events(current);
4154 #endif
4155 }
4156 
4157 void lock_set_class(struct lockdep_map *lock, const char *name,
4158 		    struct lock_class_key *key, unsigned int subclass,
4159 		    unsigned long ip)
4160 {
4161 	unsigned long flags;
4162 
4163 	if (unlikely(current->lockdep_recursion))
4164 		return;
4165 
4166 	raw_local_irq_save(flags);
4167 	current->lockdep_recursion = 1;
4168 	check_flags(flags);
4169 	if (__lock_set_class(lock, name, key, subclass, ip))
4170 		check_chain_key(current);
4171 	current->lockdep_recursion = 0;
4172 	raw_local_irq_restore(flags);
4173 }
4174 EXPORT_SYMBOL_GPL(lock_set_class);
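
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: lock_set_class() lets a caller re-annotate a lock it already holds.
 * The lock_set_subclass() helper in <linux/lockdep.h> forwards here with the
 * lock's existing name and key. "my_rq"/"other_rq" are hypothetical:
 *
 *	raw_spin_lock(&my_rq->lock);
 *	...
 *	// re-tag the held lock as nesting level 0 so that a later
 *	// raw_spin_lock_nested(&other_rq->lock, SINGLE_DEPTH_NESTING)
 *	// pairs with it without a false positive
 *	lock_set_subclass(&my_rq->lock.dep_map, 0, _RET_IP_);
 */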
4175 
4176 void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
4177 {
4178 	unsigned long flags;
4179 
4180 	if (unlikely(current->lockdep_recursion))
4181 		return;
4182 
4183 	raw_local_irq_save(flags);
4184 	current->lockdep_recursion = 1;
4185 	check_flags(flags);
4186 	if (__lock_downgrade(lock, ip))
4187 		check_chain_key(current);
4188 	current->lockdep_recursion = 0;
4189 	raw_local_irq_restore(flags);
4190 }
4191 EXPORT_SYMBOL_GPL(lock_downgrade);
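
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: the typical path into lock_downgrade() is downgrade_write() on an
 * rw_semaphore, which turns a held write lock into a read lock without ever
 * dropping it. "my_sem", "my_data" and consume() are hypothetical:
 *
 *	down_write(&my_sem);
 *	my_data.generation++;		// exclusive update
 *	downgrade_write(&my_sem);	// ends up calling lock_downgrade()
 *	consume(&my_data);		// continue as a reader only
 *	up_read(&my_sem);
 */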
4192 
4193 /*
4194  * We are not always called with irqs disabled - do that here,
4195  * and also avoid lockdep recursion:
4196  */
4197 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
4198 			  int trylock, int read, int check,
4199 			  struct lockdep_map *nest_lock, unsigned long ip)
4200 {
4201 	unsigned long flags;
4202 
4203 	if (unlikely(current->lockdep_recursion))
4204 		return;
4205 
4206 	raw_local_irq_save(flags);
4207 	check_flags(flags);
4208 
4209 	current->lockdep_recursion = 1;
4210 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
4211 	__lock_acquire(lock, subclass, trylock, read, check,
4212 		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
4213 	current->lockdep_recursion = 0;
4214 	raw_local_irq_restore(flags);
4215 }
4216 EXPORT_SYMBOL_GPL(lock_acquire);
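
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: lock_acquire() is normally reached via the primitives' annotation
 * wrappers rather than called directly. One hedged example of @nest_lock,
 * via mutex_lock_nest_lock(): every "child" mutex below is taken under one
 * outer mutex, so lockdep treats the children as serialized by it instead
 * of reporting child-vs-child nesting. "parent"/"child[]" are hypothetical:
 *
 *	mutex_lock(&parent->mutex);
 *	for (i = 0; i < n; i++)
 *		mutex_lock_nest_lock(&child[i]->mutex, &parent->mutex);
 */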
4217 
4218 void lock_release(struct lockdep_map *lock, int nested,
4219 			  unsigned long ip)
4220 {
4221 	unsigned long flags;
4222 
4223 	if (unlikely(current->lockdep_recursion))
4224 		return;
4225 
4226 	raw_local_irq_save(flags);
4227 	check_flags(flags);
4228 	current->lockdep_recursion = 1;
4229 	trace_lock_release(lock, ip);
4230 	if (__lock_release(lock, nested, ip))
4231 		check_chain_key(current);
4232 	current->lockdep_recursion = 0;
4233 	raw_local_irq_restore(flags);
4234 }
4235 EXPORT_SYMBOL_GPL(lock_release);
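
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: the acquire/release hooks can also be driven on a bare lockdep_map
 * to model a dependency with no real lock object, as the workqueue code
 * does for flush dependencies. "fake_key"/"fake_map" are hypothetical:
 *
 *	static struct lock_class_key fake_key;
 *	static struct lockdep_map fake_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_fake_dependency", &fake_key);
 *
 *	lock_map_acquire(&fake_map);	// -> lock_acquire()
 *	do_work_that_must_not_deadlock();
 *	lock_map_release(&fake_map);	// -> lock_release()
 */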
4236 
4237 int lock_is_held_type(const struct lockdep_map *lock, int read)
4238 {
4239 	unsigned long flags;
4240 	int ret = 0;
4241 
4242 	if (unlikely(current->lockdep_recursion))
4243 		return 1; /* avoid false negative lockdep_assert_held() */
4244 
4245 	raw_local_irq_save(flags);
4246 	check_flags(flags);
4247 
4248 	current->lockdep_recursion = 1;
4249 	ret = __lock_is_held(lock, read);
4250 	current->lockdep_recursion = 0;
4251 	raw_local_irq_restore(flags);
4252 
4253 	return ret;
4254 }
4255 EXPORT_SYMBOL_GPL(lock_is_held_type);
4256 NOKPROBE_SYMBOL(lock_is_held_type);
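
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: lock_is_held_type() backs lockdep_is_held() and the
 * lockdep_assert_held*() family. Typical usage, with "my_dev" hypothetical:
 *
 *	static void my_dev_update(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);	// warns if not held
 *		dev->state++;
 *	}
 *
 *	// or as an RCU dereference condition:
 *	cfg = rcu_dereference_protected(dev->cfg, lockdep_is_held(&dev->lock));
 */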
4257 
4258 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
4259 {
4260 	struct pin_cookie cookie = NIL_COOKIE;
4261 	unsigned long flags;
4262 
4263 	if (unlikely(current->lockdep_recursion))
4264 		return cookie;
4265 
4266 	raw_local_irq_save(flags);
4267 	check_flags(flags);
4268 
4269 	current->lockdep_recursion = 1;
4270 	cookie = __lock_pin_lock(lock);
4271 	current->lockdep_recursion = 0;
4272 	raw_local_irq_restore(flags);
4273 
4274 	return cookie;
4275 }
4276 EXPORT_SYMBOL_GPL(lock_pin_lock);
4277 
4278 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4279 {
4280 	unsigned long flags;
4281 
4282 	if (unlikely(current->lockdep_recursion))
4283 		return;
4284 
4285 	raw_local_irq_save(flags);
4286 	check_flags(flags);
4287 
4288 	current->lockdep_recursion = 1;
4289 	__lock_repin_lock(lock, cookie);
4290 	current->lockdep_recursion = 0;
4291 	raw_local_irq_restore(flags);
4292 }
4293 EXPORT_SYMBOL_GPL(lock_repin_lock);
4294 
4295 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
4296 {
4297 	unsigned long flags;
4298 
4299 	if (unlikely(current->lockdep_recursion))
4300 		return;
4301 
4302 	raw_local_irq_save(flags);
4303 	check_flags(flags);
4304 
4305 	current->lockdep_recursion = 1;
4306 	__lock_unpin_lock(lock, cookie);
4307 	current->lockdep_recursion = 0;
4308 	raw_local_irq_restore(flags);
4309 }
4310 EXPORT_SYMBOL_GPL(lock_unpin_lock);
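
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: pinning lets a subsystem assert that a lock it holds is not dropped
 * behind its back; the scheduler pins the runqueue lock this way. A sketch
 * using the lockdep_{pin,unpin}_lock() wrappers; "my_lock" is hypothetical:
 *
 *	struct pin_cookie cookie;
 *
 *	spin_lock(&my_lock);
 *	cookie = lockdep_pin_lock(&my_lock);
 *	// anyone releasing my_lock here without unpinning first triggers
 *	// the "releasing a pinned lock" warning in __lock_release()
 *	lockdep_unpin_lock(&my_lock, cookie);
 *	spin_unlock(&my_lock);
 */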
4311 
4312 #ifdef CONFIG_LOCK_STAT
4313 static int
4314 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
4315 			   unsigned long ip)
4316 {
4317 	if (!debug_locks_off())
4318 		return 0;
4319 	if (debug_locks_silent)
4320 		return 0;
4321 
4322 	pr_warn("\n");
4323 	pr_warn("=================================\n");
4324 	pr_warn("WARNING: bad contention detected!\n");
4325 	print_kernel_ident();
4326 	pr_warn("---------------------------------\n");
4327 	pr_warn("%s/%d is trying to contend lock (",
4328 		curr->comm, task_pid_nr(curr));
4329 	print_lockdep_cache(lock);
4330 	pr_cont(") at:\n");
4331 	print_ip_sym(ip);
4332 	pr_warn("but there are no locks held!\n");
4333 	pr_warn("\nother info that might help us debug this:\n");
4334 	lockdep_print_held_locks(curr);
4335 
4336 	pr_warn("\nstack backtrace:\n");
4337 	dump_stack();
4338 
4339 	return 0;
4340 }
4341 
4342 static void
4343 __lock_contended(struct lockdep_map *lock, unsigned long ip)
4344 {
4345 	struct task_struct *curr = current;
4346 	struct held_lock *hlock;
4347 	struct lock_class_stats *stats;
4348 	unsigned int depth;
4349 	int i, contention_point, contending_point;
4350 
4351 	depth = curr->lockdep_depth;
4352 	/*
4353 	 * Whee, we contended on this lock, except it seems we're not
4354 	 * actually trying to acquire anything much at all..
4355 	 */
4356 	if (DEBUG_LOCKS_WARN_ON(!depth))
4357 		return;
4358 
4359 	hlock = find_held_lock(curr, lock, depth, &i);
4360 	if (!hlock) {
4361 		print_lock_contention_bug(curr, lock, ip);
4362 		return;
4363 	}
4364 
4365 	if (hlock->instance != lock)
4366 		return;
4367 
4368 	hlock->waittime_stamp = lockstat_clock();
4369 
4370 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4371 	contending_point = lock_point(hlock_class(hlock)->contending_point,
4372 				      lock->ip);
4373 
4374 	stats = get_lock_stats(hlock_class(hlock));
4375 	if (contention_point < LOCKSTAT_POINTS)
4376 		stats->contention_point[contention_point]++;
4377 	if (contending_point < LOCKSTAT_POINTS)
4378 		stats->contending_point[contending_point]++;
4379 	if (lock->cpu != smp_processor_id())
4380 		stats->bounces[bounce_contended + !!hlock->read]++;
4381 }
4382 
4383 static void
4384 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
4385 {
4386 	struct task_struct *curr = current;
4387 	struct held_lock *hlock;
4388 	struct lock_class_stats *stats;
4389 	unsigned int depth;
4390 	u64 now, waittime = 0;
4391 	int i, cpu;
4392 
4393 	depth = curr->lockdep_depth;
4394 	/*
4395 	 * Yay, we acquired ownership of this lock we didn't try to
4396 	 * acquire, how the heck did that happen?
4397 	 */
4398 	if (DEBUG_LOCKS_WARN_ON(!depth))
4399 		return;
4400 
4401 	hlock = find_held_lock(curr, lock, depth, &i);
4402 	if (!hlock) {
4403 		print_lock_contention_bug(curr, lock, _RET_IP_);
4404 		return;
4405 	}
4406 
4407 	if (hlock->instance != lock)
4408 		return;
4409 
4410 	cpu = smp_processor_id();
4411 	if (hlock->waittime_stamp) {
4412 		now = lockstat_clock();
4413 		waittime = now - hlock->waittime_stamp;
4414 		hlock->holdtime_stamp = now;
4415 	}
4416 
4417 	trace_lock_acquired(lock, ip);
4418 
4419 	stats = get_lock_stats(hlock_class(hlock));
4420 	if (waittime) {
4421 		if (hlock->read)
4422 			lock_time_inc(&stats->read_waittime, waittime);
4423 		else
4424 			lock_time_inc(&stats->write_waittime, waittime);
4425 	}
4426 	if (lock->cpu != cpu)
4427 		stats->bounces[bounce_acquired + !!hlock->read]++;
4428 
4429 	lock->cpu = cpu;
4430 	lock->ip = ip;
4431 }
4432 
4433 void lock_contended(struct lockdep_map *lock, unsigned long ip)
4434 {
4435 	unsigned long flags;
4436 
4437 	if (unlikely(!lock_stat || !debug_locks))
4438 		return;
4439 
4440 	if (unlikely(current->lockdep_recursion))
4441 		return;
4442 
4443 	raw_local_irq_save(flags);
4444 	check_flags(flags);
4445 	current->lockdep_recursion = 1;
4446 	trace_lock_contended(lock, ip);
4447 	__lock_contended(lock, ip);
4448 	current->lockdep_recursion = 0;
4449 	raw_local_irq_restore(flags);
4450 }
4451 EXPORT_SYMBOL_GPL(lock_contended);
4452 
4453 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4454 {
4455 	unsigned long flags;
4456 
4457 	if (unlikely(!lock_stat || !debug_locks))
4458 		return;
4459 
4460 	if (unlikely(current->lockdep_recursion))
4461 		return;
4462 
4463 	raw_local_irq_save(flags);
4464 	check_flags(flags);
4465 	current->lockdep_recursion = 1;
4466 	__lock_acquired(lock, ip);
4467 	current->lockdep_recursion = 0;
4468 	raw_local_irq_restore(flags);
4469 }
4470 EXPORT_SYMBOL_GPL(lock_acquired);
4471 #endif
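
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: with CONFIG_LOCK_STAT the locking primitives feed the two hooks
 * above through the LOCK_CONTENDED() helper in <linux/lockdep.h>, roughly:
 *
 *	if (!try_acquire(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);	// start waittime
 *		acquire_slowpath(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);		// record waittime
 *
 * where try_acquire()/acquire_slowpath() stand in for the primitive's own
 * fast and slow paths. The per-class results show up in /proc/lock_stat.
 */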
4472 
4473 /*
4474  * Used by the testsuite, sanitize the validator state
4475  * after a simulated failure:
4476  */
4477 
4478 void lockdep_reset(void)
4479 {
4480 	unsigned long flags;
4481 	int i;
4482 
4483 	raw_local_irq_save(flags);
4484 	current->curr_chain_key = 0;
4485 	current->lockdep_depth = 0;
4486 	current->lockdep_recursion = 0;
4487 	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4488 	nr_hardirq_chains = 0;
4489 	nr_softirq_chains = 0;
4490 	nr_process_chains = 0;
4491 	debug_locks = 1;
4492 	for (i = 0; i < CHAINHASH_SIZE; i++)
4493 		INIT_HLIST_HEAD(chainhash_table + i);
4494 	raw_local_irq_restore(flags);
4495 }
4496 
4497 /* Remove a class from a lock chain. Must be called with the graph lock held. */
4498 static void remove_class_from_lock_chain(struct pending_free *pf,
4499 					 struct lock_chain *chain,
4500 					 struct lock_class *class)
4501 {
4502 #ifdef CONFIG_PROVE_LOCKING
4503 	struct lock_chain *new_chain;
4504 	u64 chain_key;
4505 	int i;
4506 
4507 	for (i = chain->base; i < chain->base + chain->depth; i++) {
4508 		if (chain_hlocks[i] != class - lock_classes)
4509 			continue;
4510 		/* The code below leaks one chain_hlocks[] entry. */
4511 		if (--chain->depth > 0) {
4512 			memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
4513 				(chain->base + chain->depth - i) *
4514 				sizeof(chain_hlocks[0]));
4515 		}
4516 		/*
4517 		 * Each lock class occurs at most once in a lock chain so once
4518 		 * we found a match we can break out of this loop.
4519 		 */
4520 		goto recalc;
4521 	}
4522 	/* Since the chain has not been modified, return. */
4523 	return;
4524 
4525 recalc:
4526 	chain_key = 0;
4527 	for (i = chain->base; i < chain->base + chain->depth; i++)
4528 		chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
4529 	if (chain->depth && chain->chain_key == chain_key)
4530 		return;
4531 	/* Overwrite the chain key for concurrent RCU readers. */
4532 	WRITE_ONCE(chain->chain_key, chain_key);
4533 	/*
4534 	 * Note: calling hlist_del_rcu() from inside a
4535 	 * hlist_for_each_entry_rcu() loop is safe.
4536 	 */
4537 	hlist_del_rcu(&chain->entry);
4538 	__set_bit(chain - lock_chains, pf->lock_chains_being_freed);
4539 	if (chain->depth == 0)
4540 		return;
4541 	/*
4542 	 * If the modified lock chain matches an existing lock chain, drop
4543 	 * the modified lock chain.
4544 	 */
4545 	if (lookup_chain_cache(chain_key))
4546 		return;
4547 	new_chain = alloc_lock_chain();
4548 	if (WARN_ON_ONCE(!new_chain)) {
4549 		debug_locks_off();
4550 		return;
4551 	}
4552 	*new_chain = *chain;
4553 	hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
4554 #endif
4555 }
4556 
4557 /* Must be called with the graph lock held. */
4558 static void remove_class_from_lock_chains(struct pending_free *pf,
4559 					  struct lock_class *class)
4560 {
4561 	struct lock_chain *chain;
4562 	struct hlist_head *head;
4563 	int i;
4564 
4565 	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
4566 		head = chainhash_table + i;
4567 		hlist_for_each_entry_rcu(chain, head, entry) {
4568 			remove_class_from_lock_chain(pf, chain, class);
4569 		}
4570 	}
4571 }
4572 
4573 /*
4574  * Remove all references to a lock class. The caller must hold the graph lock.
4575  */
4576 static void zap_class(struct pending_free *pf, struct lock_class *class)
4577 {
4578 	struct lock_list *entry;
4579 	int i;
4580 
4581 	WARN_ON_ONCE(!class->key);
4582 
4583 	/*
4584 	 * Remove all dependencies this lock is
4585 	 * involved in:
4586 	 */
4587 	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
4588 		entry = list_entries + i;
4589 		if (entry->class != class && entry->links_to != class)
4590 			continue;
4591 		__clear_bit(i, list_entries_in_use);
4592 		nr_list_entries--;
4593 		list_del_rcu(&entry->entry);
4594 	}
4595 	if (list_empty(&class->locks_after) &&
4596 	    list_empty(&class->locks_before)) {
4597 		list_move_tail(&class->lock_entry, &pf->zapped);
4598 		hlist_del_rcu(&class->hash_entry);
4599 		WRITE_ONCE(class->key, NULL);
4600 		WRITE_ONCE(class->name, NULL);
4601 		nr_lock_classes--;
4602 	} else {
4603 		WARN_ONCE(true, "%s() failed for class %s\n", __func__,
4604 			  class->name);
4605 	}
4606 
4607 	remove_class_from_lock_chains(pf, class);
4608 }
4609 
4610 static void reinit_class(struct lock_class *class)
4611 {
4612 	void *const p = class;
4613 	const unsigned int offset = offsetof(struct lock_class, key);
4614 
4615 	WARN_ON_ONCE(!class->lock_entry.next);
4616 	WARN_ON_ONCE(!list_empty(&class->locks_after));
4617 	WARN_ON_ONCE(!list_empty(&class->locks_before));
4618 	memset(p + offset, 0, sizeof(*class) - offset);
4619 	WARN_ON_ONCE(!class->lock_entry.next);
4620 	WARN_ON_ONCE(!list_empty(&class->locks_after));
4621 	WARN_ON_ONCE(!list_empty(&class->locks_before));
4622 }
4623 
4624 static inline int within(const void *addr, void *start, unsigned long size)
4625 {
4626 	return addr >= start && addr < start + size;
4627 }
4628 
4629 static bool inside_selftest(void)
4630 {
4631 	return current == lockdep_selftest_task_struct;
4632 }
4633 
4634 /* The caller must hold the graph lock. */
4635 static struct pending_free *get_pending_free(void)
4636 {
4637 	return delayed_free.pf + delayed_free.index;
4638 }
4639 
4640 static void free_zapped_rcu(struct rcu_head *cb);
4641 
4642 /*
4643  * Schedule an RCU callback if no RCU callback is pending. Must be called with
4644  * the graph lock held.
4645  */
4646 static void call_rcu_zapped(struct pending_free *pf)
4647 {
4648 	WARN_ON_ONCE(inside_selftest());
4649 
4650 	if (list_empty(&pf->zapped))
4651 		return;
4652 
4653 	if (delayed_free.scheduled)
4654 		return;
4655 
4656 	delayed_free.scheduled = true;
4657 
4658 	WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
4659 	delayed_free.index ^= 1;
4660 
4661 	call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
4662 }
4663 
4664 /* The caller must hold the graph lock. May be called from RCU context. */
4665 static void __free_zapped_classes(struct pending_free *pf)
4666 {
4667 	struct lock_class *class;
4668 
4669 	check_data_structures();
4670 
4671 	list_for_each_entry(class, &pf->zapped, lock_entry)
4672 		reinit_class(class);
4673 
4674 	list_splice_init(&pf->zapped, &free_lock_classes);
4675 
4676 #ifdef CONFIG_PROVE_LOCKING
4677 	bitmap_andnot(lock_chains_in_use, lock_chains_in_use,
4678 		      pf->lock_chains_being_freed, ARRAY_SIZE(lock_chains));
4679 	bitmap_clear(pf->lock_chains_being_freed, 0, ARRAY_SIZE(lock_chains));
4680 #endif
4681 }
4682 
4683 static void free_zapped_rcu(struct rcu_head *ch)
4684 {
4685 	struct pending_free *pf;
4686 	unsigned long flags;
4687 
4688 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
4689 		return;
4690 
4691 	raw_local_irq_save(flags);
4692 	if (!graph_lock())
4693 		goto out_irq;
4694 
4695 	/* process the closed entry (the one this callback was scheduled for) */
4696 	pf = delayed_free.pf + (delayed_free.index ^ 1);
4697 	__free_zapped_classes(pf);
4698 	delayed_free.scheduled = false;
4699 
4700 	/*
4701 	 * If there's anything on the open list, close and start a new callback.
4702 	 */
4703 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
4704 
4705 	graph_unlock();
4706 out_irq:
4707 	raw_local_irq_restore(flags);
4708 }
4709 
4710 /*
4711  * Remove all lock classes from the class hash table and from the
4712  * all_lock_classes list whose key or name is in the address range [start,
4713  * start + size). Move these lock classes to the pf->zapped list. Must
4714  * be called with the graph lock held.
4715  */
4716 static void __lockdep_free_key_range(struct pending_free *pf, void *start,
4717 				     unsigned long size)
4718 {
4719 	struct lock_class *class;
4720 	struct hlist_head *head;
4721 	int i;
4722 
4723 	/* Unhash all classes that were created by a module. */
4724 	for (i = 0; i < CLASSHASH_SIZE; i++) {
4725 		head = classhash_table + i;
4726 		hlist_for_each_entry_rcu(class, head, hash_entry) {
4727 			if (!within(class->key, start, size) &&
4728 			    !within(class->name, start, size))
4729 				continue;
4730 			zap_class(pf, class);
4731 		}
4732 	}
4733 }
4734 
4735 /*
4736  * Used in module.c to remove lock classes from memory that is going to be
4737  * freed; and possibly re-used by other modules.
4738  *
4739  * We will have had one synchronize_rcu() before getting here, so we're
4740  * guaranteed nobody will look up these exact classes -- they're properly dead
4741  * but still allocated.
4742  */
4743 static void lockdep_free_key_range_reg(void *start, unsigned long size)
4744 {
4745 	struct pending_free *pf;
4746 	unsigned long flags;
4747 	int locked;
4748 
4749 	init_data_structures_once();
4750 
4751 	raw_local_irq_save(flags);
4752 	locked = graph_lock();
4753 	if (!locked)
4754 		goto out_irq;
4755 
4756 	pf = get_pending_free();
4757 	__lockdep_free_key_range(pf, start, size);
4758 	call_rcu_zapped(pf);
4759 
4760 	graph_unlock();
4761 out_irq:
4762 	raw_local_irq_restore(flags);
4763 
4764 	/*
4765 	 * Wait for any possible iterators from look_up_lock_class() to pass
4766 	 * before continuing to free the memory they refer to.
4767 	 */
4768 	synchronize_rcu();
4769 }
4770 
4771 /*
4772  * Free all lockdep keys in the range [start, start+size). Does not sleep.
4773  * Ignores debug_locks. Must only be used by the lockdep selftests.
4774  */
4775 static void lockdep_free_key_range_imm(void *start, unsigned long size)
4776 {
4777 	struct pending_free *pf = delayed_free.pf;
4778 	unsigned long flags;
4779 
4780 	init_data_structures_once();
4781 
4782 	raw_local_irq_save(flags);
4783 	arch_spin_lock(&lockdep_lock);
4784 	__lockdep_free_key_range(pf, start, size);
4785 	__free_zapped_classes(pf);
4786 	arch_spin_unlock(&lockdep_lock);
4787 	raw_local_irq_restore(flags);
4788 }
4789 
4790 void lockdep_free_key_range(void *start, unsigned long size)
4791 {
4792 	init_data_structures_once();
4793 
4794 	if (inside_selftest())
4795 		lockdep_free_key_range_imm(start, size);
4796 	else
4797 		lockdep_free_key_range_reg(start, size);
4798 }
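
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: the main caller is the module unloader, which tells lockdep that
 * all static keys and name strings inside the module image are about to
 * disappear, roughly (struct module field names of this era, hedged):
 *
 *	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
 */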
4799 
4800 /*
4801  * Check whether any element of the @lock->class_cache[] array refers to a
4802  * registered lock class. The caller must hold either the graph lock or the
4803  * RCU read lock.
4804  */
4805 static bool lock_class_cache_is_registered(struct lockdep_map *lock)
4806 {
4807 	struct lock_class *class;
4808 	struct hlist_head *head;
4809 	int i, j;
4810 
4811 	for (i = 0; i < CLASSHASH_SIZE; i++) {
4812 		head = classhash_table + i;
4813 		hlist_for_each_entry_rcu(class, head, hash_entry) {
4814 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4815 				if (lock->class_cache[j] == class)
4816 					return true;
4817 		}
4818 	}
4819 	return false;
4820 }
4821 
4822 /* The caller must hold the graph lock. Does not sleep. */
4823 static void __lockdep_reset_lock(struct pending_free *pf,
4824 				 struct lockdep_map *lock)
4825 {
4826 	struct lock_class *class;
4827 	int j;
4828 
4829 	/*
4830 	 * Remove all classes this lock might have:
4831 	 */
4832 	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4833 		/*
4834 		 * If the class exists we look it up and zap it:
4835 		 */
4836 		class = look_up_lock_class(lock, j);
4837 		if (class)
4838 			zap_class(pf, class);
4839 	}
4840 	/*
4841 	 * Debug check: in the end all mapped classes should
4842 	 * be gone.
4843 	 */
4844 	if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
4845 		debug_locks_off();
4846 }
4847 
4848 /*
4849  * Remove all information lockdep has about a lock if debug_locks == 1. Free
4850  * released data structures from RCU context.
4851  */
4852 static void lockdep_reset_lock_reg(struct lockdep_map *lock)
4853 {
4854 	struct pending_free *pf;
4855 	unsigned long flags;
4856 	int locked;
4857 
4858 	raw_local_irq_save(flags);
4859 	locked = graph_lock();
4860 	if (!locked)
4861 		goto out_irq;
4862 
4863 	pf = get_pending_free();
4864 	__lockdep_reset_lock(pf, lock);
4865 	call_rcu_zapped(pf);
4866 
4867 	graph_unlock();
4868 out_irq:
4869 	raw_local_irq_restore(flags);
4870 }
4871 
4872 /*
4873  * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
4874  * lockdep selftests.
4875  */
4876 static void lockdep_reset_lock_imm(struct lockdep_map *lock)
4877 {
4878 	struct pending_free *pf = delayed_free.pf;
4879 	unsigned long flags;
4880 
4881 	raw_local_irq_save(flags);
4882 	arch_spin_lock(&lockdep_lock);
4883 	__lockdep_reset_lock(pf, lock);
4884 	__free_zapped_classes(pf);
4885 	arch_spin_unlock(&lockdep_lock);
4886 	raw_local_irq_restore(flags);
4887 }
4888 
4889 void lockdep_reset_lock(struct lockdep_map *lock)
4890 {
4891 	init_data_structures_once();
4892 
4893 	if (inside_selftest())
4894 		lockdep_reset_lock_imm(lock);
4895 	else
4896 		lockdep_reset_lock_reg(lock);
4897 }
4898 
4899 /* Unregister a dynamically allocated key. */
4900 void lockdep_unregister_key(struct lock_class_key *key)
4901 {
4902 	struct hlist_head *hash_head = keyhashentry(key);
4903 	struct lock_class_key *k;
4904 	struct pending_free *pf;
4905 	unsigned long flags;
4906 	bool found = false;
4907 
4908 	might_sleep();
4909 
4910 	if (WARN_ON_ONCE(static_obj(key)))
4911 		return;
4912 
4913 	raw_local_irq_save(flags);
4914 	if (!graph_lock())
4915 		goto out_irq;
4916 
4917 	pf = get_pending_free();
4918 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
4919 		if (k == key) {
4920 			hlist_del_rcu(&k->hash_entry);
4921 			found = true;
4922 			break;
4923 		}
4924 	}
4925 	WARN_ON_ONCE(!found);
4926 	__lockdep_free_key_range(pf, key, 1);
4927 	call_rcu_zapped(pf);
4928 	graph_unlock();
4929 out_irq:
4930 	raw_local_irq_restore(flags);
4931 
4932 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
4933 	synchronize_rcu();
4934 }
4935 EXPORT_SYMBOL_GPL(lockdep_unregister_key);
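
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: a dynamically allocated key is paired with lockdep_register_key()
 * and must be unregistered before the memory holding it is freed. "my_obj"
 * is hypothetical:
 *
 *	struct my_obj {
 *		spinlock_t		lock;
 *		struct lock_class_key	key;
 *	};
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	lockdep_register_key(&obj->key);
 *	spin_lock_init(&obj->lock);
 *	lockdep_set_class(&obj->lock, &obj->key);
 *	...
 *	lockdep_unregister_key(&obj->key);	// may sleep (synchronize_rcu)
 *	kfree(obj);
 */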
4936 
4937 void __init lockdep_init(void)
4938 {
4939 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4940 
4941 	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
4942 	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
4943 	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
4944 	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
4945 	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
4946 	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
4947 	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
4948 
4949 	printk(" memory used by lock dependency info: %zu kB\n",
4950 	       (sizeof(lock_classes) +
4951 		sizeof(classhash_table) +
4952 		sizeof(list_entries) +
4953 		sizeof(list_entries_in_use) +
4954 		sizeof(chainhash_table) +
4955 		sizeof(delayed_free)
4956 #ifdef CONFIG_PROVE_LOCKING
4957 		+ sizeof(lock_cq)
4958 		+ sizeof(lock_chains)
4959 		+ sizeof(lock_chains_in_use)
4960 		+ sizeof(chain_hlocks)
4961 #endif
4962 		) / 1024
4963 		);
4964 
4965 	printk(" per task-struct memory footprint: %zu bytes\n",
4966 	       sizeof(((struct task_struct *)NULL)->held_locks));
4967 }
4968 
4969 static void
4970 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
4971 		     const void *mem_to, struct held_lock *hlock)
4972 {
4973 	if (!debug_locks_off())
4974 		return;
4975 	if (debug_locks_silent)
4976 		return;
4977 
4978 	pr_warn("\n");
4979 	pr_warn("=========================\n");
4980 	pr_warn("WARNING: held lock freed!\n");
4981 	print_kernel_ident();
4982 	pr_warn("-------------------------\n");
4983 	pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
4984 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
4985 	print_lock(hlock);
4986 	lockdep_print_held_locks(curr);
4987 
4988 	pr_warn("\nstack backtrace:\n");
4989 	dump_stack();
4990 }
4991 
4992 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4993 				const void* lock_from, unsigned long lock_len)
4994 {
4995 	return lock_from + lock_len <= mem_from ||
4996 		mem_from + mem_len <= lock_from;
4997 }
4998 
4999 /*
5000  * Called when kernel memory is freed (or unmapped), or if a lock
5001  * is destroyed or reinitialized - this code checks whether there is
5002  * any held lock in the memory range of <from> to <to>:
5003  */
5004 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
5005 {
5006 	struct task_struct *curr = current;
5007 	struct held_lock *hlock;
5008 	unsigned long flags;
5009 	int i;
5010 
5011 	if (unlikely(!debug_locks))
5012 		return;
5013 
5014 	raw_local_irq_save(flags);
5015 	for (i = 0; i < curr->lockdep_depth; i++) {
5016 		hlock = curr->held_locks + i;
5017 
5018 		if (not_in_range(mem_from, mem_len, hlock->instance,
5019 					sizeof(*hlock->instance)))
5020 			continue;
5021 
5022 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
5023 		break;
5024 	}
5025 	raw_local_irq_restore(flags);
5026 }
5027 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
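
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: the allocator free paths call this hook, so the classic way to hit
 * the report is freeing an object while a lock embedded in it is still
 * held. "obj" is hypothetical:
 *
 *	spin_lock(&obj->lock);
 *	kfree(obj);		// -> debug_check_no_locks_freed()
 *				//    "WARNING: held lock freed!"
 */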
5028 
5029 static void print_held_locks_bug(void)
5030 {
5031 	if (!debug_locks_off())
5032 		return;
5033 	if (debug_locks_silent)
5034 		return;
5035 
5036 	pr_warn("\n");
5037 	pr_warn("====================================\n");
5038 	pr_warn("WARNING: %s/%d still has locks held!\n",
5039 	       current->comm, task_pid_nr(current));
5040 	print_kernel_ident();
5041 	pr_warn("------------------------------------\n");
5042 	lockdep_print_held_locks(current);
5043 	pr_warn("\nstack backtrace:\n");
5044 	dump_stack();
5045 }
5046 
5047 void debug_check_no_locks_held(void)
5048 {
5049 	if (unlikely(current->lockdep_depth > 0))
5050 		print_held_locks_bug();
5051 }
5052 EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
5053 
5054 #ifdef __KERNEL__
5055 void debug_show_all_locks(void)
5056 {
5057 	struct task_struct *g, *p;
5058 
5059 	if (unlikely(!debug_locks)) {
5060 		pr_warn("INFO: lockdep is turned off.\n");
5061 		return;
5062 	}
5063 	pr_warn("\nShowing all locks held in the system:\n");
5064 
5065 	rcu_read_lock();
5066 	for_each_process_thread(g, p) {
5067 		if (!p->lockdep_depth)
5068 			continue;
5069 		lockdep_print_held_locks(p);
5070 		touch_nmi_watchdog();
5071 		touch_all_softlockup_watchdogs();
5072 	}
5073 	rcu_read_unlock();
5074 
5075 	pr_warn("\n");
5076 	pr_warn("=============================================\n\n");
5077 }
5078 EXPORT_SYMBOL_GPL(debug_show_all_locks);
5079 #endif
5080 
5081 /*
5082  * Careful: only use this function if you are sure that
5083  * the task cannot run in parallel!
5084  */
5085 void debug_show_held_locks(struct task_struct *task)
5086 {
5087 	if (unlikely(!debug_locks)) {
5088 		printk("INFO: lockdep is turned off.\n");
5089 		return;
5090 	}
5091 	lockdep_print_held_locks(task);
5092 }
5093 EXPORT_SYMBOL_GPL(debug_show_held_locks);
5094 
5095 asmlinkage __visible void lockdep_sys_exit(void)
5096 {
5097 	struct task_struct *curr = current;
5098 
5099 	if (unlikely(curr->lockdep_depth)) {
5100 		if (!debug_locks_off())
5101 			return;
5102 		pr_warn("\n");
5103 		pr_warn("================================================\n");
5104 		pr_warn("WARNING: lock held when returning to user space!\n");
5105 		print_kernel_ident();
5106 		pr_warn("------------------------------------------------\n");
5107 		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
5108 				curr->comm, curr->pid);
5109 		lockdep_print_held_locks(curr);
5110 	}
5111 
5112 	/*
5113 	 * The lock history for each syscall should be independent. So wipe the
5114 	 * slate clean on return to userspace.
5115 	 */
5116 	lockdep_invariant_state(false);
5117 }
5118 
5119 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
5120 {
5121 	struct task_struct *curr = current;
5122 
5123 	/* Note: the following can be executed concurrently, so be careful. */
5124 	pr_warn("\n");
5125 	pr_warn("=============================\n");
5126 	pr_warn("WARNING: suspicious RCU usage\n");
5127 	print_kernel_ident();
5128 	pr_warn("-----------------------------\n");
5129 	pr_warn("%s:%d %s!\n", file, line, s);
5130 	pr_warn("\nother info that might help us debug this:\n\n");
5131 	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
5132 	       !rcu_lockdep_current_cpu_online()
5133 			? "RCU used illegally from offline CPU!\n"
5134 			: !rcu_is_watching()
5135 				? "RCU used illegally from idle CPU!\n"
5136 				: "",
5137 	       rcu_scheduler_active, debug_locks);
5138 
5139 	/*
5140 	 * If a CPU is in the RCU-free window in idle (i.e. in the section
5141 	 * between rcu_idle_enter() and rcu_idle_exit()), then RCU
5142 	 * considers that CPU to be in an "extended quiescent state",
5143 	 * which means that RCU will be completely ignoring that CPU.
5144 	 * Therefore, rcu_read_lock() and friends have absolutely no
5145 	 * effect on a CPU running in that state. In other words, even if
5146 	 * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
5147 	 * delete data structures out from under it.  RCU really has no
5148 	 * choice here: we need to keep an RCU-free window in idle where
5149 	 * the CPU may enter a low power mode. This way we can
5150 	 * report an extended quiescent state to other CPUs that started a grace
5151 	 * period. Otherwise we would delay any grace period as long as we run
5152 	 * in the idle task.
5153 	 *
5154 	 * So complain bitterly if someone does call rcu_read_lock(),
5155 	 * rcu_read_lock_bh() and so on from extended quiescent states.
5156 	 */
5157 	if (!rcu_is_watching())
5158 		pr_warn("RCU used illegally from extended quiescent state!\n");
5159 
5160 	lockdep_print_held_locks(curr);
5161 	pr_warn("\nstack backtrace:\n");
5162 	dump_stack();
5163 }
5164 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
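
/*
 * Editor's note - an illustrative, hedged sketch, not part of the original
 * file: this report is reached from the RCU debug helpers, e.g. when an
 * rcu_dereference_check() condition fails or RCU_LOCKDEP_WARN() fires.
 * "gp" and "my_lock" are hypothetical:
 *
 *	// must be called under rcu_read_lock() or with my_lock held;
 *	// anything else ends up in lockdep_rcu_suspicious()
 *	p = rcu_dereference_check(gp, lockdep_is_held(&my_lock));
 *
 *	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
 *			 "walking the list outside an RCU read-side section");
 */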
5165