/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
/* The number of objs on the global free list */
static int			obj_nr_tofree;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr	*descr_test  __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
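
/*
 * Illustrative note: the early_param() hooks above mean object debugging
 * can be toggled from the kernel command line, e.g. booting with
 * "debug_objects" to force it on or "no_debug_objects" to force it off,
 * regardless of CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT.
 */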

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new, *obj;
	unsigned long flags;

	if (likely(obj_pool_free >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 */
	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		if (obj_nr_tofree) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			obj_nr_tofree--;
			hlist_add_head(&obj->node, &obj_pool);
			obj_pool_free++;
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < debug_objects_pool_min_level) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		kmemleak_ignore(new);
		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		debug_objects_allocated++;
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;
	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, refill the
	 * pool list from the global free list.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		obj_pool_free++;
		obj_nr_tofree--;
	}

	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		obj_nr_tofree = 0;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static bool __free_object(struct debug_obj *obj)
{
	unsigned long flags;
	bool work;

	raw_spin_lock_irqsave(&pool_lock, flags);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
	obj_pool_used--;

	if (work) {
		obj_nr_tofree++;
		hlist_add_head(&obj->node, &obj_to_free);
	} else {
		obj_pool_free++;
		hlist_add_head(&obj->node, &obj_pool);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return work;
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	if (__free_object(obj))
		schedule_work(&debug_obj_work);
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
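
/*
 * Illustrative note (hypothetical addresses): since ODEBUG_CHUNK_SHIFT is
 * PAGE_SHIFT, all addresses within the same page select the same bucket.
 * With 4 KiB pages, 0x1000 and 0x1ff0 both hash chunk 0x1, while 0x2000
 * hashes chunk 0x2 and may land in a different bucket.
 */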

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object is on stack, but not annotated\n");
	else
		pr_warn("object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
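
/*
 * Illustrative usage sketch (not part of the original file): a subsystem
 * that wants its objects tracked defines a struct debug_obj_descr and calls
 * the debug_object_*() hooks from its own lifetime functions. The "foo"
 * names below are hypothetical:
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name = "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		...
 *	}
 */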

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
 * Returns 0 on success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We get here when an untracked object is activated, which is
	 * expected for static objects. We let the type specific code
	 * confirm whether the object really is static. If so, we just
	 * make sure the static object is tracked in the object tracker.
	 * If not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
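
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * debug_object_activate() returns -EINVAL when the check fails, so a caller
 * can refuse to start an object that failed activation:
 *
 *	if (debug_object_activate(f, &foo_debug_descr))
 *		return;
 *
 * Callers may also ignore the return value and rely on the fixup_activate
 * callback to repair the state.
 */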

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
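
/*
 * Illustrative sketch (hypothetical values, not part of the original file):
 * a user can drive a private sub-state machine through obj->astate, e.g.
 * marking an object as queued only if it was previously ready:
 *
 *	#define FOO_READY	0
 *	#define FOO_QUEUED	1
 *
 *	debug_object_active_state(f, &foo_debug_descr, FOO_READY, FOO_QUEUED);
 *
 * If the current astate is not FOO_READY, a warning is emitted.
 */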

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;
	bool work = false;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				work |= __free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (work)
		schedule_work(&debug_obj_work);
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
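
/*
 * Illustrative note: assuming debugfs is mounted at /sys/kernel/debug, the
 * counters above can be inspected at runtime with:
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *
 * which prints one "name :value" line per counter.
 */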

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		kmemleak_ignore(obj);
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	debug_objects_pool_size += num_possible_cpus() * 32;
	debug_objects_pool_min_level += num_possible_cpus() * 4;
}