// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
/*
 * The freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz and about 1024 objects per freeing operation, i.e. at
 * most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
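
/*
 * Worked example (HZ is configuration dependent): with HZ == 250,
 * ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(250, 10) = 25 jiffies, so the
 * worker runs at most every 100ms and each run hands back at most
 * ODEBUG_FREE_WORK_MAX objects -- about 10 * 1024 objects per second.
 */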

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Per-CPU pool of free debug objects. Access is protected by disabling
 * interrupts.
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free does not
 * include the objects sitting in those pools and thus under-counts the
 * actual number of free objects. Similarly, obj_pool_used over-counts by
 * the same amount. Adjustments are made in debug_stats_show(). Both
 * obj_pool_min_free and obj_pool_max_used can therefore be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track the number of kmem_cache_alloc()/kmem_cache_free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
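
/*
 * Rough sketch of the legal state transitions driven by the entry points
 * below (the individual handlers implement the precise rules):
 *
 *	NONE/INIT/INACTIVE   --init------->  INIT
 *	INIT/INACTIVE        --activate--->  ACTIVE
 *	INIT/INACTIVE/ACTIVE --deactivate->  INACTIVE (ACTIVE only if astate == 0)
 *	NONE/INIT/INACTIVE   --destroy---->  DESTROYED
 *	any non-ACTIVE state --free------->  untracked again
 */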

static void fill_pool(void)
{
	/* May run from atomic context: dip into reserves, don't warn on failure. */
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) &&
	       READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and emptied the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Pop the first object off the given hlist, if there is one.
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might have been allocated before the
	 * work was run, so recheck whether the pool list is full. If it is
	 * not, refill it from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That probably means we have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the page-sized chunk containing the address (its virtual page
 * number) for the hash. That way we can check for freed objects simply
 * by scanning the buckets covering the affected chunks.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
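
/*
 * Example (assuming 4k pages): the addresses 0xffff888000102008 and
 * 0xffff888000102ff0 lie in the same chunk, so both always hash to the
 * same bucket; __debug_check_no_obj_freed() exploits this to scan only
 * the buckets covering a freed memory range.
 */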

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	default:
		break;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "init");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_init, addr, o.state);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
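
/*
 * Illustrative usage from an object's init path ("my_work" and its
 * descriptor are hypothetical, not part of this file):
 *
 *	static const struct debug_obj_descr my_work_debug_descr = {
 *		.name = "my_work",
 *	};
 *
 *	void my_work_setup(struct my_work *w)
 *	{
 *		debug_object_init(w, &my_work_debug_descr);
 *	}
 */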

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 on success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return 0;
	} else if (likely(!IS_ERR(obj))) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
		case ODEBUG_STATE_DESTROYED:
			o = *obj;
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "activate");

	switch (o.state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_NOTAVAILABLE:
		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
			return 0;
		fallthrough;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_DESTROYED:
			break;
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate)
				break;
			obj->state = ODEBUG_STATE_INACTIVE;
			fallthrough;
		default:
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
	case ODEBUG_STATE_DESTROYED:
		break;
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		fallthrough;
	default:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "destroy");

	if (o.state == ODEBUG_STATE_ACTIVE)
		debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		break;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}

	o = *obj;
	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "free");

	debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when an object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate != expect)
				break;
			obj->astate = next;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			return;
		default:
			break;
		}
		o = *obj;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	int cnt, objs_checked = 0;
	struct debug_obj *obj, o;
	struct debug_bucket *db;
	struct hlist_node *tmp;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				o = *obj;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(&o, "free");
				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);
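
/*
 * Illustrative output of <debugfs>/debug_objects/stats (the values are
 * made up; the field alignment matches the format strings above):
 *
 *	max_chain     :3
 *	max_checked   :0
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :1024
 *	...
 */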

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
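
/*
 * Note: until debug_objects_mem_init() has created obj_cache, fill_pool()
 * bails out early and all allocations are served from the static pool
 * linked above, while __free_object() returns objects straight to obj_pool.
 */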

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is called early, while only one CPU is up
	 * and interrupts are disabled, so it is safe to replace the active
	 * object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools.
	 *
	 * Initialization is not strictly necessary, but is done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}