/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr	*descr_test __read_mostly;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};
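/*
 * Refill the object pool from the slab cache when it drops below
 * ODEBUG_POOL_MIN_LEVEL. Allocation uses GFP_ATOMIC | __GFP_NORETRY |
 * __GFP_NOWARN because this runs in arbitrary contexts, including with
 * interrupts disabled; a failed refill is tolerated silently and is
 * simply retried on the next invocation.
 */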
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Look up an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 */
static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&pool_lock, flags);
	while (obj_pool_free > ODEBUG_POOL_SIZE) {
		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
		hlist_del(&obj->node);
		obj_pool_free--;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
		raw_spin_lock_irqsave(&pool_lock, flags);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
		sched = keventd_up();
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
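/*
 * Worked example (assuming PAGE_SHIFT == 12, i.e. 4K chunks): the
 * addresses 0x1000..0x1fff all yield chunk number 1 and therefore hash
 * to the same bucket, while 0x2000 starts chunk 2. Checking the freed
 * region 0x1800..0x27ff thus only requires scanning the buckets for
 * chunks 1 and 2 (see __debug_check_no_obj_freed() below).
 */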
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static int
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	int fixed = 0;

	if (fixup)
		fixed = fixup(addr, state);
	debug_objects_fixups += fixed;
	return fixed;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object is on stack, but not annotated\n");
	else
		pr_warn("object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
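/*
 * Typical usage, as a minimal sketch (the "foo" subsystem below is
 * hypothetical, not part of this file): a user declares a
 * struct debug_obj_descr with a name and optional fixup callbacks,
 * then brackets its object lifecycle with the tracking calls.
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name		= "foo",
 *		.fixup_init	= foo_fixup_init,	(optional)
 *		.fixup_activate	= foo_fixup_activate,	(optional)
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		...
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *		...
 *	}
 *
 *	void foo_stop(struct foo *f)
 *	{
 *		...
 *		debug_object_deactivate(f, &foo_debug_descr);
 *	}
 *
 *	void foo_release(struct foo *f)
 *	{
 *		debug_object_free(f, &foo_debug_descr);
 *		...
 *	}
 *
 * On-stack objects are initialized with debug_object_init_on_stack()
 * instead, and should be removed with debug_object_free() before the
 * stack frame goes away.
 */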
/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? -EINVAL : 0;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * legitimate or not.
	 */
	if (debug_object_fixup(descr->fixup_activate, addr,
			       ODEBUG_STATE_NOTAVAILABLE)) {
		debug_print_object(&o, "activate");
		return -EINVAL;
	}
	return 0;
}
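/*
 * A fixup_activate callback is what makes the static-object path above
 * work. A minimal sketch, modeled on the selftest's fixup_activate()
 * further down (the "foo" type, its static_init marker and
 * foo_debug_descr are the hypothetical names from the sketch above):
 * return 0 when the object turns out to be a legitimately statically
 * initialized one, nonzero to make debug_object_activate() report the
 * activation as invalid.
 *
 *	static int foo_fixup_activate(void *addr, enum debug_obj_state state)
 *	{
 *		struct foo *f = addr;
 *
 *		switch (state) {
 *		case ODEBUG_STATE_NOTAVAILABLE:
 *			if (f->static_init) {
 *				debug_object_init(f, &foo_debug_descr);
 *				debug_object_activate(f, &foo_debug_descr);
 *				return 0;
 *			}
 *			return 1;
 *		default:
 *			return 0;
 *		}
 *	}
 */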
/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static. Let the type specific
		 * code decide what to do.
		 */
		if (debug_object_fixup(descr->fixup_assert_init, addr,
				       ODEBUG_STATE_NOTAVAILABLE))
			debug_print_object(&o, "assert_init");
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
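/*
 * Usage note: debug_object_assert_init() suits entry points that are
 * legal on an object which may never have been explicitly initialized,
 * e.g. a hypothetical foo_cancel() that can be handed a statically
 * allocated, never-started object. It only verifies that the object is
 * tracked, or that the fixup_assert_init callback accepts it (by
 * returning 0).
 */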
/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
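/*
 * Example (a sketch; the names and state values are made up): a user
 * can layer its own sub-state machine on top of ODEBUG_STATE_ACTIVE
 * via the astate field, which starts at 0. The object must already be
 * in ODEBUG_STATE_ACTIVE; each call asserts the expected current
 * sub-state and moves to the next one atomically under the bucket
 * lock:
 *
 *	#define FOO_QUEUED	1
 *
 *	debug_object_active_state(f, &foo_debug_descr, 0, FOO_QUEUED);
 *	...
 *	debug_object_active_state(f, &foo_debug_descr, FOO_QUEUED, 0);
 *
 * Note that debug_object_deactivate() refuses to deactivate an object
 * whose astate is nonzero, so the sub-state must be unwound first.
 */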
#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			return 0;
		}
		return 1;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}
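/*
 * Note on the return convention used by the four fixups above: a fixup
 * returns 1 when it repaired (or flagged) a problem and 0 when there
 * was nothing to do. debug_object_fixup() accumulates these returns
 * into debug_objects_fixups, which is what check_results() below
 * compares against.
 */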
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		     obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		     fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		     warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
passed\n"); 990 991 out: 992 debug_objects_fixups = oldfixups; 993 debug_objects_warnings = oldwarnings; 994 descr_test = NULL; 995 996 local_irq_restore(flags); 997 } 998 #else 999 static inline void debug_objects_selftest(void) { } 1000 #endif 1001 1002 /* 1003 * Called during early boot to initialize the hash buckets and link 1004 * the static object pool objects into the poll list. After this call 1005 * the object tracker is fully operational. 1006 */ 1007 void __init debug_objects_early_init(void) 1008 { 1009 int i; 1010 1011 for (i = 0; i < ODEBUG_HASH_SIZE; i++) 1012 raw_spin_lock_init(&obj_hash[i].lock); 1013 1014 for (i = 0; i < ODEBUG_POOL_SIZE; i++) 1015 hlist_add_head(&obj_static_pool[i].node, &obj_pool); 1016 } 1017 1018 /* 1019 * Convert the statically allocated objects to dynamic ones: 1020 */ 1021 static int __init debug_objects_replace_static_objects(void) 1022 { 1023 struct debug_bucket *db = obj_hash; 1024 struct hlist_node *tmp; 1025 struct debug_obj *obj, *new; 1026 HLIST_HEAD(objects); 1027 int i, cnt = 0; 1028 1029 for (i = 0; i < ODEBUG_POOL_SIZE; i++) { 1030 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL); 1031 if (!obj) 1032 goto free; 1033 hlist_add_head(&obj->node, &objects); 1034 } 1035 1036 /* 1037 * When debug_objects_mem_init() is called we know that only 1038 * one CPU is up, so disabling interrupts is enough 1039 * protection. This avoids the lockdep hell of lock ordering. 1040 */ 1041 local_irq_disable(); 1042 1043 /* Remove the statically allocated objects from the pool */ 1044 hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) 1045 hlist_del(&obj->node); 1046 /* Move the allocated objects to the pool */ 1047 hlist_move_list(&objects, &obj_pool); 1048 1049 /* Replace the active object references */ 1050 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 1051 hlist_move_list(&db->list, &objects); 1052 1053 hlist_for_each_entry(obj, &objects, node) { 1054 new = hlist_entry(obj_pool.first, typeof(*obj), node); 1055 hlist_del(&new->node); 1056 /* copy object data */ 1057 *new = *obj; 1058 hlist_add_head(&new->node, &db->list); 1059 cnt++; 1060 } 1061 } 1062 local_irq_enable(); 1063 1064 pr_debug("%d of %d active objects replaced\n", 1065 cnt, obj_pool_used); 1066 return 0; 1067 free: 1068 hlist_for_each_entry_safe(obj, tmp, &objects, node) { 1069 hlist_del(&obj->node); 1070 kmem_cache_free(obj_cache, obj); 1071 } 1072 return -ENOMEM; 1073 } 1074 1075 /* 1076 * Called after the kmem_caches are functional to setup a dedicated 1077 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag 1078 * prevents that the debug code is called on kmem_cache_free() for the 1079 * debug tracker objects to avoid recursive calls. 1080 */ 1081 void __init debug_objects_mem_init(void) 1082 { 1083 if (!debug_objects_enabled) 1084 return; 1085 1086 obj_cache = kmem_cache_create("debug_objects_cache", 1087 sizeof (struct debug_obj), 0, 1088 SLAB_DEBUG_OBJECTS, NULL); 1089 1090 if (!obj_cache || debug_objects_replace_static_objects()) { 1091 debug_objects_enabled = 0; 1092 if (obj_cache) 1093 kmem_cache_destroy(obj_cache); 1094 pr_warn("out of memory.\n"); 1095 } else 1096 debug_objects_selftest(); 1097 } 1098