/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */
#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

/* Size of the static boot-time pool and the refill watermark */
#define ODEBUG_POOL_SIZE	512
#define ODEBUG_POOL_MIN_LEVEL	256

/* Tracked addresses are hashed per PAGE_SIZE'd chunk (see get_bucket()) */
#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * One hash bucket: a chain of tracker objects protected by a
 * per-bucket spinlock.
 */
struct debug_bucket {
	struct hlist_head	list;
	spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

/* Static tracker objects, usable before the slab allocator is up */
static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE];

/* Protects obj_pool and the obj_pool_* counters below */
static DEFINE_SPINLOCK(pool_lock);

/* Free list of tracker objects */
static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
/* Slab cache for tracker objects, set up in debug_objects_mem_init() */
static struct kmem_cache	*obj_cache;

/* Statistics, exported via debugfs */
static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly;
/* Set while the selftest runs so debug_print_object() stays silent */
static struct debug_obj_descr	*descr_test __read_mostly;

/* Enable the tracker via the "debug_objects" kernel command line option */
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}
early_param("debug_objects", enable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

/*
 * Refill the object pool up to ODEBUG_POOL_MIN_LEVEL from the slab
 * cache. Returns the (approximate) number of free pool objects.
 *
 * obj_pool_free is read without pool_lock here; the watermark checks
 * are heuristic, so a racy read only affects how aggressively we
 * refill — presumably intentional, TODO confirm.
 */
static int fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
		return obj_pool_free;

	/* Before the cache exists we can only use the static pool */
	if (unlikely(!obj_cache))
		return obj_pool_free;

	while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return obj_pool_free;

		spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		obj_pool_free++;
		spin_unlock_irqrestore(&pool_lock, flags);
	}
	return obj_pool_free;
}

/*
 * Lookup an object in the hash bucket. The caller holds b->lock.
 * Also updates the longest-chain statistic on a miss.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct hlist_node *node;
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, node, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}
/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	/*
	 * Called with b->lock held and interrupts disabled, so the
	 * plain spin_lock() on pool_lock is safe here.
	 */
	spin_lock(&pool_lock);
	if (obj_pool.first) {
		/* Take the first free object and link it into the bucket */
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		/* Track high-water marks for the debugfs statistics */
		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	spin_unlock(&pool_lock);

	/* NULL when the pool is exhausted; caller disables the tracker */
	return obj;
}

/*
 * Put the object back into the pool or give it back to kmem_cache:
 *
 * Static pool objects (identified by their index into obj_static_pool)
 * must never reach kmem_cache_free(); they always go back on the list.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long idx = (unsigned long)(obj - obj_static_pool);

	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
		spin_lock(&pool_lock);
		hlist_add_head(&obj->node, &obj_pool);
		obj_pool_free++;
		obj_pool_used--;
		spin_unlock(&pool_lock);
	} else {
		/* Drop the lock before calling into the slab allocator */
		spin_lock(&pool_lock);
		obj_pool_used--;
		spin_unlock(&pool_lock);
		kmem_cache_free(obj_cache, obj);
	}
}

/*
 * We run out of memory. That means we probably have tons of objects
 * allocated. Flush every hash bucket back into the pool; the caller
 * has already cleared debug_objects_enabled.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *node, *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
		spin_unlock_irqrestore(&db->lock, flags);
	}
}
/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

/*
 * Emit a rate-limited warning (at most 5 WARNs) about an object in an
 * unexpected state. Objects belonging to the running selftest are
 * silent, but every call still bumps debug_objects_warnings.
 */
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	static int limit;

	if (limit < 5 && obj->descr != descr_test) {
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
		       obj_states[obj->state], obj->descr->name);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output. The type-specific fixup callback returns the number
 * of fixups it performed, which we accumulate for the statistics.
 */
static void
debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup)
		debug_objects_fixups += fixup(addr, state);
}

/*
 * Warn (rate-limited to 5) when the on-stack annotation of an object
 * does not match where the object actually lives.
 */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		printk(KERN_WARNING
		       "ODEBUG: object is on stack, but not annotated\n");
	else
		printk(KERN_WARNING
		       "ODEBUG: object is not on stack, but annotated\n");
	WARN_ON(1);
}

/*
 * Common implementation for debug_object_init() and
 * debug_object_init_on_stack(); @onstack selects the expected
 * stack annotation.
 */
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/* Opportunistically top up the pool before taking locks */
	fill_pool();

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			/* Pool exhausted: disable tracking and drain */
			debug_objects_enabled = 0;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		/*
		 * Initializing an active object is a bug. Drop the
		 * bucket lock before calling the fixup, which may
		 * re-enter the debug_object_* API.
		 */
		debug_print_object(obj, "init");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			break;

		case ODEBUG_STATE_ACTIVE:
			/* Double activation: unlock, then try to fix up */
			debug_print_object(obj, "activate");
			state = obj->state;
			spin_unlock_irqrestore(&db->lock, flags);
			debug_object_fixup(descr->fixup_activate, addr, state);
			return;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			break;
		default:
			break;
		}
		spin_unlock_irqrestore(&db->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * This happens when a static object is activated. We
	 * let the type specific code decide whether this is
	 * true or not.
	 */
	debug_object_fixup(descr->fixup_activate, addr,
			   ODEBUG_STATE_NOTAVAILABLE);
}

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			obj->state = ODEBUG_STATE_INACTIVE;
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		/* Untracked object: warn via a stack-local dummy */
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	spin_unlock_irqrestore(&db->lock, flags);
}
/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Destroying an active object is a bug. Drop the
		 * bucket lock before invoking the fixup callback.
		 */
		debug_print_object(obj, "destroy");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		/* Freeing an active object: unlock, then fix up */
		debug_print_object(obj, "free");
		state = obj->state;
		spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		/* Any other state: stop tracking the object */
		hlist_del(&obj->node);
		free_object(obj);
		break;
	}
out_unlock:
	spin_unlock_irqrestore(&db->lock, flags);
}
#ifdef CONFIG_DEBUG_OBJECTS_FREE
/*
 * Scan all hash buckets whose chunks overlap the freed memory range
 * [address, address + size) and release the tracker objects found
 * there. Active objects get a chance to be fixed up first.
 */
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *node, *tmp;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	/* Round the range out to whole chunks (one bucket per chunk) */
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			/* Skip objects outside the freed range */
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				/*
				 * We must drop the bucket lock to call
				 * the fixup; the chain may have changed
				 * meanwhile, so rescan from the start.
				 */
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				free_object(obj);
				break;
			}
		}
		spin_unlock_irqrestore(&db->lock, flags);
		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

/* Entry point for the allocator: no-op unless tracking is enabled */
void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

/* Dump the tracker statistics counters into debugfs "stats" */
static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Create /sys/kernel/debug/debug_objects/stats (read-only) */
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;	/* simulates a statically initialized object */
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (obj->static_init == 1) {
			debug_object_init(obj, &descr_type_test);
			debug_object_activate(obj, &descr_type_test);
			/*
			 * Real code should return 0 here ! This is
			 * not a fixup of some bad behaviour. We
			 * merely call the debug_init function to keep
			 * track of the object.
			 */
			return 1;
		} else {
			/* Real code needs to emit a warning here */
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return 1;

	default:
		return 0;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static int __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return 1;
	default:
		return 0;
	}
}

/*
 * Verify that the tracker object for @addr is in @state and that the
 * fixup and warning counters match the expected values. On any
 * mismatch the tracker is disabled and -EINVAL is returned.
 */
static int
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

/*
 * Drive the tracker through every state transition (including the
 * invalid ones) and verify state, fixup count and warning count
 * after each step. Runs with interrupts disabled.
 */
static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	/* Silence debug_print_object() for our test objects */
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	/* Now pretend the object was statically initialized */
	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	printk(KERN_INFO "ODEBUG: selftest passed\n");

out:
	/* Restore the counters so the selftest doesn't skew the stats */
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif
849 */ 850 void __init debug_objects_early_init(void) 851 { 852 int i; 853 854 for (i = 0; i < ODEBUG_HASH_SIZE; i++) 855 spin_lock_init(&obj_hash[i].lock); 856 857 for (i = 0; i < ODEBUG_POOL_SIZE; i++) 858 hlist_add_head(&obj_static_pool[i].node, &obj_pool); 859 } 860 861 /* 862 * Called after the kmem_caches are functional to setup a dedicated 863 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag 864 * prevents that the debug code is called on kmem_cache_free() for the 865 * debug tracker objects to avoid recursive calls. 866 */ 867 void __init debug_objects_mem_init(void) 868 { 869 if (!debug_objects_enabled) 870 return; 871 872 obj_cache = kmem_cache_create("debug_objects_cache", 873 sizeof (struct debug_obj), 0, 874 SLAB_DEBUG_OBJECTS, NULL); 875 876 if (!obj_cache) 877 debug_objects_enabled = 0; 878 else 879 debug_objects_selftest(); 880 } 881