/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);

static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static struct kmem_cache	*obj_cache;

static int			debug_objects_maxchain __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr	*descr_test __read_mostly;

/*
 * Track the number of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *new;
	unsigned long flags;

	if (likely(obj_pool_free >= debug_objects_pool_min_level))
		return;

	if (unlikely(!obj_cache))
		return;

	while (obj_pool_free < debug_objects_pool_min_level) {

		new = kmem_cache_zalloc(obj_cache, gfp);
		if (!new)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&new->node, &obj_pool);
		debug_objects_allocated++;
		obj_pool_free++;
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}
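
/*
 * Worked example (illustrative, using the build-time defaults above): with
 * debug_objects_pool_min_level at 256, a fill_pool() call that finds, say,
 * 200 free objects allocates 56 more, one kmem_cache_zalloc() per loop
 * iteration, and bails out early if the allocator fails. The GFP_ATOMIC |
 * __GFP_NORETRY | __GFP_NOWARN mask keeps the refill safe to run from the
 * object-init paths, which may execute with interrupts disabled.
 */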

/*
 * Look up an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_obj *obj = NULL;

	raw_spin_lock(&pool_lock);
	if (obj_pool.first) {
		obj	    = hlist_entry(obj_pool.first, typeof(*obj), node);

		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_del(&obj->node);

		hlist_add_head(&obj->node, &b->list);

		obj_pool_used++;
		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		obj_pool_free--;
		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy. We also free
 * the objects in batches of 4 for each lock/unlock cycle.
 */
#define ODEBUG_FREE_BATCH	4

static void free_obj_work(struct work_struct *work)
{
	struct debug_obj *objs[ODEBUG_FREE_BATCH];
	unsigned long flags;
	int i;

	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;
	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
			objs[i] = hlist_entry(obj_pool.first,
					      typeof(*objs[0]), node);
			hlist_del(&objs[i]->node);
		}

		obj_pool_free -= ODEBUG_FREE_BATCH;
		debug_objects_freed += ODEBUG_FREE_BATCH;
		/*
		 * We release pool_lock across kmem_cache_free() to
		 * avoid contention on pool_lock.
		 */
		raw_spin_unlock_irqrestore(&pool_lock, flags);
		for (i = 0; i < ODEBUG_FREE_BATCH; i++)
			kmem_cache_free(obj_cache, objs[i]);
		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
			return;
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long flags;
	int sched = 0;

	raw_spin_lock_irqsave(&pool_lock, flags);
	/*
	 * schedule work when the pool is filled and the cache is
	 * initialized:
	 */
	if (obj_pool_free > debug_objects_pool_size && obj_cache)
		sched = 1;
	hlist_add_head(&obj->node, &obj_pool);
	obj_pool_free++;
	obj_pool_used--;
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	if (sched)
		schedule_work(&debug_obj_work);
}
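
/*
 * Worked example (illustrative): with the default pool size of 1024, a
 * burst that leaves 1040 objects on the free list lets free_obj_work()
 * run four lock/unlock cycles, releasing ODEBUG_FREE_BATCH (4) objects
 * per cycle, and stop once the level is back at 1024. If pool_lock is
 * contended at any point, the trylock fails and the remaining excess is
 * simply left for the next scheduled run of the worklet.
 */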

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object is on stack, but not annotated\n");
	else
		pr_warn("object is not on stack, but annotated\n");
	WARN_ON(1);
}

static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		debug_object_is_on_stack(addr, onstack);
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "init");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "init");
		break;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
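
/*
 * Illustrative usage sketch (not part of this file; names are
 * hypothetical): a subsystem embeds calls to this API in its own object
 * lifetime functions and describes the object type via a
 * struct debug_obj_descr, e.g.:
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name	= "foo",
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *		...
 *	}
 *
 * Heap objects use debug_object_init(); objects living on the stack must
 * use debug_object_init_on_stack() below instead, so that the on-stack
 * annotation check in debug_object_is_on_stack() can do its job.
 */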

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			debug_print_object(obj, "activate");
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "activate");
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	/*
	 * We are here when a static object is activated. We let the
	 * type specific code confirm whether this is true or not. If
	 * true, we just make sure that the static object is tracked
	 * in the object tracker. If not, this must be a bug, so we
	 * try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
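
/*
 * Illustrative sketch (hypothetical names): a descriptor that supports
 * statically initialized objects provides is_static_object(), typically
 * by recognizing its static initializer pattern, and a fixup_activate()
 * callback that accepts the NOTAVAILABLE case, mirroring the selftest
 * code at the bottom of this file:
 *
 *	static bool foo_is_static_object(void *addr)
 *	{
 *		struct foo *f = addr;
 *
 *		return f->static_init;
 *	}
 *
 *	static bool foo_fixup_activate(void *addr, enum debug_obj_state state)
 *	{
 *		switch (state) {
 *		case ODEBUG_STATE_NOTAVAILABLE:
 *			return true;
 *		default:
 *			return false;
 *		}
 *	}
 */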

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				debug_print_object(obj, "deactivate");
			break;

		case ODEBUG_STATE_DESTROYED:
			debug_print_object(obj, "deactivate");
			break;
		default:
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "destroy");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		debug_print_object(obj, "destroy");
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		debug_print_object(obj, "free");
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);
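
/*
 * Illustrative lifecycle (summary of the state checks above): the
 * expected call sequence for a tracked object is
 *
 *	debug_object_init(obj, &descr);        -> ODEBUG_STATE_INIT
 *	debug_object_activate(obj, &descr);    -> ODEBUG_STATE_ACTIVE
 *	debug_object_deactivate(obj, &descr);  -> ODEBUG_STATE_INACTIVE
 *	debug_object_destroy(obj, &descr);     -> ODEBUG_STATE_DESTROYED
 *	debug_object_free(obj, &descr);        -> tracker entry released
 *
 * Destroying or freeing an object that is still ACTIVE is the class of
 * bug these hooks catch: the violation is reported (rate-limited in
 * debug_print_object()) and the matching fixup callback is then given a
 * chance to repair the damage.
 */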

/**
 * debug_object_assert_init - debug checks when object should be initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static. Let the type specific
		 * code confirm; if so, track this static object,
		 * otherwise invoke the fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				debug_print_object(obj, "active_state");
			break;

		default:
			debug_print_object(obj, "active_state");
			break;
		}
	} else {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
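
/*
 * Illustrative sketch (hypothetical states and descriptor): a user can
 * subdivide the ACTIVE state with its own astate values and have the
 * transitions checked, e.g. for an object that must be queued exactly
 * once before completing:
 *
 *	enum { FOO_IDLE = 0, FOO_QUEUED = 1 };
 *
 *	debug_object_active_state(f, &foo_debug_descr, FOO_IDLE, FOO_QUEUED);
 *	...
 *	debug_object_active_state(f, &foo_debug_descr, FOO_QUEUED, FOO_IDLE);
 *
 * A transition attempted from an unexpected astate, or on an object that
 * is not ACTIVE, triggers a report via debug_print_object(). Note that
 * debug_object_deactivate() above also warns if astate is still nonzero.
 */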

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	int cnt;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				debug_print_object(obj, "free");
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				hlist_add_head(&obj->node, &freelist);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}

static int debug_stats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, debug_stats_show, NULL);
}

static const struct file_operations debug_stats_fops = {
	.open		= debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir, *dbgstats;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);
	if (!dbgdir)
		return -ENOMEM;

	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
				       &debug_stats_fops);
	if (!dbgstats)
		goto err;

	return 0;

err:
	debugfs_remove(dbgdir);

	return -ENOMEM;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif
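
/*
 * Usage note (illustrative): with debugfs mounted at the usual
 * /sys/kernel/debug, the counters above can be inspected with e.g.
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *
 * pool_min_free is the low-water mark of the object pool; it is the
 * number to watch when judging whether ODEBUG_POOL_SIZE plus the
 * per-CPU bonus applied in debug_objects_mem_init() is large enough.
 */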

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };
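
/*
 * Note on the expected-counter convention used below (illustrative):
 * every report bumps debug_objects_warnings and every successful fixup
 * bumps debug_objects_fixups, so a step that should trigger a repairable
 * violation is checked with ++fixups, ++warnings, while a step that only
 * warns (e.g. operating on a DESTROYED object) is checked with
 * ++warnings alone.
 */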

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}
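
/*
 * Boot ordering note (illustrative summary): the core boot code is
 * expected to call debug_objects_early_init() before any tracked object
 * is used, at which point the tracker runs off the static pool above,
 * and debug_objects_mem_init() once the slab allocator is up, which
 * swaps the static objects for kmem_cache-backed ones below.
 */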

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * When debug_objects_mem_init() is called we know that only
	 * one CPU is up, so disabling interrupts is enough
	 * protection. This avoids the lockdep hell of lock ordering.
	 */
	local_irq_disable();

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}
	local_irq_enable();

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects to avoid recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	if (!debug_objects_enabled)
		return;

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS, NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		if (obj_cache)
			kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	debug_objects_pool_size += num_possible_cpus() * 32;
	debug_objects_pool_min_level += num_possible_cpus() * 4;
}
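
/*
 * Worked example (illustrative arithmetic): on a machine with 8 possible
 * CPUs, the pool target above becomes 1024 + 8 * 32 = 1280 objects, and
 * fill_pool() starts refilling once fewer than 256 + 8 * 4 = 288 objects
 * are free.
 */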