/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
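
/*
 * A minimal sketch of the nesting described above (illustrative only, not a
 * function in this file; irq flag handling elided): code running under
 * scan_mutex may take the scanned object's lock, then kmemleak_lock, then
 * another object's lock at SINGLE_DEPTH_NESTING, in exactly this order:
 *
 *	mutex_lock(&scan_mutex);
 *	spin_lock_irqsave(&object->lock, flags);
 *	read_lock(&kmemleak_lock);
 *	spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 */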

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
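
/*
 * Worked example for the mask above (illustrative): with a caller flag set
 * of GFP_KERNEL | __GFP_ZERO, gfp_kmemleak_mask() yields GFP_KERNEL |
 * __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN. Only the GFP_KERNEL and
 * GFP_ATOMIC bits of the traced allocation survive, so kmemleak's own
 * metadata allocations never inherit unrelated behaviour from the caller's
 * flags.
 */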

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* if there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *	     sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *	     (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}
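
/*
 * For reference, the output produced by print_unreferenced() and
 * hex_dump_object() above looks roughly like this (illustrative values):
 *
 *	unreferenced object 0xffff880052f1c200 (size 64):
 *	  comm "insmod", pid 1001, jiffies 4294894652 (age 5.404s)
 *	  hex dump (first 32 bytes):
 *	    00 00 00 00 00 00 00 00 ...
 *	  backtrace:
 *	    [<...>] kmem_cache_alloc+0x...
 *	    [<...>] ...
 */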

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}
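
/*
 * A minimal sketch of the reference counting protocol implemented above
 * (illustrative; it mirrors find_and_get_object()): any lookup whose result
 * outlives kmemleak_lock must pin the object under rcu_read_lock() and drop
 * the reference when done:
 *
 *	rcu_read_lock();
 *	read_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, alias);
 *	read_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))	// already on its way out
 *		object = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (object)
 *		put_object(object);		// may schedule the RCU free
 */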

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
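
/*
 * Worked example of the half-open interval convention shared by the rbtree
 * insertion above and lookup_object() (illustrative addresses): an object
 * with pointer == 0x1000 and size == 0x100 covers [0x1000, 0x1100), so
 * lookup_object(0x1080, 1) finds it, while lookup_object(0x1100, 1) does
 * not; 0x1100 may already be the start of a neighbouring object.
 */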

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}
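
/*
 * Worked example for the painting helpers above: make_gray_object() sets
 * min_count to KMEMLEAK_GREY (0), so color_gray() holds for any reference
 * count the next scan computes and the object is never reported;
 * make_black_object() sets min_count to KMEMLEAK_BLACK (-1) and, via
 * __paint_it(), also sets OBJECT_NO_SCAN, so the block is neither reported
 * nor searched for pointers to other objects.
 */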

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
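
/*
 * Hypothetical caller's view of the registration API above (illustrative;
 * in practice these calls are made by the allocators themselves):
 *
 *	kmemleak_alloc(ptr, size, 1, gfp);	// report if unreferenced
 *	kmemleak_alloc(ptr, size, 0, gfp);	// scan it, never report it
 *	kmemleak_alloc(ptr, size, -1, gfp);	// ignore it entirely
 */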

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);
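
/*
 * Sketch of how the annotations above are typically used by client code
 * (hypothetical driver, illustrative only): when the sole reference to an
 * allocation is stored somewhere the scanner cannot see, the allocation is
 * marked as a false positive right after it is made:
 *
 *	obj = kmalloc(size, GFP_KERNEL);
 *	hw_write_handle(dev, virt_to_phys(obj));	// hypothetical helper
 *	kmemleak_not_leak(obj);		// only reference lives in hardware
 */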

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;

		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock(&object->lock);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			/* put_object() called when removing from gray_list */
			WARN_ON(!get_object(object));
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock(&object->lock);
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
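
/*
 * Worked example for scan_block() above (illustrative numbers): every
 * pointer-aligned word in the block is read and treated as a candidate
 * pointer only if it falls inside [min_addr, max_addr), the range spanned
 * by all tracked objects; e.g. with min_addr == 0xffff880000000000, a word
 * containing 0x0000000000000001 is rejected before the (more expensive)
 * object tree lookup is attempted.
 */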

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_large_block(_sdata, _edata);
	scan_large_block(__bss_start, __bss_stop);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);
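
/*
 * Typical interaction with the debugfs file driven by the operations below
 * (illustrative shell session):
 *
 *	# mount -t debugfs nodev /sys/kernel/debug/
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 *	# echo clear > /sys/kernel/debug/kmemleak
 */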

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no leaks were found (otherwise, kmemleak may still have some useful
 * information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	/*
	 * Once the scan thread has stopped, it is safe to no longer track
	 * object freeing. Ordering of the scan thread stopping and the memory
	 * accesses below is guaranteed by the kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
			crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}
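
/*
 * Boot-time usage of the early_param() handler above (illustrative kernel
 * command lines):
 *
 *	kmemleak=off	disable kmemleak at boot (irreversible)
 *	kmemleak=on	skip the default-off disabling when
 *			CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
 */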

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warn("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);