/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
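
/*
 * Illustrative sketch (not a real call path) of the nesting order above, as
 * it occurs when scan_object() calls scan_block() during a scan:
 *
 *	mutex_lock(&scan_mutex);
 *	spin_lock_irqsave(&object->lock, flags);	<- object being scanned
 *	read_lock_irqsave(&kmemleak_lock, flags2);	<- taken in scan_block()
 *	spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 */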

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
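
/*
 * Worked example (illustration only): gfp_kmemleak_mask(GFP_KERNEL |
 * __GFP_HIGHMEM) keeps only the GFP_KERNEL bits and adds __GFP_NORETRY |
 * __GFP_NOMEMALLOC | __GFP_NOWARN, so kmemleak's own metadata allocations
 * never inherit caller-specific flags and fail quietly under memory
 * pressure instead of triggering the OOM killer or allocation warnings.
 */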

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warning(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
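
/*
 * Worked example of the encoding (illustration only): an object created by
 * kmalloc() has min_count == 1; with count == 0 after a scan it is white
 * (a leak candidate), with count >= 1 it is gray. kmemleak_not_leak() sets
 * min_count to 0 (always gray), kmemleak_ignore() sets it to -1 (black,
 * neither scanned nor reported).
 */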
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases during kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}
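
/*
 * Lookup semantics, illustrated (assuming an object covering the range
 * [0x1000, 0x1040)):
 *
 *	lookup_object(0x1000, 0);	returns the object (start pointer)
 *	lookup_object(0x1010, 0);	returns NULL, warns "Found object by alias"
 *	lookup_object(0x1010, 1);	returns the object (interior pointers OK)
 */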

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
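
/*
 * Typical reference-counting pattern built on the helpers above (a sketch
 * only; see e.g. paint_ptr() or object_no_scan() below for real users):
 *
 *	object = find_and_get_object(ptr, 0);	<- use_count incremented
 *	if (object) {
 *		spin_lock_irqsave(&object->lock, flags);
 *		...inspect or modify the metadata...
 *		spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);		<- may schedule RCU freeing
 *	}
 */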

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warning("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object "
				      "search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}
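
/*
 * Worked example of the split above (illustration only): for an object
 * covering [0x1000, 0x1400), delete_object_part(0x1100, 0x100) removes the
 * original object and re-creates two objects for the surviving ranges
 * [0x1000, 0x1100) and [0x1200, 0x1400).
 */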

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warning("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}
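
/*
 * Scan area semantics, illustrated (sketch only): for an object covering
 * [obj, obj + 0x100), add_scan_area(obj + 0x20, 0x10, gfp) restricts
 * scanning to [obj + 0x20, obj + 0x30). Passing SIZE_MAX as the size means
 * "from the given pointer to the end of the object". A size overflowing the
 * object is rejected with a warning.
 */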

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}
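
/*
 * Early logging lifecycle, sketched: a boot-time kmalloc() issued before
 * kmemleak_init() runs ends up here rather than in create_object():
 *
 *	kmemleak_alloc(ptr, size, 1, gfp)
 *	    -> log_early(KMEMLEAK_ALLOC, ptr, size, 1)	(buffered)
 *	...
 *	kmemleak_init()
 *	    -> early_alloc(log)				(replayed)
 *		-> create_object(ptr, size, 1, GFP_ATOMIC)
 */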

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and populate the stack trace for each
 * CPU's copy.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
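
/*
 * Hook usage, sketched for a hypothetical custom allocator (my_alloc() and
 * my_free() are illustrative names, not kernel APIs): report each block to
 * kmemleak right after allocating it and right before freeing it:
 *
 *	ptr = my_alloc(size);
 *	if (ptr)
 *		kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(ptr);
 *	my_free(ptr);
 */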

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}
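
/*
 * The checksum is a cheap "was this block written to?" test: kmemleak_scan()
 * temporarily re-grays a white object whose CRC has changed since the
 * previous scan, so a block modified between scans is given the benefit of
 * the doubt (and is itself rescanned) rather than being reported right away.
 * This is a description of the policy implemented in kmemleak_scan() below.
 */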

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;

		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock(&object->lock);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			/* put_object() called when removing from gray_list */
			WARN_ON(!get_object(object));
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock(&object->lock);
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
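
/*
 * What scan_block() actually does, sketched: it treats the range as an array
 * of pointer-sized words and tests each word as a candidate pointer. On a
 * 64-bit machine, with min_addr/max_addr bracketing the tracked objects:
 *
 *	scan_block(p, p + 64, NULL);
 *
 * examines eight aligned 8-byte words; any word whose value falls inside a
 * tracked object (lookup with alias) counts as a reference to that object.
 */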

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}
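
/*
 * The full scan below is a mark-and-sweep pass; in outline (a summary of the
 * steps implemented in kmemleak_scan(), not additional behaviour):
 *
 *	1. whiten every object (count = 0), seeding gray_list with objects
 *	   that are gray by definition (min_count == 0);
 *	2. scan the roots: data/bss, per-cpu sections, struct pages and,
 *	   optionally, the task stacks; each hit increments object->count;
 *	3. drain gray_list, scanning the referenced objects transitively;
 *	4. re-gray and rescan white objects whose checksum has changed;
 *	5. report the objects that are still white and old enough.
 */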

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_large_block(_sdata, _edata);
	scan_large_block(__bss_start, __bss_stop);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
	}

}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
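
/*
 * Usage from userspace, for illustration (the debugfs file is created in
 * kmemleak_late_init() below):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	<- trigger a scan now
 *	# cat /sys/kernel/debug/kmemleak		<- list suspected leaks
 *	# echo clear > /sys/kernel/debug/kmemleak	<- mute current reports
 */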
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks have been found (otherwise, keep the metadata so that the
 * existing leak information remains accessible).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	/*
	 * Once the scan thread has stopped, it is safe to no longer track
	 * object freeing. Ordering of the scan thread stopping and the memory
	 * accesses below is guaranteed by the kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. "
			"Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warning("Early log buffer exceeded (%d), please increase "
			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);