/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
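 *
 * Putting these rules together, a raw lookup follows the pattern below
 * (an illustrative sketch mirroring find_and_get_object(), not a separate
 * API):
 *
 *	rcu_read_lock();
 *	read_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, alias);
 *	read_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))	/* use_count was already 0 */
 *		object = NULL;
 *	rcu_read_unlock();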
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
					   __GFP_NOACCOUNT)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before the object is a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging.
 * Kmemleak is initialized after the kernel allocator. However, both the
 * kernel allocator and kmemleak may allocate memory blocks which need to be
 * tracked. Kmemleak defines an arbitrary buffer to hold the
 * allocation/freeing information before it is fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warning(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	int i, len, remaining;
	unsigned char linebuf[HEX_ROW_SIZE * 5];

	/* limit the number of lines to HEX_MAX_LINES */
	remaining = len =
		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));

	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
	for (i = 0; i < len; i += HEX_ROW_SIZE) {
		int linelen = min(remaining, HEX_ROW_SIZE);

		remaining -= HEX_ROW_SIZE;
		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
				   HEX_ASCII);
		seq_printf(seq, "    %s\n", linebuf);
	}
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
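 *
 * For example (illustrative): an object registered with min_count == 1 is
 * whitened at the start of a scan (count = 0) and each pointer to it found
 * while scanning increments count, so a single reference turns it gray
 * while none leaves it white and eligible for reporting.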
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise.
 * Note that once an object's use_count reaches 0, the RCU freeing has
 * already been registered and the object should no longer be used. This
 * function must be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
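 *
 * If the new block overlaps an object already in the tree, the insertion is
 * aborted and kmemleak is disabled via kmemleak_stop() (see the overlap
 * check below).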
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warning("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object "
				      "search tree (overlaps existing)\n",
				      ptr);
			kmem_cache_free(object_cache, object);
			object = parent;
			spin_lock(&object->lock);
			dump_object_info(object);
			spin_unlock(&object->lock);
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	rb_erase(&object->rb_node, &object_tree_root);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object "
			      "at 0x%08lx as %s\n", ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
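 *
 * Passing SIZE_MAX as the size extends the area from ptr to the end of the
 * object (see the SIZE_MAX handling below).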
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warning("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such object will not be scanned by kmemleak but references to it
 * are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated percpu block and create the metadata (with the
 * saved stack trace) for each per-CPU pointer.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
 * allocation.
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, GFP_KERNEL);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
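 *
 * The block is scanned one word at a time: _start is rounded up to a
 * pointer-aligned address and any trailing partial word before _end is
 * ignored.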
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long flags;
		unsigned long pointer;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			spin_unlock_irqrestore(&object->lock, flags);
			continue;
		}

		spin_unlock_irqrestore(&object->lock, flags);
		put_object(object);
	}
}

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
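 *
 * The open-coded traversal below is safe against tail additions made by
 * scan_object() while the loop runs: newly grayed objects are appended to
 * gray_list and picked up before the loop terminates.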
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			scan_block(task_stack_page(p), task_stack_page(p) +
				   THREAD_SIZE, NULL, 0);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
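	 * (An object whose checksum changed may have had a new pointer
	 * written into it since its memory was scanned above, so rescan it
	 * rather than report it.)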
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
	}

}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function takes
 * the scan_mutex to serialize against memory scanning and the other users of
 * the "kmemleak" debugfs file.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
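 *
 * The "clear" command (see kmemleak_write() below) relies on this:
 * previously reported objects are painted grey, so they are no longer
 * reported but their contents are still scanned.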
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
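	/*
	 * delete_object_full() removes each entry with list_del_rcu(), so
	 * the lockless traversal above stays safe as entries disappear.
	 */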
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks were found (otherwise, kmemleak may still have some useful
 * information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	mutex_lock(&scan_mutex);
	stop_scan_thread();

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. "
			"Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
	mutex_unlock(&scan_mutex);
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

static void __init print_log_trace(struct early_log *log)
{
	struct stack_trace trace;

	trace.nr_entries = log->trace_len;
	trace.entries = log->trace;

	pr_notice("Early log backtrace:\n");
	print_stack_trace(&trace, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log >= ARRAY_SIZE(early_log))
		pr_warning("Early log buffer exceeded (%d), please increase "
			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else
		kmemleak_enabled = 1;
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
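	 * Each recorded early_log entry is replayed below, either via
	 * early_alloc() (which preserves the originally saved stack trace)
	 * or through the corresponding public kmemleak_* entry point.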
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);