// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
 *   accesses to the object_tree_root (or object_phys_tree_root). The
 *   object_list is the main list holding the metadata (struct kmemleak_object)
 *   for the allocated memory blocks. The object_tree_root and object_phys_tree_root
 *   are red black trees used to look-up metadata based on a pointer to the
 *   corresponding memory block. The object_phys_tree_root is for objects
 *   allocated with physical address. The kmemleak_object structures are
 *   added to the object_list and object_tree_root (or object_phys_tree_root)
 *   in the create_object() function called from the kmemleak_alloc() (or
 *   kmemleak_alloc_phys()) callback and removed in delete_object() called from
 *   the kmemleak_free() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
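/*
 * Illustrative sketch of the nesting order above, as seen on the scanning
 * path (scan_object() -> scan_block()); this is not a verbatim call sequence
 * from this file and "other_object" stands for any object found while
 * scanning the currently locked one:
 *
 *	mutex_lock(&scan_mutex);
 *	raw_spin_lock_irqsave(&object->lock, flags);
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 *	raw_spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 */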
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
					   __GFP_NOLOCKDEP)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	depot_stack_handle_t trace_handle;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS		(1 << 4)

#define HEX_PREFIX		" "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
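/*
 * For example (illustrative values only): a kmalloc() object registered with
 * min_count == 1 is white after a scan that found count == 0 references and
 * is a leak candidate; the same object with count >= 1 is gray; an object
 * registered with min_count == -1 (KMEMLEAK_BLACK) is black and is neither
 * scanned nor reported.
 */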
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned long *entries;
	unsigned int nr_entries;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, " backtrace:\n");

	for (i = 0; i < nr_entries; i++) {
		void *ptr = (void *)entries[i];
		warn_or_seq_printf(seq, " [<%pK>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice(" min_count = %d\n", object->min_count);
	pr_notice(" count = %d\n", object->count);
	pr_notice(" flags = 0x%x\n", object->flags);
	pr_notice(" checksum = %u\n", object->checksum);
	pr_notice(" backtrace:\n");
	if (object->trace_handle)
		stack_depot_print(object->trace_handle);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       bool is_phys)
{
	struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
			     object_tree_root.rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/* Look up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	return __lookup_object(ptr, alias, false);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}
/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, is_phys);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	return __find_and_get_object(ptr, alias, false);
}

/*
 * Remove an object from the object_tree_root (or object_phys_tree_root)
 * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
 * is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
				   &object_phys_tree_root :
				   &object_tree_root);
	list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root (or object_phys_tree_root) and object_list. The
 * returned object's use_count should be at least 1, as initially set
 * by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, is_phys);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t trace_handle;
	unsigned long entries[MAX_TRACE];
	unsigned int nr_entries;

	if (!kmemleak_initialized)
		return 0;
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return trace_handle;
}
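/*
 * Illustrative sketch of the typical metadata access pattern built from the
 * helpers above (not a function in this file): look the object up with its
 * use_count incremented, take object->lock for the actual access and drop
 * the reference afterwards.
 *
 *	object = find_and_get_object(ptr, 0);
 *	if (object) {
 *		raw_spin_lock_irqsave(&object->lock, flags);
 *		... read or update the metadata ...
 *		raw_spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);
 *	}
 */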
/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root (or
 * object_phys_tree_root).
 */
static void __create_object(unsigned long ptr, size_t size,
			    int min_count, gfp_t gfp, bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_handle = set_track_prepare();

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	/*
	 * Only update min_addr and max_addr with object
	 * storing virtual address.
	 */
	if (!is_phys) {
		min_addr = min(min_addr, untagged_ptr);
		max_addr = max(max_addr, untagged_ptr + size);
	}
	link = is_phys ? &object_phys_tree_root.rb_node :
		&object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
					  &object_tree_root);
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/* Create a kmemleak object which was allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
			  int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, false);
}

/* Create a kmemleak object which was allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
			       int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, true);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0, false);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1, is_phys);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		__create_object(start, ptr - start, object->min_count,
				GFP_KERNEL, is_phys);
	if (ptr + size < end)
		__create_object(ptr + size, end - ptr - size, object->min_count,
				GFP_KERNEL, is_phys);

	__delete_object(object);
}
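/*
 * Worked example for delete_object_part() (illustrative addresses): partially
 * freeing 0x100 bytes at 0x1080 from an object covering [0x1000, 0x1200)
 * removes the original metadata and re-creates it for the two remaining
 * ranges [0x1000, 0x1080) and [0x1180, 0x1200).
 */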
static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, bool is_phys)
{
	struct kmemleak_object *object;

	object = __find_and_get_object(ptr, 0, is_phys);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, false);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, bool is_phys)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to it
 * are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size, false);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_handle = set_track_prepare();
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
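/*
 * Illustrative use of kmemleak_not_leak() by a hypothetical caller ("obj" is
 * an assumed variable, not part of this file): an allocation whose only
 * reference is kept somewhere kmemleak does not scan can be marked as a
 * false positive right after it is allocated:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	if (obj)
 *		kmemleak_not_leak(obj);
 */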
/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr, false);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%pa, %zu)\n", __func__, &phys, size);

	if (kmemleak_enabled)
		/*
		 * Create object with OBJECT_PHYS flag and
		 * assume min_count 0.
		 */
		create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	pr_debug("%s(0x%pa)\n", __func__, &phys);

	if (kmemleak_enabled)
		delete_object_part((unsigned long)phys, size, true);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	pr_debug("%s(0x%pa)\n", __func__, &phys);

	if (kmemleak_enabled)
		make_black_object((unsigned long)phys, true);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
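/*
 * Illustrative sketch for the *_phys variants above (hypothetical caller
 * that only knows the physical address of an early allocation, e.g. a
 * memblock-style allocator; "phys" and "size" are assumed variables):
 *
 *	kmemleak_alloc_phys(phys, size, GFP_NOWAIT);
 *	...
 *	kmemleak_free_part_phys(phys, size / 2);
 */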
/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return false;

	kasan_disable_current();
	kcsan_disable_current();
	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		raw_spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			raw_spin_unlock(&object->lock);
		}
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;
	void *obj_ptr;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;

	obj_ptr = object->flags & OBJECT_PHYS ?
		  __va((phys_addr_t)object->pointer) :
		  (void *)object->pointer;

	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = obj_ptr;
		void *end = obj_ptr + object->size;
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}
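/*
 * For example, with MAX_SCAN_SIZE of 4096 bytes a 1 MiB object is scanned by
 * scan_object() in 256 chunks, dropping object->lock and calling
 * cond_resched() between chunks so that scanning latency stays bounded.
 */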
/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Conditionally call resched() in an object iteration loop while making sure
 * that the given object won't go away without RCU read lock by performing a
 * get_object() if !pinned.
 *
 * Return: false if can't do a cond_resched() due to get_object() failure
 *	   true otherwise
 */
static bool kmemleak_cond_resched(struct kmemleak_object *object, bool pinned)
{
	if (!pinned && !get_object(object))
		return false;

	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
	if (!pinned)
		put_object(object);
	return true;
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;
	int loop_cnt = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		bool obj_pinned = false;

		raw_spin_lock_irq(&object->lock);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif

		/* ignore objects outside lowmem (paint them black) */
		if ((object->flags & OBJECT_PHYS) &&
		    !(object->flags & OBJECT_NO_SCAN)) {
			unsigned long phys = object->pointer;

			if (PHYS_PFN(phys) < min_low_pfn ||
			    PHYS_PFN(phys + object->size) >= max_low_pfn)
				__paint_it(object, KMEMLEAK_BLACK);
		}

		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			obj_pinned = true;
		}

		raw_spin_unlock_irq(&object->lock);

		/*
		 * Do a cond_resched() every 64k objects to avoid soft lockup.
		 */
		if (!(++loop_cnt & 0xffff) &&
		    !kmemleak_cond_resched(object, obj_pinned))
			loop_cnt--;	/* Try again on next object */
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	loop_cnt = 0;
	list_for_each_entry_rcu(object, &object_list, object_list) {
		/*
		 * Do a cond_resched() every 64k objects to avoid soft lockup.
		 */
		if (!(++loop_cnt & 0xffff) &&
		    !kmemleak_cond_resched(object, false))
			loop_cnt--;	/* Try again on next object */

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	loop_cnt = 0;
	list_for_each_entry_rcu(object, &object_list, object_list) {
		/*
		 * Do a cond_resched() every 64k objects to avoid soft lockup.
		 */
		if (!(++loop_cnt & 0xffff) &&
		    !kmemleak_cond_resched(object, false))
			loop_cnt--;	/* Try again on next object */

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}

}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object returned, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
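/*
 * For example, assuming debugfs is mounted at /sys/kernel/debug (see
 * Documentation/dev-tools/kmemleak.rst for the full workflow), a scan can be
 * triggered and the results inspected from user space with:
 *
 *   # mount -t debugfs nodev /sys/kernel/debug/
 *   # echo scan > /sys/kernel/debug/kmemleak
 *   # cat /sys/kernel/debug/kmemleak
 *   # echo clear > /sys/kernel/debug/kmemleak
 */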
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or kmemleak_lock held.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks were previously found (otherwise, kmemleak may still have
 * some useful information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to
	 * no longer track object freeing. Ordering of the scan thread
	 * stopping and the memory accesses below is guaranteed by the
	 * kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
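
/*
 * Example: booting with "kmemleak=off" on the kernel command line disables
 * the leak detector, while "kmemleak=on" keeps it enabled when
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is set (see
 * Documentation/dev-tools/kmemleak.rst).
 */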

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	stack_depot_init();
	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);