1 /*
2  * mm/kmemleak.c
3  *
4  * Copyright (C) 2008 ARM Limited
5  * Written by Catalin Marinas <catalin.marinas@arm.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  *
21  * For more information on the algorithm and kmemleak usage, please see
22  * Documentation/dev-tools/kmemleak.rst.
23  *
24  * Notes on locking
25  * ----------------
26  *
27  * The following locks and mutexes are used by kmemleak:
28  *
29  * - kmemleak_lock (rwlock): protects the object_list modifications and
30  *   accesses to the object_tree_root. The object_list is the main list
31  *   holding the metadata (struct kmemleak_object) for the allocated memory
32  *   blocks. The object_tree_root is a red black tree used to look-up
33  *   metadata based on a pointer to the corresponding memory block.  The
34  *   kmemleak_object structures are added to the object_list and
35  *   object_tree_root in the create_object() function called from the
36  *   kmemleak_alloc() callback and removed in delete_object() called from the
37  *   kmemleak_free() callback
38  * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
39  *   the metadata (e.g. count) are protected by this lock. Note that some
40  *   members of this structure may be protected by other means (atomic or
41  *   kmemleak_lock). This lock is also held when scanning the corresponding
42  *   memory block to avoid the kernel freeing it via the kmemleak_free()
43  *   callback. This is less heavyweight than holding a global lock like
44  *   kmemleak_lock during scanning
45  * - scan_mutex (mutex): ensures that only one thread may scan the memory for
46  *   unreferenced objects at a time. The gray_list contains the objects which
47  *   are already referenced or marked as false positives and need to be
48  *   scanned. This list is only modified during a scanning episode when the
49  *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
50  *   Note that the kmemleak_object.use_count is incremented when an object is
51  *   added to the gray_list and therefore cannot be freed. This mutex also
52  *   serializes the users of the "kmemleak" debugfs file as well as
53  *   modifications to the memory scanning parameters, including the scan_thread
54  *   pointer
55  *
56  * Locks and mutexes are acquired/nested in the following order:
57  *
58  *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
59  *
60  * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
61  * regions.
62  *
63  * The kmemleak_object structures have a use_count incremented or decremented
64  * using the get_object()/put_object() functions. When the use_count becomes
65  * 0, this count can no longer be incremented and put_object() schedules the
66  * kmemleak_object freeing via an RCU callback. All calls to the get_object()
67  * function must be protected by rcu_read_lock() to avoid accessing a freed
68  * structure.
69  */
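/*
 * A rough sketch of the nesting above as it occurs during a scan (purely
 * illustrative; the real code is in scan_object()/scan_block() below):
 *
 *	mutex_lock(&scan_mutex);
 *	spin_lock_irqsave(&object->lock, flags);	object being scanned
 *	read_lock(&kmemleak_lock);			look up pointed-to objects
 *	spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
 *
 * The SINGLE_DEPTH_NESTING annotation tells lockdep that acquiring a second
 * object lock while one is already held is intentional rather than a
 * potential deadlock.
 */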
70 
71 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72 
73 #include <linux/init.h>
74 #include <linux/kernel.h>
75 #include <linux/list.h>
76 #include <linux/sched/signal.h>
77 #include <linux/sched/task.h>
78 #include <linux/sched/task_stack.h>
79 #include <linux/jiffies.h>
80 #include <linux/delay.h>
81 #include <linux/export.h>
82 #include <linux/kthread.h>
83 #include <linux/rbtree.h>
84 #include <linux/fs.h>
85 #include <linux/debugfs.h>
86 #include <linux/seq_file.h>
87 #include <linux/cpumask.h>
88 #include <linux/spinlock.h>
89 #include <linux/module.h>
90 #include <linux/mutex.h>
91 #include <linux/rcupdate.h>
92 #include <linux/stacktrace.h>
93 #include <linux/cache.h>
94 #include <linux/percpu.h>
95 #include <linux/memblock.h>
96 #include <linux/pfn.h>
97 #include <linux/mmzone.h>
98 #include <linux/slab.h>
99 #include <linux/thread_info.h>
100 #include <linux/err.h>
101 #include <linux/uaccess.h>
102 #include <linux/string.h>
103 #include <linux/nodemask.h>
104 #include <linux/mm.h>
105 #include <linux/workqueue.h>
106 #include <linux/crc32.h>
107 
108 #include <asm/sections.h>
109 #include <asm/processor.h>
110 #include <linux/atomic.h>
111 
112 #include <linux/kasan.h>
113 #include <linux/kmemleak.h>
114 #include <linux/memory_hotplug.h>
115 
116 /*
117  * Kmemleak configuration and common defines.
118  */
119 #define MAX_TRACE		16	/* stack trace length */
120 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
121 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
122 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
123 #define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
124 
125 #define BYTES_PER_POINTER	sizeof(void *)
126 
127 /* GFP bitmask for kmemleak internal allocations */
128 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
129 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
130 				 __GFP_NOWARN | __GFP_NOFAIL)
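/*
 * Example (illustrative): gfp_kmemleak_mask(GFP_KERNEL | __GFP_ZERO) drops
 * __GFP_ZERO, since only the bits within GFP_KERNEL | GFP_ATOMIC are
 * preserved from the caller's flags, and always ORs in __GFP_NORETRY,
 * __GFP_NOMEMALLOC, __GFP_NOWARN and __GFP_NOFAIL for kmemleak's own
 * metadata allocations.
 */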
131 
132 /* scanning area inside a memory block */
133 struct kmemleak_scan_area {
134 	struct hlist_node node;
135 	unsigned long start;
136 	size_t size;
137 };
138 
139 #define KMEMLEAK_GREY	0
140 #define KMEMLEAK_BLACK	-1
141 
142 /*
143  * Structure holding the metadata for each allocated memory block.
144  * Modifications to such objects should be made while holding the
145  * object->lock. Insertions or deletions from object_list, gray_list or
146  * rb_node are already protected by the corresponding locks or mutex (see
147  * the notes on locking above). These objects are reference-counted
148  * (use_count) and freed using the RCU mechanism.
149  */
150 struct kmemleak_object {
151 	spinlock_t lock;
152 	unsigned int flags;		/* object status flags */
153 	struct list_head object_list;
154 	struct list_head gray_list;
155 	struct rb_node rb_node;
156 	struct rcu_head rcu;		/* object_list lockless traversal */
157 	/* object usage count; object freed when use_count == 0 */
158 	atomic_t use_count;
159 	unsigned long pointer;
160 	size_t size;
161 	/* pass surplus references to this pointer */
162 	unsigned long excess_ref;
163 	/* minimum number of pointers found before it is considered a leak */
164 	int min_count;
165 	/* the total number of pointers found pointing to this object */
166 	int count;
167 	/* checksum for detecting modified objects */
168 	u32 checksum;
169 	/* memory ranges to be scanned inside an object (empty for all) */
170 	struct hlist_head area_list;
171 	unsigned long trace[MAX_TRACE];
172 	unsigned int trace_len;
173 	unsigned long jiffies;		/* creation timestamp */
174 	pid_t pid;			/* pid of the current task */
175 	char comm[TASK_COMM_LEN];	/* executable name */
176 };
177 
178 /* flag representing the memory block allocation status */
179 #define OBJECT_ALLOCATED	(1 << 0)
180 /* flag set after the first reporting of an unreferenced object */
181 #define OBJECT_REPORTED		(1 << 1)
182 /* flag set to not scan the object */
183 #define OBJECT_NO_SCAN		(1 << 2)
184 
185 #define HEX_PREFIX		"    "
186 /* number of bytes to print per line; must be 16 or 32 */
187 #define HEX_ROW_SIZE		16
188 /* number of bytes to print at a time (1, 2, 4, 8) */
189 #define HEX_GROUP_SIZE		1
190 /* include ASCII after the hex output */
191 #define HEX_ASCII		1
192 /* max number of lines to be printed */
193 #define HEX_MAX_LINES		2
194 
195 /* the list of all allocated objects */
196 static LIST_HEAD(object_list);
197 /* the list of gray-colored objects (see color_gray comment below) */
198 static LIST_HEAD(gray_list);
199 /* search tree for object boundaries */
200 static struct rb_root object_tree_root = RB_ROOT;
201 /* rw_lock protecting the access to object_list and object_tree_root */
202 static DEFINE_RWLOCK(kmemleak_lock);
203 
204 /* allocation caches for kmemleak internal data */
205 static struct kmem_cache *object_cache;
206 static struct kmem_cache *scan_area_cache;
207 
208 /* set if tracing memory operations is enabled */
209 static int kmemleak_enabled;
210 /* same as above but only for the kmemleak_free() callback */
211 static int kmemleak_free_enabled;
212 /* set in the late_initcall if there were no errors */
213 static int kmemleak_initialized;
214 /* enables or disables early logging of the memory operations */
215 static int kmemleak_early_log = 1;
216 /* set if a kmemleak warning was issued */
217 static int kmemleak_warning;
218 /* set if a fatal kmemleak error has occurred */
219 static int kmemleak_error;
220 
221 /* minimum and maximum address that may be valid pointers */
222 static unsigned long min_addr = ULONG_MAX;
223 static unsigned long max_addr;
224 
225 static struct task_struct *scan_thread;
226 /* used to avoid reporting of recently allocated objects */
227 static unsigned long jiffies_min_age;
228 static unsigned long jiffies_last_scan;
229 /* delay between automatic memory scannings */
230 static signed long jiffies_scan_wait;
231 /* enables or disables the task stacks scanning */
232 static int kmemleak_stack_scan = 1;
233 /* protects the memory scanning, parameters and debug/kmemleak file access */
234 static DEFINE_MUTEX(scan_mutex);
235 /* setting kmemleak=on will set this var, skipping the disable */
236 static int kmemleak_skip_disable;
237 /* set if there are leaks that can be reported */
238 static bool kmemleak_found_leaks;
239 
240 static bool kmemleak_verbose;
241 module_param_named(verbose, kmemleak_verbose, bool, 0600);
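/*
 * Note (assumed standard module-parameter syntax for built-in code): as
 * kmemleak is always built in, this parameter can also be set at boot
 * time, e.g. with "kmemleak.verbose=1" on the kernel command line.
 */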
242 
243 /*
244  * Early object allocation/freeing logging. Kmemleak is initialized after the
245  * kernel allocator. However, both the kernel allocator and kmemleak may
246  * allocate memory blocks which need to be tracked. Kmemleak defines an
247  * arbitrary buffer to hold the allocation/freeing information before it is
248  * fully initialized.
249  */
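/*
 * Rough sketch of the early-log flow (illustrative, not code from this
 * file): an allocation made before kmemleak is initialized is captured as
 *
 *	kmemleak_alloc(ptr, size, 1, GFP_KERNEL)
 *		-> log_early(KMEMLEAK_ALLOC, ptr, size, 1)
 *
 * and is only turned into a real kmemleak_object later, when kmemleak_init()
 * replays the buffered entries (see early_alloc() below).
 */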
250 
251 /* kmemleak operation type for early logging */
252 enum {
253 	KMEMLEAK_ALLOC,
254 	KMEMLEAK_ALLOC_PERCPU,
255 	KMEMLEAK_FREE,
256 	KMEMLEAK_FREE_PART,
257 	KMEMLEAK_FREE_PERCPU,
258 	KMEMLEAK_NOT_LEAK,
259 	KMEMLEAK_IGNORE,
260 	KMEMLEAK_SCAN_AREA,
261 	KMEMLEAK_NO_SCAN,
262 	KMEMLEAK_SET_EXCESS_REF
263 };
264 
265 /*
266  * Structure holding the information passed to kmemleak callbacks during the
267  * early logging.
268  */
269 struct early_log {
270 	int op_type;			/* kmemleak operation type */
271 	int min_count;			/* minimum reference count */
272 	const void *ptr;		/* allocated/freed memory block */
273 	union {
274 		size_t size;		/* memory block size */
275 		unsigned long excess_ref; /* surplus reference passing */
276 	};
277 	unsigned long trace[MAX_TRACE];	/* stack trace */
278 	unsigned int trace_len;		/* stack trace length */
279 };
280 
281 /* early logging buffer and current position */
282 static struct early_log
283 	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
284 static int crt_early_log __initdata;
285 
286 static void kmemleak_disable(void);
287 
288 /*
289  * Print a warning and dump the stack trace.
290  */
291 #define kmemleak_warn(x...)	do {		\
292 	pr_warn(x);				\
293 	dump_stack();				\
294 	kmemleak_warning = 1;			\
295 } while (0)
296 
297 /*
298  * Macro invoked when a serious kmemleak condition has occurred and cannot be
299  * recovered from. Kmemleak will be disabled and further allocation/freeing
300  * tracing is no longer available.
301  */
302 #define kmemleak_stop(x...)	do {	\
303 	kmemleak_warn(x);		\
304 	kmemleak_disable();		\
305 } while (0)
306 
307 #define warn_or_seq_printf(seq, fmt, ...)	do {	\
308 	if (seq)					\
309 		seq_printf(seq, fmt, ##__VA_ARGS__);	\
310 	else						\
311 		pr_warn(fmt, ##__VA_ARGS__);		\
312 } while (0)
313 
314 static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
315 				 int rowsize, int groupsize, const void *buf,
316 				 size_t len, bool ascii)
317 {
318 	if (seq)
319 		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
320 			     buf, len, ascii);
321 	else
322 		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
323 			       rowsize, groupsize, buf, len, ascii);
324 }
325 
326 /*
327  * Printing of the object's hex dump to the seq file. The number of lines to be
328  * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
329  * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
330  * with the object->lock held.
331  */
332 static void hex_dump_object(struct seq_file *seq,
333 			    struct kmemleak_object *object)
334 {
335 	const u8 *ptr = (const u8 *)object->pointer;
336 	size_t len;
337 
338 	/* limit the number of lines to HEX_MAX_LINES */
339 	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
340 
341 	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
342 	kasan_disable_current();
343 	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
344 			     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
345 	kasan_enable_current();
346 }
347 
348 /*
349  * Object colors, encoded with count and min_count:
350  * - white - orphan object, not enough references to it (count < min_count)
351  * - gray  - not orphan, not marked as false positive (min_count == 0) or
352  *		sufficient references to it (count >= min_count)
353  * - black - ignore, it doesn't contain references (e.g. text section)
354  *		(min_count == -1). No function defined for this color.
355  * Newly created objects don't have any color assigned (object->count == -1)
356  * before the next memory scan when they become white.
357  */
358 static bool color_white(const struct kmemleak_object *object)
359 {
360 	return object->count != KMEMLEAK_BLACK &&
361 		object->count < object->min_count;
362 }
363 
364 static bool color_gray(const struct kmemleak_object *object)
365 {
366 	return object->min_count != KMEMLEAK_BLACK &&
367 		object->count >= object->min_count;
368 }
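/*
 * Worked examples of the encoding above (illustrative values): an object
 * registered with min_count == 1 and no references found yet (count == 0)
 * is white; the same object once a pointer to it is found (count == 1)
 * becomes gray; an object registered with min_count == KMEMLEAK_BLACK is
 * black and is neither scanned nor reported.
 */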
369 
370 /*
371  * Objects are considered unreferenced only if their color is white, they have
372  * not be deleted and have a minimum age to avoid false positives caused by
373  * pointers temporarily stored in CPU registers.
374  */
375 static bool unreferenced_object(struct kmemleak_object *object)
376 {
377 	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
378 		time_before_eq(object->jiffies + jiffies_min_age,
379 			       jiffies_last_scan);
380 }
381 
382 /*
383  * Printing of the unreferenced object's information to the seq file. The
384  * print_unreferenced function must be called with the object->lock held.
385  */
386 static void print_unreferenced(struct seq_file *seq,
387 			       struct kmemleak_object *object)
388 {
389 	int i;
390 	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
391 
392 	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
393 		   object->pointer, object->size);
394 	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
395 		   object->comm, object->pid, object->jiffies,
396 		   msecs_age / 1000, msecs_age % 1000);
397 	hex_dump_object(seq, object);
398 	warn_or_seq_printf(seq, "  backtrace:\n");
399 
400 	for (i = 0; i < object->trace_len; i++) {
401 		void *ptr = (void *)object->trace[i];
402 		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
403 	}
404 }
405 
406 /*
407  * Print the kmemleak_object information. This function is used mainly for
408  * debugging special cases of kmemleak operations. It must be called with
409  * the object->lock held.
410  */
411 static void dump_object_info(struct kmemleak_object *object)
412 {
413 	struct stack_trace trace;
414 
415 	trace.nr_entries = object->trace_len;
416 	trace.entries = object->trace;
417 
418 	pr_notice("Object 0x%08lx (size %zu):\n",
419 		  object->pointer, object->size);
420 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
421 		  object->comm, object->pid, object->jiffies);
422 	pr_notice("  min_count = %d\n", object->min_count);
423 	pr_notice("  count = %d\n", object->count);
424 	pr_notice("  flags = 0x%x\n", object->flags);
425 	pr_notice("  checksum = %u\n", object->checksum);
426 	pr_notice("  backtrace:\n");
427 	print_stack_trace(&trace, 4);
428 }
429 
430 /*
431  * Look up a memory block's metadata (kmemleak_object) in the object search
432  * tree based on a pointer value. If alias is 0, only values pointing to the
433  * beginning of the memory block are allowed. The kmemleak_lock must be held
434  * when calling this function.
435  */
436 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
437 {
438 	struct rb_node *rb = object_tree_root.rb_node;
439 
440 	while (rb) {
441 		struct kmemleak_object *object =
442 			rb_entry(rb, struct kmemleak_object, rb_node);
443 		if (ptr < object->pointer)
444 			rb = object->rb_node.rb_left;
445 		else if (object->pointer + object->size <= ptr)
446 			rb = object->rb_node.rb_right;
447 		else if (object->pointer == ptr || alias)
448 			return object;
449 		else {
450 			kmemleak_warn("Found object by alias at 0x%08lx\n",
451 				      ptr);
452 			dump_object_info(object);
453 			break;
454 		}
455 	}
456 	return NULL;
457 }
458 
459 /*
460  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
461  * that once an object's use_count has reached 0, the RCU freeing has already
462  * been scheduled and the object must no longer be used. This function must be
463  * called under the protection of rcu_read_lock().
464  */
465 static int get_object(struct kmemleak_object *object)
466 {
467 	return atomic_inc_not_zero(&object->use_count);
468 }
469 
470 /*
471  * RCU callback to free a kmemleak_object.
472  */
473 static void free_object_rcu(struct rcu_head *rcu)
474 {
475 	struct hlist_node *tmp;
476 	struct kmemleak_scan_area *area;
477 	struct kmemleak_object *object =
478 		container_of(rcu, struct kmemleak_object, rcu);
479 
480 	/*
481 	 * Once use_count is 0 (guaranteed by put_object), there is no other
482 	 * code accessing this object, hence no need for locking.
483 	 */
484 	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
485 		hlist_del(&area->node);
486 		kmem_cache_free(scan_area_cache, area);
487 	}
488 	kmem_cache_free(object_cache, object);
489 }
490 
491 /*
492  * Decrement the object use_count. Once the count is 0, free the object using
493  * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
494  * delete_object() path, the delayed RCU freeing ensures that there is no
495  * recursive call to the kernel allocator. Lock-less RCU object_list traversal
496  * is also possible.
497  */
498 static void put_object(struct kmemleak_object *object)
499 {
500 	if (!atomic_dec_and_test(&object->use_count))
501 		return;
502 
503 	/* should only get here after delete_object was called */
504 	WARN_ON(object->flags & OBJECT_ALLOCATED);
505 
506 	call_rcu(&object->rcu, free_object_rcu);
507 }
508 
509 /*
510  * Look up an object in the object search tree and increase its use_count.
511  */
512 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
513 {
514 	unsigned long flags;
515 	struct kmemleak_object *object;
516 
517 	rcu_read_lock();
518 	read_lock_irqsave(&kmemleak_lock, flags);
519 	object = lookup_object(ptr, alias);
520 	read_unlock_irqrestore(&kmemleak_lock, flags);
521 
522 	/* check whether the object is still available */
523 	if (object && !get_object(object))
524 		object = NULL;
525 	rcu_read_unlock();
526 
527 	return object;
528 }
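/*
 * Illustrative sketch of the canonical calling pattern for the pair above
 * (hypothetical helper, not part of kmemleak):
 */
static void __maybe_unused example_inspect_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object)
		return;

	spin_lock_irqsave(&object->lock, flags);
	/* the object metadata (count, flags etc.) may be safely read here */
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);	/* may schedule the RCU freeing */
}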
529 
530 /*
531  * Look up an object in the object search tree and remove it from both
532  * object_tree_root and object_list. The returned object's use_count should be
533  * at least 1, as initially set by create_object().
534  */
535 static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
536 {
537 	unsigned long flags;
538 	struct kmemleak_object *object;
539 
540 	write_lock_irqsave(&kmemleak_lock, flags);
541 	object = lookup_object(ptr, alias);
542 	if (object) {
543 		rb_erase(&object->rb_node, &object_tree_root);
544 		list_del_rcu(&object->object_list);
545 	}
546 	write_unlock_irqrestore(&kmemleak_lock, flags);
547 
548 	return object;
549 }
550 
551 /*
552  * Save stack trace to the given array of MAX_TRACE size.
553  */
554 static int __save_stack_trace(unsigned long *trace)
555 {
556 	struct stack_trace stack_trace;
557 
558 	stack_trace.max_entries = MAX_TRACE;
559 	stack_trace.nr_entries = 0;
560 	stack_trace.entries = trace;
561 	stack_trace.skip = 2;
562 	save_stack_trace(&stack_trace);
563 
564 	return stack_trace.nr_entries;
565 }
566 
567 /*
568  * Create the metadata (struct kmemleak_object) corresponding to an allocated
569  * memory block and add it to the object_list and object_tree_root.
570  */
571 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
572 					     int min_count, gfp_t gfp)
573 {
574 	unsigned long flags;
575 	struct kmemleak_object *object, *parent;
576 	struct rb_node **link, *rb_parent;
577 	unsigned long untagged_ptr;
578 
579 	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
580 	if (!object) {
581 		pr_warn("Cannot allocate a kmemleak_object structure\n");
582 		kmemleak_disable();
583 		return NULL;
584 	}
585 
586 	INIT_LIST_HEAD(&object->object_list);
587 	INIT_LIST_HEAD(&object->gray_list);
588 	INIT_HLIST_HEAD(&object->area_list);
589 	spin_lock_init(&object->lock);
590 	atomic_set(&object->use_count, 1);
591 	object->flags = OBJECT_ALLOCATED;
592 	object->pointer = ptr;
593 	object->size = size;
594 	object->excess_ref = 0;
595 	object->min_count = min_count;
596 	object->count = 0;			/* white color initially */
597 	object->jiffies = jiffies;
598 	object->checksum = 0;
599 
600 	/* task information */
601 	if (in_irq()) {
602 		object->pid = 0;
603 		strncpy(object->comm, "hardirq", sizeof(object->comm));
604 	} else if (in_softirq()) {
605 		object->pid = 0;
606 		strncpy(object->comm, "softirq", sizeof(object->comm));
607 	} else {
608 		object->pid = current->pid;
609 		/*
610 		 * There is a small chance of a race with set_task_comm(),
611 		 * however using get_task_comm() here may cause locking
612 		 * dependency issues with current->alloc_lock. In the worst
613 		 * case, the command line is not correct.
614 		 */
615 		strncpy(object->comm, current->comm, sizeof(object->comm));
616 	}
617 
618 	/* kernel backtrace */
619 	object->trace_len = __save_stack_trace(object->trace);
620 
621 	write_lock_irqsave(&kmemleak_lock, flags);
622 
623 	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
624 	min_addr = min(min_addr, untagged_ptr);
625 	max_addr = max(max_addr, untagged_ptr + size);
626 	link = &object_tree_root.rb_node;
627 	rb_parent = NULL;
628 	while (*link) {
629 		rb_parent = *link;
630 		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
631 		if (ptr + size <= parent->pointer)
632 			link = &parent->rb_node.rb_left;
633 		else if (parent->pointer + parent->size <= ptr)
634 			link = &parent->rb_node.rb_right;
635 		else {
636 			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
637 				      ptr);
638 			/*
639 			 * No need for parent->lock here since "parent" cannot
640 			 * be freed while the kmemleak_lock is held.
641 			 */
642 			dump_object_info(parent);
643 			kmem_cache_free(object_cache, object);
644 			object = NULL;
645 			goto out;
646 		}
647 	}
648 	rb_link_node(&object->rb_node, rb_parent, link);
649 	rb_insert_color(&object->rb_node, &object_tree_root);
650 
651 	list_add_tail_rcu(&object->object_list, &object_list);
652 out:
653 	write_unlock_irqrestore(&kmemleak_lock, flags);
654 	return object;
655 }
656 
657 /*
658  * Mark the object as not allocated and schedule RCU freeing via put_object().
659  */
660 static void __delete_object(struct kmemleak_object *object)
661 {
662 	unsigned long flags;
663 
664 	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
665 	WARN_ON(atomic_read(&object->use_count) < 1);
666 
667 	/*
668 	 * Locking here also ensures that the corresponding memory block
669 	 * cannot be freed when it is being scanned.
670 	 */
671 	spin_lock_irqsave(&object->lock, flags);
672 	object->flags &= ~OBJECT_ALLOCATED;
673 	spin_unlock_irqrestore(&object->lock, flags);
674 	put_object(object);
675 }
676 
677 /*
678  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
679  * delete it.
680  */
681 static void delete_object_full(unsigned long ptr)
682 {
683 	struct kmemleak_object *object;
684 
685 	object = find_and_remove_object(ptr, 0);
686 	if (!object) {
687 #ifdef DEBUG
688 		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
689 			      ptr);
690 #endif
691 		return;
692 	}
693 	__delete_object(object);
694 }
695 
696 /*
697  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
698  * delete it. If the memory block is partially freed, the function may create
699  * additional metadata for the remaining parts of the block.
700  */
701 static void delete_object_part(unsigned long ptr, size_t size)
702 {
703 	struct kmemleak_object *object;
704 	unsigned long start, end;
705 
706 	object = find_and_remove_object(ptr, 1);
707 	if (!object) {
708 #ifdef DEBUG
709 		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
710 			      ptr, size);
711 #endif
712 		return;
713 	}
714 
715 	/*
716 	 * Create one or two objects that may result from the memory block
717 	 * split. Note that partial freeing is only done by free_bootmem() and
718 	 * this happens before kmemleak_init() is called. The path below is
719 	 * only executed during early log recording in kmemleak_init(), so
720 	 * GFP_KERNEL is enough.
721 	 */
722 	start = object->pointer;
723 	end = object->pointer + object->size;
724 	if (ptr > start)
725 		create_object(start, ptr - start, object->min_count,
726 			      GFP_KERNEL);
727 	if (ptr + size < end)
728 		create_object(ptr + size, end - ptr - size, object->min_count,
729 			      GFP_KERNEL);
730 
731 	__delete_object(object);
732 }
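/*
 * Example with illustrative addresses: partially freeing [0x1040, 0x1080)
 * from an object covering [0x1000, 0x1100) deletes the original metadata
 * and creates two new objects covering [0x1000, 0x1040) and
 * [0x1080, 0x1100).
 */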
733 
734 static void __paint_it(struct kmemleak_object *object, int color)
735 {
736 	object->min_count = color;
737 	if (color == KMEMLEAK_BLACK)
738 		object->flags |= OBJECT_NO_SCAN;
739 }
740 
741 static void paint_it(struct kmemleak_object *object, int color)
742 {
743 	unsigned long flags;
744 
745 	spin_lock_irqsave(&object->lock, flags);
746 	__paint_it(object, color);
747 	spin_unlock_irqrestore(&object->lock, flags);
748 }
749 
750 static void paint_ptr(unsigned long ptr, int color)
751 {
752 	struct kmemleak_object *object;
753 
754 	object = find_and_get_object(ptr, 0);
755 	if (!object) {
756 		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
757 			      ptr,
758 			      (color == KMEMLEAK_GREY) ? "Grey" :
759 			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
760 		return;
761 	}
762 	paint_it(object, color);
763 	put_object(object);
764 }
765 
766 /*
767  * Mark an object permanently as gray-colored so that it can no longer be
768  * reported as a leak. This is used in general to mark a false positive.
769  */
770 static void make_gray_object(unsigned long ptr)
771 {
772 	paint_ptr(ptr, KMEMLEAK_GREY);
773 }
774 
775 /*
776  * Mark the object as black-colored so that it is excluded from scanning and
777  * reporting.
778  */
779 static void make_black_object(unsigned long ptr)
780 {
781 	paint_ptr(ptr, KMEMLEAK_BLACK);
782 }
783 
784 /*
785  * Add a scanning area to the object. If at least one such area is added,
786  * kmemleak will only scan these ranges rather than the whole memory block.
787  */
788 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
789 {
790 	unsigned long flags;
791 	struct kmemleak_object *object;
792 	struct kmemleak_scan_area *area;
793 
794 	object = find_and_get_object(ptr, 1);
795 	if (!object) {
796 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
797 			      ptr);
798 		return;
799 	}
800 
801 	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
802 	if (!area) {
803 		pr_warn("Cannot allocate a scan area\n");
804 		goto out;
805 	}
806 
807 	spin_lock_irqsave(&object->lock, flags);
808 	if (size == SIZE_MAX) {
809 		size = object->pointer + object->size - ptr;
810 	} else if (ptr + size > object->pointer + object->size) {
811 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
812 		dump_object_info(object);
813 		kmem_cache_free(scan_area_cache, area);
814 		goto out_unlock;
815 	}
816 
817 	INIT_HLIST_NODE(&area->node);
818 	area->start = ptr;
819 	area->size = size;
820 
821 	hlist_add_head(&area->node, &object->area_list);
822 out_unlock:
823 	spin_unlock_irqrestore(&object->lock, flags);
824 out:
825 	put_object(object);
826 }
827 
828 /*
829  * Any surplus references (object already gray) to 'ptr' are passed to
830  * 'excess_ref'. This is used in the vmalloc() case where a pointer to
831  * vm_struct may be used as an alternative reference to the vmalloc'ed object
832  * (see free_thread_stack()).
833  */
834 static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
835 {
836 	unsigned long flags;
837 	struct kmemleak_object *object;
838 
839 	object = find_and_get_object(ptr, 0);
840 	if (!object) {
841 		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
842 			      ptr);
843 		return;
844 	}
845 
846 	spin_lock_irqsave(&object->lock, flags);
847 	object->excess_ref = excess_ref;
848 	spin_unlock_irqrestore(&object->lock, flags);
849 	put_object(object);
850 }
851 
852 /*
853  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
854  * pointer. Such an object will not be scanned by kmemleak but references to it
855  * are searched.
856  */
857 static void object_no_scan(unsigned long ptr)
858 {
859 	unsigned long flags;
860 	struct kmemleak_object *object;
861 
862 	object = find_and_get_object(ptr, 0);
863 	if (!object) {
864 		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
865 		return;
866 	}
867 
868 	spin_lock_irqsave(&object->lock, flags);
869 	object->flags |= OBJECT_NO_SCAN;
870 	spin_unlock_irqrestore(&object->lock, flags);
871 	put_object(object);
872 }
873 
874 /*
875  * Log an early kmemleak_* call to the early_log buffer. These calls will be
876  * processed later once kmemleak is fully initialized.
877  */
878 static void __init log_early(int op_type, const void *ptr, size_t size,
879 			     int min_count)
880 {
881 	unsigned long flags;
882 	struct early_log *log;
883 
884 	if (kmemleak_error) {
885 		/* kmemleak stopped recording, just count the requests */
886 		crt_early_log++;
887 		return;
888 	}
889 
890 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
891 		crt_early_log++;
892 		kmemleak_disable();
893 		return;
894 	}
895 
896 	/*
897 	 * There is no need for locking since the kernel is still in UP mode
898 	 * at this stage. Disabling the IRQs is enough.
899 	 */
900 	local_irq_save(flags);
901 	log = &early_log[crt_early_log];
902 	log->op_type = op_type;
903 	log->ptr = ptr;
904 	log->size = size;
905 	log->min_count = min_count;
906 	log->trace_len = __save_stack_trace(log->trace);
907 	crt_early_log++;
908 	local_irq_restore(flags);
909 }
910 
911 /*
912  * Log an early allocated block and populate the stack trace.
913  */
914 static void early_alloc(struct early_log *log)
915 {
916 	struct kmemleak_object *object;
917 	unsigned long flags;
918 	int i;
919 
920 	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
921 		return;
922 
923 	/*
924 	 * RCU locking needed to ensure object is not freed via put_object().
925 	 */
926 	rcu_read_lock();
927 	object = create_object((unsigned long)log->ptr, log->size,
928 			       log->min_count, GFP_ATOMIC);
929 	if (!object)
930 		goto out;
931 	spin_lock_irqsave(&object->lock, flags);
932 	for (i = 0; i < log->trace_len; i++)
933 		object->trace[i] = log->trace[i];
934 	object->trace_len = log->trace_len;
935 	spin_unlock_irqrestore(&object->lock, flags);
936 out:
937 	rcu_read_unlock();
938 }
939 
940 /*
941  * Log an early allocated percpu block and populate the stack trace.
942  */
943 static void early_alloc_percpu(struct early_log *log)
944 {
945 	unsigned int cpu;
946 	const void __percpu *ptr = log->ptr;
947 
948 	for_each_possible_cpu(cpu) {
949 		log->ptr = per_cpu_ptr(ptr, cpu);
950 		early_alloc(log);
951 	}
952 }
953 
954 /**
955  * kmemleak_alloc - register a newly allocated object
956  * @ptr:	pointer to beginning of the object
957  * @size:	size of the object
958  * @min_count:	minimum number of references to this object. If during memory
959  *		scanning a number of references less than @min_count is found,
960  *		the object is reported as a memory leak. If @min_count is 0,
961  *		the object is never reported as a leak. If @min_count is -1,
962  *		the object is ignored (not scanned and not reported as a leak)
963  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
964  *
965  * This function is called from the kernel allocators when a new object
966  * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
967  */
968 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
969 			  gfp_t gfp)
970 {
971 	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
972 
973 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
974 		create_object((unsigned long)ptr, size, min_count, gfp);
975 	else if (kmemleak_early_log)
976 		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
977 }
978 EXPORT_SYMBOL_GPL(kmemleak_alloc);
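/*
 * Illustrative calls showing the @min_count semantics documented above:
 *
 *	kmemleak_alloc(ptr, size, 1, GFP_KERNEL);	reported as a leak if
 *							fewer than 1 reference
 *							is found during scanning
 *	kmemleak_alloc(ptr, size, 0, GFP_KERNEL);	scanned, never reported
 *	kmemleak_alloc(ptr, size, -1, GFP_KERNEL);	neither scanned nor
 *							reported
 */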
979 
980 /**
981  * kmemleak_alloc_percpu - register a newly allocated __percpu object
982  * @ptr:	__percpu pointer to beginning of the object
983  * @size:	size of the object
984  * @gfp:	flags used for kmemleak internal memory allocations
985  *
986  * This function is called from the kernel percpu allocator when a new object
987  * (memory block) is allocated (alloc_percpu).
988  */
989 void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
990 				 gfp_t gfp)
991 {
992 	unsigned int cpu;
993 
994 	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
995 
996 	/*
997 	 * Percpu allocations are only scanned and not reported as leaks
998 	 * (min_count is set to 0).
999 	 */
1000 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1001 		for_each_possible_cpu(cpu)
1002 			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
1003 				      size, 0, gfp);
1004 	else if (kmemleak_early_log)
1005 		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
1006 }
1007 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
1008 
1009 /**
1010  * kmemleak_vmalloc - register a newly vmalloc'ed object
1011  * @area:	pointer to vm_struct
1012  * @size:	size of the object
1013  * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
1014  *
1015  * This function is called from the vmalloc() kernel allocator when a new
1016  * object (memory block) is allocated.
1017  */
1018 void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
1019 {
1020 	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
1021 
1022 	/*
1023 	 * A min_count = 2 is needed because vm_struct contains a reference to
1024 	 * the virtual address of the vmalloc'ed block.
1025 	 */
1026 	if (kmemleak_enabled) {
1027 		create_object((unsigned long)area->addr, size, 2, gfp);
1028 		object_set_excess_ref((unsigned long)area,
1029 				      (unsigned long)area->addr);
1030 	} else if (kmemleak_early_log) {
1031 		log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
1032 		/* reusing early_log.size for storing area->addr */
1033 		log_early(KMEMLEAK_SET_EXCESS_REF,
1034 			  area, (unsigned long)area->addr, 0);
1035 	}
1036 }
1037 EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
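/*
 * Rough example of the mechanism (illustrative): for a vmalloc'ed kernel
 * stack, the long-lived reference may be a pointer to the vm_struct rather
 * than to area->addr itself. With the excess_ref set as above, once the
 * vm_struct object is gray, any further reference found to it is credited
 * to area->addr, so the vmalloc'ed block is not falsely reported as a leak
 * (see the free_thread_stack() note on object_set_excess_ref() above).
 */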
1038 
1039 /**
1040  * kmemleak_free - unregister a previously registered object
1041  * @ptr:	pointer to beginning of the object
1042  *
1043  * This function is called from the kernel allocators when an object (memory
1044  * block) is freed (kmem_cache_free, kfree, vfree etc.).
1045  */
1046 void __ref kmemleak_free(const void *ptr)
1047 {
1048 	pr_debug("%s(0x%p)\n", __func__, ptr);
1049 
1050 	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1051 		delete_object_full((unsigned long)ptr);
1052 	else if (kmemleak_early_log)
1053 		log_early(KMEMLEAK_FREE, ptr, 0, 0);
1054 }
1055 EXPORT_SYMBOL_GPL(kmemleak_free);
1056 
1057 /**
1058  * kmemleak_free_part - partially unregister a previously registered object
1059  * @ptr:	pointer to the beginning or inside the object. This also
1060  *		represents the start of the range to be freed
1061  * @size:	size to be unregistered
1062  *
1063  * This function is called when only a part of a memory block is freed
1064  * (usually from the bootmem allocator).
1065  */
1066 void __ref kmemleak_free_part(const void *ptr, size_t size)
1067 {
1068 	pr_debug("%s(0x%p)\n", __func__, ptr);
1069 
1070 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1071 		delete_object_part((unsigned long)ptr, size);
1072 	else if (kmemleak_early_log)
1073 		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
1074 }
1075 EXPORT_SYMBOL_GPL(kmemleak_free_part);
1076 
1077 /**
1078  * kmemleak_free_percpu - unregister a previously registered __percpu object
1079  * @ptr:	__percpu pointer to beginning of the object
1080  *
1081  * This function is called from the kernel percpu allocator when an object
1082  * (memory block) is freed (free_percpu).
1083  */
1084 void __ref kmemleak_free_percpu(const void __percpu *ptr)
1085 {
1086 	unsigned int cpu;
1087 
1088 	pr_debug("%s(0x%p)\n", __func__, ptr);
1089 
1090 	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1091 		for_each_possible_cpu(cpu)
1092 			delete_object_full((unsigned long)per_cpu_ptr(ptr,
1093 								      cpu));
1094 	else if (kmemleak_early_log)
1095 		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
1096 }
1097 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1098 
1099 /**
1100  * kmemleak_update_trace - update object allocation stack trace
1101  * @ptr:	pointer to beginning of the object
1102  *
1103  * Override the object allocation stack trace for cases where the actual
1104  * allocation place is not always useful.
1105  */
1106 void __ref kmemleak_update_trace(const void *ptr)
1107 {
1108 	struct kmemleak_object *object;
1109 	unsigned long flags;
1110 
1111 	pr_debug("%s(0x%p)\n", __func__, ptr);
1112 
1113 	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1114 		return;
1115 
1116 	object = find_and_get_object((unsigned long)ptr, 1);
1117 	if (!object) {
1118 #ifdef DEBUG
1119 		kmemleak_warn("Updating stack trace for unknown object at %p\n",
1120 			      ptr);
1121 #endif
1122 		return;
1123 	}
1124 
1125 	spin_lock_irqsave(&object->lock, flags);
1126 	object->trace_len = __save_stack_trace(object->trace);
1127 	spin_unlock_irqrestore(&object->lock, flags);
1128 
1129 	put_object(object);
1130 }
1131 EXPORT_SYMBOL(kmemleak_update_trace);
1132 
1133 /**
1134  * kmemleak_not_leak - mark an allocated object as false positive
1135  * @ptr:	pointer to beginning of the object
1136  *
1137  * Calling this function on an object will cause the memory block to no longer
1138  * be reported as a leak and always be scanned.
1139  */
1140 void __ref kmemleak_not_leak(const void *ptr)
1141 {
1142 	pr_debug("%s(0x%p)\n", __func__, ptr);
1143 
1144 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1145 		make_gray_object((unsigned long)ptr);
1146 	else if (kmemleak_early_log)
1147 		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
1148 }
1149 EXPORT_SYMBOL(kmemleak_not_leak);
1150 
1151 /**
1152  * kmemleak_ignore - ignore an allocated object
1153  * @ptr:	pointer to beginning of the object
1154  *
1155  * Calling this function on an object will cause the memory block to be
1156  * ignored (not scanned and not reported as a leak). This is usually done when
1157  * it is known that the corresponding block is not a leak and does not contain
1158  * any references to other allocated memory blocks.
1159  */
1160 void __ref kmemleak_ignore(const void *ptr)
1161 {
1162 	pr_debug("%s(0x%p)\n", __func__, ptr);
1163 
1164 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1165 		make_black_object((unsigned long)ptr);
1166 	else if (kmemleak_early_log)
1167 		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
1168 }
1169 EXPORT_SYMBOL(kmemleak_ignore);
1170 
1171 /**
1172  * kmemleak_scan_area - limit the range to be scanned in an allocated object
1173  * @ptr:	pointer to beginning or inside the object. This also
1174  *		represents the start of the scan area
1175  * @size:	size of the scan area
1176  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1177  *
1178  * This function is used when it is known that only certain parts of an object
1179  * contain references to other objects. Kmemleak will only scan these areas
1180  * reducing the number of false negatives.
1181  */
1182 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1183 {
1184 	pr_debug("%s(0x%p)\n", __func__, ptr);
1185 
1186 	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1187 		add_scan_area((unsigned long)ptr, size, gfp);
1188 	else if (kmemleak_early_log)
1189 		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
1190 }
1191 EXPORT_SYMBOL(kmemleak_scan_area);
1192 
1193 /**
1194  * kmemleak_no_scan - do not scan an allocated object
1195  * @ptr:	pointer to beginning of the object
1196  *
1197  * This function notifies kmemleak not to scan the given memory block. Useful
1198  * in situations where it is known that the given object does not contain any
1199  * references to other objects. Kmemleak will not scan such objects reducing
1200  * the number of false negatives.
1201  */
1202 void __ref kmemleak_no_scan(const void *ptr)
1203 {
1204 	pr_debug("%s(0x%p)\n", __func__, ptr);
1205 
1206 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1207 		object_no_scan((unsigned long)ptr);
1208 	else if (kmemleak_early_log)
1209 		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
1210 }
1211 EXPORT_SYMBOL(kmemleak_no_scan);
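/*
 * Illustrative driver-side sketch tying the annotations above together
 * (hypothetical structure and fields, not from this file):
 *
 *	struct foo *foo = kmalloc(sizeof(*foo), GFP_KERNEL);
 *
 *	kmemleak_not_leak(foo);		stashed in hardware: never reported,
 *					but still scanned for references
 *	kmemleak_ignore(foo);		contains no pointers: neither scanned
 *					nor reported
 *	kmemleak_no_scan(foo);		not scanned, but can still be
 *					reported as a leak
 *	kmemleak_scan_area(&foo->ptrs, sizeof(foo->ptrs), GFP_KERNEL);
 *					scan only the pointer-bearing part
 */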
1212 
1213 /**
1214  * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1215  *			 address argument
1216  * @phys:	physical address of the object
1217  * @size:	size of the object
1218  * @min_count:	minimum number of references to this object.
1219  *              See kmemleak_alloc()
1220  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1221  */
1222 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
1223 			       gfp_t gfp)
1224 {
1225 	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1226 		kmemleak_alloc(__va(phys), size, min_count, gfp);
1227 }
1228 EXPORT_SYMBOL(kmemleak_alloc_phys);
1229 
1230 /**
1231  * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1232  *			     physical address argument
1233  * @phys:	physical address of the beginning or inside an object. This
1234  *		also represents the start of the range to be freed
1235  * @size:	size to be unregistered
1236  */
1237 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1238 {
1239 	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1240 		kmemleak_free_part(__va(phys), size);
1241 }
1242 EXPORT_SYMBOL(kmemleak_free_part_phys);
1243 
1244 /**
1245  * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
1246  *			    address argument
1247  * @phys:	physical address of the object
1248  */
1249 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
1250 {
1251 	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1252 		kmemleak_not_leak(__va(phys));
1253 }
1254 EXPORT_SYMBOL(kmemleak_not_leak_phys);
1255 
1256 /**
1257  * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1258  *			  address argument
1259  * @phys:	physical address of the object
1260  */
1261 void __ref kmemleak_ignore_phys(phys_addr_t phys)
1262 {
1263 	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1264 		kmemleak_ignore(__va(phys));
1265 }
1266 EXPORT_SYMBOL(kmemleak_ignore_phys);
1267 
1268 /*
1269  * Update an object's checksum and return true if it was modified.
1270  */
1271 static bool update_checksum(struct kmemleak_object *object)
1272 {
1273 	u32 old_csum = object->checksum;
1274 
1275 	kasan_disable_current();
1276 	object->checksum = crc32(0, (void *)object->pointer, object->size);
1277 	kasan_enable_current();
1278 
1279 	return object->checksum != old_csum;
1280 }
1281 
1282 /*
1283  * Update an object's references. object->lock must be held by the caller.
1284  */
1285 static void update_refs(struct kmemleak_object *object)
1286 {
1287 	if (!color_white(object)) {
1288 		/* non-orphan, ignored or new */
1289 		return;
1290 	}
1291 
1292 	/*
1293 	 * Increase the object's reference count (number of pointers to the
1294 	 * memory block). If this count reaches the required minimum, the
1295 	 * object's color will become gray and it will be added to the
1296 	 * gray_list.
1297 	 */
1298 	object->count++;
1299 	if (color_gray(object)) {
1300 		/* put_object() called when removing from gray_list */
1301 		WARN_ON(!get_object(object));
1302 		list_add_tail(&object->gray_list, &gray_list);
1303 	}
1304 }
1305 
1306 /*
1307  * Memory scanning is a long process and it needs to be interruptible. This
1308  * function checks whether such interrupt condition occurred.
1309  */
1310 static int scan_should_stop(void)
1311 {
1312 	if (!kmemleak_enabled)
1313 		return 1;
1314 
1315 	/*
1316 	 * This function may be called from either process or kthread context,
1317 	 * hence the need to check for both stop conditions.
1318 	 */
1319 	if (current->mm)
1320 		return signal_pending(current);
1321 	else
1322 		return kthread_should_stop();
1323 
1324 	return 0;
1325 }
1326 
1327 /*
1328  * Scan a memory block (exclusive range) for valid pointers and add those
1329  * found to the gray list.
1330  */
1331 static void scan_block(void *_start, void *_end,
1332 		       struct kmemleak_object *scanned)
1333 {
1334 	unsigned long *ptr;
1335 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1336 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1337 	unsigned long flags;
1338 	unsigned long untagged_ptr;
1339 
1340 	read_lock_irqsave(&kmemleak_lock, flags);
1341 	for (ptr = start; ptr < end; ptr++) {
1342 		struct kmemleak_object *object;
1343 		unsigned long pointer;
1344 		unsigned long excess_ref;
1345 
1346 		if (scan_should_stop())
1347 			break;
1348 
1349 		kasan_disable_current();
1350 		pointer = *ptr;
1351 		kasan_enable_current();
1352 
1353 		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1354 		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1355 			continue;
1356 
1357 		/*
1358 		 * No need for get_object() here since we hold kmemleak_lock.
1359 		 * object->use_count cannot be dropped to 0 while the object
1360 		 * is still present in object_tree_root and object_list
1361 		 * (with updates protected by kmemleak_lock).
1362 		 */
1363 		object = lookup_object(pointer, 1);
1364 		if (!object)
1365 			continue;
1366 		if (object == scanned)
1367 			/* self referenced, ignore */
1368 			continue;
1369 
1370 		/*
1371 		 * Avoid the lockdep recursive warning on object->lock being
1372 		 * previously acquired in scan_object(). These locks are
1373 		 * enclosed by scan_mutex.
1374 		 */
1375 		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1376 		/* only pass surplus references (object already gray) */
1377 		if (color_gray(object)) {
1378 			excess_ref = object->excess_ref;
1379 			/* no need for update_refs() if object already gray */
1380 		} else {
1381 			excess_ref = 0;
1382 			update_refs(object);
1383 		}
1384 		spin_unlock(&object->lock);
1385 
1386 		if (excess_ref) {
1387 			object = lookup_object(excess_ref, 0);
1388 			if (!object)
1389 				continue;
1390 			if (object == scanned)
1391 				/* circular reference, ignore */
1392 				continue;
1393 			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1394 			update_refs(object);
1395 			spin_unlock(&object->lock);
1396 		}
1397 	}
1398 	read_unlock_irqrestore(&kmemleak_lock, flags);
1399 }
1400 
1401 /*
1402  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1403  */
1404 static void scan_large_block(void *start, void *end)
1405 {
1406 	void *next;
1407 
1408 	while (start < end) {
1409 		next = min(start + MAX_SCAN_SIZE, end);
1410 		scan_block(start, next, NULL);
1411 		start = next;
1412 		cond_resched();
1413 	}
1414 }
1415 
1416 /*
1417  * Scan a memory block corresponding to a kmemleak_object. The caller must
1418  * ensure that object->use_count >= 1.
1419  */
1420 static void scan_object(struct kmemleak_object *object)
1421 {
1422 	struct kmemleak_scan_area *area;
1423 	unsigned long flags;
1424 
1425 	/*
1426 	 * Once the object->lock is acquired, the corresponding memory block
1427 	 * cannot be freed (the same lock is acquired in delete_object).
1428 	 */
1429 	spin_lock_irqsave(&object->lock, flags);
1430 	if (object->flags & OBJECT_NO_SCAN)
1431 		goto out;
1432 	if (!(object->flags & OBJECT_ALLOCATED))
1433 		/* already freed object */
1434 		goto out;
1435 	if (hlist_empty(&object->area_list)) {
1436 		void *start = (void *)object->pointer;
1437 		void *end = (void *)(object->pointer + object->size);
1438 		void *next;
1439 
1440 		do {
1441 			next = min(start + MAX_SCAN_SIZE, end);
1442 			scan_block(start, next, object);
1443 
1444 			start = next;
1445 			if (start >= end)
1446 				break;
1447 
1448 			spin_unlock_irqrestore(&object->lock, flags);
1449 			cond_resched();
1450 			spin_lock_irqsave(&object->lock, flags);
1451 		} while (object->flags & OBJECT_ALLOCATED);
1452 	} else
1453 		hlist_for_each_entry(area, &object->area_list, node)
1454 			scan_block((void *)area->start,
1455 				   (void *)(area->start + area->size),
1456 				   object);
1457 out:
1458 	spin_unlock_irqrestore(&object->lock, flags);
1459 }
1460 
1461 /*
1462  * Scan the objects already referenced (gray objects). More objects will be
1463  * referenced and, if there are no memory leaks, all the objects are scanned.
1464  */
1465 static void scan_gray_list(void)
1466 {
1467 	struct kmemleak_object *object, *tmp;
1468 
1469 	/*
1470 	 * The list traversal is safe for both tail additions and removals
1471 	 * from inside the loop. The kmemleak objects cannot be freed from
1472 	 * outside the loop because their use_count was incremented.
1473 	 */
1474 	object = list_entry(gray_list.next, typeof(*object), gray_list);
1475 	while (&object->gray_list != &gray_list) {
1476 		cond_resched();
1477 
1478 		/* may add new objects to the list */
1479 		if (!scan_should_stop())
1480 			scan_object(object);
1481 
1482 		tmp = list_entry(object->gray_list.next, typeof(*object),
1483 				 gray_list);
1484 
1485 		/* remove the object from the list and release it */
1486 		list_del(&object->gray_list);
1487 		put_object(object);
1488 
1489 		object = tmp;
1490 	}
1491 	WARN_ON(!list_empty(&gray_list));
1492 }
1493 
1494 /*
1495  * Scan data sections and all the referenced memory blocks allocated via the
1496  * kernel's standard allocators. This function must be called with the
1497  * scan_mutex held.
1498  */
1499 static void kmemleak_scan(void)
1500 {
1501 	unsigned long flags;
1502 	struct kmemleak_object *object;
1503 	int i;
1504 	int new_leaks = 0;
1505 
1506 	jiffies_last_scan = jiffies;
1507 
1508 	/* prepare the kmemleak_object's */
1509 	rcu_read_lock();
1510 	list_for_each_entry_rcu(object, &object_list, object_list) {
1511 		spin_lock_irqsave(&object->lock, flags);
1512 #ifdef DEBUG
1513 		/*
1514 		 * With a few exceptions there should be a maximum of
1515 		 * 1 reference to any object at this point.
1516 		 */
1517 		if (atomic_read(&object->use_count) > 1) {
1518 			pr_debug("object->use_count = %d\n",
1519 				 atomic_read(&object->use_count));
1520 			dump_object_info(object);
1521 		}
1522 #endif
1523 		/* reset the reference count (whiten the object) */
1524 		object->count = 0;
1525 		if (color_gray(object) && get_object(object))
1526 			list_add_tail(&object->gray_list, &gray_list);
1527 
1528 		spin_unlock_irqrestore(&object->lock, flags);
1529 	}
1530 	rcu_read_unlock();
1531 
1532 	/* data/bss scanning */
1533 	scan_large_block(_sdata, _edata);
1534 	scan_large_block(__bss_start, __bss_stop);
1535 	scan_large_block(__start_ro_after_init, __end_ro_after_init);
1536 
1537 #ifdef CONFIG_SMP
1538 	/* per-cpu sections scanning */
1539 	for_each_possible_cpu(i)
1540 		scan_large_block(__per_cpu_start + per_cpu_offset(i),
1541 				 __per_cpu_end + per_cpu_offset(i));
1542 #endif
1543 
1544 	/*
1545 	 * Struct page scanning for each node.
1546 	 */
1547 	get_online_mems();
1548 	for_each_online_node(i) {
1549 		unsigned long start_pfn = node_start_pfn(i);
1550 		unsigned long end_pfn = node_end_pfn(i);
1551 		unsigned long pfn;
1552 
1553 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1554 			struct page *page = pfn_to_online_page(pfn);
1555 
1556 			if (!page)
1557 				continue;
1558 
1559 			/* only scan pages belonging to this node */
1560 			if (page_to_nid(page) != i)
1561 				continue;
1562 			/* only scan if page is in use */
1563 			if (page_count(page) == 0)
1564 				continue;
1565 			scan_block(page, page + 1, NULL);
1566 			if (!(pfn & 63))
1567 				cond_resched();
1568 		}
1569 	}
1570 	put_online_mems();
1571 
1572 	/*
1573 	 * Scanning the task stacks (may introduce false negatives).
1574 	 */
1575 	if (kmemleak_stack_scan) {
1576 		struct task_struct *p, *g;
1577 
1578 		read_lock(&tasklist_lock);
1579 		do_each_thread(g, p) {
1580 			void *stack = try_get_task_stack(p);
1581 			if (stack) {
1582 				scan_block(stack, stack + THREAD_SIZE, NULL);
1583 				put_task_stack(p);
1584 			}
1585 		} while_each_thread(g, p);
1586 		read_unlock(&tasklist_lock);
1587 	}
1588 
1589 	/*
1590 	 * Scan the objects already referenced from the sections scanned
1591 	 * above.
1592 	 */
1593 	scan_gray_list();
1594 
1595 	/*
1596 	 * Check for new or unreferenced objects modified since the previous
1597 	 * scan and color them gray until the next scan.
1598 	 */
1599 	rcu_read_lock();
1600 	list_for_each_entry_rcu(object, &object_list, object_list) {
1601 		spin_lock_irqsave(&object->lock, flags);
1602 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1603 		    && update_checksum(object) && get_object(object)) {
1604 			/* color it gray temporarily */
1605 			object->count = object->min_count;
1606 			list_add_tail(&object->gray_list, &gray_list);
1607 		}
1608 		spin_unlock_irqrestore(&object->lock, flags);
1609 	}
1610 	rcu_read_unlock();
1611 
1612 	/*
1613 	 * Re-scan the gray list for modified unreferenced objects.
1614 	 */
1615 	scan_gray_list();
1616 
1617 	/*
1618 	 * If scanning was stopped do not report any new unreferenced objects.
1619 	 */
1620 	if (scan_should_stop())
1621 		return;
1622 
1623 	/*
1624 	 * Scanning result reporting.
1625 	 */
1626 	rcu_read_lock();
1627 	list_for_each_entry_rcu(object, &object_list, object_list) {
1628 		spin_lock_irqsave(&object->lock, flags);
1629 		if (unreferenced_object(object) &&
1630 		    !(object->flags & OBJECT_REPORTED)) {
1631 			object->flags |= OBJECT_REPORTED;
1632 
1633 			if (kmemleak_verbose)
1634 				print_unreferenced(NULL, object);
1635 
1636 			new_leaks++;
1637 		}
1638 		spin_unlock_irqrestore(&object->lock, flags);
1639 	}
1640 	rcu_read_unlock();
1641 
1642 	if (new_leaks) {
1643 		kmemleak_found_leaks = true;
1644 
1645 		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1646 			new_leaks);
1647 	}
1648 
1649 }
1650 
1651 /*
1652  * Thread function performing automatic memory scanning. Unreferenced objects
1653  * at the end of a memory scan are reported but only the first time.
1654  */
1655 static int kmemleak_scan_thread(void *arg)
1656 {
1657 	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1658 
1659 	pr_info("Automatic memory scanning thread started\n");
1660 	set_user_nice(current, 10);
1661 
1662 	/*
1663 	 * Wait before the first scan to allow the system to fully initialize.
1664 	 */
1665 	if (first_run) {
1666 		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1667 		first_run = 0;
1668 		while (timeout && !kthread_should_stop())
1669 			timeout = schedule_timeout_interruptible(timeout);
1670 	}
1671 
1672 	while (!kthread_should_stop()) {
1673 		signed long timeout = jiffies_scan_wait;
1674 
1675 		mutex_lock(&scan_mutex);
1676 		kmemleak_scan();
1677 		mutex_unlock(&scan_mutex);
1678 
1679 		/* wait before the next scan */
1680 		while (timeout && !kthread_should_stop())
1681 			timeout = schedule_timeout_interruptible(timeout);
1682 	}
1683 
1684 	pr_info("Automatic memory scanning thread ended\n");
1685 
1686 	return 0;
1687 }
1688 
1689 /*
1690  * Start the automatic memory scanning thread. This function must be called
1691  * with the scan_mutex held.
1692  */
1693 static void start_scan_thread(void)
1694 {
1695 	if (scan_thread)
1696 		return;
1697 	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1698 	if (IS_ERR(scan_thread)) {
1699 		pr_warn("Failed to create the scan thread\n");
1700 		scan_thread = NULL;
1701 	}
1702 }
1703 
1704 /*
1705  * Stop the automatic memory scanning thread.
1706  */
1707 static void stop_scan_thread(void)
1708 {
1709 	if (scan_thread) {
1710 		kthread_stop(scan_thread);
1711 		scan_thread = NULL;
1712 	}
1713 }
1714 
1715 /*
1716  * Iterate over the object_list and return the first valid object at or after
1717  * the required position with its use_count incremented. The scan_mutex is
1718  * acquired here and released only in kmemleak_seq_stop().
1719  */
1720 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1721 {
1722 	struct kmemleak_object *object;
1723 	loff_t n = *pos;
1724 	int err;
1725 
1726 	err = mutex_lock_interruptible(&scan_mutex);
1727 	if (err < 0)
1728 		return ERR_PTR(err);
1729 
1730 	rcu_read_lock();
1731 	list_for_each_entry_rcu(object, &object_list, object_list) {
1732 		if (n-- > 0)
1733 			continue;
1734 		if (get_object(object))
1735 			goto out;
1736 	}
1737 	object = NULL;
1738 out:
1739 	return object;
1740 }
1741 
1742 /*
1743  * Return the next object in the object_list. The function decrements the
1744  * use_count of the previous object and increases that of the next one.
1745  */
1746 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1747 {
1748 	struct kmemleak_object *prev_obj = v;
1749 	struct kmemleak_object *next_obj = NULL;
1750 	struct kmemleak_object *obj = prev_obj;
1751 
1752 	++(*pos);
1753 
1754 	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1755 		if (get_object(obj)) {
1756 			next_obj = obj;
1757 			break;
1758 		}
1759 	}
1760 
1761 	put_object(prev_obj);
1762 	return next_obj;
1763 }
1764 
1765 /*
1766  * Decrement the use_count of the last object required, if any.
1767  */
1768 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1769 {
1770 	if (!IS_ERR(v)) {
1771 		/*
1772 		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1773 		 * waiting was interrupted, so only release it if !IS_ERR.
1774 		 */
1775 		rcu_read_unlock();
1776 		mutex_unlock(&scan_mutex);
1777 		if (v)
1778 			put_object(v);
1779 	}
1780 }
1781 
1782 /*
1783  * Print the information for an unreferenced object to the seq file.
1784  */
1785 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1786 {
1787 	struct kmemleak_object *object = v;
1788 	unsigned long flags;
1789 
1790 	spin_lock_irqsave(&object->lock, flags);
1791 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1792 		print_unreferenced(seq, object);
1793 	spin_unlock_irqrestore(&object->lock, flags);
1794 	return 0;
1795 }
1796 
1797 static const struct seq_operations kmemleak_seq_ops = {
1798 	.start = kmemleak_seq_start,
1799 	.next  = kmemleak_seq_next,
1800 	.stop  = kmemleak_seq_stop,
1801 	.show  = kmemleak_seq_show,
1802 };
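
/*
 * The seq_file core drives the operations above roughly as
 *
 *   start(pos) -> show(v) -> next(v) -> show(v) -> ... -> stop(v)
 *
 * so each use_count taken in start()/next() is dropped again in
 * next()/stop(), and the scan_mutex and RCU read lock acquired in start()
 * are released in stop().
 */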
1803 
1804 static int kmemleak_open(struct inode *inode, struct file *file)
1805 {
1806 	return seq_open(file, &kmemleak_seq_ops);
1807 }
1808 
1809 static int dump_str_object_info(const char *str)
1810 {
1811 	unsigned long flags;
1812 	struct kmemleak_object *object;
1813 	unsigned long addr;
1814 
1815 	if (kstrtoul(str, 0, &addr))
1816 		return -EINVAL;
1817 	object = find_and_get_object(addr, 0);
1818 	if (!object) {
1819 		pr_info("Unknown object at 0x%08lx\n", addr);
1820 		return -EINVAL;
1821 	}
1822 
1823 	spin_lock_irqsave(&object->lock, flags);
1824 	dump_object_info(object);
1825 	spin_unlock_irqrestore(&object->lock, flags);
1826 
1827 	put_object(object);
1828 	return 0;
1829 }
1830 
1831 /*
1832  * We use grey instead of black to ensure we can do future scans on the same
1833  * objects. If we did not do future scans, these black objects could later
1834  * contain references to newly allocated objects and we would end up with
1835  * false positives.
1836  */
1837 static void kmemleak_clear(void)
1838 {
1839 	struct kmemleak_object *object;
1840 	unsigned long flags;
1841 
1842 	rcu_read_lock();
1843 	list_for_each_entry_rcu(object, &object_list, object_list) {
1844 		spin_lock_irqsave(&object->lock, flags);
1845 		if ((object->flags & OBJECT_REPORTED) &&
1846 		    unreferenced_object(object))
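			/* grey (min_count 0): still scanned, but no longer reported */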
1847 			__paint_it(object, KMEMLEAK_GREY);
1848 		spin_unlock_irqrestore(&object->lock, flags);
1849 	}
1850 	rcu_read_unlock();
1851 
1852 	kmemleak_found_leaks = false;
1853 }
1854 
1855 static void __kmemleak_do_cleanup(void);
1856 
1857 /*
1858  * File write operation to configure kmemleak at run-time. The following
1859  * commands can be written to the /sys/kernel/debug/kmemleak file:
1860  *   off	- disable kmemleak (irreversible)
1861  *   stack=on	- enable task stack scanning
1862  *   stack=off	- disable task stack scanning
1863  *   scan=on	- start the automatic memory scanning thread
1864  *   scan=off	- stop the automatic memory scanning thread
1865  *   scan=...	- set the automatic memory scanning period in seconds (0 to
1866  *		  disable it)
1867  *   scan	- trigger a memory scan
1868  *   clear	- mark all current reported unreferenced kmemleak objects as
1869  *		  grey to ignore printing them, or free all kmemleak objects
1870  *		  if kmemleak has been disabled.
1871  *   dump=...	- dump information about the object found at the given address
1872  */
1873 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1874 			      size_t size, loff_t *ppos)
1875 {
1876 	char buf[64];
1877 	int buf_size;
1878 	int ret;
1879 
1880 	buf_size = min(size, (sizeof(buf) - 1));
1881 	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1882 		return -EFAULT;
1883 	buf[buf_size] = 0;
1884 
1885 	ret = mutex_lock_interruptible(&scan_mutex);
1886 	if (ret < 0)
1887 		return ret;
1888 
1889 	if (strncmp(buf, "clear", 5) == 0) {
1890 		if (kmemleak_enabled)
1891 			kmemleak_clear();
1892 		else
1893 			__kmemleak_do_cleanup();
1894 		goto out;
1895 	}
1896 
1897 	if (!kmemleak_enabled) {
1898 		ret = -EBUSY;
1899 		goto out;
1900 	}
1901 
1902 	if (strncmp(buf, "off", 3) == 0)
1903 		kmemleak_disable();
1904 	else if (strncmp(buf, "stack=on", 8) == 0)
1905 		kmemleak_stack_scan = 1;
1906 	else if (strncmp(buf, "stack=off", 9) == 0)
1907 		kmemleak_stack_scan = 0;
1908 	else if (strncmp(buf, "scan=on", 7) == 0)
1909 		start_scan_thread();
1910 	else if (strncmp(buf, "scan=off", 8) == 0)
1911 		stop_scan_thread();
1912 	else if (strncmp(buf, "scan=", 5) == 0) {
1913 		unsigned long secs;
1914 
1915 		ret = kstrtoul(buf + 5, 0, &secs);
1916 		if (ret < 0)
1917 			goto out;
1918 		stop_scan_thread();
1919 		if (secs) {
1920 			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1921 			start_scan_thread();
1922 		}
1923 	} else if (strncmp(buf, "scan", 4) == 0)
1924 		kmemleak_scan();
1925 	else if (strncmp(buf, "dump=", 5) == 0)
1926 		ret = dump_str_object_info(buf + 5);
1927 	else
1928 		ret = -EINVAL;
1929 
1930 out:
1931 	mutex_unlock(&scan_mutex);
1932 	if (ret < 0)
1933 		return ret;
1934 
1935 	/* ignore the rest of the buffer, only one command at a time */
1936 	*ppos += size;
1937 	return size;
1938 }
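
/*
 * For illustration, typical interactions with the file above (assuming
 * debugfs is mounted at /sys/kernel/debug; the dump address below is only
 * an example):
 *
 *   echo scan > /sys/kernel/debug/kmemleak	# trigger an immediate scan
 *   echo scan=600 > /sys/kernel/debug/kmemleak	# scan every 600 seconds
 *   echo scan=0 > /sys/kernel/debug/kmemleak	# stop the periodic scan
 *   echo clear > /sys/kernel/debug/kmemleak	# grey out current reports
 *   echo dump=0xffff880017c8e000 > /sys/kernel/debug/kmemleak
 *   cat /sys/kernel/debug/kmemleak		# read the leak reports
 */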
1939 
1940 static const struct file_operations kmemleak_fops = {
1941 	.owner		= THIS_MODULE,
1942 	.open		= kmemleak_open,
1943 	.read		= seq_read,
1944 	.write		= kmemleak_write,
1945 	.llseek		= seq_lseek,
1946 	.release	= seq_release,
1947 };
1948 
1949 static void __kmemleak_do_cleanup(void)
1950 {
1951 	struct kmemleak_object *object;
1952 
1953 	rcu_read_lock();
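	/* removal is done with list_del_rcu(), so deleting while iterating is safe */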
1954 	list_for_each_entry_rcu(object, &object_list, object_list)
1955 		delete_object_full(object->pointer);
1956 	rcu_read_unlock();
1957 }
1958 
1959 /*
1960  * Stop the memory scanning thread and free the kmemleak internal objects if
1961  * no memory leaks have been found (otherwise, kmemleak may still have some
1962  * useful information on memory leaks).
1963  */
1964 static void kmemleak_do_cleanup(struct work_struct *work)
1965 {
1966 	stop_scan_thread();
1967 
1968 	mutex_lock(&scan_mutex);
1969 	/*
1970 	 * Once kmemleak_scan() is guaranteed to have stopped, it is safe to no
1971 	 * longer track object freeing. Ordering of the scan thread stopping and
1972 	 * the memory accesses below is guaranteed by the kthread_stop()
1973 	 * function.
1974 	 */
1975 	kmemleak_free_enabled = 0;
1976 	mutex_unlock(&scan_mutex);
1977 
1978 	if (!kmemleak_found_leaks)
1979 		__kmemleak_do_cleanup();
1980 	else
1981 		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
1982 }
1983 
1984 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1985 
1986 /*
1987  * Disable kmemleak. No memory allocation/freeing will be traced once this
1988  * function is called. Disabling kmemleak is an irreversible operation.
1989  */
1990 static void kmemleak_disable(void)
1991 {
1992 	/* atomically check whether it was already invoked */
1993 	if (cmpxchg(&kmemleak_error, 0, 1))
1994 		return;
1995 
1996 	/* stop any memory operation tracing */
1997 	kmemleak_enabled = 0;
1998 
1999 	/* check whether it is too early for a kernel thread */
2000 	if (kmemleak_initialized)
2001 		schedule_work(&cleanup_work);
2002 	else
2003 		kmemleak_free_enabled = 0;
2004 
2005 	pr_info("Kernel memory leak detector disabled\n");
2006 }
2007 
2008 /*
2009  * Allow boot-time kmemleak disabling (enabled by default).
2010  */
2011 static int __init kmemleak_boot_config(char *str)
2012 {
2013 	if (!str)
2014 		return -EINVAL;
2015 	if (strcmp(str, "off") == 0)
2016 		kmemleak_disable();
2017 	else if (strcmp(str, "on") == 0)
2018 		kmemleak_skip_disable = 1;
2019 	else
2020 		return -EINVAL;
2021 	return 0;
2022 }
2023 early_param("kmemleak", kmemleak_boot_config);
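
/*
 * For example, booting with "kmemleak=off" disables the detector, while
 * with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF selected, "kmemleak=on" must be
 * passed to keep it enabled.
 */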
2024 
2025 static void __init print_log_trace(struct early_log *log)
2026 {
2027 	struct stack_trace trace;
2028 
2029 	trace.nr_entries = log->trace_len;
2030 	trace.entries = log->trace;
2031 
2032 	pr_notice("Early log backtrace:\n");
2033 	print_stack_trace(&trace, 2);
2034 }
2035 
2036 /*
2037  * Kmemleak initialization.
2038  */
2039 void __init kmemleak_init(void)
2040 {
2041 	int i;
2042 	unsigned long flags;
2043 
2044 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2045 	if (!kmemleak_skip_disable) {
2046 		kmemleak_early_log = 0;
2047 		kmemleak_disable();
2048 		return;
2049 	}
2050 #endif
2051 
2052 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2053 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
2054 
2055 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2056 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2057 
2058 	if (crt_early_log > ARRAY_SIZE(early_log))
2059 		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
2060 			crt_early_log);
2061 
2062 	/* the kernel is still in UP mode, so disabling the IRQs is enough */
2063 	local_irq_save(flags);
2064 	kmemleak_early_log = 0;
2065 	if (kmemleak_error) {
2066 		local_irq_restore(flags);
2067 		return;
2068 	} else {
2069 		kmemleak_enabled = 1;
2070 		kmemleak_free_enabled = 1;
2071 	}
2072 	local_irq_restore(flags);
2073 
2074 	/*
2075 	 * This is the point where tracking allocations is safe. Automatic
2076 	 * scanning is started during the late initcall. Add the early logged
2077 	 * callbacks to the kmemleak infrastructure.
2078 	 */
2079 	for (i = 0; i < crt_early_log; i++) {
2080 		struct early_log *log = &early_log[i];
2081 
2082 		switch (log->op_type) {
2083 		case KMEMLEAK_ALLOC:
2084 			early_alloc(log);
2085 			break;
2086 		case KMEMLEAK_ALLOC_PERCPU:
2087 			early_alloc_percpu(log);
2088 			break;
2089 		case KMEMLEAK_FREE:
2090 			kmemleak_free(log->ptr);
2091 			break;
2092 		case KMEMLEAK_FREE_PART:
2093 			kmemleak_free_part(log->ptr, log->size);
2094 			break;
2095 		case KMEMLEAK_FREE_PERCPU:
2096 			kmemleak_free_percpu(log->ptr);
2097 			break;
2098 		case KMEMLEAK_NOT_LEAK:
2099 			kmemleak_not_leak(log->ptr);
2100 			break;
2101 		case KMEMLEAK_IGNORE:
2102 			kmemleak_ignore(log->ptr);
2103 			break;
2104 		case KMEMLEAK_SCAN_AREA:
2105 			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
2106 			break;
2107 		case KMEMLEAK_NO_SCAN:
2108 			kmemleak_no_scan(log->ptr);
2109 			break;
2110 		case KMEMLEAK_SET_EXCESS_REF:
2111 			object_set_excess_ref((unsigned long)log->ptr,
2112 					      log->excess_ref);
2113 			break;
2114 		default:
2115 			kmemleak_warn("Unknown early log operation: %d\n",
2116 				      log->op_type);
2117 		}
2118 
2119 		if (kmemleak_warning) {
2120 			print_log_trace(log);
2121 			kmemleak_warning = 0;
2122 		}
2123 	}
2124 }
2125 
2126 /*
2127  * Late initialization function.
2128  */
2129 static int __init kmemleak_late_init(void)
2130 {
2131 	struct dentry *dentry;
2132 
2133 	kmemleak_initialized = 1;
2134 
2135 	dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
2136 				     &kmemleak_fops);
2137 	if (!dentry)
2138 		pr_warn("Failed to create the debugfs kmemleak file\n");
2139 
2140 	if (kmemleak_error) {
2141 		/*
2142 		 * Some error occurred and kmemleak was disabled. There is a
2143 		 * small chance that kmemleak_disable() was called immediately
2144 		 * after setting kmemleak_initialized, in which case we may end
2145 		 * up with two clean-up threads, serialized by scan_mutex.
2146 		 */
2147 		schedule_work(&cleanup_work);
2148 		return -ENOMEM;
2149 	}
2150 
2151 	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2152 		mutex_lock(&scan_mutex);
2153 		start_scan_thread();
2154 		mutex_unlock(&scan_mutex);
2155 	}
2156 
2157 	pr_info("Kernel memory leak detector initialized\n");
2158 
2159 	return 0;
2160 }
2161 late_initcall(kmemleak_late_init);
2162