1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/kmemleak.c
4  *
5  * Copyright (C) 2008 ARM Limited
6  * Written by Catalin Marinas <catalin.marinas@arm.com>
7  *
8  * For more information on the algorithm and kmemleak usage, please see
9  * Documentation/dev-tools/kmemleak.rst.
10  *
11  * Notes on locking
12  * ----------------
13  *
14  * The following locks and mutexes are used by kmemleak:
15  *
16  * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
17  *   accesses to the object_tree_root (or object_phys_tree_root). The
18  *   object_list is the main list holding the metadata (struct kmemleak_object)
19  *   for the allocated memory blocks. The object_tree_root and object_phys_tree_root
20  *   are red-black trees used to look up metadata based on a pointer to the
21  *   corresponding memory block. The object_phys_tree_root is for objects
22  *   allocated with physical address. The kmemleak_object structures are
23  *   added to the object_list and object_tree_root (or object_phys_tree_root)
24  *   in the create_object() function called from the kmemleak_alloc() (or
25  *   kmemleak_alloc_phys()) callback and removed in delete_object() called from
26  *   the kmemleak_free() callback.
27  * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
28  *   Accesses to the metadata (e.g. count) are protected by this lock. Note
29  *   that some members of this structure may be protected by other means
30  *   (atomic or kmemleak_lock). This lock is also held when scanning the
31  *   corresponding memory block to avoid the kernel freeing it via the
32  *   kmemleak_free() callback. This is less heavyweight than holding a global
33  *   lock like kmemleak_lock during scanning.
34  * - scan_mutex (mutex): ensures that only one thread may scan the memory for
35  *   unreferenced objects at a time. The gray_list contains the objects which
36  *   are already referenced or marked as false positives and need to be
37  *   scanned. This list is only modified during a scanning episode when the
38  *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
39  *   Note that the kmemleak_object.use_count is incremented when an object is
40  *   added to the gray_list and therefore cannot be freed. This mutex also
41  *   prevents concurrent users of the "kmemleak" debugfs file and serializes
42  *   modifications to the memory scanning parameters, including the
43  *   scan_thread pointer.
44  *
45  * Locks and mutexes are acquired/nested in the following order:
46  *
47  *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
48  *
49  * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
50  * regions.
51  *
52  * The kmemleak_object structures have a use_count incremented or decremented
53  * using the get_object()/put_object() functions. When the use_count becomes
54  * 0, this count can no longer be incremented and put_object() schedules the
55  * kmemleak_object freeing via an RCU callback. All calls to the get_object()
56  * function must be protected by rcu_read_lock() to avoid accessing a freed
57  * structure.
58  */
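
/*
 * Illustrative sketch (annotation added, not in the original file): the
 * canonical lookup pattern implied by the lifetime rules above, as
 * implemented by __find_and_get_object() later in this file:
 *
 *	rcu_read_lock();
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 *	object = __lookup_object(ptr, alias, is_phys);
 *	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))	(use_count already 0)
 *		object = NULL;			(RCU freeing in flight)
 *	rcu_read_unlock();
 *	...
 *	put_object(object);			(may schedule the RCU free)
 */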
59 
60 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
61 
62 #include <linux/init.h>
63 #include <linux/kernel.h>
64 #include <linux/list.h>
65 #include <linux/sched/signal.h>
66 #include <linux/sched/task.h>
67 #include <linux/sched/task_stack.h>
68 #include <linux/jiffies.h>
69 #include <linux/delay.h>
70 #include <linux/export.h>
71 #include <linux/kthread.h>
72 #include <linux/rbtree.h>
73 #include <linux/fs.h>
74 #include <linux/debugfs.h>
75 #include <linux/seq_file.h>
76 #include <linux/cpumask.h>
77 #include <linux/spinlock.h>
78 #include <linux/module.h>
79 #include <linux/mutex.h>
80 #include <linux/rcupdate.h>
81 #include <linux/stacktrace.h>
82 #include <linux/cache.h>
83 #include <linux/percpu.h>
84 #include <linux/memblock.h>
85 #include <linux/pfn.h>
86 #include <linux/mmzone.h>
87 #include <linux/slab.h>
88 #include <linux/thread_info.h>
89 #include <linux/err.h>
90 #include <linux/uaccess.h>
91 #include <linux/string.h>
92 #include <linux/nodemask.h>
93 #include <linux/mm.h>
94 #include <linux/workqueue.h>
95 #include <linux/crc32.h>
96 
97 #include <asm/sections.h>
98 #include <asm/processor.h>
99 #include <linux/atomic.h>
100 
101 #include <linux/kasan.h>
102 #include <linux/kfence.h>
103 #include <linux/kmemleak.h>
104 #include <linux/memory_hotplug.h>
105 
106 /*
107  * Kmemleak configuration and common defines.
108  */
109 #define MAX_TRACE		16	/* stack trace length */
110 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
111 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
112 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
113 #define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
114 
115 #define BYTES_PER_POINTER	sizeof(void *)
116 
117 /* GFP bitmask for kmemleak internal allocations */
118 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
119 					   __GFP_NOLOCKDEP)) | \
120 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
121 				 __GFP_NOWARN)
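
/*
 * Worked example (note added for clarity): gfp_kmemleak_mask(GFP_NOFS |
 * __GFP_ZERO) keeps only the bits that GFP_NOFS shares with GFP_KERNEL,
 * GFP_ATOMIC or __GFP_NOLOCKDEP, drops __GFP_ZERO, and adds
 * __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN, so kmemleak's own
 * metadata allocations never dig into reserves and fail quietly under
 * memory pressure.
 */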
122 
123 /* scanning area inside a memory block */
124 struct kmemleak_scan_area {
125 	struct hlist_node node;
126 	unsigned long start;
127 	size_t size;
128 };
129 
130 #define KMEMLEAK_GREY	0
131 #define KMEMLEAK_BLACK	-1
132 
133 /*
134  * Structure holding the metadata for each allocated memory block.
135  * Modifications to such objects should be made while holding the
136  * object->lock. Insertions or deletions from object_list, gray_list or
137  * rb_node are already protected by the corresponding locks or mutex (see
138  * the notes on locking above). These objects are reference-counted
139  * (use_count) and freed using the RCU mechanism.
140  */
141 struct kmemleak_object {
142 	raw_spinlock_t lock;
143 	unsigned int flags;		/* object status flags */
144 	struct list_head object_list;
145 	struct list_head gray_list;
146 	struct rb_node rb_node;
147 	struct rcu_head rcu;		/* object_list lockless traversal */
148 	/* object usage count; object freed when use_count == 0 */
149 	atomic_t use_count;
150 	unsigned long pointer;
151 	size_t size;
152 	/* pass surplus references to this pointer */
153 	unsigned long excess_ref;
154 	/* minimum number of pointers found before it is considered a leak */
155 	int min_count;
156 	/* the total number of pointers found pointing to this object */
157 	int count;
158 	/* checksum for detecting modified objects */
159 	u32 checksum;
160 	/* memory ranges to be scanned inside an object (empty for all) */
161 	struct hlist_head area_list;
162 	unsigned long trace[MAX_TRACE];
163 	unsigned int trace_len;
164 	unsigned long jiffies;		/* creation timestamp */
165 	pid_t pid;			/* pid of the current task */
166 	char comm[TASK_COMM_LEN];	/* executable name */
167 };
168 
169 /* flag representing the memory block allocation status */
170 #define OBJECT_ALLOCATED	(1 << 0)
171 /* flag set after the first reporting of an unreferenced object */
172 #define OBJECT_REPORTED		(1 << 1)
173 /* flag set to not scan the object */
174 #define OBJECT_NO_SCAN		(1 << 2)
175 /* flag set to fully scan the object when scan_area allocation failed */
176 #define OBJECT_FULL_SCAN	(1 << 3)
177 /* flag set for object allocated with physical address */
178 #define OBJECT_PHYS		(1 << 4)
179 
180 #define HEX_PREFIX		"    "
181 /* number of bytes to print per line; must be 16 or 32 */
182 #define HEX_ROW_SIZE		16
183 /* number of bytes to print at a time (1, 2, 4, 8) */
184 #define HEX_GROUP_SIZE		1
185 /* include ASCII after the hex output */
186 #define HEX_ASCII		1
187 /* max number of lines to be printed */
188 #define HEX_MAX_LINES		2
189 
190 /* the list of all allocated objects */
191 static LIST_HEAD(object_list);
192 /* the list of gray-colored objects (see color_gray comment below) */
193 static LIST_HEAD(gray_list);
194 /* memory pool allocation */
195 static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
196 static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
197 static LIST_HEAD(mem_pool_free_list);
198 /* search tree for object boundaries */
199 static struct rb_root object_tree_root = RB_ROOT;
200 /* search tree for object (with OBJECT_PHYS flag) boundaries */
201 static struct rb_root object_phys_tree_root = RB_ROOT;
202 /* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
203 static DEFINE_RAW_SPINLOCK(kmemleak_lock);
204 
205 /* allocation caches for kmemleak internal data */
206 static struct kmem_cache *object_cache;
207 static struct kmem_cache *scan_area_cache;
208 
209 /* set if tracing memory operations is enabled */
210 static int kmemleak_enabled = 1;
211 /* same as above but only for the kmemleak_free() callback */
212 static int kmemleak_free_enabled = 1;
213 /* set in the late_initcall if there were no errors */
214 static int kmemleak_initialized;
215 /* set if a kmemleak warning was issued */
216 static int kmemleak_warning;
217 /* set if a fatal kmemleak error has occurred */
218 static int kmemleak_error;
219 
220 /* minimum and maximum address that may be valid pointers */
221 static unsigned long min_addr = ULONG_MAX;
222 static unsigned long max_addr;
223 
224 static struct task_struct *scan_thread;
225 /* used to avoid reporting of recently allocated objects */
226 static unsigned long jiffies_min_age;
227 static unsigned long jiffies_last_scan;
228 /* delay between automatic memory scannings */
229 static unsigned long jiffies_scan_wait;
230 /* enables or disables the task stacks scanning */
231 static int kmemleak_stack_scan = 1;
232 /* protects the memory scanning, parameters and debug/kmemleak file access */
233 static DEFINE_MUTEX(scan_mutex);
234 /* setting kmemleak=on, will set this var, skipping the disable */
235 static int kmemleak_skip_disable;
236 /* If there are leaks that can be reported */
237 static bool kmemleak_found_leaks;
238 
239 static bool kmemleak_verbose;
240 module_param_named(verbose, kmemleak_verbose, bool, 0600);
241 
242 static void kmemleak_disable(void);
243 
244 /*
245  * Print a warning and dump the stack trace.
246  */
247 #define kmemleak_warn(x...)	do {		\
248 	pr_warn(x);				\
249 	dump_stack();				\
250 	kmemleak_warning = 1;			\
251 } while (0)
252 
253 /*
254  * Macro invoked when a serious kmemleak condition has occurred and cannot be
255  * recovered from. Kmemleak will be disabled and further allocation/freeing
256  * tracing will no longer be available.
257  */
258 #define kmemleak_stop(x...)	do {	\
259 	kmemleak_warn(x);		\
260 	kmemleak_disable();		\
261 } while (0)
262 
263 #define warn_or_seq_printf(seq, fmt, ...)	do {	\
264 	if (seq)					\
265 		seq_printf(seq, fmt, ##__VA_ARGS__);	\
266 	else						\
267 		pr_warn(fmt, ##__VA_ARGS__);		\
268 } while (0)
269 
270 static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
271 				 int rowsize, int groupsize, const void *buf,
272 				 size_t len, bool ascii)
273 {
274 	if (seq)
275 		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
276 			     buf, len, ascii);
277 	else
278 		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
279 			       rowsize, groupsize, buf, len, ascii);
280 }
281 
282 /*
283  * Printing of the objects hex dump to the seq file. The number of lines to be
284  * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
285  * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
286  * with the object->lock held.
287  */
288 static void hex_dump_object(struct seq_file *seq,
289 			    struct kmemleak_object *object)
290 {
291 	const u8 *ptr = (const u8 *)object->pointer;
292 	size_t len;
293 
294 	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
295 		return;
296 
297 	/* limit the number of lines to HEX_MAX_LINES */
298 	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
299 
300 	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
301 	kasan_disable_current();
302 	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
303 			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
304 	kasan_enable_current();
305 }
306 
307 /*
308  * Object colors, encoded with count and min_count:
309  * - white - orphan object, not enough references to it (count < min_count)
310  * - gray  - not orphan, not marked as false positive (min_count == 0) or
311  *		sufficient references to it (count >= min_count)
312  * - black - ignore, it doesn't contain references (e.g. text section)
313  *		(min_count == -1). No function defined for this color.
314  * Newly created objects start with count == 0 (white when min_count > 0)
315  * and are recolored during the next memory scan.
316  */
317 static bool color_white(const struct kmemleak_object *object)
318 {
319 	return object->count != KMEMLEAK_BLACK &&
320 		object->count < object->min_count;
321 }
322 
323 static bool color_gray(const struct kmemleak_object *object)
324 {
325 	return object->min_count != KMEMLEAK_BLACK &&
326 		object->count >= object->min_count;
327 }
328 
329 /*
330  * Objects are considered unreferenced only if their color is white, they have
331  * not been deleted, and have a minimum age to avoid false positives caused by
332  * pointers temporarily stored in CPU registers.
333  */
334 static bool unreferenced_object(struct kmemleak_object *object)
335 {
336 	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
337 		time_before_eq(object->jiffies + jiffies_min_age,
338 			       jiffies_last_scan);
339 }
340 
341 /*
342  * Printing of the unreferenced objects information to the seq file. The
343  * print_unreferenced function must be called with the object->lock held.
344  */
345 static void print_unreferenced(struct seq_file *seq,
346 			       struct kmemleak_object *object)
347 {
348 	int i;
349 	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
350 
351 	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
352 		   object->pointer, object->size);
353 	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
354 		   object->comm, object->pid, object->jiffies,
355 		   msecs_age / 1000, msecs_age % 1000);
356 	hex_dump_object(seq, object);
357 	warn_or_seq_printf(seq, "  backtrace:\n");
358 
359 	for (i = 0; i < object->trace_len; i++) {
360 		void *ptr = (void *)object->trace[i];
361 		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
362 	}
363 }
364 
365 /*
366  * Print the kmemleak_object information. This function is used mainly for
367  * debugging special cases during kmemleak operations. It must be called with
368  * the object->lock held.
369  */
370 static void dump_object_info(struct kmemleak_object *object)
371 {
372 	pr_notice("Object 0x%08lx (size %zu):\n",
373 		  object->pointer, object->size);
374 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
375 		  object->comm, object->pid, object->jiffies);
376 	pr_notice("  min_count = %d\n", object->min_count);
377 	pr_notice("  count = %d\n", object->count);
378 	pr_notice("  flags = 0x%x\n", object->flags);
379 	pr_notice("  checksum = %u\n", object->checksum);
380 	pr_notice("  backtrace:\n");
381 	stack_trace_print(object->trace, object->trace_len, 4);
382 }
383 
384 /*
385  * Look-up a memory block metadata (kmemleak_object) in the object search
386  * tree based on a pointer value. If alias is 0, only values pointing to the
387  * beginning of the memory block are allowed. The kmemleak_lock must be held
388  * when calling this function.
389  */
390 static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
391 					       bool is_phys)
392 {
393 	struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
394 			     object_tree_root.rb_node;
395 	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
396 
397 	while (rb) {
398 		struct kmemleak_object *object;
399 		unsigned long untagged_objp;
400 
401 		object = rb_entry(rb, struct kmemleak_object, rb_node);
402 		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
403 
404 		if (untagged_ptr < untagged_objp)
405 			rb = object->rb_node.rb_left;
406 		else if (untagged_objp + object->size <= untagged_ptr)
407 			rb = object->rb_node.rb_right;
408 		else if (untagged_objp == untagged_ptr || alias)
409 			return object;
410 		else {
411 			kmemleak_warn("Found object by alias at 0x%08lx\n",
412 				      ptr);
413 			dump_object_info(object);
414 			break;
415 		}
416 	}
417 	return NULL;
418 }
419 
420 /* Look up a kmemleak object which was allocated with a virtual address. */
421 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
422 {
423 	return __lookup_object(ptr, alias, false);
424 }
425 
426 /*
427  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
428  * that once an object's use_count has reached 0, the RCU freeing has been
429  * registered and the object should no longer be used. This function must be
430  * called under the protection of rcu_read_lock().
431  */
432 static int get_object(struct kmemleak_object *object)
433 {
434 	return atomic_inc_not_zero(&object->use_count);
435 }
436 
437 /*
438  * Memory pool allocation and freeing. kmemleak_lock must not be held.
439  */
440 static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
441 {
442 	unsigned long flags;
443 	struct kmemleak_object *object;
444 
445 	/* try the slab allocator first */
446 	if (object_cache) {
447 		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
448 		if (object)
449 			return object;
450 	}
451 
452 	/* slab allocation failed, try the memory pool */
453 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
454 	object = list_first_entry_or_null(&mem_pool_free_list,
455 					  typeof(*object), object_list);
456 	if (object)
457 		list_del(&object->object_list);
458 	else if (mem_pool_free_count)
459 		object = &mem_pool[--mem_pool_free_count];
460 	else
461 		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
462 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
463 
464 	return object;
465 }
466 
467 /*
468  * Return the object to either the slab allocator or the memory pool.
469  */
470 static void mem_pool_free(struct kmemleak_object *object)
471 {
472 	unsigned long flags;
473 
474 	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
475 		kmem_cache_free(object_cache, object);
476 		return;
477 	}
478 
479 	/* add the object to the memory pool free list */
480 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
481 	list_add(&object->object_list, &mem_pool_free_list);
482 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
483 }
484 
485 /*
486  * RCU callback to free a kmemleak_object.
487  */
488 static void free_object_rcu(struct rcu_head *rcu)
489 {
490 	struct hlist_node *tmp;
491 	struct kmemleak_scan_area *area;
492 	struct kmemleak_object *object =
493 		container_of(rcu, struct kmemleak_object, rcu);
494 
495 	/*
496 	 * Once use_count is 0 (guaranteed by put_object), there is no other
497 	 * code accessing this object, hence no need for locking.
498 	 */
499 	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
500 		hlist_del(&area->node);
501 		kmem_cache_free(scan_area_cache, area);
502 	}
503 	mem_pool_free(object);
504 }
505 
506 /*
507  * Decrement the object use_count. Once the count is 0, free the object using
508  * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
509  * delete_object() path, the delayed RCU freeing ensures that there is no
510  * recursive call to the kernel allocator. Lock-less RCU object_list traversal
511  * is also possible.
512  */
513 static void put_object(struct kmemleak_object *object)
514 {
515 	if (!atomic_dec_and_test(&object->use_count))
516 		return;
517 
518 	/* should only get here after delete_object was called */
519 	WARN_ON(object->flags & OBJECT_ALLOCATED);
520 
521 	/*
522 	 * It may be too early for the RCU callbacks, however, there is no
523 	 * concurrent object_list traversal when !object_cache and all objects
524 	 * came from the memory pool. Free the object directly.
525 	 */
526 	if (object_cache)
527 		call_rcu(&object->rcu, free_object_rcu);
528 	else
529 		free_object_rcu(&object->rcu);
530 }
531 
532 /*
533  * Look up an object in the object search tree and increase its use_count.
534  */
535 static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
536 						     bool is_phys)
537 {
538 	unsigned long flags;
539 	struct kmemleak_object *object;
540 
541 	rcu_read_lock();
542 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
543 	object = __lookup_object(ptr, alias, is_phys);
544 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
545 
546 	/* check whether the object is still available */
547 	if (object && !get_object(object))
548 		object = NULL;
549 	rcu_read_unlock();
550 
551 	return object;
552 }
553 
554 /* Look up and get an object which was allocated with a virtual address. */
555 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
556 {
557 	return __find_and_get_object(ptr, alias, false);
558 }
559 
560 /*
561  * Remove an object from the object_tree_root (or object_phys_tree_root)
562  * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
563  * is still enabled.
564  */
565 static void __remove_object(struct kmemleak_object *object)
566 {
567 	rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
568 				   &object_phys_tree_root :
569 				   &object_tree_root);
570 	list_del_rcu(&object->object_list);
571 }
572 
573 /*
574  * Look up an object in the object search tree and remove it from both
575  * object_tree_root (or object_phys_tree_root) and object_list. The
576  * returned object's use_count should be at least 1, as initially set
577  * by create_object().
578  */
579 static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
580 						      bool is_phys)
581 {
582 	unsigned long flags;
583 	struct kmemleak_object *object;
584 
585 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
586 	object = __lookup_object(ptr, alias, is_phys);
587 	if (object)
588 		__remove_object(object);
589 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
590 
591 	return object;
592 }
593 
594 /*
595  * Save stack trace to the given array of MAX_TRACE size.
596  */
597 static int __save_stack_trace(unsigned long *trace)
598 {
599 	return stack_trace_save(trace, MAX_TRACE, 2);
600 }
601 
602 /*
603  * Create the metadata (struct kmemleak_object) corresponding to an allocated
604  * memory block and add it to the object_list and object_tree_root (or
605  * object_phys_tree_root).
606  */
607 static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
608 					     int min_count, gfp_t gfp,
609 					     bool is_phys)
610 {
611 	unsigned long flags;
612 	struct kmemleak_object *object, *parent;
613 	struct rb_node **link, *rb_parent;
614 	unsigned long untagged_ptr;
615 	unsigned long untagged_objp;
616 
617 	object = mem_pool_alloc(gfp);
618 	if (!object) {
619 		pr_warn("Cannot allocate a kmemleak_object structure\n");
620 		kmemleak_disable();
621 		return NULL;
622 	}
623 
624 	INIT_LIST_HEAD(&object->object_list);
625 	INIT_LIST_HEAD(&object->gray_list);
626 	INIT_HLIST_HEAD(&object->area_list);
627 	raw_spin_lock_init(&object->lock);
628 	atomic_set(&object->use_count, 1);
629 	object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
630 	object->pointer = ptr;
631 	object->size = kfence_ksize((void *)ptr) ?: size;
632 	object->excess_ref = 0;
633 	object->min_count = min_count;
634 	object->count = 0;			/* white color initially */
635 	object->jiffies = jiffies;
636 	object->checksum = 0;
637 
638 	/* task information */
639 	if (in_hardirq()) {
640 		object->pid = 0;
641 		strncpy(object->comm, "hardirq", sizeof(object->comm));
642 	} else if (in_serving_softirq()) {
643 		object->pid = 0;
644 		strncpy(object->comm, "softirq", sizeof(object->comm));
645 	} else {
646 		object->pid = current->pid;
647 		/*
648 		 * There is a small chance of a race with set_task_comm(),
649 		 * however using get_task_comm() here may cause locking
650 		 * dependency issues with current->alloc_lock. In the worst
651 		 * case, the command line is not correct.
652 		 */
653 		strncpy(object->comm, current->comm, sizeof(object->comm));
654 	}
655 
656 	/* kernel backtrace */
657 	object->trace_len = __save_stack_trace(object->trace);
658 
659 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
660 
661 	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
662 	/*
663 	 * Only update min_addr and max_addr with object
664 	 * storing virtual address.
665 	 */
666 	if (!is_phys) {
667 		min_addr = min(min_addr, untagged_ptr);
668 		max_addr = max(max_addr, untagged_ptr + size);
669 	}
670 	link = is_phys ? &object_phys_tree_root.rb_node :
671 		&object_tree_root.rb_node;
672 	rb_parent = NULL;
673 	while (*link) {
674 		rb_parent = *link;
675 		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
676 		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
677 		if (untagged_ptr + size <= untagged_objp)
678 			link = &parent->rb_node.rb_left;
679 		else if (untagged_objp + parent->size <= untagged_ptr)
680 			link = &parent->rb_node.rb_right;
681 		else {
682 			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
683 				      ptr);
684 			/*
685 			 * No need for parent->lock here since "parent" cannot
686 			 * be freed while the kmemleak_lock is held.
687 			 */
688 			dump_object_info(parent);
689 			kmem_cache_free(object_cache, object);
690 			object = NULL;
691 			goto out;
692 		}
693 	}
694 	rb_link_node(&object->rb_node, rb_parent, link);
695 	rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
696 					  &object_tree_root);
697 
698 	list_add_tail_rcu(&object->object_list, &object_list);
699 out:
700 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
701 	return object;
702 }
703 
704 /* Create a kmemleak object for a block allocated with a virtual address. */
705 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
706 					     int min_count, gfp_t gfp)
707 {
708 	return __create_object(ptr, size, min_count, gfp, false);
709 }
710 
711 /* Create a kmemleak object for a block allocated with a physical address. */
712 static struct kmemleak_object *create_object_phys(unsigned long ptr, size_t size,
713 					     int min_count, gfp_t gfp)
714 {
715 	return __create_object(ptr, size, min_count, gfp, true);
716 }
717 
718 /*
719  * Mark the object as not allocated and schedule RCU freeing via put_object().
720  */
721 static void __delete_object(struct kmemleak_object *object)
722 {
723 	unsigned long flags;
724 
725 	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
726 	WARN_ON(atomic_read(&object->use_count) < 1);
727 
728 	/*
729 	 * Locking here also ensures that the corresponding memory block
730 	 * cannot be freed when it is being scanned.
731 	 */
732 	raw_spin_lock_irqsave(&object->lock, flags);
733 	object->flags &= ~OBJECT_ALLOCATED;
734 	raw_spin_unlock_irqrestore(&object->lock, flags);
735 	put_object(object);
736 }
737 
738 /*
739  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
740  * delete it.
741  */
742 static void delete_object_full(unsigned long ptr)
743 {
744 	struct kmemleak_object *object;
745 
746 	object = find_and_remove_object(ptr, 0, false);
747 	if (!object) {
748 #ifdef DEBUG
749 		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
750 			      ptr);
751 #endif
752 		return;
753 	}
754 	__delete_object(object);
755 }
756 
757 /*
758  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
759  * delete it. If the memory block is partially freed, the function may create
760  * additional metadata for the remaining parts of the block.
761  */
762 static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
763 {
764 	struct kmemleak_object *object;
765 	unsigned long start, end;
766 
767 	object = find_and_remove_object(ptr, 1, is_phys);
768 	if (!object) {
769 #ifdef DEBUG
770 		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
771 			      ptr, size);
772 #endif
773 		return;
774 	}
775 
776 	/*
777 	 * Create one or two objects that may result from the memory block
778 	 * split. Note that partial freeing is only done by free_bootmem() and
779 	 * this happens before kmemleak_init() is called.
780 	 */
781 	start = object->pointer;
782 	end = object->pointer + object->size;
783 	if (ptr > start)
784 		__create_object(start, ptr - start, object->min_count,
785 			      GFP_KERNEL, is_phys);
786 	if (ptr + size < end)
787 		__create_object(ptr + size, end - ptr - size, object->min_count,
788 			      GFP_KERNEL, is_phys);
789 
790 	__delete_object(object);
791 }
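
/*
 * Worked example (note added for clarity): for an object spanning
 * [0x1000, 0x1400) and a partial free of [0x1100, 0x1200), the code
 * above creates two objects for the surviving ranges [0x1000, 0x1100)
 * and [0x1200, 0x1400), then deletes the original object.
 */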
792 
793 static void __paint_it(struct kmemleak_object *object, int color)
794 {
795 	object->min_count = color;
796 	if (color == KMEMLEAK_BLACK)
797 		object->flags |= OBJECT_NO_SCAN;
798 }
799 
800 static void paint_it(struct kmemleak_object *object, int color)
801 {
802 	unsigned long flags;
803 
804 	raw_spin_lock_irqsave(&object->lock, flags);
805 	__paint_it(object, color);
806 	raw_spin_unlock_irqrestore(&object->lock, flags);
807 }
808 
809 static void paint_ptr(unsigned long ptr, int color, bool is_phys)
810 {
811 	struct kmemleak_object *object;
812 
813 	object = __find_and_get_object(ptr, 0, is_phys);
814 	if (!object) {
815 		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
816 			      ptr,
817 			      (color == KMEMLEAK_GREY) ? "Grey" :
818 			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
819 		return;
820 	}
821 	paint_it(object, color);
822 	put_object(object);
823 }
824 
825 /*
826  * Mark an object permanently as gray-colored so that it can no longer be
827  * reported as a leak. This is used in general to mark a false positive.
828  */
829 static void make_gray_object(unsigned long ptr)
830 {
831 	paint_ptr(ptr, KMEMLEAK_GREY, false);
832 }
833 
834 /*
835  * Mark the object as black-colored so that it is ignored from scans and
836  * reporting.
837  */
838 static void make_black_object(unsigned long ptr, bool is_phys)
839 {
840 	paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
841 }
842 
843 /*
844  * Add a scanning area to the object. If at least one such area is added,
845  * kmemleak will only scan these ranges rather than the whole memory block.
846  */
847 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
848 {
849 	unsigned long flags;
850 	struct kmemleak_object *object;
851 	struct kmemleak_scan_area *area = NULL;
852 	unsigned long untagged_ptr;
853 	unsigned long untagged_objp;
854 
855 	object = find_and_get_object(ptr, 1);
856 	if (!object) {
857 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
858 			      ptr);
859 		return;
860 	}
861 
862 	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
863 	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
864 
865 	if (scan_area_cache)
866 		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
867 
868 	raw_spin_lock_irqsave(&object->lock, flags);
869 	if (!area) {
870 		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
871 		/* mark the object for full scan to avoid false positives */
872 		object->flags |= OBJECT_FULL_SCAN;
873 		goto out_unlock;
874 	}
875 	if (size == SIZE_MAX) {
876 		size = untagged_objp + object->size - untagged_ptr;
877 	} else if (untagged_ptr + size > untagged_objp + object->size) {
878 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
879 		dump_object_info(object);
880 		kmem_cache_free(scan_area_cache, area);
881 		goto out_unlock;
882 	}
883 
884 	INIT_HLIST_NODE(&area->node);
885 	area->start = ptr;
886 	area->size = size;
887 
888 	hlist_add_head(&area->node, &object->area_list);
889 out_unlock:
890 	raw_spin_unlock_irqrestore(&object->lock, flags);
891 	put_object(object);
892 }
893 
894 /*
895  * Any surplus references (object already gray) to 'ptr' are passed to
896  * 'excess_ref'. This is used in the vmalloc() case where a pointer to
897  * vm_struct may be used as an alternative reference to the vmalloc'ed object
898  * (see free_thread_stack()).
899  */
900 static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
901 {
902 	unsigned long flags;
903 	struct kmemleak_object *object;
904 
905 	object = find_and_get_object(ptr, 0);
906 	if (!object) {
907 		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
908 			      ptr);
909 		return;
910 	}
911 
912 	raw_spin_lock_irqsave(&object->lock, flags);
913 	object->excess_ref = excess_ref;
914 	raw_spin_unlock_irqrestore(&object->lock, flags);
915 	put_object(object);
916 }
917 
918 /*
919  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
920  * pointer. Such an object will not be scanned by kmemleak but references to it
921  * are searched.
922  */
923 static void object_no_scan(unsigned long ptr)
924 {
925 	unsigned long flags;
926 	struct kmemleak_object *object;
927 
928 	object = find_and_get_object(ptr, 0);
929 	if (!object) {
930 		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
931 		return;
932 	}
933 
934 	raw_spin_lock_irqsave(&object->lock, flags);
935 	object->flags |= OBJECT_NO_SCAN;
936 	raw_spin_unlock_irqrestore(&object->lock, flags);
937 	put_object(object);
938 }
939 
940 /**
941  * kmemleak_alloc - register a newly allocated object
942  * @ptr:	pointer to beginning of the object
943  * @size:	size of the object
944  * @min_count:	minimum number of references to this object. If during memory
945  *		scanning a number of references less than @min_count is found,
946  *		the object is reported as a memory leak. If @min_count is 0,
947  *		the object is never reported as a leak. If @min_count is -1,
948  *		the object is ignored (not scanned and not reported as a leak)
949  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
950  *
951  * This function is called from the kernel allocators when a new object
952  * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
953  */
954 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
955 			  gfp_t gfp)
956 {
957 	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
958 
959 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
960 		create_object((unsigned long)ptr, size, min_count, gfp);
961 }
962 EXPORT_SYMBOL_GPL(kmemleak_alloc);
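
/*
 * Usage sketch (hypothetical; the my_pool_*() helpers are invented for
 * illustration): an allocator invisible to the slab hooks could pair
 * the callbacks manually, mirroring what the standard allocators do:
 *
 *	void *my_pool_alloc(size_t size)
 *	{
 *		void *ptr = my_pool_carve(size);
 *
 *		if (ptr)
 *			kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
 *		return ptr;
 *	}
 *
 *	void my_pool_free(void *ptr)
 *	{
 *		kmemleak_free(ptr);
 *		my_pool_release(ptr);
 *	}
 */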
963 
964 /**
965  * kmemleak_alloc_percpu - register a newly allocated __percpu object
966  * @ptr:	__percpu pointer to beginning of the object
967  * @size:	size of the object
968  * @gfp:	flags used for kmemleak internal memory allocations
969  *
970  * This function is called from the kernel percpu allocator when a new object
971  * (memory block) is allocated (alloc_percpu).
972  */
973 void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
974 				 gfp_t gfp)
975 {
976 	unsigned int cpu;
977 
978 	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
979 
980 	/*
981 	 * Percpu allocations are only scanned and not reported as leaks
982 	 * (min_count is set to 0).
983 	 */
984 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
985 		for_each_possible_cpu(cpu)
986 			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
987 				      size, 0, gfp);
988 }
989 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
990 
991 /**
992  * kmemleak_vmalloc - register a newly vmalloc'ed object
993  * @area:	pointer to vm_struct
994  * @size:	size of the object
995  * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
996  *
997  * This function is called from the vmalloc() kernel allocator when a new
998  * object (memory block) is allocated.
999  */
1000 void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
1001 {
1002 	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
1003 
1004 	/*
1005 	 * A min_count = 2 is needed because vm_struct contains a reference to
1006 	 * the virtual address of the vmalloc'ed block.
1007 	 */
1008 	if (kmemleak_enabled) {
1009 		create_object((unsigned long)area->addr, size, 2, gfp);
1010 		object_set_excess_ref((unsigned long)area,
1011 				      (unsigned long)area->addr);
1012 	}
1013 }
1014 EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
1015 
1016 /**
1017  * kmemleak_free - unregister a previously registered object
1018  * @ptr:	pointer to beginning of the object
1019  *
1020  * This function is called from the kernel allocators when an object (memory
1021  * block) is freed (kmem_cache_free, kfree, vfree etc.).
1022  */
1023 void __ref kmemleak_free(const void *ptr)
1024 {
1025 	pr_debug("%s(0x%p)\n", __func__, ptr);
1026 
1027 	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1028 		delete_object_full((unsigned long)ptr);
1029 }
1030 EXPORT_SYMBOL_GPL(kmemleak_free);
1031 
1032 /**
1033  * kmemleak_free_part - partially unregister a previously registered object
1034  * @ptr:	pointer to the beginning or inside the object. This also
1035  *		represents the start of the range to be freed
1036  * @size:	size to be unregistered
1037  *
1038  * This function is called when only a part of a memory block is freed
1039  * (usually from the bootmem allocator).
1040  */
1041 void __ref kmemleak_free_part(const void *ptr, size_t size)
1042 {
1043 	pr_debug("%s(0x%p)\n", __func__, ptr);
1044 
1045 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1046 		delete_object_part((unsigned long)ptr, size, false);
1047 }
1048 EXPORT_SYMBOL_GPL(kmemleak_free_part);
1049 
1050 /**
1051  * kmemleak_free_percpu - unregister a previously registered __percpu object
1052  * @ptr:	__percpu pointer to beginning of the object
1053  *
1054  * This function is called from the kernel percpu allocator when an object
1055  * (memory block) is freed (free_percpu).
1056  */
1057 void __ref kmemleak_free_percpu(const void __percpu *ptr)
1058 {
1059 	unsigned int cpu;
1060 
1061 	pr_debug("%s(0x%p)\n", __func__, ptr);
1062 
1063 	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1064 		for_each_possible_cpu(cpu)
1065 			delete_object_full((unsigned long)per_cpu_ptr(ptr,
1066 								      cpu));
1067 }
1068 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1069 
1070 /**
1071  * kmemleak_update_trace - update object allocation stack trace
1072  * @ptr:	pointer to beginning of the object
1073  *
1074  * Override the object allocation stack trace for cases where the actual
1075  * allocation place is not always useful.
1076  */
1077 void __ref kmemleak_update_trace(const void *ptr)
1078 {
1079 	struct kmemleak_object *object;
1080 	unsigned long flags;
1081 
1082 	pr_debug("%s(0x%p)\n", __func__, ptr);
1083 
1084 	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1085 		return;
1086 
1087 	object = find_and_get_object((unsigned long)ptr, 1);
1088 	if (!object) {
1089 #ifdef DEBUG
1090 		kmemleak_warn("Updating stack trace for unknown object at %p\n",
1091 			      ptr);
1092 #endif
1093 		return;
1094 	}
1095 
1096 	raw_spin_lock_irqsave(&object->lock, flags);
1097 	object->trace_len = __save_stack_trace(object->trace);
1098 	raw_spin_unlock_irqrestore(&object->lock, flags);
1099 
1100 	put_object(object);
1101 }
1102 EXPORT_SYMBOL(kmemleak_update_trace);
1103 
1104 /**
1105  * kmemleak_not_leak - mark an allocated object as false positive
1106  * @ptr:	pointer to beginning of the object
1107  *
1108  * Calling this function on an object will cause the memory block to no longer
1109  * be reported as a leak and to always be scanned.
1110  */
1111 void __ref kmemleak_not_leak(const void *ptr)
1112 {
1113 	pr_debug("%s(0x%p)\n", __func__, ptr);
1114 
1115 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1116 		make_gray_object((unsigned long)ptr);
1117 }
1118 EXPORT_SYMBOL(kmemleak_not_leak);
1119 
1120 /**
1121  * kmemleak_ignore - ignore an allocated object
1122  * @ptr:	pointer to beginning of the object
1123  *
1124  * Calling this function on an object will cause the memory block to be
1125  * ignored (not scanned and not reported as a leak). This is usually done when
1126  * it is known that the corresponding block is not a leak and does not contain
1127  * any references to other allocated memory blocks.
1128  */
1129 void __ref kmemleak_ignore(const void *ptr)
1130 {
1131 	pr_debug("%s(0x%p)\n", __func__, ptr);
1132 
1133 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1134 		make_black_object((unsigned long)ptr, false);
1135 }
1136 EXPORT_SYMBOL(kmemleak_ignore);
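
/*
 * Usage sketch (hypothetical): annotating objects that would otherwise
 * show up in the leak report. A block whose only reference lives in
 * memory kmemleak does not scan is a false positive; a block known to
 * be neither a leak nor a pointer carrier can be ignored entirely:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_not_leak(obj);		(reachable, but not via scanned memory)
 *
 *	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	kmemleak_ignore(buf);		(not scanned, never reported)
 */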
1137 
1138 /**
1139  * kmemleak_scan_area - limit the range to be scanned in an allocated object
1140  * @ptr:	pointer to beginning or inside the object. This also
1141  *		represents the start of the scan area
1142  * @size:	size of the scan area
1143  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1144  *
1145  * This function is used when it is known that only certain parts of an object
1146  * contain references to other objects. Kmemleak will only scan these areas
1147  * reducing the number of false negatives.
1148  */
1149 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1150 {
1151 	pr_debug("%s(0x%p)\n", __func__, ptr);
1152 
1153 	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1154 		add_scan_area((unsigned long)ptr, size, gfp);
1155 }
1156 EXPORT_SYMBOL(kmemleak_scan_area);
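
/*
 * Usage sketch (hypothetical struct, invented for illustration): when
 * only one field of a large object holds pointers, restricting the scan
 * to that field stops the raw payload from faking references:
 *
 *	struct big_buf {
 *		char data[4096];	(raw payload, no pointers)
 *		struct list_head list;	(the only pointer-bearing field)
 *	};
 *
 *	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
 *	kmemleak_scan_area(&buf->list, sizeof(buf->list), GFP_KERNEL);
 */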
1157 
1158 /**
1159  * kmemleak_no_scan - do not scan an allocated object
1160  * @ptr:	pointer to beginning of the object
1161  *
1162  * This function notifies kmemleak not to scan the given memory block. Useful
1163  * in situations where it is known that the given object does not contain any
1164  * references to other objects. Kmemleak will not scan such objects reducing
1165  * the number of false negatives.
1166  */
1167 void __ref kmemleak_no_scan(const void *ptr)
1168 {
1169 	pr_debug("%s(0x%p)\n", __func__, ptr);
1170 
1171 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1172 		object_no_scan((unsigned long)ptr);
1173 }
1174 EXPORT_SYMBOL(kmemleak_no_scan);
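
/*
 * Usage sketch (hypothetical; RING_SIZE is invented): a buffer holding
 * only raw data can be excluded from scanning entirely, saving scan
 * time and preventing its contents from faking references:
 *
 *	ring = kmalloc(RING_SIZE, GFP_KERNEL);
 *	kmemleak_no_scan(ring);
 */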
1175 
1176 /**
1177  * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1178  *			 address argument
1179  * @phys:	physical address of the object
1180  * @size:	size of the object
1181  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1182  */
1183 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
1184 {
1185 	pr_debug("%s(0x%pa, %zu)\n", __func__, &phys, size);
1186 
1187 	if (kmemleak_enabled)
1188 		/*
1189 		 * Create object with OBJECT_PHYS flag and
1190 		 * assume min_count 0.
1191 		 */
1192 		create_object_phys((unsigned long)phys, size, 0, gfp);
1193 }
1194 EXPORT_SYMBOL(kmemleak_alloc_phys);
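
/*
 * Usage sketch (illustrative; early_carve_range() is invented, and
 * memblock already carries similar hooks): an early allocator handing
 * out ranges by physical address could register them as follows:
 *
 *	phys_addr_t pa = early_carve_range(SZ_1M);
 *
 *	if (pa)
 *		kmemleak_alloc_phys(pa, SZ_1M, GFP_NOWAIT);
 */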
1195 
1196 /**
1197  * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1198  *			     physical address argument
1199  * @phys:	physical address of the beginning or inside an object. This
1200  *		also represents the start of the range to be freed
1201  * @size:	size to be unregistered
1202  */
1203 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1204 {
1205 	pr_debug("%s(0x%pa)\n", __func__, &phys);
1206 
1207 	if (kmemleak_enabled)
1208 		delete_object_part((unsigned long)phys, size, true);
1209 }
1210 EXPORT_SYMBOL(kmemleak_free_part_phys);
1211 
1212 /**
1213  * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1214  *			  address argument
1215  * @phys:	physical address of the object
1216  */
1217 void __ref kmemleak_ignore_phys(phys_addr_t phys)
1218 {
1219 	pr_debug("%s(0x%pa)\n", __func__, &phys);
1220 
1221 	if (kmemleak_enabled)
1222 		make_black_object((unsigned long)phys, true);
1223 }
1224 EXPORT_SYMBOL(kmemleak_ignore_phys);
1225 
1226 /*
1227  * Update an object's checksum and return true if it was modified.
1228  */
1229 static bool update_checksum(struct kmemleak_object *object)
1230 {
1231 	u32 old_csum = object->checksum;
1232 
1233 	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
1234 		return false;
1235 
1236 	kasan_disable_current();
1237 	kcsan_disable_current();
1238 	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
1239 	kasan_enable_current();
1240 	kcsan_enable_current();
1241 
1242 	return object->checksum != old_csum;
1243 }
1244 
1245 /*
1246  * Update an object's references. object->lock must be held by the caller.
1247  */
1248 static void update_refs(struct kmemleak_object *object)
1249 {
1250 	if (!color_white(object)) {
1251 		/* non-orphan, ignored or new */
1252 		return;
1253 	}
1254 
1255 	/*
1256 	 * Increase the object's reference count (number of pointers to the
1257 	 * memory block). If this count reaches the required minimum, the
1258 	 * object's color will become gray and it will be added to the
1259 	 * gray_list.
1260 	 */
1261 	object->count++;
1262 	if (color_gray(object)) {
1263 		/* put_object() called when removing from gray_list */
1264 		WARN_ON(!get_object(object));
1265 		list_add_tail(&object->gray_list, &gray_list);
1266 	}
1267 }
1268 
1269 /*
1270  * Memory scanning is a long process and it needs to be interruptible. This
1271  * function checks whether such an interrupt condition has occurred.
1272  */
1273 static int scan_should_stop(void)
1274 {
1275 	if (!kmemleak_enabled)
1276 		return 1;
1277 
1278 	/*
1279 	 * This function may be called from either process or kthread context,
1280 	 * hence the need to check for both stop conditions.
1281 	 */
1282 	if (current->mm)
1283 		return signal_pending(current);
1284 	else
1285 		return kthread_should_stop();
1288 }
1289 
1290 /*
1291  * Scan a memory block (exclusive range) for valid pointers and add those
1292  * found to the gray list.
1293  */
1294 static void scan_block(void *_start, void *_end,
1295 		       struct kmemleak_object *scanned)
1296 {
1297 	unsigned long *ptr;
1298 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1299 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1300 	unsigned long flags;
1301 	unsigned long untagged_ptr;
1302 
1303 	raw_spin_lock_irqsave(&kmemleak_lock, flags);
1304 	for (ptr = start; ptr < end; ptr++) {
1305 		struct kmemleak_object *object;
1306 		unsigned long pointer;
1307 		unsigned long excess_ref;
1308 
1309 		if (scan_should_stop())
1310 			break;
1311 
1312 		kasan_disable_current();
1313 		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1314 		kasan_enable_current();
1315 
1316 		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1317 		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1318 			continue;
1319 
1320 		/*
1321 		 * No need for get_object() here since we hold kmemleak_lock.
1322 		 * object->use_count cannot be dropped to 0 while the object
1323 		 * is still present in object_tree_root and object_list
1324 		 * (with updates protected by kmemleak_lock).
1325 		 */
1326 		object = lookup_object(pointer, 1);
1327 		if (!object)
1328 			continue;
1329 		if (object == scanned)
1330 			/* self referenced, ignore */
1331 			continue;
1332 
1333 		/*
1334 		 * Avoid the lockdep recursive warning on object->lock being
1335 		 * previously acquired in scan_object(). These locks are
1336 		 * enclosed by scan_mutex.
1337 		 */
1338 		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1339 		/* only pass surplus references (object already gray) */
1340 		if (color_gray(object)) {
1341 			excess_ref = object->excess_ref;
1342 			/* no need for update_refs() if object already gray */
1343 		} else {
1344 			excess_ref = 0;
1345 			update_refs(object);
1346 		}
1347 		raw_spin_unlock(&object->lock);
1348 
1349 		if (excess_ref) {
1350 			object = lookup_object(excess_ref, 0);
1351 			if (!object)
1352 				continue;
1353 			if (object == scanned)
1354 				/* circular reference, ignore */
1355 				continue;
1356 			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1357 			update_refs(object);
1358 			raw_spin_unlock(&object->lock);
1359 		}
1360 	}
1361 	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1362 }
1363 
1364 /*
1365  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1366  */
1367 #ifdef CONFIG_SMP
1368 static void scan_large_block(void *start, void *end)
1369 {
1370 	void *next;
1371 
1372 	while (start < end) {
1373 		next = min(start + MAX_SCAN_SIZE, end);
1374 		scan_block(start, next, NULL);
1375 		start = next;
1376 		cond_resched();
1377 	}
1378 }
1379 #endif
1380 
1381 /*
1382  * Scan a memory block corresponding to a kmemleak_object. A precondition is
1383  * that object->use_count >= 1.
1384  */
1385 static void scan_object(struct kmemleak_object *object)
1386 {
1387 	struct kmemleak_scan_area *area;
1388 	unsigned long flags;
1389 	void *obj_ptr;
1390 
1391 	/*
1392 	 * Once the object->lock is acquired, the corresponding memory block
1393 	 * cannot be freed (the same lock is acquired in delete_object).
1394 	 */
1395 	raw_spin_lock_irqsave(&object->lock, flags);
1396 	if (object->flags & OBJECT_NO_SCAN)
1397 		goto out;
1398 	if (!(object->flags & OBJECT_ALLOCATED))
1399 		/* already freed object */
1400 		goto out;
1401 
1402 	obj_ptr = object->flags & OBJECT_PHYS ?
1403 		  __va((phys_addr_t)object->pointer) :
1404 		  (void *)object->pointer;
1405 
1406 	if (hlist_empty(&object->area_list) ||
1407 	    object->flags & OBJECT_FULL_SCAN) {
1408 		void *start = obj_ptr;
1409 		void *end = obj_ptr + object->size;
1410 		void *next;
1411 
1412 		do {
1413 			next = min(start + MAX_SCAN_SIZE, end);
1414 			scan_block(start, next, object);
1415 
1416 			start = next;
1417 			if (start >= end)
1418 				break;
1419 
1420 			raw_spin_unlock_irqrestore(&object->lock, flags);
1421 			cond_resched();
1422 			raw_spin_lock_irqsave(&object->lock, flags);
1423 		} while (object->flags & OBJECT_ALLOCATED);
1424 	} else
1425 		hlist_for_each_entry(area, &object->area_list, node)
1426 			scan_block((void *)area->start,
1427 				   (void *)(area->start + area->size),
1428 				   object);
1429 out:
1430 	raw_spin_unlock_irqrestore(&object->lock, flags);
1431 }
1432 
1433 /*
1434  * Scan the objects already referenced (gray objects). More objects will be
1435  * referenced and, if there are no memory leaks, all the objects are scanned.
1436  */
1437 static void scan_gray_list(void)
1438 {
1439 	struct kmemleak_object *object, *tmp;
1440 
1441 	/*
1442 	 * The list traversal is safe for both tail additions and removals
1443 	 * from inside the loop. The kmemleak objects cannot be freed from
1444 	 * outside the loop because their use_count was incremented.
1445 	 */
1446 	object = list_entry(gray_list.next, typeof(*object), gray_list);
1447 	while (&object->gray_list != &gray_list) {
1448 		cond_resched();
1449 
1450 		/* may add new objects to the list */
1451 		if (!scan_should_stop())
1452 			scan_object(object);
1453 
1454 		tmp = list_entry(object->gray_list.next, typeof(*object),
1455 				 gray_list);
1456 
1457 		/* remove the object from the list and release it */
1458 		list_del(&object->gray_list);
1459 		put_object(object);
1460 
1461 		object = tmp;
1462 	}
1463 	WARN_ON(!list_empty(&gray_list));
1464 }
1465 
1466 /*
1467  * Scan data sections and all the referenced memory blocks allocated via the
1468  * kernel's standard allocators. This function must be called with the
1469  * scan_mutex held.
1470  */
1471 static void kmemleak_scan(void)
1472 {
1473 	struct kmemleak_object *object;
1474 	struct zone *zone;
1475 	int __maybe_unused i;
1476 	int new_leaks = 0;
1477 	int loop1_cnt = 0;
1478 
1479 	jiffies_last_scan = jiffies;
1480 
1481 	/* prepare the kmemleak_object structures */
1482 	rcu_read_lock();
1483 	list_for_each_entry_rcu(object, &object_list, object_list) {
1484 		bool obj_pinned = false;
1485 
1486 		loop1_cnt++;
1487 		raw_spin_lock_irq(&object->lock);
1488 #ifdef DEBUG
1489 		/*
1490 		 * With a few exceptions there should be a maximum of
1491 		 * 1 reference to any object at this point.
1492 		 */
1493 		if (atomic_read(&object->use_count) > 1) {
1494 			pr_debug("object->use_count = %d\n",
1495 				 atomic_read(&object->use_count));
1496 			dump_object_info(object);
1497 		}
1498 #endif
1499 
1500 		/* ignore objects outside lowmem (paint them black) */
1501 		if ((object->flags & OBJECT_PHYS) &&
1502 		   !(object->flags & OBJECT_NO_SCAN)) {
1503 			unsigned long phys = object->pointer;
1504 
1505 			if (PHYS_PFN(phys) < min_low_pfn ||
1506 			    PHYS_PFN(phys + object->size) >= max_low_pfn)
1507 				__paint_it(object, KMEMLEAK_BLACK);
1508 		}
1509 
1510 		/* reset the reference count (whiten the object) */
1511 		object->count = 0;
1512 		if (color_gray(object) && get_object(object)) {
1513 			list_add_tail(&object->gray_list, &gray_list);
1514 			obj_pinned = true;
1515 		}
1516 
1517 		raw_spin_unlock_irq(&object->lock);
1518 
1519 		/*
1520 		 * Do a cond_resched() every 64k objects to avoid a soft lockup.
1521 		 * Make sure a reference has been taken so that the object
1522 		 * won't go away without RCU read lock.
1523 		 */
1524 		if (!(loop1_cnt & 0xffff)) {
1525 			if (!obj_pinned && !get_object(object)) {
1526 				/* Try the next object instead */
1527 				loop1_cnt--;
1528 				continue;
1529 			}
1530 
1531 			rcu_read_unlock();
1532 			cond_resched();
1533 			rcu_read_lock();
1534 
1535 			if (!obj_pinned)
1536 				put_object(object);
1537 		}
1538 	}
1539 	rcu_read_unlock();
1540 
1541 #ifdef CONFIG_SMP
1542 	/* per-cpu sections scanning */
1543 	for_each_possible_cpu(i)
1544 		scan_large_block(__per_cpu_start + per_cpu_offset(i),
1545 				 __per_cpu_end + per_cpu_offset(i));
1546 #endif
1547 
1548 	/*
1549 	 * Struct page scanning for each node.
1550 	 */
1551 	get_online_mems();
1552 	for_each_populated_zone(zone) {
1553 		unsigned long start_pfn = zone->zone_start_pfn;
1554 		unsigned long end_pfn = zone_end_pfn(zone);
1555 		unsigned long pfn;
1556 
1557 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1558 			struct page *page = pfn_to_online_page(pfn);
1559 
1560 			if (!page)
1561 				continue;
1562 
1563 			/* only scan pages belonging to this zone */
1564 			if (page_zone(page) != zone)
1565 				continue;
1566 			/* only scan if page is in use */
1567 			if (page_count(page) == 0)
1568 				continue;
1569 			scan_block(page, page + 1, NULL);
1570 			if (!(pfn & 63))
1571 				cond_resched();
1572 		}
1573 	}
1574 	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);

			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}
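
/*
 * For reference, the scan above is a conservative "tri-color" traversal
 * of the tracked objects (summary only, using the terminology of this
 * file):
 *
 *	white	count < min_count		leak candidate
 *	gray	count >= min_count		assumed referenced, scanned
 *	black	min_count == KMEMLEAK_BLACK	neither scanned nor reported
 *
 * The root areas (data/bss sections, per-cpu sections, struct pages and,
 * optionally, task stacks) are scanned first; every value that looks like
 * a pointer into a tracked object increments that object's count,
 * potentially turning it gray. scan_gray_list() then drains the gray
 * objects until no new references are found, and whatever is still white
 * afterwards is reported as a suspected leak.
 */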

/*
 * Thread function performing automatic memory scanning. Unreferenced
 * objects at the end of a memory scan are reported, but only the first
 * time they are found.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}
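
/*
 * Example of controlling the thread above at run-time through the debugfs
 * file documented before kmemleak_write() below:
 *
 *	# echo scan=off > /sys/kernel/debug/kmemleak	(stop the thread)
 *	# echo scan=600 > /sys/kernel/debug/kmemleak	(restart, 10 min period)
 *
 * Since jiffies_scan_wait is only (re)read once per loop iteration, the
 * "scan=<secs>" handler stops and restarts the thread so that a new
 * period takes effect immediately rather than after the current wait.
 */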

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The scan_mutex is
 * acquired here and released in kmemleak_seq_stop(), serializing the debugfs
 * traversal against memory scans and other users of the file.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};
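
/*
 * For reference, one read() of the "kmemleak" debugfs file drives the
 * operations above roughly as follows (sketch):
 *
 *	kmemleak_seq_start()	take scan_mutex and rcu_read_lock(), then
 *				get_object() on the object at *pos
 *	kmemleak_seq_show()	print the object if reported as a leak
 *	kmemleak_seq_next()	put_object() on the previous object,
 *				get_object() on the next one
 *	...show/next repeated...
 *	kmemleak_seq_stop()	rcu_read_unlock(), release scan_mutex and
 *				put_object() on the last object
 *
 * The use_count taken by get_object() is what allows an object to be
 * handed between these callbacks without being freed underneath them.
 */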

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}
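
/*
 * Example (the address below is purely illustrative; use one previously
 * reported by kmemleak):
 *
 *	# echo dump=0xffff888010a2e000 > /sys/kernel/debug/kmemleak
 *
 * The object information, including its allocation back-trace, is then
 * printed to the kernel log.
 */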

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans, these black objects could
 * potentially contain references to newly allocated objects and we would
 * end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}
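
/*
 * Example: a typical workflow for catching leaks introduced by a single
 * test is to clear the current results first, then re-scan afterwards:
 *
 *	# echo clear > /sys/kernel/debug/kmemleak
 *	... exercise the code under test ...
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 *
 * Only objects that became unreferenced after the "clear" are reported.
 */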

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned int secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
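
/*
 * Note that each write above is treated as a single command and anything
 * beyond it in the buffer is ignored, so commands must be issued one per
 * write(), e.g.:
 *
 *	# echo stack=off > /sys/kernel/debug/kmemleak
 *	# echo scan=60 > /sys/kernel/debug/kmemleak
 */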

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or kmemleak_lock held.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks were found (otherwise, kmemleak may still hold useful
 * information on the reported leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is certain that kmemleak_scan() has stopped, it is safe to
	 * stop tracking object freeing. The ordering between the scan thread
	 * stopping and the memory accesses below is guaranteed by
	 * kthread_stop().
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
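
/*
 * Example: with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y, the detector stays
 * disabled unless "kmemleak=on" is passed on the kernel command line.
 * Conversely, booting with "kmemleak=off" disables it irreversibly in any
 * configuration.
 */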

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}
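
/*
 * Note: the data/bss objects registered above are created KMEMLEAK_GREY,
 * i.e. with a minimum reference count of 0. They act as scan roots (their
 * contents are traversed for pointers to other tracked objects) but,
 * always counting as referenced, they are never reported as leaks
 * themselves.
 */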

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads, but they are serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);