xref: /openbmc/linux/mm/kmemleak.c (revision 92ed1a76)
1 /*
2  * mm/kmemleak.c
3  *
4  * Copyright (C) 2008 ARM Limited
5  * Written by Catalin Marinas <catalin.marinas@arm.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  *
21  * For more information on the algorithm and kmemleak usage, please see
22  * Documentation/kmemleak.txt.
23  *
24  * Notes on locking
25  * ----------------
26  *
27  * The following locks and mutexes are used by kmemleak:
28  *
29  * - kmemleak_lock (rwlock): protects the object_list modifications and
30  *   accesses to the object_tree_root. The object_list is the main list
31  *   holding the metadata (struct kmemleak_object) for the allocated memory
32  *   blocks. The object_tree_root is a priority search tree used to look up
33  *   metadata based on a pointer to the corresponding memory block.  The
34  *   kmemleak_object structures are added to the object_list and
35  *   object_tree_root in the create_object() function called from the
36  *   kmemleak_alloc() callback and removed in delete_object() called from the
37  *   kmemleak_free() callback
38  * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
39  *   the metadata (e.g. count) are protected by this lock. Note that some
40  *   members of this structure may be protected by other means (atomic or
41  *   kmemleak_lock). This lock is also held when scanning the corresponding
42  *   memory block to avoid the kernel freeing it via the kmemleak_free()
43  *   callback. This is less heavyweight than holding a global lock like
44  *   kmemleak_lock during scanning
45  * - scan_mutex (mutex): ensures that only one thread may scan the memory for
46  *   unreferenced objects at a time. The gray_list contains the objects which
47  *   are already referenced or marked as false positives and need to be
48  *   scanned. This list is only modified during a scanning episode when the
49  *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
50  *   Note that the kmemleak_object.use_count is incremented when an object is
51  *   added to the gray_list and therefore cannot be freed. This mutex also
52  *   prevents concurrent users of the "kmemleak" debugfs file and serializes
53  *   changes to the memory scanning parameters, including the scan_thread
54  *   pointer
55  *
56  * The kmemleak_object structures have a use_count incremented or decremented
57  * using the get_object()/put_object() functions. When the use_count becomes
58  * 0, this count can no longer be incremented and put_object() schedules the
59  * kmemleak_object freeing via an RCU callback. All calls to the get_object()
60  * function must be protected by rcu_read_lock() to avoid accessing a freed
61  * structure.
62  */
63 
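/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * the use_count/RCU discipline described in the locking notes above,
 * written out as a minimal consumer. find_and_get_object() (defined below)
 * pins the metadata by incrementing use_count under rcu_read_lock();
 * put_object() drops the pin and schedules the RCU freeing once the count
 * reaches 0. The function name example_inspect_object() is made up.
 *
 *	static void example_inspect_object(unsigned long ptr)
 *	{
 *		struct kmemleak_object *object;
 *		unsigned long flags;
 *
 *		object = find_and_get_object(ptr, 0);
 *		if (!object)
 *			return;
 *		spin_lock_irqsave(&object->lock, flags);
 *		dump_object_info(object);
 *		spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);
 *	}
 *
 * This is exactly the pattern used by dump_str_object_info() further down.
 */
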
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65 
66 #include <linux/init.h>
67 #include <linux/kernel.h>
68 #include <linux/list.h>
69 #include <linux/sched.h>
70 #include <linux/jiffies.h>
71 #include <linux/delay.h>
72 #include <linux/module.h>
73 #include <linux/kthread.h>
74 #include <linux/prio_tree.h>
75 #include <linux/fs.h>
76 #include <linux/debugfs.h>
77 #include <linux/seq_file.h>
78 #include <linux/cpumask.h>
79 #include <linux/spinlock.h>
80 #include <linux/mutex.h>
81 #include <linux/rcupdate.h>
82 #include <linux/stacktrace.h>
83 #include <linux/cache.h>
84 #include <linux/percpu.h>
85 #include <linux/hardirq.h>
86 #include <linux/mmzone.h>
87 #include <linux/slab.h>
88 #include <linux/thread_info.h>
89 #include <linux/err.h>
90 #include <linux/uaccess.h>
91 #include <linux/string.h>
92 #include <linux/nodemask.h>
93 #include <linux/mm.h>
94 #include <linux/workqueue.h>
95 #include <linux/crc32.h>
96 
97 #include <asm/sections.h>
98 #include <asm/processor.h>
99 #include <asm/atomic.h>
100 
101 #include <linux/kmemcheck.h>
102 #include <linux/kmemleak.h>
103 
104 /*
105  * Kmemleak configuration and common defines.
106  */
107 #define MAX_TRACE		16	/* stack trace length */
108 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
109 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
110 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
111 #define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
112 
113 #define BYTES_PER_POINTER	sizeof(void *)
114 
115 /* GFP bitmask for kmemleak internal allocations */
116 #define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)
117 
118 /* scanning area inside a memory block */
119 struct kmemleak_scan_area {
120 	struct hlist_node node;
121 	unsigned long start;
122 	size_t size;
123 };
124 
125 #define KMEMLEAK_GREY	0
126 #define KMEMLEAK_BLACK	-1
127 
128 /*
129  * Structure holding the metadata for each allocated memory block.
130  * Modifications to such objects should be made while holding the
131  * object->lock. Insertions or deletions from object_list, gray_list or
132  * tree_node are already protected by the corresponding locks or mutex (see
133  * the notes on locking above). These objects are reference-counted
134  * (use_count) and freed using the RCU mechanism.
135  */
136 struct kmemleak_object {
137 	spinlock_t lock;
138 	unsigned long flags;		/* object status flags */
139 	struct list_head object_list;
140 	struct list_head gray_list;
141 	struct prio_tree_node tree_node;
142 	struct rcu_head rcu;		/* object_list lockless traversal */
143 	/* object usage count; object freed when use_count == 0 */
144 	atomic_t use_count;
145 	unsigned long pointer;
146 	size_t size;
147 	/* minimum number of pointers found before it is considered a leak */
148 	int min_count;
149 	/* the total number of pointers found pointing to this object */
150 	int count;
151 	/* checksum for detecting modified objects */
152 	u32 checksum;
153 	/* memory ranges to be scanned inside an object (empty for all) */
154 	struct hlist_head area_list;
155 	unsigned long trace[MAX_TRACE];
156 	unsigned int trace_len;
157 	unsigned long jiffies;		/* creation timestamp */
158 	pid_t pid;			/* pid of the current task */
159 	char comm[TASK_COMM_LEN];	/* executable name */
160 };
161 
162 /* flag representing the memory block allocation status */
163 #define OBJECT_ALLOCATED	(1 << 0)
164 /* flag set after the first reporting of an unreferenced object */
165 #define OBJECT_REPORTED		(1 << 1)
166 /* flag set to not scan the object */
167 #define OBJECT_NO_SCAN		(1 << 2)
168 
169 /* number of bytes to print per line; must be 16 or 32 */
170 #define HEX_ROW_SIZE		16
171 /* number of bytes to print at a time (1, 2, 4, 8) */
172 #define HEX_GROUP_SIZE		1
173 /* include ASCII after the hex output */
174 #define HEX_ASCII		1
175 /* max number of lines to be printed */
176 #define HEX_MAX_LINES		2
177 
178 /* the list of all allocated objects */
179 static LIST_HEAD(object_list);
180 /* the list of gray-colored objects (see color_gray comment below) */
181 static LIST_HEAD(gray_list);
182 /* prio search tree for object boundaries */
183 static struct prio_tree_root object_tree_root;
184 /* rw_lock protecting the access to object_list and prio_tree_root */
185 static DEFINE_RWLOCK(kmemleak_lock);
186 
187 /* allocation caches for kmemleak internal data */
188 static struct kmem_cache *object_cache;
189 static struct kmem_cache *scan_area_cache;
190 
191 /* set if tracing memory operations is enabled */
192 static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
193 /* set in the late_initcall if there were no errors */
194 static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
195 /* enables or disables early logging of the memory operations */
196 static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
197 /* set if a fatal kmemleak error has occurred */
198 static atomic_t kmemleak_error = ATOMIC_INIT(0);
199 
200 /* minimum and maximum address that may be valid pointers */
201 static unsigned long min_addr = ULONG_MAX;
202 static unsigned long max_addr;
203 
204 static struct task_struct *scan_thread;
205 /* used to avoid reporting of recently allocated objects */
206 static unsigned long jiffies_min_age;
207 static unsigned long jiffies_last_scan;
208 /* delay between automatic memory scannings */
209 static signed long jiffies_scan_wait;
210 /* enables or disables the scanning of task stacks */
211 static int kmemleak_stack_scan = 1;
212 /* protects the memory scanning, parameters and debug/kmemleak file access */
213 static DEFINE_MUTEX(scan_mutex);
214 /* setting kmemleak=on will set this var, skipping the disable */
215 static int kmemleak_skip_disable;
216 
217 
218 /*
219  * Early object allocation/freeing logging. Kmemleak is fully initialized
220  * only after the kernel allocator; however, both the kernel allocator and
221  * kmemleak itself may allocate memory blocks that need to be tracked before
222  * that point. Kmemleak therefore records the allocation/freeing information
223  * in a static buffer until it is fully initialized.
224  */
225 
226 /* kmemleak operation type for early logging */
227 enum {
228 	KMEMLEAK_ALLOC,
229 	KMEMLEAK_FREE,
230 	KMEMLEAK_FREE_PART,
231 	KMEMLEAK_NOT_LEAK,
232 	KMEMLEAK_IGNORE,
233 	KMEMLEAK_SCAN_AREA,
234 	KMEMLEAK_NO_SCAN
235 };
236 
237 /*
238  * Structure holding the information passed to kmemleak callbacks during the
239  * early logging.
240  */
241 struct early_log {
242 	int op_type;			/* kmemleak operation type */
243 	const void *ptr;		/* allocated/freed memory block */
244 	size_t size;			/* memory block size */
245 	int min_count;			/* minimum reference count */
246 	unsigned long trace[MAX_TRACE];	/* stack trace */
247 	unsigned int trace_len;		/* stack trace length */
248 };
249 
250 /* early logging buffer and current position */
251 static struct early_log
252 	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
253 static int crt_early_log __initdata;
254 
255 static void kmemleak_disable(void);
256 
257 /*
258  * Print a warning and dump the stack trace.
259  */
260 #define kmemleak_warn(x...)	do {	\
261 	pr_warning(x);			\
262 	dump_stack();			\
263 } while (0)
264 
265 /*
266  * Macro invoked when a serious kmemleak condition has occurred and cannot be
267  * recovered from. Kmemleak will be disabled and further allocation/freeing
268  * tracing is no longer available.
269  */
270 #define kmemleak_stop(x...)	do {	\
271 	kmemleak_warn(x);		\
272 	kmemleak_disable();		\
273 } while (0)
274 
275 /*
276  * Print the object's hex dump to the seq file. The number of lines to be
277  * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
278  * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
279  * with the object->lock held.
280  */
281 static void hex_dump_object(struct seq_file *seq,
282 			    struct kmemleak_object *object)
283 {
284 	const u8 *ptr = (const u8 *)object->pointer;
285 	int i, len, remaining;
286 	unsigned char linebuf[HEX_ROW_SIZE * 5];
287 
288 	/* limit the number of lines to HEX_MAX_LINES */
289 	remaining = len =
290 		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
291 
292 	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
293 	for (i = 0; i < len; i += HEX_ROW_SIZE) {
294 		int linelen = min(remaining, HEX_ROW_SIZE);
295 
296 		remaining -= HEX_ROW_SIZE;
297 		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
298 				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
299 				   HEX_ASCII);
300 		seq_printf(seq, "    %s\n", linebuf);
301 	}
302 }
303 
304 /*
305  * Object colors, encoded with count and min_count:
306  * - white - orphan object, not enough references to it (count < min_count)
307  * - gray  - not orphan, not marked as false positive (min_count == 0) or
308  *		sufficient references to it (count >= min_count)
309  * - black - ignore, it doesn't contain references (e.g. text section)
310  *		(min_count == -1). No function defined for this color.
311  * Newly created objects start out white (create_object() below initializes
312  * object->count to 0) and are re-colored by the following memory scans.
313  */
314 static bool color_white(const struct kmemleak_object *object)
315 {
316 	return object->count != KMEMLEAK_BLACK &&
317 		object->count < object->min_count;
318 }
319 
320 static bool color_gray(const struct kmemleak_object *object)
321 {
322 	return object->min_count != KMEMLEAK_BLACK &&
323 		object->count >= object->min_count;
324 }
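
/*
 * Editor's note -- a few worked examples of the encoding above:
 *
 *	min_count	count	color
 *	    2		  1	white (reported once old enough)
 *	    2		  3	gray  (enough references were found)
 *	    0		 any	gray  (marked as a false positive)
 *	   -1		 any	black (neither scanned nor reported)
 */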
325 
326 /*
327  * Objects are considered unreferenced only if their color is white, they have
328  * not been deleted and have a minimum age, to avoid false positives caused by
329  * pointers temporarily stored in CPU registers.
330  */
331 static bool unreferenced_object(struct kmemleak_object *object)
332 {
333 	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
334 		time_before_eq(object->jiffies + jiffies_min_age,
335 			       jiffies_last_scan);
336 }
337 
338 /*
339  * Print the information of an unreferenced object to the seq file. The
340  * print_unreferenced() function must be called with the object->lock held.
341  */
342 static void print_unreferenced(struct seq_file *seq,
343 			       struct kmemleak_object *object)
344 {
345 	int i;
346 	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
347 
348 	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
349 		   object->pointer, object->size);
350 	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
351 		   object->comm, object->pid, object->jiffies,
352 		   msecs_age / 1000, msecs_age % 1000);
353 	hex_dump_object(seq, object);
354 	seq_printf(seq, "  backtrace:\n");
355 
356 	for (i = 0; i < object->trace_len; i++) {
357 		void *ptr = (void *)object->trace[i];
358 		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
359 	}
360 }
361 
362 /*
363  * Print the kmemleak_object information. This function is used mainly for
364  * debugging special cases during kmemleak operations. It must be called with
365  * the object->lock held.
366  */
367 static void dump_object_info(struct kmemleak_object *object)
368 {
369 	struct stack_trace trace;
370 
371 	trace.nr_entries = object->trace_len;
372 	trace.entries = object->trace;
373 
374 	pr_notice("Object 0x%08lx (size %zu):\n",
375 		  object->tree_node.start, object->size);
376 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
377 		  object->comm, object->pid, object->jiffies);
378 	pr_notice("  min_count = %d\n", object->min_count);
379 	pr_notice("  count = %d\n", object->count);
380 	pr_notice("  flags = 0x%lx\n", object->flags);
381 	pr_notice("  checksum = %d\n", object->checksum);
382 	pr_notice("  backtrace:\n");
383 	print_stack_trace(&trace, 4);
384 }
385 
386 /*
387  * Look up the metadata (kmemleak_object) of a memory block in the priority
388  * search tree based on a pointer value. If alias is 0, only values pointing
389  * to the beginning of the memory block are allowed. The kmemleak_lock must be held
390  * when calling this function.
391  */
392 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
393 {
394 	struct prio_tree_node *node;
395 	struct prio_tree_iter iter;
396 	struct kmemleak_object *object;
397 
398 	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
399 	node = prio_tree_next(&iter);
400 	if (node) {
401 		object = prio_tree_entry(node, struct kmemleak_object,
402 					 tree_node);
403 		if (!alias && object->pointer != ptr) {
404 			pr_warning("Found object by alias at 0x%08lx\n", ptr);
405 			dump_stack();
406 			dump_object_info(object);
407 			object = NULL;
408 		}
409 	} else
410 		object = NULL;
411 
412 	return object;
413 }
414 
415 /*
416  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
417  * that once an object's use_count has reached 0, the RCU freeing has already
418  * been registered and the object must no longer be used. This function must be
419  * called under the protection of rcu_read_lock().
420  */
421 static int get_object(struct kmemleak_object *object)
422 {
423 	return atomic_inc_not_zero(&object->use_count);
424 }
425 
426 /*
427  * RCU callback to free a kmemleak_object.
428  */
429 static void free_object_rcu(struct rcu_head *rcu)
430 {
431 	struct hlist_node *elem, *tmp;
432 	struct kmemleak_scan_area *area;
433 	struct kmemleak_object *object =
434 		container_of(rcu, struct kmemleak_object, rcu);
435 
436 	/*
437 	 * Once use_count is 0 (guaranteed by put_object), there is no other
438 	 * code accessing this object, hence no need for locking.
439 	 */
440 	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
441 		hlist_del(elem);
442 		kmem_cache_free(scan_area_cache, area);
443 	}
444 	kmem_cache_free(object_cache, object);
445 }
446 
447 /*
448  * Decrement the object use_count. Once the count is 0, free the object using
449  * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
450  * delete_object() path, the delayed RCU freeing ensures that there is no
451  * recursive call to the kernel allocator. Lock-less RCU object_list traversal
452  * is also possible.
453  */
454 static void put_object(struct kmemleak_object *object)
455 {
456 	if (!atomic_dec_and_test(&object->use_count))
457 		return;
458 
459 	/* should only get here after delete_object was called */
460 	WARN_ON(object->flags & OBJECT_ALLOCATED);
461 
462 	call_rcu(&object->rcu, free_object_rcu);
463 }
464 
465 /*
466  * Look up an object in the prio search tree and increase its use_count.
467  */
468 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
469 {
470 	unsigned long flags;
471 	struct kmemleak_object *object = NULL;
472 
473 	rcu_read_lock();
474 	read_lock_irqsave(&kmemleak_lock, flags);
475 	if (ptr >= min_addr && ptr < max_addr)
476 		object = lookup_object(ptr, alias);
477 	read_unlock_irqrestore(&kmemleak_lock, flags);
478 
479 	/* check whether the object is still available */
480 	if (object && !get_object(object))
481 		object = NULL;
482 	rcu_read_unlock();
483 
484 	return object;
485 }
486 
487 /*
488  * Save stack trace to the given array of MAX_TRACE size.
489  */
490 static int __save_stack_trace(unsigned long *trace)
491 {
492 	struct stack_trace stack_trace;
493 
494 	stack_trace.max_entries = MAX_TRACE;
495 	stack_trace.nr_entries = 0;
496 	stack_trace.entries = trace;
497 	stack_trace.skip = 2;
498 	save_stack_trace(&stack_trace);
499 
500 	return stack_trace.nr_entries;
501 }
502 
503 /*
504  * Create the metadata (struct kmemleak_object) corresponding to an allocated
505  * memory block and add it to the object_list and object_tree_root.
506  */
507 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
508 					     int min_count, gfp_t gfp)
509 {
510 	unsigned long flags;
511 	struct kmemleak_object *object;
512 	struct prio_tree_node *node;
513 
514 	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
515 	if (!object) {
516 		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
517 		return NULL;
518 	}
519 
520 	INIT_LIST_HEAD(&object->object_list);
521 	INIT_LIST_HEAD(&object->gray_list);
522 	INIT_HLIST_HEAD(&object->area_list);
523 	spin_lock_init(&object->lock);
524 	atomic_set(&object->use_count, 1);
525 	object->flags = OBJECT_ALLOCATED;
526 	object->pointer = ptr;
527 	object->size = size;
528 	object->min_count = min_count;
529 	object->count = 0;			/* white color initially */
530 	object->jiffies = jiffies;
531 	object->checksum = 0;
532 
533 	/* task information */
534 	if (in_irq()) {
535 		object->pid = 0;
536 		strncpy(object->comm, "hardirq", sizeof(object->comm));
537 	} else if (in_softirq()) {
538 		object->pid = 0;
539 		strncpy(object->comm, "softirq", sizeof(object->comm));
540 	} else {
541 		object->pid = current->pid;
542 		/*
543 		 * There is a small chance of a race with set_task_comm(),
544 		 * however using get_task_comm() here may cause locking
545 		 * dependency issues with current->alloc_lock. In the worst
546 		 * case, the command line is not correct.
547 		 */
548 		strncpy(object->comm, current->comm, sizeof(object->comm));
549 	}
550 
551 	/* kernel backtrace */
552 	object->trace_len = __save_stack_trace(object->trace);
553 
554 	INIT_PRIO_TREE_NODE(&object->tree_node);
555 	object->tree_node.start = ptr;
556 	object->tree_node.last = ptr + size - 1;
557 
558 	write_lock_irqsave(&kmemleak_lock, flags);
559 
560 	min_addr = min(min_addr, ptr);
561 	max_addr = max(max_addr, ptr + size);
562 	node = prio_tree_insert(&object_tree_root, &object->tree_node);
563 	/*
564	 * The code calling the kernel allocator does not yet have the pointer
565	 * to the memory block, so it cannot free it.  However, we still hold the
566 	 * kmemleak_lock here in case parts of the kernel started freeing
567 	 * random memory blocks.
568 	 */
569 	if (node != &object->tree_node) {
570 		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
571 			      "(already existing)\n", ptr);
572 		object = lookup_object(ptr, 1);
573 		spin_lock(&object->lock);
574 		dump_object_info(object);
575 		spin_unlock(&object->lock);
576 
577 		goto out;
578 	}
579 	list_add_tail_rcu(&object->object_list, &object_list);
580 out:
581 	write_unlock_irqrestore(&kmemleak_lock, flags);
582 	return object;
583 }
584 
585 /*
586  * Remove the metadata (struct kmemleak_object) for a memory block from the
587  * object_list and object_tree_root and decrement its use_count.
588  */
589 static void __delete_object(struct kmemleak_object *object)
590 {
591 	unsigned long flags;
592 
593 	write_lock_irqsave(&kmemleak_lock, flags);
594 	prio_tree_remove(&object_tree_root, &object->tree_node);
595 	list_del_rcu(&object->object_list);
596 	write_unlock_irqrestore(&kmemleak_lock, flags);
597 
598 	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
599 	WARN_ON(atomic_read(&object->use_count) < 2);
600 
601 	/*
602 	 * Locking here also ensures that the corresponding memory block
603 	 * cannot be freed when it is being scanned.
604 	 */
605 	spin_lock_irqsave(&object->lock, flags);
606 	object->flags &= ~OBJECT_ALLOCATED;
607 	spin_unlock_irqrestore(&object->lock, flags);
608 	put_object(object);
609 }
610 
611 /*
612  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
613  * delete it.
614  */
615 static void delete_object_full(unsigned long ptr)
616 {
617 	struct kmemleak_object *object;
618 
619 	object = find_and_get_object(ptr, 0);
620 	if (!object) {
621 #ifdef DEBUG
622 		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
623 			      ptr);
624 #endif
625 		return;
626 	}
627 	__delete_object(object);
628 	put_object(object);
629 }
630 
631 /*
632  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
633  * delete it. If the memory block is partially freed, the function may create
634  * additional metadata for the remaining parts of the block.
635  */
636 static void delete_object_part(unsigned long ptr, size_t size)
637 {
638 	struct kmemleak_object *object;
639 	unsigned long start, end;
640 
641 	object = find_and_get_object(ptr, 1);
642 	if (!object) {
643 #ifdef DEBUG
644 		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
645 			      "(size %zu)\n", ptr, size);
646 #endif
647 		return;
648 	}
649 	__delete_object(object);
650 
651 	/*
652 	 * Create one or two objects that may result from the memory block
653 	 * split. Note that partial freeing is only done by free_bootmem() and
654 	 * this happens before kmemleak_init() is called. The path below is
655 	 * only executed during early log recording in kmemleak_init(), so
656 	 * GFP_KERNEL is enough.
657 	 */
658 	start = object->pointer;
659 	end = object->pointer + object->size;
660 	if (ptr > start)
661 		create_object(start, ptr - start, object->min_count,
662 			      GFP_KERNEL);
663 	if (ptr + size < end)
664 		create_object(ptr + size, end - ptr - size, object->min_count,
665 			      GFP_KERNEL);
666 
667 	put_object(object);
668 }
669 
670 static void __paint_it(struct kmemleak_object *object, int color)
671 {
672 	object->min_count = color;
673 	if (color == KMEMLEAK_BLACK)
674 		object->flags |= OBJECT_NO_SCAN;
675 }
676 
677 static void paint_it(struct kmemleak_object *object, int color)
678 {
679 	unsigned long flags;
680 
681 	spin_lock_irqsave(&object->lock, flags);
682 	__paint_it(object, color);
683 	spin_unlock_irqrestore(&object->lock, flags);
684 }
685 
686 static void paint_ptr(unsigned long ptr, int color)
687 {
688 	struct kmemleak_object *object;
689 
690 	object = find_and_get_object(ptr, 0);
691 	if (!object) {
692 		kmemleak_warn("Trying to color unknown object "
693 			      "at 0x%08lx as %s\n", ptr,
694 			      (color == KMEMLEAK_GREY) ? "Grey" :
695 			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
696 		return;
697 	}
698 	paint_it(object, color);
699 	put_object(object);
700 }
701 
702 /*
703  * Mark an object permanently as gray-colored so that it can no longer be
704  * reported as a leak. This is used in general to mark a false positive.
705  */
706 static void make_gray_object(unsigned long ptr)
707 {
708 	paint_ptr(ptr, KMEMLEAK_GREY);
709 }
710 
711 /*
712  * Mark the object as black-colored so that it is excluded from scanning and
713  * reporting.
714  */
715 static void make_black_object(unsigned long ptr)
716 {
717 	paint_ptr(ptr, KMEMLEAK_BLACK);
718 }
719 
720 /*
721  * Add a scanning area to the object. If at least one such area is added,
722  * kmemleak will only scan these ranges rather than the whole memory block.
723  */
724 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
725 {
726 	unsigned long flags;
727 	struct kmemleak_object *object;
728 	struct kmemleak_scan_area *area;
729 
730 	object = find_and_get_object(ptr, 1);
731 	if (!object) {
732 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
733 			      ptr);
734 		return;
735 	}
736 
737 	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
738 	if (!area) {
739 		kmemleak_warn("Cannot allocate a scan area\n");
740 		goto out;
741 	}
742 
743 	spin_lock_irqsave(&object->lock, flags);
744 	if (ptr + size > object->pointer + object->size) {
745 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
746 		dump_object_info(object);
747 		kmem_cache_free(scan_area_cache, area);
748 		goto out_unlock;
749 	}
750 
751 	INIT_HLIST_NODE(&area->node);
752 	area->start = ptr;
753 	area->size = size;
754 
755 	hlist_add_head(&area->node, &object->area_list);
756 out_unlock:
757 	spin_unlock_irqrestore(&object->lock, flags);
758 out:
759 	put_object(object);
760 }
761 
762 /*
763  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
764  * pointer. Such an object will not be scanned by kmemleak, but references
765  * to it are still searched for.
766  */
767 static void object_no_scan(unsigned long ptr)
768 {
769 	unsigned long flags;
770 	struct kmemleak_object *object;
771 
772 	object = find_and_get_object(ptr, 0);
773 	if (!object) {
774 		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
775 		return;
776 	}
777 
778 	spin_lock_irqsave(&object->lock, flags);
779 	object->flags |= OBJECT_NO_SCAN;
780 	spin_unlock_irqrestore(&object->lock, flags);
781 	put_object(object);
782 }
783 
784 /*
785  * Log an early kmemleak_* call to the early_log buffer. These calls will be
786  * processed later once kmemleak is fully initialized.
787  */
788 static void __init log_early(int op_type, const void *ptr, size_t size,
789 			     int min_count)
790 {
791 	unsigned long flags;
792 	struct early_log *log;
793 
794 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
795 		pr_warning("Early log buffer exceeded, "
796 			   "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
797 		kmemleak_disable();
798 		return;
799 	}
800 
801 	/*
802 	 * There is no need for locking since the kernel is still in UP mode
803 	 * at this stage. Disabling the IRQs is enough.
804 	 */
805 	local_irq_save(flags);
806 	log = &early_log[crt_early_log];
807 	log->op_type = op_type;
808 	log->ptr = ptr;
809 	log->size = size;
810 	log->min_count = min_count;
811 	if (op_type == KMEMLEAK_ALLOC)
812 		log->trace_len = __save_stack_trace(log->trace);
813 	crt_early_log++;
814 	local_irq_restore(flags);
815 }
816 
817 /*
818  * Create the metadata for an early allocated block and populate its stack trace.
819  */
820 static void early_alloc(struct early_log *log)
821 {
822 	struct kmemleak_object *object;
823 	unsigned long flags;
824 	int i;
825 
826 	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
827 		return;
828 
829 	/*
830 	 * RCU locking needed to ensure object is not freed via put_object().
831 	 */
832 	rcu_read_lock();
833 	object = create_object((unsigned long)log->ptr, log->size,
834 			       log->min_count, GFP_ATOMIC);
835 	if (!object)
836 		goto out;
837 	spin_lock_irqsave(&object->lock, flags);
838 	for (i = 0; i < log->trace_len; i++)
839 		object->trace[i] = log->trace[i];
840 	object->trace_len = log->trace_len;
841 	spin_unlock_irqrestore(&object->lock, flags);
842 out:
843 	rcu_read_unlock();
844 }
845 
846 /**
847  * kmemleak_alloc - register a newly allocated object
848  * @ptr:	pointer to beginning of the object
849  * @size:	size of the object
850  * @min_count:	minimum number of references to this object. If during memory
851  *		scanning a number of references less than @min_count is found,
852  *		the object is reported as a memory leak. If @min_count is 0,
853  *		the object is never reported as a leak. If @min_count is -1,
854  *		the object is ignored (not scanned and not reported as a leak)
855  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
856  *
857  * This function is called from the kernel allocators when a new object
858  * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
859  */
860 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
861 			  gfp_t gfp)
862 {
863 	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
864 
865 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
866 		create_object((unsigned long)ptr, size, min_count, gfp);
867 	else if (atomic_read(&kmemleak_early_log))
868 		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
869 }
870 EXPORT_SYMBOL_GPL(kmemleak_alloc);
871 
872 /**
873  * kmemleak_free - unregister a previously registered object
874  * @ptr:	pointer to beginning of the object
875  *
876  * This function is called from the kernel allocators when an object (memory
877  * block) is freed (kmem_cache_free, kfree, vfree etc.).
878  */
879 void __ref kmemleak_free(const void *ptr)
880 {
881 	pr_debug("%s(0x%p)\n", __func__, ptr);
882 
883 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
884 		delete_object_full((unsigned long)ptr);
885 	else if (atomic_read(&kmemleak_early_log))
886 		log_early(KMEMLEAK_FREE, ptr, 0, 0);
887 }
888 EXPORT_SYMBOL_GPL(kmemleak_free);
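
/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * how a hypothetical allocator, handing out blocks from a region that
 * kmemleak does not otherwise track, would pair the two callbacks above.
 * A min_count of 1 means the block is reported unless at least one
 * reference to it is found during scanning. The my_pool_* names are made
 * up; the standard allocators (kmem_cache_alloc, kmalloc, vmalloc etc.)
 * already invoke these hooks internally, so normal callers must not
 * repeat them.
 *
 *	static void *my_pool_alloc(struct my_pool *pool, size_t size, gfp_t gfp)
 *	{
 *		void *ptr = my_pool_carve(pool, size);
 *
 *		if (ptr)
 *			kmemleak_alloc(ptr, size, 1, gfp);
 *		return ptr;
 *	}
 *
 *	static void my_pool_free(struct my_pool *pool, void *ptr)
 *	{
 *		kmemleak_free(ptr);
 *		my_pool_release(pool, ptr);
 *	}
 */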
889 
890 /**
891  * kmemleak_free_part - partially unregister a previously registered object
892  * @ptr:	pointer to the beginning of or inside the object. This also
893  *		represents the start of the range to be freed
894  * @size:	size to be unregistered
895  *
896  * This function is called when only a part of a memory block is freed
897  * (usually from the bootmem allocator).
898  */
899 void __ref kmemleak_free_part(const void *ptr, size_t size)
900 {
901 	pr_debug("%s(0x%p)\n", __func__, ptr);
902 
903 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
904 		delete_object_part((unsigned long)ptr, size);
905 	else if (atomic_read(&kmemleak_early_log))
906 		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
907 }
908 EXPORT_SYMBOL_GPL(kmemleak_free_part);
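
/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * partial unregistration as a boot-time allocator might perform it when
 * the tail of a previously registered region [base, base + total) is
 * handed back. Except for kmemleak_free_part(), all names are made up.
 *
 *	static void my_region_trim(void *base, size_t total, size_t keep)
 *	{
 *		kmemleak_free_part((char *)base + keep, total - keep);
 *		my_bootmem_release((char *)base + keep, total - keep);
 *	}
 */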
909 
910 /**
911  * kmemleak_not_leak - mark an allocated object as false positive
912  * @ptr:	pointer to beginning of the object
913  *
914  * Calling this function on an object will cause the memory block to no longer
915  * be reported as a leak and to always be scanned.
916  */
917 void __ref kmemleak_not_leak(const void *ptr)
918 {
919 	pr_debug("%s(0x%p)\n", __func__, ptr);
920 
921 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
922 		make_gray_object((unsigned long)ptr);
923 	else if (atomic_read(&kmemleak_early_log))
924 		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
925 }
926 EXPORT_SYMBOL(kmemleak_not_leak);
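
/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * a typical false-positive annotation. The buffer's only reference is the
 * physical address programmed into a device register, which memory
 * scanning cannot see, so the object is painted gray to suppress the
 * report. The my_* names and MY_REG_DMA_ADDR are made up.
 *
 *	static int my_dev_init_dma(struct my_dev *dev)
 *	{
 *		void *buf = kmalloc(MY_BUF_SIZE, GFP_KERNEL);
 *
 *		if (!buf)
 *			return -ENOMEM;
 *		writel(virt_to_phys(buf), dev->regs + MY_REG_DMA_ADDR);
 *		kmemleak_not_leak(buf);
 *		return 0;
 *	}
 */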
927 
928 /**
929  * kmemleak_ignore - ignore an allocated object
930  * @ptr:	pointer to beginning of the object
931  *
932  * Calling this function on an object will cause the memory block to be
933  * ignored (not scanned and not reported as a leak). This is usually done when
934  * it is known that the corresponding block is not a leak and does not contain
935  * any references to other allocated memory blocks.
936  */
937 void __ref kmemleak_ignore(const void *ptr)
938 {
939 	pr_debug("%s(0x%p)\n", __func__, ptr);
940 
941 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
942 		make_black_object((unsigned long)ptr);
943 	else if (atomic_read(&kmemleak_early_log))
944 		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
945 }
946 EXPORT_SYMBOL(kmemleak_ignore);
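
/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * kmemleak_ignore() suits blocks that are known non-leaks and also contain
 * no pointers worth following, e.g. a scratch buffer of raw samples. The
 * my_* names are made up.
 *
 *	static u8 *my_scratch_alloc(void)
 *	{
 *		u8 *scratch = kmalloc(MY_SCRATCH_SIZE, GFP_KERNEL);
 *
 *		if (scratch)
 *			kmemleak_ignore(scratch);
 *		return scratch;
 *	}
 */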
947 
948 /**
949  * kmemleak_scan_area - limit the range to be scanned in an allocated object
950  * @ptr:	pointer to the beginning of or inside the object. This also
951  *		represents the start of the scan area
952  * @size:	size of the scan area
953  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
954  *
955  * This function is used when it is known that only certain parts of an object
956  * contain references to other objects. Kmemleak will only scan these areas,
957  * reducing the number of false negatives.
958  */
959 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
960 {
961 	pr_debug("%s(0x%p)\n", __func__, ptr);
962 
963 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
964 		add_scan_area((unsigned long)ptr, size, gfp);
965 	else if (atomic_read(&kmemleak_early_log))
966 		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
967 }
968 EXPORT_SYMBOL(kmemleak_scan_area);
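
/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * restricting the scan to the pointer-bearing header of a hypothetical
 * object whose large payload holds opaque data; random payload bytes
 * could otherwise alias valid object addresses and hide real leaks.
 *
 *	struct big_obj {
 *		struct list_head link;
 *		u8 payload[PAGE_SIZE];
 *	};
 *
 *	static struct big_obj *big_obj_alloc(gfp_t gfp)
 *	{
 *		struct big_obj *obj = kmalloc(sizeof(*obj), gfp);
 *
 *		if (obj)
 *			kmemleak_scan_area(&obj->link, sizeof(obj->link), gfp);
 *		return obj;
 *	}
 */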
969 
970 /**
971  * kmemleak_no_scan - do not scan an allocated object
972  * @ptr:	pointer to beginning of the object
973  *
974  * This function notifies kmemleak not to scan the given memory block. Useful
975  * in situations where it is known that the given object does not contain any
976  * references to other objects. Kmemleak will not scan such objects, reducing
977  * the number of false negatives.
978  */
979 void __ref kmemleak_no_scan(const void *ptr)
980 {
981 	pr_debug("%s(0x%p)\n", __func__, ptr);
982 
983 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
984 		object_no_scan((unsigned long)ptr);
985 	else if (atomic_read(&kmemleak_early_log))
986 		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
987 }
988 EXPORT_SYMBOL(kmemleak_no_scan);
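
/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * unlike kmemleak_ignore() above, a kmemleak_no_scan() object is still
 * reported if it becomes unreferenced; only its contents are skipped
 * during scanning. The my_fw names are made up.
 *
 *	static int my_fw_load(struct my_fw *fw)
 *	{
 *		fw->blob = kmalloc(fw->blob_size, GFP_KERNEL);
 *		if (!fw->blob)
 *			return -ENOMEM;
 *		kmemleak_no_scan(fw->blob);
 *		return 0;
 *	}
 */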
989 
990 /*
991  * Update an object's checksum and return true if it was modified.
992  */
993 static bool update_checksum(struct kmemleak_object *object)
994 {
995 	u32 old_csum = object->checksum;
996 
997 	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
998 		return false;
999 
1000 	object->checksum = crc32(0, (void *)object->pointer, object->size);
1001 	return object->checksum != old_csum;
1002 }
1003 
1004 /*
1005  * Memory scanning is a long process and it needs to be interruptible. This
1006  * function checks whether such a stop condition has occurred.
1007  */
1008 static int scan_should_stop(void)
1009 {
1010 	if (!atomic_read(&kmemleak_enabled))
1011 		return 1;
1012 
1013 	/*
1014 	 * This function may be called from either process or kthread context,
1015 	 * hence the need to check for both stop conditions.
1016 	 */
1017 	if (current->mm)
1018 		return signal_pending(current);
1019 	else
1020 		return kthread_should_stop();
1023 }
1024 
1025 /*
1026  * Scan a memory block (exclusive range) for valid pointers and add those
1027  * found to the gray list.
1028  */
1029 static void scan_block(void *_start, void *_end,
1030 		       struct kmemleak_object *scanned, int allow_resched)
1031 {
1032 	unsigned long *ptr;
1033 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1034 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1035 
1036 	for (ptr = start; ptr < end; ptr++) {
1037 		struct kmemleak_object *object;
1038 		unsigned long flags;
1039 		unsigned long pointer;
1040 
1041 		if (allow_resched)
1042 			cond_resched();
1043 		if (scan_should_stop())
1044 			break;
1045 
1046 		/* don't scan uninitialized memory */
1047 		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
1048 						  BYTES_PER_POINTER))
1049 			continue;
1050 
1051 		pointer = *ptr;
1052 
1053 		object = find_and_get_object(pointer, 1);
1054 		if (!object)
1055 			continue;
1056 		if (object == scanned) {
1057 			/* self referenced, ignore */
1058 			put_object(object);
1059 			continue;
1060 		}
1061 
1062 		/*
1063 		 * Avoid the lockdep recursive warning on object->lock being
1064 		 * previously acquired in scan_object(). These locks are
1065 		 * enclosed by scan_mutex.
1066 		 */
1067 		spin_lock_irqsave_nested(&object->lock, flags,
1068 					 SINGLE_DEPTH_NESTING);
1069 		if (!color_white(object)) {
1070 			/* non-orphan, ignored or new */
1071 			spin_unlock_irqrestore(&object->lock, flags);
1072 			put_object(object);
1073 			continue;
1074 		}
1075 
1076 		/*
1077 		 * Increase the object's reference count (number of pointers
1078 		 * to the memory block). If this count reaches the required
1079 		 * minimum, the object's color will become gray and it will be
1080 		 * added to the gray_list.
1081 		 */
1082 		object->count++;
1083 		if (color_gray(object)) {
1084 			list_add_tail(&object->gray_list, &gray_list);
1085 			spin_unlock_irqrestore(&object->lock, flags);
1086 			continue;
1087 		}
1088 
1089 		spin_unlock_irqrestore(&object->lock, flags);
1090 		put_object(object);
1091 	}
1092 }
1093 
1094 /*
1095  * Scan a memory block corresponding to a kmemleak_object. The caller must
1096  * ensure that object->use_count >= 1.
1097  */
1098 static void scan_object(struct kmemleak_object *object)
1099 {
1100 	struct kmemleak_scan_area *area;
1101 	struct hlist_node *elem;
1102 	unsigned long flags;
1103 
1104 	/*
1105 	 * Once the object->lock is acquired, the corresponding memory block
1106 	 * cannot be freed (the same lock is acquired in delete_object).
1107 	 */
1108 	spin_lock_irqsave(&object->lock, flags);
1109 	if (object->flags & OBJECT_NO_SCAN)
1110 		goto out;
1111 	if (!(object->flags & OBJECT_ALLOCATED))
1112 		/* already freed object */
1113 		goto out;
1114 	if (hlist_empty(&object->area_list)) {
1115 		void *start = (void *)object->pointer;
1116 		void *end = (void *)(object->pointer + object->size);
1117 
1118 		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
1119 		       !(object->flags & OBJECT_NO_SCAN)) {
1120 			scan_block(start, min(start + MAX_SCAN_SIZE, end),
1121 				   object, 0);
1122 			start += MAX_SCAN_SIZE;
1123 
1124 			spin_unlock_irqrestore(&object->lock, flags);
1125 			cond_resched();
1126 			spin_lock_irqsave(&object->lock, flags);
1127 		}
1128 	} else
1129 		hlist_for_each_entry(area, elem, &object->area_list, node)
1130 			scan_block((void *)area->start,
1131 				   (void *)(area->start + area->size),
1132 				   object, 0);
1133 out:
1134 	spin_unlock_irqrestore(&object->lock, flags);
1135 }
1136 
1137 /*
1138  * Scan the objects already referenced (gray objects). More objects will be
1139  * referenced and, if there are no memory leaks, all the objects are scanned.
1140  */
1141 static void scan_gray_list(void)
1142 {
1143 	struct kmemleak_object *object, *tmp;
1144 
1145 	/*
1146 	 * The list traversal is safe for both tail additions and removals
1147 	 * from inside the loop. The kmemleak objects cannot be freed from
1148 	 * outside the loop because their use_count was incremented.
1149 	 */
1150 	object = list_entry(gray_list.next, typeof(*object), gray_list);
1151 	while (&object->gray_list != &gray_list) {
1152 		cond_resched();
1153 
1154 		/* may add new objects to the list */
1155 		if (!scan_should_stop())
1156 			scan_object(object);
1157 
1158 		tmp = list_entry(object->gray_list.next, typeof(*object),
1159 				 gray_list);
1160 
1161 		/* remove the object from the list and release it */
1162 		list_del(&object->gray_list);
1163 		put_object(object);
1164 
1165 		object = tmp;
1166 	}
1167 	WARN_ON(!list_empty(&gray_list));
1168 }
1169 
1170 /*
1171  * Scan data sections and all the referenced memory blocks allocated via the
1172  * kernel's standard allocators. This function must be called with the
1173  * scan_mutex held.
1174  */
1175 static void kmemleak_scan(void)
1176 {
1177 	unsigned long flags;
1178 	struct kmemleak_object *object;
1179 	int i;
1180 	int new_leaks = 0;
1181 
1182 	jiffies_last_scan = jiffies;
1183 
1184 	/* prepare the kmemleak_object's */
1185 	rcu_read_lock();
1186 	list_for_each_entry_rcu(object, &object_list, object_list) {
1187 		spin_lock_irqsave(&object->lock, flags);
1188 #ifdef DEBUG
1189 		/*
1190 		 * With a few exceptions there should be a maximum of
1191 		 * 1 reference to any object at this point.
1192 		 */
1193 		if (atomic_read(&object->use_count) > 1) {
1194 			pr_debug("object->use_count = %d\n",
1195 				 atomic_read(&object->use_count));
1196 			dump_object_info(object);
1197 		}
1198 #endif
1199 		/* reset the reference count (whiten the object) */
1200 		object->count = 0;
1201 		if (color_gray(object) && get_object(object))
1202 			list_add_tail(&object->gray_list, &gray_list);
1203 
1204 		spin_unlock_irqrestore(&object->lock, flags);
1205 	}
1206 	rcu_read_unlock();
1207 
1208 	/* data/bss scanning */
1209 	scan_block(_sdata, _edata, NULL, 1);
1210 	scan_block(__bss_start, __bss_stop, NULL, 1);
1211 
1212 #ifdef CONFIG_SMP
1213 	/* per-cpu sections scanning */
1214 	for_each_possible_cpu(i)
1215 		scan_block(__per_cpu_start + per_cpu_offset(i),
1216 			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
1217 #endif
1218 
1219 	/*
1220 	 * Struct page scanning for each node. The code below is not yet safe
1221 	 * with MEMORY_HOTPLUG.
1222 	 */
1223 	for_each_online_node(i) {
1224 		pg_data_t *pgdat = NODE_DATA(i);
1225 		unsigned long start_pfn = pgdat->node_start_pfn;
1226 		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1227 		unsigned long pfn;
1228 
1229 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1230 			struct page *page;
1231 
1232 			if (!pfn_valid(pfn))
1233 				continue;
1234 			page = pfn_to_page(pfn);
1235 			/* only scan if page is in use */
1236 			if (page_count(page) == 0)
1237 				continue;
1238 			scan_block(page, page + 1, NULL, 1);
1239 		}
1240 	}
1241 
1242 	/*
1243 	 * Scanning the task stacks (may introduce false negatives).
1244 	 */
1245 	if (kmemleak_stack_scan) {
1246 		struct task_struct *p, *g;
1247 
1248 		read_lock(&tasklist_lock);
1249 		do_each_thread(g, p) {
1250 			scan_block(task_stack_page(p), task_stack_page(p) +
1251 				   THREAD_SIZE, NULL, 0);
1252 		} while_each_thread(g, p);
1253 		read_unlock(&tasklist_lock);
1254 	}
1255 
1256 	/*
1257 	 * Scan the objects already referenced from the sections scanned
1258 	 * above.
1259 	 */
1260 	scan_gray_list();
1261 
1262 	/*
1263 	 * Check for new or unreferenced objects modified since the previous
1264 	 * scan and color them gray until the next scan.
1265 	 */
1266 	rcu_read_lock();
1267 	list_for_each_entry_rcu(object, &object_list, object_list) {
1268 		spin_lock_irqsave(&object->lock, flags);
1269 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1270 		    && update_checksum(object) && get_object(object)) {
1271 			/* color it gray temporarily */
1272 			object->count = object->min_count;
1273 			list_add_tail(&object->gray_list, &gray_list);
1274 		}
1275 		spin_unlock_irqrestore(&object->lock, flags);
1276 	}
1277 	rcu_read_unlock();
1278 
1279 	/*
1280 	 * Re-scan the gray list for modified unreferenced objects.
1281 	 */
1282 	scan_gray_list();
1283 
1284 	/*
1285 	 * If scanning was stopped do not report any new unreferenced objects.
1286 	 */
1287 	if (scan_should_stop())
1288 		return;
1289 
1290 	/*
1291 	 * Scanning result reporting.
1292 	 */
1293 	rcu_read_lock();
1294 	list_for_each_entry_rcu(object, &object_list, object_list) {
1295 		spin_lock_irqsave(&object->lock, flags);
1296 		if (unreferenced_object(object) &&
1297 		    !(object->flags & OBJECT_REPORTED)) {
1298 			object->flags |= OBJECT_REPORTED;
1299 			new_leaks++;
1300 		}
1301 		spin_unlock_irqrestore(&object->lock, flags);
1302 	}
1303 	rcu_read_unlock();
1304 
1305 	if (new_leaks)
1306 		pr_info("%d new suspected memory leaks (see "
1307 			"/sys/kernel/debug/kmemleak)\n", new_leaks);
1308 
1309 }
1310 
1311 /*
1312  * Thread function performing automatic memory scanning. Unreferenced objects
1313  * at the end of a memory scan are reported, but only the first time.
1314  */
1315 static int kmemleak_scan_thread(void *arg)
1316 {
1317 	static int first_run = 1;
1318 
1319 	pr_info("Automatic memory scanning thread started\n");
1320 	set_user_nice(current, 10);
1321 
1322 	/*
1323 	 * Wait before the first scan to allow the system to fully initialize.
1324 	 */
1325 	if (first_run) {
1326 		first_run = 0;
1327 		ssleep(SECS_FIRST_SCAN);
1328 	}
1329 
1330 	while (!kthread_should_stop()) {
1331 		signed long timeout = jiffies_scan_wait;
1332 
1333 		mutex_lock(&scan_mutex);
1334 		kmemleak_scan();
1335 		mutex_unlock(&scan_mutex);
1336 
1337 		/* wait before the next scan */
1338 		while (timeout && !kthread_should_stop())
1339 			timeout = schedule_timeout_interruptible(timeout);
1340 	}
1341 
1342 	pr_info("Automatic memory scanning thread ended\n");
1343 
1344 	return 0;
1345 }
1346 
1347 /*
1348  * Start the automatic memory scanning thread. This function must be called
1349  * with the scan_mutex held.
1350  */
1351 static void start_scan_thread(void)
1352 {
1353 	if (scan_thread)
1354 		return;
1355 	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1356 	if (IS_ERR(scan_thread)) {
1357 		pr_warning("Failed to create the scan thread\n");
1358 		scan_thread = NULL;
1359 	}
1360 }
1361 
1362 /*
1363  * Stop the automatic memory scanning thread. This function must be called
1364  * with the scan_mutex held.
1365  */
1366 static void stop_scan_thread(void)
1367 {
1368 	if (scan_thread) {
1369 		kthread_stop(scan_thread);
1370 		scan_thread = NULL;
1371 	}
1372 }
1373 
1374 /*
1375  * Iterate over the object_list and return the first valid object at or after
1376  * the required position with its use_count incremented.
1378  */
1379 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1380 {
1381 	struct kmemleak_object *object;
1382 	loff_t n = *pos;
1383 	int err;
1384 
1385 	err = mutex_lock_interruptible(&scan_mutex);
1386 	if (err < 0)
1387 		return ERR_PTR(err);
1388 
1389 	rcu_read_lock();
1390 	list_for_each_entry_rcu(object, &object_list, object_list) {
1391 		if (n-- > 0)
1392 			continue;
1393 		if (get_object(object))
1394 			goto out;
1395 	}
1396 	object = NULL;
1397 out:
1398 	return object;
1399 }
1400 
1401 /*
1402  * Return the next object in the object_list. The function decrements the
1403  * use_count of the previous object and increases that of the next one.
1404  */
1405 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1406 {
1407 	struct kmemleak_object *prev_obj = v;
1408 	struct kmemleak_object *next_obj = NULL;
1409 	struct list_head *n = &prev_obj->object_list;
1410 
1411 	++(*pos);
1412 
1413 	list_for_each_continue_rcu(n, &object_list) {
1414 		next_obj = list_entry(n, struct kmemleak_object, object_list);
1415 		if (get_object(next_obj))
1416 			break;
1417 	}
1418 
1419 	put_object(prev_obj);
1420 	return next_obj;
1421 }
1422 
1423 /*
1424  * Decrement the use_count of the last object returned, if any.
1425  */
1426 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1427 {
1428 	if (!IS_ERR(v)) {
1429 		/*
1430 		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1431 		 * waiting was interrupted, so only release it if !IS_ERR.
1432 		 */
1433 		rcu_read_unlock();
1434 		mutex_unlock(&scan_mutex);
1435 		if (v)
1436 			put_object(v);
1437 	}
1438 }
1439 
1440 /*
1441  * Print the information for an unreferenced object to the seq file.
1442  */
1443 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1444 {
1445 	struct kmemleak_object *object = v;
1446 	unsigned long flags;
1447 
1448 	spin_lock_irqsave(&object->lock, flags);
1449 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1450 		print_unreferenced(seq, object);
1451 	spin_unlock_irqrestore(&object->lock, flags);
1452 	return 0;
1453 }
1454 
1455 static const struct seq_operations kmemleak_seq_ops = {
1456 	.start = kmemleak_seq_start,
1457 	.next  = kmemleak_seq_next,
1458 	.stop  = kmemleak_seq_stop,
1459 	.show  = kmemleak_seq_show,
1460 };
1461 
1462 static int kmemleak_open(struct inode *inode, struct file *file)
1463 {
1464 	if (!atomic_read(&kmemleak_enabled))
1465 		return -EBUSY;
1466 
1467 	return seq_open(file, &kmemleak_seq_ops);
1468 }
1469 
1470 static int kmemleak_release(struct inode *inode, struct file *file)
1471 {
1472 	return seq_release(inode, file);
1473 }
1474 
1475 static int dump_str_object_info(const char *str)
1476 {
1477 	unsigned long flags;
1478 	struct kmemleak_object *object;
1479 	unsigned long addr;
1480 
1481 	addr = simple_strtoul(str, NULL, 0);
1482 	object = find_and_get_object(addr, 0);
1483 	if (!object) {
1484 		pr_info("Unknown object at 0x%08lx\n", addr);
1485 		return -EINVAL;
1486 	}
1487 
1488 	spin_lock_irqsave(&object->lock, flags);
1489 	dump_object_info(object);
1490 	spin_unlock_irqrestore(&object->lock, flags);
1491 
1492 	put_object(object);
1493 	return 0;
1494 }
1495 
1496 /*
1497  * We use grey instead of black to ensure we can do future scans on the same
1498  * objects. If we did not scan them again, such black objects could later
1499  * hold the only references to newly allocated objects and we would end up
1500  * with false positives.
1501  */
1502 static void kmemleak_clear(void)
1503 {
1504 	struct kmemleak_object *object;
1505 	unsigned long flags;
1506 
1507 	rcu_read_lock();
1508 	list_for_each_entry_rcu(object, &object_list, object_list) {
1509 		spin_lock_irqsave(&object->lock, flags);
1510 		if ((object->flags & OBJECT_REPORTED) &&
1511 		    unreferenced_object(object))
1512 			__paint_it(object, KMEMLEAK_GREY);
1513 		spin_unlock_irqrestore(&object->lock, flags);
1514 	}
1515 	rcu_read_unlock();
1516 }
1517 
1518 /*
1519  * File write operation to configure kmemleak at run-time. The following
1520  * commands can be written to the /sys/kernel/debug/kmemleak file:
1521  *   off	- disable kmemleak (irreversible)
1522  *   stack=on	- enable the task stacks scanning
1523  *   stack=off	- disable the task stacks scanning
1524  *   scan=on	- start the automatic memory scanning thread
1525  *   scan=off	- stop the automatic memory scanning thread
1526  *   scan=...	- set the automatic memory scanning period in seconds (0 to
1527  *		  disable it)
1528  *   scan	- trigger a memory scan
1529  *   clear	- mark all current reported unreferenced kmemleak objects as
1530  *		  grey to ignore printing them
1531  *   dump=...	- dump information about the object found at the given address
1532  */
1533 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1534 			      size_t size, loff_t *ppos)
1535 {
1536 	char buf[64];
1537 	int buf_size;
1538 	int ret;
1539 
1540 	buf_size = min(size, (sizeof(buf) - 1));
1541 	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1542 		return -EFAULT;
1543 	buf[buf_size] = 0;
1544 
1545 	ret = mutex_lock_interruptible(&scan_mutex);
1546 	if (ret < 0)
1547 		return ret;
1548 
1549 	if (strncmp(buf, "off", 3) == 0)
1550 		kmemleak_disable();
1551 	else if (strncmp(buf, "stack=on", 8) == 0)
1552 		kmemleak_stack_scan = 1;
1553 	else if (strncmp(buf, "stack=off", 9) == 0)
1554 		kmemleak_stack_scan = 0;
1555 	else if (strncmp(buf, "scan=on", 7) == 0)
1556 		start_scan_thread();
1557 	else if (strncmp(buf, "scan=off", 8) == 0)
1558 		stop_scan_thread();
1559 	else if (strncmp(buf, "scan=", 5) == 0) {
1560 		unsigned long secs;
1561 
1562 		ret = strict_strtoul(buf + 5, 0, &secs);
1563 		if (ret < 0)
1564 			goto out;
1565 		stop_scan_thread();
1566 		if (secs) {
1567 			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1568 			start_scan_thread();
1569 		}
1570 	} else if (strncmp(buf, "scan", 4) == 0)
1571 		kmemleak_scan();
1572 	else if (strncmp(buf, "clear", 5) == 0)
1573 		kmemleak_clear();
1574 	else if (strncmp(buf, "dump=", 5) == 0)
1575 		ret = dump_str_object_info(buf + 5);
1576 	else
1577 		ret = -EINVAL;
1578 
1579 out:
1580 	mutex_unlock(&scan_mutex);
1581 	if (ret < 0)
1582 		return ret;
1583 
1584 	/* ignore the rest of the buffer, only one command at a time */
1585 	*ppos += size;
1586 	return size;
1587 }
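
/*
 * Editor's note -- an illustrative sketch, not part of the original file:
 * a minimal userspace program (run as root, with debugfs mounted) driving
 * the command interface above: it triggers an immediate scan and then
 * reads back the leak reports. Only one command is honoured per write.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/kernel/debug/kmemleak", "w");
 *		char line[256];
 *
 *		if (f) {
 *			fputs("scan", f);
 *			fclose(f);
 *		}
 *		f = fopen("/sys/kernel/debug/kmemleak", "r");
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */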
1588 
1589 static const struct file_operations kmemleak_fops = {
1590 	.owner		= THIS_MODULE,
1591 	.open		= kmemleak_open,
1592 	.read		= seq_read,
1593 	.write		= kmemleak_write,
1594 	.llseek		= seq_lseek,
1595 	.release	= kmemleak_release,
1596 };
1597 
1598 /*
1599  * Perform the freeing of the kmemleak internal objects after waiting for any
1600  * current memory scan to complete.
1601  */
1602 static void kmemleak_do_cleanup(struct work_struct *work)
1603 {
1604 	struct kmemleak_object *object;
1605 
1606 	mutex_lock(&scan_mutex);
1607 	stop_scan_thread();
1608 
1609 	rcu_read_lock();
1610 	list_for_each_entry_rcu(object, &object_list, object_list)
1611 		delete_object_full(object->pointer);
1612 	rcu_read_unlock();
1613 	mutex_unlock(&scan_mutex);
1614 }
1615 
1616 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1617 
1618 /*
1619  * Disable kmemleak. No memory allocation/freeing will be traced once this
1620  * function is called. Disabling kmemleak is an irreversible operation.
1621  */
1622 static void kmemleak_disable(void)
1623 {
1624 	/* atomically check whether it was already invoked */
1625 	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
1626 		return;
1627 
1628 	/* stop any memory operation tracing */
1629 	atomic_set(&kmemleak_early_log, 0);
1630 	atomic_set(&kmemleak_enabled, 0);
1631 
1632 	/* check whether it is too early for a kernel thread */
1633 	if (atomic_read(&kmemleak_initialized))
1634 		schedule_work(&cleanup_work);
1635 
1636 	pr_info("Kernel memory leak detector disabled\n");
1637 }
1638 
1639 /*
1640  * Allow boot-time kmemleak disabling (enabled by default).
1641  */
1642 static int kmemleak_boot_config(char *str)
1643 {
1644 	if (!str)
1645 		return -EINVAL;
1646 	if (strcmp(str, "off") == 0)
1647 		kmemleak_disable();
1648 	else if (strcmp(str, "on") == 0)
1649 		kmemleak_skip_disable = 1;
1650 	else
1651 		return -EINVAL;
1652 	return 0;
1653 }
1654 early_param("kmemleak", kmemleak_boot_config);
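
/*
 * Editor's note -- usage example for the parameter above: booting with
 * "kmemleak=off" on the kernel command line disables the detector
 * irreversibly, while "kmemleak=on" skips the default-off behaviour
 * selected by CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF (see kmemleak_init()
 * below).
 */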
1655 
1656 /*
1657  * Kmemleak initialization.
1658  */
1659 void __init kmemleak_init(void)
1660 {
1661 	int i;
1662 	unsigned long flags;
1663 
1664 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1665 	if (!kmemleak_skip_disable) {
1666 		kmemleak_disable();
1667 		return;
1668 	}
1669 #endif
1670 
1671 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1672 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1673 
1674 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1675 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1676 	INIT_PRIO_TREE_ROOT(&object_tree_root);
1677 
1678 	/* the kernel is still in UP mode, so disabling the IRQs is enough */
1679 	local_irq_save(flags);
1680 	if (!atomic_read(&kmemleak_error)) {
1681 		atomic_set(&kmemleak_enabled, 1);
1682 		atomic_set(&kmemleak_early_log, 0);
1683 	}
1684 	local_irq_restore(flags);
1685 
1686 	/*
1687 	 * This is the point where tracking allocations is safe. Automatic
1688 	 * scanning is started during the late initcall. Add the early logged
1689 	 * callbacks to the kmemleak infrastructure.
1690 	 */
1691 	for (i = 0; i < crt_early_log; i++) {
1692 		struct early_log *log = &early_log[i];
1693 
1694 		switch (log->op_type) {
1695 		case KMEMLEAK_ALLOC:
1696 			early_alloc(log);
1697 			break;
1698 		case KMEMLEAK_FREE:
1699 			kmemleak_free(log->ptr);
1700 			break;
1701 		case KMEMLEAK_FREE_PART:
1702 			kmemleak_free_part(log->ptr, log->size);
1703 			break;
1704 		case KMEMLEAK_NOT_LEAK:
1705 			kmemleak_not_leak(log->ptr);
1706 			break;
1707 		case KMEMLEAK_IGNORE:
1708 			kmemleak_ignore(log->ptr);
1709 			break;
1710 		case KMEMLEAK_SCAN_AREA:
1711 			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
1712 			break;
1713 		case KMEMLEAK_NO_SCAN:
1714 			kmemleak_no_scan(log->ptr);
1715 			break;
1716 		default:
1717 			WARN_ON(1);
1718 		}
1719 	}
1720 }
1721 
1722 /*
1723  * Late initialization function.
1724  */
1725 static int __init kmemleak_late_init(void)
1726 {
1727 	struct dentry *dentry;
1728 
1729 	atomic_set(&kmemleak_initialized, 1);
1730 
1731 	if (atomic_read(&kmemleak_error)) {
1732 		/*
1733 		 * Some error occurred and kmemleak was disabled. There is a
1734 		 * small chance that kmemleak_disable() was called immediately
1735 		 * after setting kmemleak_initialized and we may end up with
1736 		 * two clean-up threads but serialized by scan_mutex.
1737 		 */
1738 		schedule_work(&cleanup_work);
1739 		return -ENOMEM;
1740 	}
1741 
1742 	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1743 				     &kmemleak_fops);
1744 	if (!dentry)
1745 		pr_warning("Failed to create the debugfs kmemleak file\n");
1746 	mutex_lock(&scan_mutex);
1747 	start_scan_thread();
1748 	mutex_unlock(&scan_mutex);
1749 
1750 	pr_info("Kernel memory leak detector initialized\n");
1751 
1752 	return 0;
1753 }
1754 late_initcall(kmemleak_late_init);
1755