xref: /openbmc/linux/mm/kmemleak.c (revision 0ea820cf)
1 /*
2  * mm/kmemleak.c
3  *
4  * Copyright (C) 2008 ARM Limited
5  * Written by Catalin Marinas <catalin.marinas@arm.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  *
21  * For more information on the algorithm and kmemleak usage, please see
22  * Documentation/kmemleak.txt.
23  *
24  * Notes on locking
25  * ----------------
26  *
27  * The following locks and mutexes are used by kmemleak:
28  *
29  * - kmemleak_lock (rwlock): protects the object_list modifications and
30  *   accesses to the object_tree_root. The object_list is the main list
31  *   holding the metadata (struct kmemleak_object) for the allocated memory
32  *   blocks. The object_tree_root is a priority search tree used to look up
33  *   metadata based on a pointer to the corresponding memory block.  The
34  *   kmemleak_object structures are added to the object_list and
35  *   object_tree_root in the create_object() function called from the
36  *   kmemleak_alloc() callback and removed in delete_object() called from the
37  *   kmemleak_free() callback
38  * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
39  *   the metadata (e.g. count) are protected by this lock. Note that some
40  *   members of this structure may be protected by other means (atomic or
41  *   kmemleak_lock). This lock is also held when scanning the corresponding
42  *   memory block to avoid the kernel freeing it via the kmemleak_free()
43  *   callback. This is less heavyweight than holding a global lock like
44  *   kmemleak_lock during scanning
45  * - scan_mutex (mutex): ensures that only one thread may scan the memory for
46  *   unreferenced objects at a time. The gray_list contains the objects which
47  *   are already referenced or marked as false positives and need to be
48  *   scanned. This list is only modified during a scanning episode when the
49  *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
50  *   Note that the kmemleak_object.use_count is incremented when an object is
51  *   added to the gray_list and therefore cannot be freed. This mutex also
52  *   prevents multiple users of the "kmemleak" debugfs file together with
53  *   modifications to the memory scanning parameters including the scan_thread
54  *   pointer
55  *
56  * The kmemleak_object structures have a use_count incremented or decremented
57  * using the get_object()/put_object() functions. When the use_count becomes
58  * 0, this count can no longer be incremented and put_object() schedules the
59  * kmemleak_object freeing via an RCU callback. All calls to the get_object()
60  * function must be protected by rcu_read_lock() to avoid accessing a freed
61  * structure.
62  */
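
/*
 * Editor's illustrative sketch (not part of the original file): the
 * use_count/RCU lifetime rule described above, i.e. every get_object()
 * must happen under rcu_read_lock() and be paired with put_object().
 * The helper name example_walk_objects() and its callback parameter are
 * hypothetical.
 */
#if 0	/* example only */
static void example_walk_objects(void (*fn)(struct kmemleak_object *))
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (!get_object(object))	/* use_count was 0, being freed */
			continue;
		fn(object);
		put_object(object);		/* may schedule the RCU freeing */
	}
	rcu_read_unlock();
}
#endif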
63 
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65 
66 #include <linux/init.h>
67 #include <linux/kernel.h>
68 #include <linux/list.h>
69 #include <linux/sched.h>
70 #include <linux/jiffies.h>
71 #include <linux/delay.h>
72 #include <linux/module.h>
73 #include <linux/kthread.h>
74 #include <linux/prio_tree.h>
75 #include <linux/gfp.h>
76 #include <linux/fs.h>
77 #include <linux/debugfs.h>
78 #include <linux/seq_file.h>
79 #include <linux/cpumask.h>
80 #include <linux/spinlock.h>
81 #include <linux/mutex.h>
82 #include <linux/rcupdate.h>
83 #include <linux/stacktrace.h>
84 #include <linux/cache.h>
85 #include <linux/percpu.h>
86 #include <linux/hardirq.h>
87 #include <linux/mmzone.h>
88 #include <linux/slab.h>
89 #include <linux/thread_info.h>
90 #include <linux/err.h>
91 #include <linux/uaccess.h>
92 #include <linux/string.h>
93 #include <linux/nodemask.h>
94 #include <linux/mm.h>
95 #include <linux/workqueue.h>
96 #include <linux/crc32.h>
97 
98 #include <asm/sections.h>
99 #include <asm/processor.h>
100 #include <asm/atomic.h>
101 
102 #include <linux/kmemcheck.h>
103 #include <linux/kmemleak.h>
104 
105 /*
106  * Kmemleak configuration and common defines.
107  */
108 #define MAX_TRACE		16	/* stack trace length */
109 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
110 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
111 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
112 #define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
113 
114 #define BYTES_PER_POINTER	sizeof(void *)
115 
116 /* GFP bitmask for kmemleak internal allocations */
117 #define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)
118 
119 /* scanning area inside a memory block */
120 struct kmemleak_scan_area {
121 	struct hlist_node node;
122 	unsigned long start;
123 	size_t size;
124 };
125 
126 #define KMEMLEAK_GREY	0
127 #define KMEMLEAK_BLACK	-1
128 
129 /*
130  * Structure holding the metadata for each allocated memory block.
131  * Modifications to such objects should be made while holding the
132  * object->lock. Insertions or deletions from object_list, gray_list or
133  * tree_node are already protected by the corresponding locks or mutex (see
134  * the notes on locking above). These objects are reference-counted
135  * (use_count) and freed using the RCU mechanism.
136  */
137 struct kmemleak_object {
138 	spinlock_t lock;
139 	unsigned long flags;		/* object status flags */
140 	struct list_head object_list;
141 	struct list_head gray_list;
142 	struct prio_tree_node tree_node;
143 	struct rcu_head rcu;		/* object_list lockless traversal */
144 	/* object usage count; object freed when use_count == 0 */
145 	atomic_t use_count;
146 	unsigned long pointer;
147 	size_t size;
148 	/* minimum number of pointers found before the object is considered a leak */
149 	int min_count;
150 	/* the total number of pointers found pointing to this object */
151 	int count;
152 	/* checksum for detecting modified objects */
153 	u32 checksum;
154 	/* memory ranges to be scanned inside an object (empty for all) */
155 	struct hlist_head area_list;
156 	unsigned long trace[MAX_TRACE];
157 	unsigned int trace_len;
158 	unsigned long jiffies;		/* creation timestamp */
159 	pid_t pid;			/* pid of the current task */
160 	char comm[TASK_COMM_LEN];	/* executable name */
161 };
162 
163 /* flag representing the memory block allocation status */
164 #define OBJECT_ALLOCATED	(1 << 0)
165 /* flag set after the first reporting of an unreferenced object */
166 #define OBJECT_REPORTED		(1 << 1)
167 /* flag set to not scan the object */
168 #define OBJECT_NO_SCAN		(1 << 2)
169 
170 /* number of bytes to print per line; must be 16 or 32 */
171 #define HEX_ROW_SIZE		16
172 /* number of bytes to print at a time (1, 2, 4, 8) */
173 #define HEX_GROUP_SIZE		1
174 /* include ASCII after the hex output */
175 #define HEX_ASCII		1
176 /* max number of lines to be printed */
177 #define HEX_MAX_LINES		2
178 
179 /* the list of all allocated objects */
180 static LIST_HEAD(object_list);
181 /* the list of gray-colored objects (see color_gray comment below) */
182 static LIST_HEAD(gray_list);
183 /* prio search tree for object boundaries */
184 static struct prio_tree_root object_tree_root;
185 /* rw_lock protecting the access to object_list and prio_tree_root */
186 static DEFINE_RWLOCK(kmemleak_lock);
187 
188 /* allocation caches for kmemleak internal data */
189 static struct kmem_cache *object_cache;
190 static struct kmem_cache *scan_area_cache;
191 
192 /* set if tracing memory operations is enabled */
193 static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
194 /* set in the late_initcall if there were no errors */
195 static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
196 /* enables or disables early logging of the memory operations */
197 static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
198 /* set if a fatal kmemleak error has occurred */
199 static atomic_t kmemleak_error = ATOMIC_INIT(0);
200 
201 /* minimum and maximum addresses that may be valid pointers */
202 static unsigned long min_addr = ULONG_MAX;
203 static unsigned long max_addr;
204 
205 static struct task_struct *scan_thread;
206 /* used to avoid reporting of recently allocated objects */
207 static unsigned long jiffies_min_age;
208 static unsigned long jiffies_last_scan;
209 /* delay between automatic memory scannings */
210 static signed long jiffies_scan_wait;
211 /* enables or disables the task stacks scanning */
212 static int kmemleak_stack_scan = 1;
213 /* protects the memory scanning, parameters and debug/kmemleak file access */
214 static DEFINE_MUTEX(scan_mutex);
215 
216 /*
217  * Early object allocation/freeing logging. Kmemleak is initialized after the
218  * kernel allocator. However, both the kernel allocator and kmemleak may
219  * allocate memory blocks which need to be tracked. Kmemleak defines an
220  * arbitrary buffer to hold the allocation/freeing information before it is
221  * fully initialized.
222  */
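
/*
 * Illustrative timeline (editor's addition; the boot-time call site is
 * hypothetical): an allocation made before kmemleak_init() is recorded
 * in the buffer below and replayed once tracking is ready:
 *
 *   boot code                             kmemleak_init()
 *     kmalloc()                             for each early_log entry:
 *       kmemleak_alloc()                      early_alloc() -> create_object()
 *         log_early(KMEMLEAK_ALLOC, ...)
 */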
223 
224 /* kmemleak operation type for early logging */
225 enum {
226 	KMEMLEAK_ALLOC,
227 	KMEMLEAK_FREE,
228 	KMEMLEAK_FREE_PART,
229 	KMEMLEAK_NOT_LEAK,
230 	KMEMLEAK_IGNORE,
231 	KMEMLEAK_SCAN_AREA,
232 	KMEMLEAK_NO_SCAN
233 };
234 
235 /*
236  * Structure holding the information passed to kmemleak callbacks during the
237  * early logging.
238  */
239 struct early_log {
240 	int op_type;			/* kmemleak operation type */
241 	const void *ptr;		/* allocated/freed memory block */
242 	size_t size;			/* memory block size */
243 	int min_count;			/* minimum reference count */
244 	unsigned long trace[MAX_TRACE];	/* stack trace */
245 	unsigned int trace_len;		/* stack trace length */
246 };
247 
248 /* early logging buffer and current position */
249 static struct early_log
250 	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
251 static int crt_early_log __initdata;
252 
253 static void kmemleak_disable(void);
254 
255 /*
256  * Print a warning and dump the stack trace.
257  */
258 #define kmemleak_warn(x...)	do {	\
259 	pr_warning(x);			\
260 	dump_stack();			\
261 } while (0)
262 
263 /*
264  * Macro invoked when a serious kmemleak condition has occurred and cannot be
265  * recovered from. Kmemleak will be disabled and further allocation/freeing
266  * tracing is no longer available.
267  */
268 #define kmemleak_stop(x...)	do {	\
269 	kmemleak_warn(x);		\
270 	kmemleak_disable();		\
271 } while (0)
272 
273 /*
274  * Printing of the object's hex dump to the seq file. The number of lines to be
275  * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
276  * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
277  * with the object->lock held.
278  */
279 static void hex_dump_object(struct seq_file *seq,
280 			    struct kmemleak_object *object)
281 {
282 	const u8 *ptr = (const u8 *)object->pointer;
283 	int i, len, remaining;
284 	unsigned char linebuf[HEX_ROW_SIZE * 5];
285 
286 	/* limit the number of lines to HEX_MAX_LINES */
287 	remaining = len =
288 		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
289 
290 	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
291 	for (i = 0; i < len; i += HEX_ROW_SIZE) {
292 		int linelen = min(remaining, HEX_ROW_SIZE);
293 
294 		remaining -= HEX_ROW_SIZE;
295 		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
296 				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
297 				   HEX_ASCII);
298 		seq_printf(seq, "    %s\n", linebuf);
299 	}
300 }
301 
302 /*
303  * Object colors, encoded with count and min_count:
304  * - white - orphan object, not enough references to it (count < min_count)
305  * - gray  - not orphan, not marked as false positive (min_count == 0) or
306  *		sufficient references to it (count >= min_count)
307  * - black - ignore, it doesn't contain references (e.g. text section)
308  *		(min_count == -1). No function defined for this color.
309  * Newly created objects start with object->count == 0 (set in
310  * create_object()) and acquire their final color during the next memory scan.
311  */
312 static bool color_white(const struct kmemleak_object *object)
313 {
314 	return object->count != KMEMLEAK_BLACK &&
315 		object->count < object->min_count;
316 }
317 
318 static bool color_gray(const struct kmemleak_object *object)
319 {
320 	return object->min_count != KMEMLEAK_BLACK &&
321 		object->count >= object->min_count;
322 }
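
/*
 * Editor's sketch: as noted above, no predicate is defined for the black
 * color; if one were wanted it would mirror the helpers above.
 * color_black() is hypothetical:
 */
#if 0	/* example only */
static bool color_black(const struct kmemleak_object *object)
{
	return object->min_count == KMEMLEAK_BLACK;
}
#endif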
323 
324 /*
325  * Objects are considered unreferenced only if their color is white, they have
326  * not been deleted and have a minimum age to avoid false positives caused by
327  * pointers temporarily stored in CPU registers.
328  */
329 static bool unreferenced_object(struct kmemleak_object *object)
330 {
331 	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
332 		time_before_eq(object->jiffies + jiffies_min_age,
333 			       jiffies_last_scan);
334 }
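
/*
 * Example (editor's addition): with MSECS_MIN_AGE == 5000, an object
 * allocated less than five seconds before the last scan started is not
 * reported even if no references to it were found, since a pointer to it
 * may still sit in a CPU register or on a stack.
 */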
335 
336 /*
337  * Printing of the unreferenced object's information to the seq file. The
338  * print_unreferenced function must be called with the object->lock held.
339  */
340 static void print_unreferenced(struct seq_file *seq,
341 			       struct kmemleak_object *object)
342 {
343 	int i;
344 	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
345 
346 	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
347 		   object->pointer, object->size);
348 	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
349 		   object->comm, object->pid, object->jiffies,
350 		   msecs_age / 1000, msecs_age % 1000);
351 	hex_dump_object(seq, object);
352 	seq_printf(seq, "  backtrace:\n");
353 
354 	for (i = 0; i < object->trace_len; i++) {
355 		void *ptr = (void *)object->trace[i];
356 		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
357 	}
358 }
359 
360 /*
361  * Print the kmemleak_object information. This function is used mainly for
362  * debugging special cases of kmemleak operations. It must be called with
363  * the object->lock held.
364  */
365 static void dump_object_info(struct kmemleak_object *object)
366 {
367 	struct stack_trace trace;
368 
369 	trace.nr_entries = object->trace_len;
370 	trace.entries = object->trace;
371 
372 	pr_notice("Object 0x%08lx (size %zu):\n",
373 		  object->tree_node.start, object->size);
374 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
375 		  object->comm, object->pid, object->jiffies);
376 	pr_notice("  min_count = %d\n", object->min_count);
377 	pr_notice("  count = %d\n", object->count);
378 	pr_notice("  flags = 0x%lx\n", object->flags);
379 	pr_notice("  checksum = %d\n", object->checksum);
380 	pr_notice("  backtrace:\n");
381 	print_stack_trace(&trace, 4);
382 }
383 
384 /*
385  * Look up the metadata (kmemleak_object) of a memory block in the priority search
386  * tree based on a pointer value. If alias is 0, only values pointing to the
387  * beginning of the memory block are allowed. The kmemleak_lock must be held
388  * when calling this function.
389  */
390 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
391 {
392 	struct prio_tree_node *node;
393 	struct prio_tree_iter iter;
394 	struct kmemleak_object *object;
395 
396 	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
397 	node = prio_tree_next(&iter);
398 	if (node) {
399 		object = prio_tree_entry(node, struct kmemleak_object,
400 					 tree_node);
401 		if (!alias && object->pointer != ptr) {
402 			kmemleak_warn("Found object by alias");
403 			object = NULL;
404 		}
405 	} else
406 		object = NULL;
407 
408 	return object;
409 }
410 
411 /*
412  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
413  * that once an object's use_count reaches 0, the RCU freeing has already been
414  * registered and the object should no longer be used. This function must be
415  * called under the protection of rcu_read_lock().
416  */
417 static int get_object(struct kmemleak_object *object)
418 {
419 	return atomic_inc_not_zero(&object->use_count);
420 }
421 
422 /*
423  * RCU callback to free a kmemleak_object.
424  */
425 static void free_object_rcu(struct rcu_head *rcu)
426 {
427 	struct hlist_node *elem, *tmp;
428 	struct kmemleak_scan_area *area;
429 	struct kmemleak_object *object =
430 		container_of(rcu, struct kmemleak_object, rcu);
431 
432 	/*
433 	 * Once use_count is 0 (guaranteed by put_object), there is no other
434 	 * code accessing this object, hence no need for locking.
435 	 */
436 	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
437 		hlist_del(elem);
438 		kmem_cache_free(scan_area_cache, area);
439 	}
440 	kmem_cache_free(object_cache, object);
441 }
442 
443 /*
444  * Decrement the object use_count. Once the count is 0, free the object using
445  * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
446  * delete_object() path, the delayed RCU freeing ensures that there is no
447  * recursive call to the kernel allocator. Lock-less RCU object_list traversal
448  * is also possible.
449  */
450 static void put_object(struct kmemleak_object *object)
451 {
452 	if (!atomic_dec_and_test(&object->use_count))
453 		return;
454 
455 	/* should only get here after delete_object was called */
456 	WARN_ON(object->flags & OBJECT_ALLOCATED);
457 
458 	call_rcu(&object->rcu, free_object_rcu);
459 }
460 
461 /*
462  * Look up an object in the prio search tree and increase its use_count.
463  */
464 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
465 {
466 	unsigned long flags;
467 	struct kmemleak_object *object = NULL;
468 
469 	rcu_read_lock();
470 	read_lock_irqsave(&kmemleak_lock, flags);
471 	if (ptr >= min_addr && ptr < max_addr)
472 		object = lookup_object(ptr, alias);
473 	read_unlock_irqrestore(&kmemleak_lock, flags);
474 
475 	/* check whether the object is still available */
476 	if (object && !get_object(object))
477 		object = NULL;
478 	rcu_read_unlock();
479 
480 	return object;
481 }
482 
483 /*
484  * Save stack trace to the given array of MAX_TRACE size.
485  */
486 static int __save_stack_trace(unsigned long *trace)
487 {
488 	struct stack_trace stack_trace;
489 
490 	stack_trace.max_entries = MAX_TRACE;
491 	stack_trace.nr_entries = 0;
492 	stack_trace.entries = trace;
493 	stack_trace.skip = 2;
494 	save_stack_trace(&stack_trace);
495 
496 	return stack_trace.nr_entries;
497 }
498 
499 /*
500  * Create the metadata (struct kmemleak_object) corresponding to an allocated
501  * memory block and add it to the object_list and object_tree_root.
502  */
503 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
504 					     int min_count, gfp_t gfp)
505 {
506 	unsigned long flags;
507 	struct kmemleak_object *object;
508 	struct prio_tree_node *node;
509 
510 	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
511 	if (!object) {
512 		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
513 		return NULL;
514 	}
515 
516 	INIT_LIST_HEAD(&object->object_list);
517 	INIT_LIST_HEAD(&object->gray_list);
518 	INIT_HLIST_HEAD(&object->area_list);
519 	spin_lock_init(&object->lock);
520 	atomic_set(&object->use_count, 1);
521 	object->flags = OBJECT_ALLOCATED;
522 	object->pointer = ptr;
523 	object->size = size;
524 	object->min_count = min_count;
525 	object->count = 0;			/* white color initially */
526 	object->jiffies = jiffies;
527 	object->checksum = 0;
528 
529 	/* task information */
530 	if (in_irq()) {
531 		object->pid = 0;
532 		strncpy(object->comm, "hardirq", sizeof(object->comm));
533 	} else if (in_softirq()) {
534 		object->pid = 0;
535 		strncpy(object->comm, "softirq", sizeof(object->comm));
536 	} else {
537 		object->pid = current->pid;
538 		/*
539 		 * There is a small chance of a race with set_task_comm(),
540 		 * however using get_task_comm() here may cause locking
541 		 * dependency issues with current->alloc_lock. In the worst
542 		 * case, the command line is not correct.
543 		 */
544 		strncpy(object->comm, current->comm, sizeof(object->comm));
545 	}
546 
547 	/* kernel backtrace */
548 	object->trace_len = __save_stack_trace(object->trace);
549 
550 	INIT_PRIO_TREE_NODE(&object->tree_node);
551 	object->tree_node.start = ptr;
552 	object->tree_node.last = ptr + size - 1;
553 
554 	write_lock_irqsave(&kmemleak_lock, flags);
555 
556 	min_addr = min(min_addr, ptr);
557 	max_addr = max(max_addr, ptr + size);
558 	node = prio_tree_insert(&object_tree_root, &object->tree_node);
559 	/*
560 	 * The code calling the kernel does not yet have the pointer to the
561 	 * memory block, so it cannot free it.  However, we still hold the
562 	 * kmemleak_lock here in case parts of the kernel started freeing
563 	 * random memory blocks.
564 	 */
565 	if (node != &object->tree_node) {
566 		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
567 			      "(already existing)\n", ptr);
568 		object = lookup_object(ptr, 1);
569 		spin_lock(&object->lock);
570 		dump_object_info(object);
571 		spin_unlock(&object->lock);
572 
573 		goto out;
574 	}
575 	list_add_tail_rcu(&object->object_list, &object_list);
576 out:
577 	write_unlock_irqrestore(&kmemleak_lock, flags);
578 	return object;
579 }
580 
581 /*
582  * Remove the metadata (struct kmemleak_object) for a memory block from the
583  * object_list and object_tree_root and decrement its use_count.
584  */
585 static void __delete_object(struct kmemleak_object *object)
586 {
587 	unsigned long flags;
588 
589 	write_lock_irqsave(&kmemleak_lock, flags);
590 	prio_tree_remove(&object_tree_root, &object->tree_node);
591 	list_del_rcu(&object->object_list);
592 	write_unlock_irqrestore(&kmemleak_lock, flags);
593 
594 	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
595 	WARN_ON(atomic_read(&object->use_count) < 2);
596 
597 	/*
598 	 * Locking here also ensures that the corresponding memory block
599 	 * cannot be freed when it is being scanned.
600 	 */
601 	spin_lock_irqsave(&object->lock, flags);
602 	object->flags &= ~OBJECT_ALLOCATED;
603 	spin_unlock_irqrestore(&object->lock, flags);
604 	put_object(object);
605 }
606 
607 /*
608  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
609  * delete it.
610  */
611 static void delete_object_full(unsigned long ptr)
612 {
613 	struct kmemleak_object *object;
614 
615 	object = find_and_get_object(ptr, 0);
616 	if (!object) {
617 #ifdef DEBUG
618 		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
619 			      ptr);
620 #endif
621 		return;
622 	}
623 	__delete_object(object);
624 	put_object(object);
625 }
626 
627 /*
628  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
629  * delete it. If the memory block is partially freed, the function may create
630  * additional metadata for the remaining parts of the block.
631  */
632 static void delete_object_part(unsigned long ptr, size_t size)
633 {
634 	struct kmemleak_object *object;
635 	unsigned long start, end;
636 
637 	object = find_and_get_object(ptr, 1);
638 	if (!object) {
639 #ifdef DEBUG
640 		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
641 			      "(size %zu)\n", ptr, size);
642 #endif
643 		return;
644 	}
645 	__delete_object(object);
646 
647 	/*
648 	 * Create one or two objects that may result from the memory block
649 	 * split. Note that partial freeing is only done by free_bootmem() and
650 	 * this happens before kmemleak_init() is called. The path below is
651 	 * only executed during early log recording in kmemleak_init(), so
652 	 * GFP_KERNEL is enough.
653 	 */
654 	start = object->pointer;
655 	end = object->pointer + object->size;
656 	if (ptr > start)
657 		create_object(start, ptr - start, object->min_count,
658 			      GFP_KERNEL);
659 	if (ptr + size < end)
660 		create_object(ptr + size, end - ptr - size, object->min_count,
661 			      GFP_KERNEL);
662 
663 	put_object(object);
664 }
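
/*
 * Worked example (editor's addition, hypothetical addresses): for an
 * object covering [0x1000, 0x1100) and kmemleak_free_part(0x1040, 0x20),
 * the code above deletes the original object and re-creates two objects
 * covering [0x1000, 0x1040) and [0x1060, 0x1100).
 */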
665 
666 static void __paint_it(struct kmemleak_object *object, int color)
667 {
668 	object->min_count = color;
669 	if (color == KMEMLEAK_BLACK)
670 		object->flags |= OBJECT_NO_SCAN;
671 }
672 
673 static void paint_it(struct kmemleak_object *object, int color)
674 {
675 	unsigned long flags;
676 
677 	spin_lock_irqsave(&object->lock, flags);
678 	__paint_it(object, color);
679 	spin_unlock_irqrestore(&object->lock, flags);
680 }
681 
682 static void paint_ptr(unsigned long ptr, int color)
683 {
684 	struct kmemleak_object *object;
685 
686 	object = find_and_get_object(ptr, 0);
687 	if (!object) {
688 		kmemleak_warn("Trying to color unknown object "
689 			      "at 0x%08lx as %s\n", ptr,
690 			      (color == KMEMLEAK_GREY) ? "Grey" :
691 			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
692 		return;
693 	}
694 	paint_it(object, color);
695 	put_object(object);
696 }
697 
698 /*
699  * Mark an object permanently as gray-colored so that it can no longer be
700  * reported as a leak. This is generally used to mark a false positive.
701  */
702 static void make_gray_object(unsigned long ptr)
703 {
704 	paint_ptr(ptr, KMEMLEAK_GREY);
705 }
706 
707 /*
708  * Mark the object as black-colored so that it is ignored from scans and
709  * reporting.
710  */
711 static void make_black_object(unsigned long ptr)
712 {
713 	paint_ptr(ptr, KMEMLEAK_BLACK);
714 }
715 
716 /*
717  * Add a scanning area to the object. If at least one such area is added,
718  * kmemleak will only scan these ranges rather than the whole memory block.
719  */
720 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
721 {
722 	unsigned long flags;
723 	struct kmemleak_object *object;
724 	struct kmemleak_scan_area *area;
725 
726 	object = find_and_get_object(ptr, 1);
727 	if (!object) {
728 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
729 			      ptr);
730 		return;
731 	}
732 
733 	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
734 	if (!area) {
735 		kmemleak_warn("Cannot allocate a scan area\n");
736 		goto out;
737 	}
738 
739 	spin_lock_irqsave(&object->lock, flags);
740 	if (ptr + size > object->pointer + object->size) {
741 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
742 		dump_object_info(object);
743 		kmem_cache_free(scan_area_cache, area);
744 		goto out_unlock;
745 	}
746 
747 	INIT_HLIST_NODE(&area->node);
748 	area->start = ptr;
749 	area->size = size;
750 
751 	hlist_add_head(&area->node, &object->area_list);
752 out_unlock:
753 	spin_unlock_irqrestore(&object->lock, flags);
754 out:
755 	put_object(object);
756 }
757 
758 /*
759  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
760  * pointer. Such an object will not be scanned by kmemleak, but references
761  * to it are still searched for.
762  */
763 static void object_no_scan(unsigned long ptr)
764 {
765 	unsigned long flags;
766 	struct kmemleak_object *object;
767 
768 	object = find_and_get_object(ptr, 0);
769 	if (!object) {
770 		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
771 		return;
772 	}
773 
774 	spin_lock_irqsave(&object->lock, flags);
775 	object->flags |= OBJECT_NO_SCAN;
776 	spin_unlock_irqrestore(&object->lock, flags);
777 	put_object(object);
778 }
779 
780 /*
781  * Log an early kmemleak_* call to the early_log buffer. These calls will be
782  * processed later once kmemleak is fully initialized.
783  */
784 static void __init log_early(int op_type, const void *ptr, size_t size,
785 			     int min_count)
786 {
787 	unsigned long flags;
788 	struct early_log *log;
789 
790 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
791 		pr_warning("Early log buffer exceeded, "
792 			   "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
793 		kmemleak_disable();
794 		return;
795 	}
796 
797 	/*
798 	 * There is no need for locking since the kernel is still in UP mode
799 	 * at this stage. Disabling the IRQs is enough.
800 	 */
801 	local_irq_save(flags);
802 	log = &early_log[crt_early_log];
803 	log->op_type = op_type;
804 	log->ptr = ptr;
805 	log->size = size;
806 	log->min_count = min_count;
807 	if (op_type == KMEMLEAK_ALLOC)
808 		log->trace_len = __save_stack_trace(log->trace);
809 	crt_early_log++;
810 	local_irq_restore(flags);
811 }
812 
813 /*
814  * Log an early allocated block and populate the stack trace.
815  */
816 static void early_alloc(struct early_log *log)
817 {
818 	struct kmemleak_object *object;
819 	unsigned long flags;
820 	int i;
821 
822 	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
823 		return;
824 
825 	/*
826 	 * RCU locking needed to ensure object is not freed via put_object().
827 	 */
828 	rcu_read_lock();
829 	object = create_object((unsigned long)log->ptr, log->size,
830 			       log->min_count, GFP_ATOMIC);
831 	if (!object)
832 		goto out;
833 	spin_lock_irqsave(&object->lock, flags);
834 	for (i = 0; i < log->trace_len; i++)
835 		object->trace[i] = log->trace[i];
836 	object->trace_len = log->trace_len;
837 	spin_unlock_irqrestore(&object->lock, flags);
838 out:
839 	rcu_read_unlock();
840 }
841 
842 /*
843  * Memory allocation function callback. This function is called from the
844  * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
845  * vmalloc etc.).
846  */
847 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
848 			  gfp_t gfp)
849 {
850 	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
851 
852 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
853 		create_object((unsigned long)ptr, size, min_count, gfp);
854 	else if (atomic_read(&kmemleak_early_log))
855 		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
856 }
857 EXPORT_SYMBOL_GPL(kmemleak_alloc);
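
/*
 * Usage sketch (editor's addition): how an allocator that does not go
 * through the slab/vmalloc hooks might inform kmemleak. The names
 * my_pool_alloc() and my_pool_get() are hypothetical.
 */
#if 0	/* example only */
static void *my_pool_alloc(size_t size, gfp_t gfp)
{
	void *ptr = my_pool_get(size);

	if (ptr)
		/* min_count == 1: report the block if no references are found */
		kmemleak_alloc(ptr, size, 1, gfp);
	return ptr;
}
#endif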
858 
859 /*
860  * Memory freeing function callback. This function is called from the kernel
861  * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
862  */
863 void __ref kmemleak_free(const void *ptr)
864 {
865 	pr_debug("%s(0x%p)\n", __func__, ptr);
866 
867 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
868 		delete_object_full((unsigned long)ptr);
869 	else if (atomic_read(&kmemleak_early_log))
870 		log_early(KMEMLEAK_FREE, ptr, 0, 0);
871 }
872 EXPORT_SYMBOL_GPL(kmemleak_free);
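
/*
 * Counterpart to the allocation sketch above (editor's addition; the
 * names my_pool_free() and my_pool_put() are hypothetical):
 */
#if 0	/* example only */
static void my_pool_free(void *ptr)
{
	kmemleak_free(ptr);	/* drop the kmemleak metadata first */
	my_pool_put(ptr);
}
#endif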
873 
874 /*
875  * Partial memory freeing function callback. This function is usually called
876  * from the bootmem allocator when (part of) a memory block is freed.
877  */
878 void __ref kmemleak_free_part(const void *ptr, size_t size)
879 {
880 	pr_debug("%s(0x%p)\n", __func__, ptr);
881 
882 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
883 		delete_object_part((unsigned long)ptr, size);
884 	else if (atomic_read(&kmemleak_early_log))
885 		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
886 }
887 EXPORT_SYMBOL_GPL(kmemleak_free_part);
888 
889 /*
890  * Mark an already allocated memory block as a false positive. This will cause
891  * the block to no longer be reported as a leak and always be scanned.
892  */
893 void __ref kmemleak_not_leak(const void *ptr)
894 {
895 	pr_debug("%s(0x%p)\n", __func__, ptr);
896 
897 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
898 		make_gray_object((unsigned long)ptr);
899 	else if (atomic_read(&kmemleak_early_log))
900 		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
901 }
902 EXPORT_SYMBOL(kmemleak_not_leak);
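
/*
 * Usage sketch (editor's addition, hypothetical call site): a block
 * reachable only through a location kmemleak cannot see (e.g. a physical
 * address programmed into hardware) can be excluded from the reports:
 */
#if 0	/* example only */
	buf = kmalloc(size, GFP_KERNEL);
	kmemleak_not_leak(buf);		/* referenced by hardware only */
#endif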
903 
904 /*
905  * Ignore a memory block. This is usually done when it is known that the
906  * corresponding block is not a leak and does not contain any references to
907  * other allocated memory blocks.
908  */
909 void __ref kmemleak_ignore(const void *ptr)
910 {
911 	pr_debug("%s(0x%p)\n", __func__, ptr);
912 
913 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
914 		make_black_object((unsigned long)ptr);
915 	else if (atomic_read(&kmemleak_early_log))
916 		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
917 }
918 EXPORT_SYMBOL(kmemleak_ignore);
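
/*
 * Editor's note: in contrast to kmemleak_not_leak() above, which keeps
 * the block in the scans, kmemleak_ignore() is for blocks that must also
 * not be scanned, e.g. (hypothetically) a buffer known to contain no
 * pointers to other kernel objects.
 */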
919 
920 /*
921  * Limit the range to be scanned in an allocated memory block.
922  */
923 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
924 {
925 	pr_debug("%s(0x%p)\n", __func__, ptr);
926 
927 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
928 		add_scan_area((unsigned long)ptr, size, gfp);
929 	else if (atomic_read(&kmemleak_early_log))
930 		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
931 }
932 EXPORT_SYMBOL(kmemleak_scan_area);
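
/*
 * Usage sketch (editor's addition): struct my_obj and its fields are
 * hypothetical; only the list head can hold references to other objects,
 * so the large payload is skipped during scanning:
 */
#if 0	/* example only */
	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	kmemleak_scan_area(&obj->list, sizeof(obj->list), GFP_KERNEL);
#endif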
933 
934 /*
935  * Inform kmemleak not to scan the given memory block.
936  */
937 void __ref kmemleak_no_scan(const void *ptr)
938 {
939 	pr_debug("%s(0x%p)\n", __func__, ptr);
940 
941 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
942 		object_no_scan((unsigned long)ptr);
943 	else if (atomic_read(&kmemleak_early_log))
944 		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
945 }
946 EXPORT_SYMBOL(kmemleak_no_scan);
947 
948 /*
949  * Update an object's checksum and return true if it was modified.
950  */
951 static bool update_checksum(struct kmemleak_object *object)
952 {
953 	u32 old_csum = object->checksum;
954 
955 	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
956 		return false;
957 
958 	object->checksum = crc32(0, (void *)object->pointer, object->size);
959 	return object->checksum != old_csum;
960 }
961 
962 /*
963  * Memory scanning is a long process and it needs to be interruptible. This
964  * function checks whether such an interrupt condition has occurred.
965  */
966 static int scan_should_stop(void)
967 {
968 	if (!atomic_read(&kmemleak_enabled))
969 		return 1;
970 
971 	/*
972 	 * This function may be called from either process or kthread context,
973 	 * hence the need to check for both stop conditions.
974 	 */
977 	if (current->mm)
978 		return signal_pending(current);
979 	else
980 		return kthread_should_stop();
981 }
982 
983 /*
984  * Scan a memory block (exclusive range) for valid pointers and add those
985  * found to the gray list.
986  */
987 static void scan_block(void *_start, void *_end,
988 		       struct kmemleak_object *scanned, int allow_resched)
989 {
990 	unsigned long *ptr;
991 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
992 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
993 
994 	for (ptr = start; ptr < end; ptr++) {
995 		struct kmemleak_object *object;
996 		unsigned long flags;
997 		unsigned long pointer;
998 
999 		if (allow_resched)
1000 			cond_resched();
1001 		if (scan_should_stop())
1002 			break;
1003 
1004 		/* don't scan uninitialized memory */
1005 		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
1006 						  BYTES_PER_POINTER))
1007 			continue;
1008 
1009 		pointer = *ptr;
1010 
1011 		object = find_and_get_object(pointer, 1);
1012 		if (!object)
1013 			continue;
1014 		if (object == scanned) {
1015 			/* self referenced, ignore */
1016 			put_object(object);
1017 			continue;
1018 		}
1019 
1020 		/*
1021 		 * Avoid the lockdep recursive warning on object->lock being
1022 		 * previously acquired in scan_object(). These locks are
1023 		 * enclosed by scan_mutex.
1024 		 */
1025 		spin_lock_irqsave_nested(&object->lock, flags,
1026 					 SINGLE_DEPTH_NESTING);
1027 		if (!color_white(object)) {
1028 			/* non-orphan, ignored or new */
1029 			spin_unlock_irqrestore(&object->lock, flags);
1030 			put_object(object);
1031 			continue;
1032 		}
1033 
1034 		/*
1035 		 * Increase the object's reference count (number of pointers
1036 		 * to the memory block). If this count reaches the required
1037 		 * minimum, the object's color will become gray and it will be
1038 		 * added to the gray_list.
1039 		 */
1040 		object->count++;
1041 		if (color_gray(object)) {
1042 			list_add_tail(&object->gray_list, &gray_list);
1043 			spin_unlock_irqrestore(&object->lock, flags);
1044 			continue;
1045 		}
1046 
1047 		spin_unlock_irqrestore(&object->lock, flags);
1048 		put_object(object);
1049 	}
1050 }
1051 
1052 /*
1053  * Scan a memory block corresponding to a kmemleak_object. The caller must
1054  * hold a reference, i.e. object->use_count >= 1.
1055  */
1056 static void scan_object(struct kmemleak_object *object)
1057 {
1058 	struct kmemleak_scan_area *area;
1059 	struct hlist_node *elem;
1060 	unsigned long flags;
1061 
1062 	/*
1063 	 * Once the object->lock is acquired, the corresponding memory block
1064 	 * cannot be freed (the same lock is acquired in delete_object).
1065 	 */
1066 	spin_lock_irqsave(&object->lock, flags);
1067 	if (object->flags & OBJECT_NO_SCAN)
1068 		goto out;
1069 	if (!(object->flags & OBJECT_ALLOCATED))
1070 		/* already freed object */
1071 		goto out;
1072 	if (hlist_empty(&object->area_list)) {
1073 		void *start = (void *)object->pointer;
1074 		void *end = (void *)(object->pointer + object->size);
1075 
1076 		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
1077 		       !(object->flags & OBJECT_NO_SCAN)) {
1078 			scan_block(start, min(start + MAX_SCAN_SIZE, end),
1079 				   object, 0);
1080 			start += MAX_SCAN_SIZE;
1081 
1082 			spin_unlock_irqrestore(&object->lock, flags);
1083 			cond_resched();
1084 			spin_lock_irqsave(&object->lock, flags);
1085 		}
1086 	} else
1087 		hlist_for_each_entry(area, elem, &object->area_list, node)
1088 			scan_block((void *)area->start,
1089 				   (void *)(area->start + area->size),
1090 				   object, 0);
1091 out:
1092 	spin_unlock_irqrestore(&object->lock, flags);
1093 }
1094 
1095 /*
1096  * Scan the objects already referenced (gray objects). More objects will be
1097  * referenced and, if there are no memory leaks, all the objects are scanned.
1098  */
1099 static void scan_gray_list(void)
1100 {
1101 	struct kmemleak_object *object, *tmp;
1102 
1103 	/*
1104 	 * The list traversal is safe for both tail additions and removals
1105 	 * from inside the loop. The kmemleak objects cannot be freed from
1106 	 * outside the loop because their use_count was incremented.
1107 	 */
1108 	object = list_entry(gray_list.next, typeof(*object), gray_list);
1109 	while (&object->gray_list != &gray_list) {
1110 		cond_resched();
1111 
1112 		/* may add new objects to the list */
1113 		if (!scan_should_stop())
1114 			scan_object(object);
1115 
1116 		tmp = list_entry(object->gray_list.next, typeof(*object),
1117 				 gray_list);
1118 
1119 		/* remove the object from the list and release it */
1120 		list_del(&object->gray_list);
1121 		put_object(object);
1122 
1123 		object = tmp;
1124 	}
1125 	WARN_ON(!list_empty(&gray_list));
1126 }
1127 
1128 /*
1129  * Scan data sections and all the referenced memory blocks allocated via the
1130  * kernel's standard allocators. This function must be called with the
1131  * scan_mutex held.
1132  */
1133 static void kmemleak_scan(void)
1134 {
1135 	unsigned long flags;
1136 	struct kmemleak_object *object;
1137 	int i;
1138 	int new_leaks = 0;
1139 
1140 	jiffies_last_scan = jiffies;
1141 
1142 	/* prepare the kmemleak_object's */
1143 	rcu_read_lock();
1144 	list_for_each_entry_rcu(object, &object_list, object_list) {
1145 		spin_lock_irqsave(&object->lock, flags);
1146 #ifdef DEBUG
1147 		/*
1148 		 * With a few exceptions there should be a maximum of
1149 		 * 1 reference to any object at this point.
1150 		 */
1151 		if (atomic_read(&object->use_count) > 1) {
1152 			pr_debug("object->use_count = %d\n",
1153 				 atomic_read(&object->use_count));
1154 			dump_object_info(object);
1155 		}
1156 #endif
1157 		/* reset the reference count (whiten the object) */
1158 		object->count = 0;
1159 		if (color_gray(object) && get_object(object))
1160 			list_add_tail(&object->gray_list, &gray_list);
1161 
1162 		spin_unlock_irqrestore(&object->lock, flags);
1163 	}
1164 	rcu_read_unlock();
1165 
1166 	/* data/bss scanning */
1167 	scan_block(_sdata, _edata, NULL, 1);
1168 	scan_block(__bss_start, __bss_stop, NULL, 1);
1169 
1170 #ifdef CONFIG_SMP
1171 	/* per-cpu sections scanning */
1172 	for_each_possible_cpu(i)
1173 		scan_block(__per_cpu_start + per_cpu_offset(i),
1174 			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
1175 #endif
1176 
1177 	/*
1178 	 * Struct page scanning for each node. The code below is not yet safe
1179 	 * with MEMORY_HOTPLUG.
1180 	 */
1181 	for_each_online_node(i) {
1182 		pg_data_t *pgdat = NODE_DATA(i);
1183 		unsigned long start_pfn = pgdat->node_start_pfn;
1184 		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1185 		unsigned long pfn;
1186 
1187 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1188 			struct page *page;
1189 
1190 			if (!pfn_valid(pfn))
1191 				continue;
1192 			page = pfn_to_page(pfn);
1193 			/* only scan if page is in use */
1194 			if (page_count(page) == 0)
1195 				continue;
1196 			scan_block(page, page + 1, NULL, 1);
1197 		}
1198 	}
1199 
1200 	/*
1201 	 * Scanning the task stacks (may introduce false negatives).
1202 	 */
1203 	if (kmemleak_stack_scan) {
1204 		struct task_struct *p, *g;
1205 
1206 		read_lock(&tasklist_lock);
1207 		do_each_thread(g, p) {
1208 			scan_block(task_stack_page(p), task_stack_page(p) +
1209 				   THREAD_SIZE, NULL, 0);
1210 		} while_each_thread(g, p);
1211 		read_unlock(&tasklist_lock);
1212 	}
1213 
1214 	/*
1215 	 * Scan the objects already referenced from the sections scanned
1216 	 * above.
1217 	 */
1218 	scan_gray_list();
1219 
1220 	/*
1221 	 * Check for new or unreferenced objects modified since the previous
1222 	 * scan and color them gray until the next scan.
1223 	 */
1224 	rcu_read_lock();
1225 	list_for_each_entry_rcu(object, &object_list, object_list) {
1226 		spin_lock_irqsave(&object->lock, flags);
1227 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1228 		    && update_checksum(object) && get_object(object)) {
1229 			/* color it gray temporarily */
1230 			object->count = object->min_count;
1231 			list_add_tail(&object->gray_list, &gray_list);
1232 		}
1233 		spin_unlock_irqrestore(&object->lock, flags);
1234 	}
1235 	rcu_read_unlock();
1236 
1237 	/*
1238 	 * Re-scan the gray list for modified unreferenced objects.
1239 	 */
1240 	scan_gray_list();
1241 
1242 	/*
1243 	 * If scanning was stopped do not report any new unreferenced objects.
1244 	 */
1245 	if (scan_should_stop())
1246 		return;
1247 
1248 	/*
1249 	 * Scanning result reporting.
1250 	 */
1251 	rcu_read_lock();
1252 	list_for_each_entry_rcu(object, &object_list, object_list) {
1253 		spin_lock_irqsave(&object->lock, flags);
1254 		if (unreferenced_object(object) &&
1255 		    !(object->flags & OBJECT_REPORTED)) {
1256 			object->flags |= OBJECT_REPORTED;
1257 			new_leaks++;
1258 		}
1259 		spin_unlock_irqrestore(&object->lock, flags);
1260 	}
1261 	rcu_read_unlock();
1262 
1263 	if (new_leaks)
1264 		pr_info("%d new suspected memory leaks (see "
1265 			"/sys/kernel/debug/kmemleak)\n", new_leaks);
1266 
1267 }
1268 
1269 /*
1270  * Thread function performing automatic memory scanning. Unreferenced objects
1271  * found at the end of a memory scan are reported, but only once.
1272  */
1273 static int kmemleak_scan_thread(void *arg)
1274 {
1275 	static int first_run = 1;
1276 
1277 	pr_info("Automatic memory scanning thread started\n");
1278 	set_user_nice(current, 10);
1279 
1280 	/*
1281 	 * Wait before the first scan to allow the system to fully initialize.
1282 	 */
1283 	if (first_run) {
1284 		first_run = 0;
1285 		ssleep(SECS_FIRST_SCAN);
1286 	}
1287 
1288 	while (!kthread_should_stop()) {
1289 		signed long timeout = jiffies_scan_wait;
1290 
1291 		mutex_lock(&scan_mutex);
1292 		kmemleak_scan();
1293 		mutex_unlock(&scan_mutex);
1294 
1295 		/* wait before the next scan */
1296 		while (timeout && !kthread_should_stop())
1297 			timeout = schedule_timeout_interruptible(timeout);
1298 	}
1299 
1300 	pr_info("Automatic memory scanning thread ended\n");
1301 
1302 	return 0;
1303 }
1304 
1305 /*
1306  * Start the automatic memory scanning thread. This function must be called
1307  * with the scan_mutex held.
1308  */
1309 static void start_scan_thread(void)
1310 {
1311 	if (scan_thread)
1312 		return;
1313 	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1314 	if (IS_ERR(scan_thread)) {
1315 		pr_warning("Failed to create the scan thread\n");
1316 		scan_thread = NULL;
1317 	}
1318 }
1319 
1320 /*
1321  * Stop the automatic memory scanning thread. This function must be called
1322  * with the scan_mutex held.
1323  */
1324 static void stop_scan_thread(void)
1325 {
1326 	if (scan_thread) {
1327 		kthread_stop(scan_thread);
1328 		scan_thread = NULL;
1329 	}
1330 }
1331 
1332 /*
1333  * Iterate over the object_list and return the first valid object at or after
1334  * the required position with its use_count incremented. Note that reading the
1335  * file does not itself trigger a scan; use the "scan" command for that.
1336  */
1337 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1338 {
1339 	struct kmemleak_object *object;
1340 	loff_t n = *pos;
1341 	int err;
1342 
1343 	err = mutex_lock_interruptible(&scan_mutex);
1344 	if (err < 0)
1345 		return ERR_PTR(err);
1346 
1347 	rcu_read_lock();
1348 	list_for_each_entry_rcu(object, &object_list, object_list) {
1349 		if (n-- > 0)
1350 			continue;
1351 		if (get_object(object))
1352 			goto out;
1353 	}
1354 	object = NULL;
1355 out:
1356 	return object;
1357 }
1358 
1359 /*
1360  * Return the next object in the object_list. The function decrements the
1361  * use_count of the previous object and increases that of the next one.
1362  */
1363 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1364 {
1365 	struct kmemleak_object *prev_obj = v;
1366 	struct kmemleak_object *next_obj = NULL;
1367 	struct list_head *n = &prev_obj->object_list;
1368 
1369 	++(*pos);
1370 
1371 	list_for_each_continue_rcu(n, &object_list) {
1372 		next_obj = list_entry(n, struct kmemleak_object, object_list);
1373 		if (get_object(next_obj))
1374 			break;
1375 	}
1376 
1377 	put_object(prev_obj);
1378 	return next_obj;
1379 }
1380 
1381 /*
1382  * Decrement the use_count of the last object returned, if any.
1383  */
1384 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1385 {
1386 	if (!IS_ERR(v)) {
1387 		/*
1388 		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1389 		 * waiting was interrupted, so only release it if !IS_ERR.
1390 		 */
1391 		rcu_read_unlock();
1392 		mutex_unlock(&scan_mutex);
1393 		if (v)
1394 			put_object(v);
1395 	}
1396 }
1397 
1398 /*
1399  * Print the information for an unreferenced object to the seq file.
1400  */
1401 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1402 {
1403 	struct kmemleak_object *object = v;
1404 	unsigned long flags;
1405 
1406 	spin_lock_irqsave(&object->lock, flags);
1407 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1408 		print_unreferenced(seq, object);
1409 	spin_unlock_irqrestore(&object->lock, flags);
1410 	return 0;
1411 }
1412 
1413 static const struct seq_operations kmemleak_seq_ops = {
1414 	.start = kmemleak_seq_start,
1415 	.next  = kmemleak_seq_next,
1416 	.stop  = kmemleak_seq_stop,
1417 	.show  = kmemleak_seq_show,
1418 };
1419 
1420 static int kmemleak_open(struct inode *inode, struct file *file)
1421 {
1422 	if (!atomic_read(&kmemleak_enabled))
1423 		return -EBUSY;
1424 
1425 	return seq_open(file, &kmemleak_seq_ops);
1426 }
1427 
1428 static int kmemleak_release(struct inode *inode, struct file *file)
1429 {
1430 	return seq_release(inode, file);
1431 }
1432 
1433 static int dump_str_object_info(const char *str)
1434 {
1435 	unsigned long flags;
1436 	struct kmemleak_object *object;
1437 	unsigned long addr;
1438 
1439 	addr = simple_strtoul(str, NULL, 0);
1440 	object = find_and_get_object(addr, 0);
1441 	if (!object) {
1442 		pr_info("Unknown object at 0x%08lx\n", addr);
1443 		return -EINVAL;
1444 	}
1445 
1446 	spin_lock_irqsave(&object->lock, flags);
1447 	dump_object_info(object);
1448 	spin_unlock_irqrestore(&object->lock, flags);
1449 
1450 	put_object(object);
1451 	return 0;
1452 }
1453 
1454 /*
1455  * We use grey instead of black to ensure we can do future scans on the same
1456  * objects. If we did not do future scans these black objects could
1457  * potentially contain references to newly allocated objects in the future and
1458  * we'd end up with false positives.
1459  */
1460 static void kmemleak_clear(void)
1461 {
1462 	struct kmemleak_object *object;
1463 	unsigned long flags;
1464 
1465 	rcu_read_lock();
1466 	list_for_each_entry_rcu(object, &object_list, object_list) {
1467 		spin_lock_irqsave(&object->lock, flags);
1468 		if ((object->flags & OBJECT_REPORTED) &&
1469 		    unreferenced_object(object))
1470 			__paint_it(object, KMEMLEAK_GREY);
1471 		spin_unlock_irqrestore(&object->lock, flags);
1472 	}
1473 	rcu_read_unlock();
1474 }
1475 
1476 /*
1477  * File write operation to configure kmemleak at run-time. The following
1478  * commands can be written to the /sys/kernel/debug/kmemleak file:
1479  *   off	- disable kmemleak (irreversible)
1480  *   stack=on	- enable the task stacks scanning
1481  *   stack=off	- disable the tasks stacks scanning
1482  *   scan=on	- start the automatic memory scanning thread
1483  *   scan=off	- stop the automatic memory scanning thread
1484  *   scan=...	- set the automatic memory scanning period in seconds (0 to
1485  *		  disable it)
1486  *   scan	- trigger a memory scan
1487  *   clear	- mark all current reported unreferenced kmemleak objects as
1488  *		  grey to ignore printing them
1489  *   dump=...	- dump information about the object found at the given address
1490  */
1491 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1492 			      size_t size, loff_t *ppos)
1493 {
1494 	char buf[64];
1495 	int buf_size;
1496 	int ret;
1497 
1498 	buf_size = min(size, (sizeof(buf) - 1));
1499 	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1500 		return -EFAULT;
1501 	buf[buf_size] = 0;
1502 
1503 	ret = mutex_lock_interruptible(&scan_mutex);
1504 	if (ret < 0)
1505 		return ret;
1506 
1507 	if (strncmp(buf, "off", 3) == 0)
1508 		kmemleak_disable();
1509 	else if (strncmp(buf, "stack=on", 8) == 0)
1510 		kmemleak_stack_scan = 1;
1511 	else if (strncmp(buf, "stack=off", 9) == 0)
1512 		kmemleak_stack_scan = 0;
1513 	else if (strncmp(buf, "scan=on", 7) == 0)
1514 		start_scan_thread();
1515 	else if (strncmp(buf, "scan=off", 8) == 0)
1516 		stop_scan_thread();
1517 	else if (strncmp(buf, "scan=", 5) == 0) {
1518 		unsigned long secs;
1519 
1520 		ret = strict_strtoul(buf + 5, 0, &secs);
1521 		if (ret < 0)
1522 			goto out;
1523 		stop_scan_thread();
1524 		if (secs) {
1525 			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1526 			start_scan_thread();
1527 		}
1528 	} else if (strncmp(buf, "scan", 4) == 0)
1529 		kmemleak_scan();
1530 	else if (strncmp(buf, "clear", 5) == 0)
1531 		kmemleak_clear();
1532 	else if (strncmp(buf, "dump=", 5) == 0)
1533 		ret = dump_str_object_info(buf + 5);
1534 	else
1535 		ret = -EINVAL;
1536 
1537 out:
1538 	mutex_unlock(&scan_mutex);
1539 	if (ret < 0)
1540 		return ret;
1541 
1542 	/* ignore the rest of the buffer, only one command at a time */
1543 	*ppos += size;
1544 	return size;
1545 }
1546 
1547 static const struct file_operations kmemleak_fops = {
1548 	.owner		= THIS_MODULE,
1549 	.open		= kmemleak_open,
1550 	.read		= seq_read,
1551 	.write		= kmemleak_write,
1552 	.llseek		= seq_lseek,
1553 	.release	= kmemleak_release,
1554 };
1555 
1556 /*
1557  * Perform the freeing of the kmemleak internal objects after waiting for any
1558  * current memory scan to complete.
1559  */
1560 static void kmemleak_do_cleanup(struct work_struct *work)
1561 {
1562 	struct kmemleak_object *object;
1563 
1564 	mutex_lock(&scan_mutex);
1565 	stop_scan_thread();
1566 
1567 	rcu_read_lock();
1568 	list_for_each_entry_rcu(object, &object_list, object_list)
1569 		delete_object_full(object->pointer);
1570 	rcu_read_unlock();
1571 	mutex_unlock(&scan_mutex);
1572 }
1573 
1574 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1575 
1576 /*
1577  * Disable kmemleak. No memory allocation/freeing will be traced once this
1578  * function is called. Disabling kmemleak is an irreversible operation.
1579  */
1580 static void kmemleak_disable(void)
1581 {
1582 	/* atomically check whether it was already invoked */
1583 	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
1584 		return;
1585 
1586 	/* stop any memory operation tracing */
1587 	atomic_set(&kmemleak_early_log, 0);
1588 	atomic_set(&kmemleak_enabled, 0);
1589 
1590 	/* check whether it is too early for a kernel thread */
1591 	if (atomic_read(&kmemleak_initialized))
1592 		schedule_work(&cleanup_work);
1593 
1594 	pr_info("Kernel memory leak detector disabled\n");
1595 }
1596 
1597 /*
1598  * Allow boot-time kmemleak disabling (enabled by default).
1599  */
1600 static int kmemleak_boot_config(char *str)
1601 {
1602 	if (!str)
1603 		return -EINVAL;
1604 	if (strcmp(str, "off") == 0)
1605 		kmemleak_disable();
1606 	else if (strcmp(str, "on") != 0)
1607 		return -EINVAL;
1608 	return 0;
1609 }
1610 early_param("kmemleak", kmemleak_boot_config);
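
/*
 * Editor's note: e.g. booting with "kmemleak=off" on the kernel command
 * line disables the detector before any tracing starts.
 */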
1611 
1612 /*
1613  * Kmemleak initialization.
1614  */
1615 void __init kmemleak_init(void)
1616 {
1617 	int i;
1618 	unsigned long flags;
1619 
1620 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1621 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1622 
1623 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1624 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1625 	INIT_PRIO_TREE_ROOT(&object_tree_root);
1626 
1627 	/* the kernel is still in UP mode, so disabling the IRQs is enough */
1628 	local_irq_save(flags);
1629 	if (!atomic_read(&kmemleak_error)) {
1630 		atomic_set(&kmemleak_enabled, 1);
1631 		atomic_set(&kmemleak_early_log, 0);
1632 	}
1633 	local_irq_restore(flags);
1634 
1635 	/*
1636 	 * This is the point where tracking allocations is safe. Automatic
1637 	 * scanning is started during the late initcall. Add the early logged
1638 	 * callbacks to the kmemleak infrastructure.
1639 	 */
1640 	for (i = 0; i < crt_early_log; i++) {
1641 		struct early_log *log = &early_log[i];
1642 
1643 		switch (log->op_type) {
1644 		case KMEMLEAK_ALLOC:
1645 			early_alloc(log);
1646 			break;
1647 		case KMEMLEAK_FREE:
1648 			kmemleak_free(log->ptr);
1649 			break;
1650 		case KMEMLEAK_FREE_PART:
1651 			kmemleak_free_part(log->ptr, log->size);
1652 			break;
1653 		case KMEMLEAK_NOT_LEAK:
1654 			kmemleak_not_leak(log->ptr);
1655 			break;
1656 		case KMEMLEAK_IGNORE:
1657 			kmemleak_ignore(log->ptr);
1658 			break;
1659 		case KMEMLEAK_SCAN_AREA:
1660 			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
1661 			break;
1662 		case KMEMLEAK_NO_SCAN:
1663 			kmemleak_no_scan(log->ptr);
1664 			break;
1665 		default:
1666 			WARN_ON(1);
1667 		}
1668 	}
1669 }
1670 
1671 /*
1672  * Late initialization function.
1673  */
1674 static int __init kmemleak_late_init(void)
1675 {
1676 	struct dentry *dentry;
1677 
1678 	atomic_set(&kmemleak_initialized, 1);
1679 
1680 	if (atomic_read(&kmemleak_error)) {
1681 		/*
1682 		 * Some error occurred and kmemleak was disabled. There is a
1683 		 * small chance that kmemleak_disable() was called immediately
1684 		 * after setting kmemleak_initialized and we may end up with
1685 		 * two clean-up threads but serialized by scan_mutex.
1686 		 */
1687 		schedule_work(&cleanup_work);
1688 		return -ENOMEM;
1689 	}
1690 
1691 	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1692 				     &kmemleak_fops);
1693 	if (!dentry)
1694 		pr_warning("Failed to create the debugfs kmemleak file\n");
1695 	mutex_lock(&scan_mutex);
1696 	start_scan_thread();
1697 	mutex_unlock(&scan_mutex);
1698 
1699 	pr_info("Kernel memory leak detector initialized\n");
1700 
1701 	return 0;
1702 }
1703 late_initcall(kmemleak_late_init);
1704