xref: /openbmc/linux/mm/kmemleak.c (revision 930beb5a)
1 /*
2  * mm/kmemleak.c
3  *
4  * Copyright (C) 2008 ARM Limited
5  * Written by Catalin Marinas <catalin.marinas@arm.com>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19  *
20  *
21  * For more information on the algorithm and kmemleak usage, please see
22  * Documentation/kmemleak.txt.
23  *
24  * Notes on locking
25  * ----------------
26  *
27  * The following locks and mutexes are used by kmemleak:
28  *
29  * - kmemleak_lock (rwlock): protects the object_list modifications and
30  *   accesses to the object_tree_root. The object_list is the main list
31  *   holding the metadata (struct kmemleak_object) for the allocated memory
32  *   blocks. The object_tree_root is a red black tree used to look-up
33  *   metadata based on a pointer to the corresponding memory block.  The
34  *   kmemleak_object structures are added to the object_list and
35  *   object_tree_root in the create_object() function called from the
36  *   kmemleak_alloc() callback and removed in delete_object() called from the
37  *   kmemleak_free() callback
38  * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
39  *   the metadata (e.g. count) are protected by this lock. Note that some
40  *   members of this structure may be protected by other means (atomic or
41  *   kmemleak_lock). This lock is also held when scanning the corresponding
42  *   memory block to avoid the kernel freeing it via the kmemleak_free()
43  *   callback. This is less heavyweight than holding a global lock like
44  *   kmemleak_lock during scanning
45  * - scan_mutex (mutex): ensures that only one thread may scan the memory for
46  *   unreferenced objects at a time. The gray_list contains the objects which
47  *   are already referenced or marked as false positives and need to be
48  *   scanned. This list is only modified during a scanning episode when the
49  *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
50  *   Note that the kmemleak_object.use_count is incremented when an object is
51  *   added to the gray_list and therefore cannot be freed. This mutex also
52  *   prevents multiple users of the "kmemleak" debugfs file from racing with
53  *   modifications to the memory scanning parameters, including the
54  *   scan_thread pointer
55  *
56  * The kmemleak_object structures have a use_count incremented or decremented
57  * using the get_object()/put_object() functions. When the use_count becomes
58  * 0, this count can no longer be incremented and put_object() schedules the
59  * kmemleak_object freeing via an RCU callback. All calls to the get_object()
60  * function must be protected by rcu_read_lock() to avoid accessing a freed
61  * structure.
62  */
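
/*
 * Illustrative nesting summary derived from the notes above (a reading aid,
 * not a new locking rule): scan_mutex is the outermost level, kmemleak_lock
 * nests inside it and kmemleak_object.lock is the innermost, e.g. the
 * overlap error path in create_object() takes object->lock while holding
 * kmemleak_lock for writing.
 */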
63 
64 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65 
66 #include <linux/init.h>
67 #include <linux/kernel.h>
68 #include <linux/list.h>
69 #include <linux/sched.h>
70 #include <linux/jiffies.h>
71 #include <linux/delay.h>
72 #include <linux/export.h>
73 #include <linux/kthread.h>
74 #include <linux/rbtree.h>
75 #include <linux/fs.h>
76 #include <linux/debugfs.h>
77 #include <linux/seq_file.h>
78 #include <linux/cpumask.h>
79 #include <linux/spinlock.h>
80 #include <linux/mutex.h>
81 #include <linux/rcupdate.h>
82 #include <linux/stacktrace.h>
83 #include <linux/cache.h>
84 #include <linux/percpu.h>
85 #include <linux/hardirq.h>
86 #include <linux/mmzone.h>
87 #include <linux/slab.h>
88 #include <linux/thread_info.h>
89 #include <linux/err.h>
90 #include <linux/uaccess.h>
91 #include <linux/string.h>
92 #include <linux/nodemask.h>
93 #include <linux/mm.h>
94 #include <linux/workqueue.h>
95 #include <linux/crc32.h>
96 
97 #include <asm/sections.h>
98 #include <asm/processor.h>
99 #include <linux/atomic.h>
100 
101 #include <linux/kmemcheck.h>
102 #include <linux/kmemleak.h>
103 #include <linux/memory_hotplug.h>
104 
105 /*
106  * Kmemleak configuration and common defines.
107  */
108 #define MAX_TRACE		16	/* stack trace length */
109 #define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
110 #define SECS_FIRST_SCAN		60	/* delay before the first scan */
111 #define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
112 #define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
113 
114 #define BYTES_PER_POINTER	sizeof(void *)
115 
116 /* GFP bitmask for kmemleak internal allocations */
117 #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
118 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
119 				 __GFP_NOWARN)
120 
121 /* scanning area inside a memory block */
122 struct kmemleak_scan_area {
123 	struct hlist_node node;
124 	unsigned long start;
125 	size_t size;
126 };
127 
128 #define KMEMLEAK_GREY	0
129 #define KMEMLEAK_BLACK	-1
130 
131 /*
132  * Structure holding the metadata for each allocated memory block.
133  * Modifications to such objects should be made while holding the
134  * object->lock. Insertions or deletions from object_list, gray_list or
135  * rb_node are already protected by the corresponding locks or mutex (see
136  * the notes on locking above). These objects are reference-counted
137  * (use_count) and freed using the RCU mechanism.
138  */
139 struct kmemleak_object {
140 	spinlock_t lock;
141 	unsigned long flags;		/* object status flags */
142 	struct list_head object_list;
143 	struct list_head gray_list;
144 	struct rb_node rb_node;
145 	struct rcu_head rcu;		/* object_list lockless traversal */
146 	/* object usage count; object freed when use_count == 0 */
147 	atomic_t use_count;
148 	unsigned long pointer;
149 	size_t size;
150 	/* minimum number of pointers found before it is considered a leak */
151 	int min_count;
152 	/* the total number of pointers found pointing to this object */
153 	int count;
154 	/* checksum for detecting modified objects */
155 	u32 checksum;
156 	/* memory ranges to be scanned inside an object (empty for all) */
157 	struct hlist_head area_list;
158 	unsigned long trace[MAX_TRACE];
159 	unsigned int trace_len;
160 	unsigned long jiffies;		/* creation timestamp */
161 	pid_t pid;			/* pid of the current task */
162 	char comm[TASK_COMM_LEN];	/* executable name */
163 };
164 
165 /* flag representing the memory block allocation status */
166 #define OBJECT_ALLOCATED	(1 << 0)
167 /* flag set after the first reporting of an unreferenced object */
168 #define OBJECT_REPORTED		(1 << 1)
169 /* flag set to not scan the object */
170 #define OBJECT_NO_SCAN		(1 << 2)
171 
172 /* number of bytes to print per line; must be 16 or 32 */
173 #define HEX_ROW_SIZE		16
174 /* number of bytes to print at a time (1, 2, 4, 8) */
175 #define HEX_GROUP_SIZE		1
176 /* include ASCII after the hex output */
177 #define HEX_ASCII		1
178 /* max number of lines to be printed */
179 #define HEX_MAX_LINES		2
180 
181 /* the list of all allocated objects */
182 static LIST_HEAD(object_list);
183 /* the list of gray-colored objects (see color_gray comment below) */
184 static LIST_HEAD(gray_list);
185 /* search tree for object boundaries */
186 static struct rb_root object_tree_root = RB_ROOT;
187 /* rw_lock protecting the access to object_list and object_tree_root */
188 static DEFINE_RWLOCK(kmemleak_lock);
189 
190 /* allocation caches for kmemleak internal data */
191 static struct kmem_cache *object_cache;
192 static struct kmem_cache *scan_area_cache;
193 
194 /* set if tracing memory operations is enabled */
195 static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
196 /* set in the late_initcall if there were no errors */
197 static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
198 /* enables or disables early logging of the memory operations */
199 static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
200 /* set if a kmemleak warning was issued */
201 static atomic_t kmemleak_warning = ATOMIC_INIT(0);
202 /* set if a fatal kmemleak error has occurred */
203 static atomic_t kmemleak_error = ATOMIC_INIT(0);
204 
205 /* minimum and maximum address that may be valid pointers */
206 static unsigned long min_addr = ULONG_MAX;
207 static unsigned long max_addr;
208 
209 static struct task_struct *scan_thread;
210 /* used to avoid reporting of recently allocated objects */
211 static unsigned long jiffies_min_age;
212 static unsigned long jiffies_last_scan;
213 /* delay between automatic memory scannings */
214 static signed long jiffies_scan_wait;
215 /* enables or disables the task stacks scanning */
216 static int kmemleak_stack_scan = 1;
217 /* protects the memory scanning, parameters and debug/kmemleak file access */
218 static DEFINE_MUTEX(scan_mutex);
219 /* setting kmemleak=on will set this var, skipping the disable */
220 static int kmemleak_skip_disable;
221 
222 
223 /*
224  * Early object allocation/freeing logging. Kmemleak is initialized after the
225  * kernel allocator. However, both the kernel allocator and kmemleak may
226  * allocate memory blocks which need to be tracked. Kmemleak defines an
227  * arbitrary buffer to hold the allocation/freeing information before it is
228  * fully initialized.
229  */
230 
231 /* kmemleak operation type for early logging */
232 enum {
233 	KMEMLEAK_ALLOC,
234 	KMEMLEAK_ALLOC_PERCPU,
235 	KMEMLEAK_FREE,
236 	KMEMLEAK_FREE_PART,
237 	KMEMLEAK_FREE_PERCPU,
238 	KMEMLEAK_NOT_LEAK,
239 	KMEMLEAK_IGNORE,
240 	KMEMLEAK_SCAN_AREA,
241 	KMEMLEAK_NO_SCAN
242 };
243 
244 /*
245  * Structure holding the information passed to kmemleak callbacks during the
246  * early logging.
247  */
248 struct early_log {
249 	int op_type;			/* kmemleak operation type */
250 	const void *ptr;		/* allocated/freed memory block */
251 	size_t size;			/* memory block size */
252 	int min_count;			/* minimum reference count */
253 	unsigned long trace[MAX_TRACE];	/* stack trace */
254 	unsigned int trace_len;		/* stack trace length */
255 };
256 
257 /* early logging buffer and current position */
258 static struct early_log
259 	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
260 static int crt_early_log __initdata;
261 
262 static void kmemleak_disable(void);
263 
264 /*
265  * Print a warning and dump the stack trace.
266  */
267 #define kmemleak_warn(x...)	do {		\
268 	pr_warning(x);				\
269 	dump_stack();				\
270 	atomic_set(&kmemleak_warning, 1);	\
271 } while (0)
272 
273 /*
274  * Macro invoked when a serious kmemleak condition has occurred and cannot be
275  * recovered from. Kmemleak will be disabled and further allocation/freeing
276  * tracing will no longer be available.
277  */
278 #define kmemleak_stop(x...)	do {	\
279 	kmemleak_warn(x);		\
280 	kmemleak_disable();		\
281 } while (0)
282 
283 /*
284  * Printing of the object's hex dump to the seq file. The number of lines to be
285  * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
286  * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
287  * with the object->lock held.
288  */
289 static void hex_dump_object(struct seq_file *seq,
290 			    struct kmemleak_object *object)
291 {
292 	const u8 *ptr = (const u8 *)object->pointer;
293 	int i, len, remaining;
294 	unsigned char linebuf[HEX_ROW_SIZE * 5];
295 
296 	/* limit the number of lines to HEX_MAX_LINES */
297 	remaining = len =
298 		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
299 
300 	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
301 	for (i = 0; i < len; i += HEX_ROW_SIZE) {
302 		int linelen = min(remaining, HEX_ROW_SIZE);
303 
304 		remaining -= HEX_ROW_SIZE;
305 		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
306 				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
307 				   HEX_ASCII);
308 		seq_printf(seq, "    %s\n", linebuf);
309 	}
310 }
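
/*
 * For illustration, a 16-byte object would be rendered roughly as follows
 * (the byte values here are hypothetical):
 *
 *	  hex dump (first 16 bytes):
 *	    6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5  kkkkkkkkkkkkkkk.
 */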
311 
312 /*
313  * Object colors, encoded with count and min_count:
314  * - white - orphan object, not enough references to it (count < min_count)
315  * - gray  - not orphan, not marked as false positive (min_count == 0) or
316  *		sufficient references to it (count >= min_count)
317  * - black - ignore, it doesn't contain references (e.g. text section)
318  *		(min_count == -1). No function defined for this color.
319  * Newly created objects start out white (object->count == 0) and acquire
320  * their final color during the next memory scan.
321  */
322 static bool color_white(const struct kmemleak_object *object)
323 {
324 	return object->count != KMEMLEAK_BLACK &&
325 		object->count < object->min_count;
326 }
327 
328 static bool color_gray(const struct kmemleak_object *object)
329 {
330 	return object->min_count != KMEMLEAK_BLACK &&
331 		object->count >= object->min_count;
332 }
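
/*
 * Worked example of the predicates above: an object created with
 * min_count == 1 starts a scan with count == 0, so color_white() is true;
 * once a single reference is found, count == 1 >= min_count and
 * color_gray() holds instead. An object painted black (min_count == -1)
 * satisfies neither predicate and is simply ignored.
 */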
333 
334 /*
335  * Objects are considered unreferenced only if their color is white, they have
336  * not been deleted and have a minimum age to avoid false positives caused by
337  * pointers temporarily stored in CPU registers.
338  */
339 static bool unreferenced_object(struct kmemleak_object *object)
340 {
341 	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
342 		time_before_eq(object->jiffies + jiffies_min_age,
343 			       jiffies_last_scan);
344 }
345 
346 /*
347  * Printing of the unreferenced object's information to the seq file. The
348  * print_unreferenced function must be called with the object->lock held.
349  */
350 static void print_unreferenced(struct seq_file *seq,
351 			       struct kmemleak_object *object)
352 {
353 	int i;
354 	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
355 
356 	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
357 		   object->pointer, object->size);
358 	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
359 		   object->comm, object->pid, object->jiffies,
360 		   msecs_age / 1000, msecs_age % 1000);
361 	hex_dump_object(seq, object);
362 	seq_printf(seq, "  backtrace:\n");
363 
364 	for (i = 0; i < object->trace_len; i++) {
365 		void *ptr = (void *)object->trace[i];
366 		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
367 	}
368 }
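
/*
 * Shape of the resulting report (all values below are hypothetical):
 *
 *	unreferenced object 0xffff880052f53a80 (size 32):
 *	  comm "modprobe", pid 532, jiffies 4294894652 (age 62.480s)
 *	  hex dump (first 32 bytes):
 *	    ...
 *	  backtrace:
 *	    [<ffffffff8109b49d>] kmem_cache_alloc+0x15d/0x170
 */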
369 
370 /*
371  * Print the kmemleak_object information. This function is used mainly for
372  * debugging special cases of kmemleak operations. It must be called with
373  * the object->lock held.
374  */
375 static void dump_object_info(struct kmemleak_object *object)
376 {
377 	struct stack_trace trace;
378 
379 	trace.nr_entries = object->trace_len;
380 	trace.entries = object->trace;
381 
382 	pr_notice("Object 0x%08lx (size %zu):\n",
383 		  object->pointer, object->size);
384 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
385 		  object->comm, object->pid, object->jiffies);
386 	pr_notice("  min_count = %d\n", object->min_count);
387 	pr_notice("  count = %d\n", object->count);
388 	pr_notice("  flags = 0x%lx\n", object->flags);
389 	pr_notice("  checksum = %u\n", object->checksum);
390 	pr_notice("  backtrace:\n");
391 	print_stack_trace(&trace, 4);
392 }
393 
394 /*
395  * Look up a memory block's metadata (kmemleak_object) in the object search
396  * tree based on a pointer value. If alias is 0, only values pointing to the
397  * beginning of the memory block are allowed. The kmemleak_lock must be held
398  * when calling this function.
399  */
400 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
401 {
402 	struct rb_node *rb = object_tree_root.rb_node;
403 
404 	while (rb) {
405 		struct kmemleak_object *object =
406 			rb_entry(rb, struct kmemleak_object, rb_node);
407 		if (ptr < object->pointer)
408 			rb = object->rb_node.rb_left;
409 		else if (object->pointer + object->size <= ptr)
410 			rb = object->rb_node.rb_right;
411 		else if (object->pointer == ptr || alias)
412 			return object;
413 		else {
414 			kmemleak_warn("Found object by alias at 0x%08lx\n",
415 				      ptr);
416 			dump_object_info(object);
417 			break;
418 		}
419 	}
420 	return NULL;
421 }
422 
423 /*
424  * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
425  * that once an object's use_count has reached 0, the RCU freeing has already
426  * been registered and the object should no longer be used. This function must be
427  * called under the protection of rcu_read_lock().
428  */
429 static int get_object(struct kmemleak_object *object)
430 {
431 	return atomic_inc_not_zero(&object->use_count);
432 }
433 
434 /*
435  * RCU callback to free a kmemleak_object.
436  */
437 static void free_object_rcu(struct rcu_head *rcu)
438 {
439 	struct hlist_node *tmp;
440 	struct kmemleak_scan_area *area;
441 	struct kmemleak_object *object =
442 		container_of(rcu, struct kmemleak_object, rcu);
443 
444 	/*
445 	 * Once use_count is 0 (guaranteed by put_object), there is no other
446 	 * code accessing this object, hence no need for locking.
447 	 */
448 	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
449 		hlist_del(&area->node);
450 		kmem_cache_free(scan_area_cache, area);
451 	}
452 	kmem_cache_free(object_cache, object);
453 }
454 
455 /*
456  * Decrement the object use_count. Once the count is 0, free the object using
457  * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
458  * delete_object() path, the delayed RCU freeing ensures that there is no
459  * recursive call to the kernel allocator. Lock-less RCU object_list traversal
460  * is also possible.
461  */
462 static void put_object(struct kmemleak_object *object)
463 {
464 	if (!atomic_dec_and_test(&object->use_count))
465 		return;
466 
467 	/* should only get here after delete_object was called */
468 	WARN_ON(object->flags & OBJECT_ALLOCATED);
469 
470 	call_rcu(&object->rcu, free_object_rcu);
471 }
472 
473 /*
474  * Look up an object in the object search tree and increase its use_count.
475  */
476 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
477 {
478 	unsigned long flags;
479 	struct kmemleak_object *object = NULL;
480 
481 	rcu_read_lock();
482 	read_lock_irqsave(&kmemleak_lock, flags);
483 	if (ptr >= min_addr && ptr < max_addr)
484 		object = lookup_object(ptr, alias);
485 	read_unlock_irqrestore(&kmemleak_lock, flags);
486 
487 	/* check whether the object is still available */
488 	if (object && !get_object(object))
489 		object = NULL;
490 	rcu_read_unlock();
491 
492 	return object;
493 }
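
/*
 * Minimal caller sketch (illustrative only; ptr_is_tracked() is not part of
 * this file). The reference returned by find_and_get_object() must always
 * be balanced by put_object():
 */
static bool __maybe_unused ptr_is_tracked(unsigned long ptr)
{
	/* alias == 1: ptr may point anywhere inside the block */
	struct kmemleak_object *object = find_and_get_object(ptr, 1);

	if (!object)
		return false;
	put_object(object);
	return true;
}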
494 
495 /*
496  * Save stack trace to the given array of MAX_TRACE size.
497  */
498 static int __save_stack_trace(unsigned long *trace)
499 {
500 	struct stack_trace stack_trace;
501 
502 	stack_trace.max_entries = MAX_TRACE;
503 	stack_trace.nr_entries = 0;
504 	stack_trace.entries = trace;
505 	stack_trace.skip = 2;
506 	save_stack_trace(&stack_trace);
507 
508 	return stack_trace.nr_entries;
509 }
510 
511 /*
512  * Create the metadata (struct kmemleak_object) corresponding to an allocated
513  * memory block and add it to the object_list and object_tree_root.
514  */
515 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
516 					     int min_count, gfp_t gfp)
517 {
518 	unsigned long flags;
519 	struct kmemleak_object *object, *parent;
520 	struct rb_node **link, *rb_parent;
521 
522 	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
523 	if (!object) {
524 		pr_warning("Cannot allocate a kmemleak_object structure\n");
525 		kmemleak_disable();
526 		return NULL;
527 	}
528 
529 	INIT_LIST_HEAD(&object->object_list);
530 	INIT_LIST_HEAD(&object->gray_list);
531 	INIT_HLIST_HEAD(&object->area_list);
532 	spin_lock_init(&object->lock);
533 	atomic_set(&object->use_count, 1);
534 	object->flags = OBJECT_ALLOCATED;
535 	object->pointer = ptr;
536 	object->size = size;
537 	object->min_count = min_count;
538 	object->count = 0;			/* white color initially */
539 	object->jiffies = jiffies;
540 	object->checksum = 0;
541 
542 	/* task information */
543 	if (in_irq()) {
544 		object->pid = 0;
545 		strncpy(object->comm, "hardirq", sizeof(object->comm));
546 	} else if (in_softirq()) {
547 		object->pid = 0;
548 		strncpy(object->comm, "softirq", sizeof(object->comm));
549 	} else {
550 		object->pid = current->pid;
551 		/*
552 		 * There is a small chance of a race with set_task_comm(),
553 		 * however using get_task_comm() here may cause locking
554 		 * dependency issues with current->alloc_lock. In the worst
555 		 * case, the command line is not correct.
556 		 */
557 		strncpy(object->comm, current->comm, sizeof(object->comm));
558 	}
559 
560 	/* kernel backtrace */
561 	object->trace_len = __save_stack_trace(object->trace);
562 
563 	write_lock_irqsave(&kmemleak_lock, flags);
564 
565 	min_addr = min(min_addr, ptr);
566 	max_addr = max(max_addr, ptr + size);
567 	link = &object_tree_root.rb_node;
568 	rb_parent = NULL;
569 	while (*link) {
570 		rb_parent = *link;
571 		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
572 		if (ptr + size <= parent->pointer)
573 			link = &parent->rb_node.rb_left;
574 		else if (parent->pointer + parent->size <= ptr)
575 			link = &parent->rb_node.rb_right;
576 		else {
577 			kmemleak_stop("Cannot insert 0x%lx into the object "
578 				      "search tree (overlaps existing)\n",
579 				      ptr);
580 			kmem_cache_free(object_cache, object);
581 			object = parent;
582 			spin_lock(&object->lock);
583 			dump_object_info(object);
584 			spin_unlock(&object->lock);
585 			goto out;
586 		}
587 	}
588 	rb_link_node(&object->rb_node, rb_parent, link);
589 	rb_insert_color(&object->rb_node, &object_tree_root);
590 
591 	list_add_tail_rcu(&object->object_list, &object_list);
592 out:
593 	write_unlock_irqrestore(&kmemleak_lock, flags);
594 	return object;
595 }
596 
597 /*
598  * Remove the metadata (struct kmemleak_object) for a memory block from the
599  * object_list and object_tree_root and decrement its use_count.
600  */
601 static void __delete_object(struct kmemleak_object *object)
602 {
603 	unsigned long flags;
604 
605 	write_lock_irqsave(&kmemleak_lock, flags);
606 	rb_erase(&object->rb_node, &object_tree_root);
607 	list_del_rcu(&object->object_list);
608 	write_unlock_irqrestore(&kmemleak_lock, flags);
609 
610 	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
611 	WARN_ON(atomic_read(&object->use_count) < 2);
612 
613 	/*
614 	 * Locking here also ensures that the corresponding memory block
615 	 * cannot be freed when it is being scanned.
616 	 */
617 	spin_lock_irqsave(&object->lock, flags);
618 	object->flags &= ~OBJECT_ALLOCATED;
619 	spin_unlock_irqrestore(&object->lock, flags);
620 	put_object(object);
621 }
622 
623 /*
624  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
625  * delete it.
626  */
627 static void delete_object_full(unsigned long ptr)
628 {
629 	struct kmemleak_object *object;
630 
631 	object = find_and_get_object(ptr, 0);
632 	if (!object) {
633 #ifdef DEBUG
634 		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
635 			      ptr);
636 #endif
637 		return;
638 	}
639 	__delete_object(object);
640 	put_object(object);
641 }
642 
643 /*
644  * Look up the metadata (struct kmemleak_object) corresponding to ptr and
645  * delete it. If the memory block is partially freed, the function may create
646  * additional metadata for the remaining parts of the block.
647  */
648 static void delete_object_part(unsigned long ptr, size_t size)
649 {
650 	struct kmemleak_object *object;
651 	unsigned long start, end;
652 
653 	object = find_and_get_object(ptr, 1);
654 	if (!object) {
655 #ifdef DEBUG
656 		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
657 			      "(size %zu)\n", ptr, size);
658 #endif
659 		return;
660 	}
661 	__delete_object(object);
662 
663 	/*
664 	 * Create one or two objects that may result from the memory block
665 	 * split. Note that partial freeing is only done by free_bootmem() and
666 	 * this happens before kmemleak_init() is called. The path below is
667 	 * only executed during early log recording in kmemleak_init(), so
668 	 * GFP_KERNEL is enough.
669 	 */
670 	start = object->pointer;
671 	end = object->pointer + object->size;
672 	if (ptr > start)
673 		create_object(start, ptr - start, object->min_count,
674 			      GFP_KERNEL);
675 	if (ptr + size < end)
676 		create_object(ptr + size, end - ptr - size, object->min_count,
677 			      GFP_KERNEL);
678 
679 	put_object(object);
680 }
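
/*
 * Worked example of the split above: partially freeing [0x1100, 0x1200)
 * from an object spanning [0x1000, 0x1400) deletes the original metadata
 * and re-creates two objects covering [0x1000, 0x1100) and
 * [0x1200, 0x1400), both inheriting the original min_count.
 */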
681 
682 static void __paint_it(struct kmemleak_object *object, int color)
683 {
684 	object->min_count = color;
685 	if (color == KMEMLEAK_BLACK)
686 		object->flags |= OBJECT_NO_SCAN;
687 }
688 
689 static void paint_it(struct kmemleak_object *object, int color)
690 {
691 	unsigned long flags;
692 
693 	spin_lock_irqsave(&object->lock, flags);
694 	__paint_it(object, color);
695 	spin_unlock_irqrestore(&object->lock, flags);
696 }
697 
698 static void paint_ptr(unsigned long ptr, int color)
699 {
700 	struct kmemleak_object *object;
701 
702 	object = find_and_get_object(ptr, 0);
703 	if (!object) {
704 		kmemleak_warn("Trying to color unknown object "
705 			      "at 0x%08lx as %s\n", ptr,
706 			      (color == KMEMLEAK_GREY) ? "Grey" :
707 			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
708 		return;
709 	}
710 	paint_it(object, color);
711 	put_object(object);
712 }
713 
714 /*
715  * Mark an object permanently as gray-colored so that it can no longer be
716  * reported as a leak. This is used in general to mark a false positive.
717  */
718 static void make_gray_object(unsigned long ptr)
719 {
720 	paint_ptr(ptr, KMEMLEAK_GREY);
721 }
722 
723 /*
724  * Mark the object as black-colored so that it is ignored from scans and
725  * reporting.
726  */
727 static void make_black_object(unsigned long ptr)
728 {
729 	paint_ptr(ptr, KMEMLEAK_BLACK);
730 }
731 
732 /*
733  * Add a scanning area to the object. If at least one such area is added,
734  * kmemleak will only scan these ranges rather than the whole memory block.
735  */
736 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
737 {
738 	unsigned long flags;
739 	struct kmemleak_object *object;
740 	struct kmemleak_scan_area *area;
741 
742 	object = find_and_get_object(ptr, 1);
743 	if (!object) {
744 		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
745 			      ptr);
746 		return;
747 	}
748 
749 	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
750 	if (!area) {
751 		pr_warning("Cannot allocate a scan area\n");
752 		goto out;
753 	}
754 
755 	spin_lock_irqsave(&object->lock, flags);
756 	if (size == SIZE_MAX) {
757 		size = object->pointer + object->size - ptr;
758 	} else if (ptr + size > object->pointer + object->size) {
759 		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
760 		dump_object_info(object);
761 		kmem_cache_free(scan_area_cache, area);
762 		goto out_unlock;
763 	}
764 
765 	INIT_HLIST_NODE(&area->node);
766 	area->start = ptr;
767 	area->size = size;
768 
769 	hlist_add_head(&area->node, &object->area_list);
770 out_unlock:
771 	spin_unlock_irqrestore(&object->lock, flags);
772 out:
773 	put_object(object);
774 }
775 
776 /*
777  * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
778  * pointer. Such an object will not be scanned by kmemleak, but references to
779  * it are still searched for.
780  */
781 static void object_no_scan(unsigned long ptr)
782 {
783 	unsigned long flags;
784 	struct kmemleak_object *object;
785 
786 	object = find_and_get_object(ptr, 0);
787 	if (!object) {
788 		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
789 		return;
790 	}
791 
792 	spin_lock_irqsave(&object->lock, flags);
793 	object->flags |= OBJECT_NO_SCAN;
794 	spin_unlock_irqrestore(&object->lock, flags);
795 	put_object(object);
796 }
797 
798 /*
799  * Log an early kmemleak_* call to the early_log buffer. These calls will be
800  * processed later once kmemleak is fully initialized.
801  */
802 static void __init log_early(int op_type, const void *ptr, size_t size,
803 			     int min_count)
804 {
805 	unsigned long flags;
806 	struct early_log *log;
807 
808 	if (atomic_read(&kmemleak_error)) {
809 		/* kmemleak stopped recording, just count the requests */
810 		crt_early_log++;
811 		return;
812 	}
813 
814 	if (crt_early_log >= ARRAY_SIZE(early_log)) {
815 		kmemleak_disable();
816 		return;
817 	}
818 
819 	/*
820 	 * There is no need for locking since the kernel is still in UP mode
821 	 * at this stage. Disabling the IRQs is enough.
822 	 */
823 	local_irq_save(flags);
824 	log = &early_log[crt_early_log];
825 	log->op_type = op_type;
826 	log->ptr = ptr;
827 	log->size = size;
828 	log->min_count = min_count;
829 	log->trace_len = __save_stack_trace(log->trace);
830 	crt_early_log++;
831 	local_irq_restore(flags);
832 }
833 
834 /*
835  * Log an early allocated block and populate the stack trace.
836  */
837 static void early_alloc(struct early_log *log)
838 {
839 	struct kmemleak_object *object;
840 	unsigned long flags;
841 	int i;
842 
843 	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
844 		return;
845 
846 	/*
847 	 * RCU locking needed to ensure object is not freed via put_object().
848 	 */
849 	rcu_read_lock();
850 	object = create_object((unsigned long)log->ptr, log->size,
851 			       log->min_count, GFP_ATOMIC);
852 	if (!object)
853 		goto out;
854 	spin_lock_irqsave(&object->lock, flags);
855 	for (i = 0; i < log->trace_len; i++)
856 		object->trace[i] = log->trace[i];
857 	object->trace_len = log->trace_len;
858 	spin_unlock_irqrestore(&object->lock, flags);
859 out:
860 	rcu_read_unlock();
861 }
862 
863 /*
864  * Log an early allocated __percpu block, registering each per-CPU area.
865  */
866 static void early_alloc_percpu(struct early_log *log)
867 {
868 	unsigned int cpu;
869 	const void __percpu *ptr = log->ptr;
870 
871 	for_each_possible_cpu(cpu) {
872 		log->ptr = per_cpu_ptr(ptr, cpu);
873 		early_alloc(log);
874 	}
875 }
876 
877 /**
878  * kmemleak_alloc - register a newly allocated object
879  * @ptr:	pointer to beginning of the object
880  * @size:	size of the object
881  * @min_count:	minimum number of references to this object. If during memory
882  *		scanning a number of references less than @min_count is found,
883  *		the object is reported as a memory leak. If @min_count is 0,
884  *		the object is never reported as a leak. If @min_count is -1,
885  *		the object is ignored (not scanned and not reported as a leak)
886  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
887  *
888  * This function is called from the kernel allocators when a new object
889  * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
890  */
891 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
892 			  gfp_t gfp)
893 {
894 	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
895 
896 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
897 		create_object((unsigned long)ptr, size, min_count, gfp);
898 	else if (atomic_read(&kmemleak_early_log))
899 		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
900 }
901 EXPORT_SYMBOL_GPL(kmemleak_alloc);
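
/*
 * Hypothetical caller sketch (the real hooks live in the slab/vmalloc
 * allocators, not here): registering a block with min_count == 1 means a
 * scan that finds no pointer to it will report it as a leak.
 */
static void __maybe_unused example_register_block(void *ptr, size_t size,
						  gfp_t gfp)
{
	kmemleak_alloc(ptr, size, 1, gfp);
}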
902 
903 /**
904  * kmemleak_alloc_percpu - register a newly allocated __percpu object
905  * @ptr:	__percpu pointer to beginning of the object
906  * @size:	size of the object
907  *
908  * This function is called from the kernel percpu allocator when a new object
909  * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
910  * allocation.
911  */
912 void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
913 {
914 	unsigned int cpu;
915 
916 	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
917 
918 	/*
919 	 * Percpu allocations are only scanned and not reported as leaks
920 	 * (min_count is set to 0).
921 	 */
922 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
923 		for_each_possible_cpu(cpu)
924 			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
925 				      size, 0, GFP_KERNEL);
926 	else if (atomic_read(&kmemleak_early_log))
927 		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
928 }
929 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
930 
931 /**
932  * kmemleak_free - unregister a previously registered object
933  * @ptr:	pointer to beginning of the object
934  *
935  * This function is called from the kernel allocators when an object (memory
936  * block) is freed (kmem_cache_free, kfree, vfree etc.).
937  */
938 void __ref kmemleak_free(const void *ptr)
939 {
940 	pr_debug("%s(0x%p)\n", __func__, ptr);
941 
942 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
943 		delete_object_full((unsigned long)ptr);
944 	else if (atomic_read(&kmemleak_early_log))
945 		log_early(KMEMLEAK_FREE, ptr, 0, 0);
946 }
947 EXPORT_SYMBOL_GPL(kmemleak_free);
948 
949 /**
950  * kmemleak_free_part - partially unregister a previously registered object
951  * @ptr:	pointer to the beginning or inside the object. This also
952  *		represents the start of the range to be freed
953  * @size:	size to be unregistered
954  *
955  * This function is called when only a part of a memory block is freed
956  * (usually from the bootmem allocator).
957  */
958 void __ref kmemleak_free_part(const void *ptr, size_t size)
959 {
960 	pr_debug("%s(0x%p)\n", __func__, ptr);
961 
962 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
963 		delete_object_part((unsigned long)ptr, size);
964 	else if (atomic_read(&kmemleak_early_log))
965 		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
966 }
967 EXPORT_SYMBOL_GPL(kmemleak_free_part);
968 
969 /**
970  * kmemleak_free_percpu - unregister a previously registered __percpu object
971  * @ptr:	__percpu pointer to beginning of the object
972  *
973  * This function is called from the kernel percpu allocator when an object
974  * (memory block) is freed (free_percpu).
975  */
976 void __ref kmemleak_free_percpu(const void __percpu *ptr)
977 {
978 	unsigned int cpu;
979 
980 	pr_debug("%s(0x%p)\n", __func__, ptr);
981 
982 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
983 		for_each_possible_cpu(cpu)
984 			delete_object_full((unsigned long)per_cpu_ptr(ptr,
985 								      cpu));
986 	else if (atomic_read(&kmemleak_early_log))
987 		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
988 }
989 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
990 
991 /**
992  * kmemleak_not_leak - mark an allocated object as false positive
993  * @ptr:	pointer to beginning of the object
994  *
995  * Calling this function on an object will cause the memory block to no longer
996  * be reported as a leak and always be scanned.
997  */
998 void __ref kmemleak_not_leak(const void *ptr)
999 {
1000 	pr_debug("%s(0x%p)\n", __func__, ptr);
1001 
1002 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
1003 		make_gray_object((unsigned long)ptr);
1004 	else if (atomic_read(&kmemleak_early_log))
1005 		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
1006 }
1007 EXPORT_SYMBOL(kmemleak_not_leak);
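
/*
 * Illustrative use (hypothetical caller): a block whose only reference is
 * kept where the scanner cannot see it, e.g. a physical address programmed
 * into a device register, can be excluded from the reports:
 *
 *	obj = kmalloc(size, GFP_KERNEL);
 *	...
 *	kmemleak_not_leak(obj);
 */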
1008 
1009 /**
1010  * kmemleak_ignore - ignore an allocated object
1011  * @ptr:	pointer to beginning of the object
1012  *
1013  * Calling this function on an object will cause the memory block to be
1014  * ignored (not scanned and not reported as a leak). This is usually done when
1015  * it is known that the corresponding block is not a leak and does not contain
1016  * any references to other allocated memory blocks.
1017  */
1018 void __ref kmemleak_ignore(const void *ptr)
1019 {
1020 	pr_debug("%s(0x%p)\n", __func__, ptr);
1021 
1022 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
1023 		make_black_object((unsigned long)ptr);
1024 	else if (atomic_read(&kmemleak_early_log))
1025 		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
1026 }
1027 EXPORT_SYMBOL(kmemleak_ignore);
1028 
1029 /**
1030  * kmemleak_scan_area - limit the range to be scanned in an allocated object
1031  * @ptr:	pointer to beginning or inside the object. This also
1032  *		represents the start of the scan area
1033  * @size:	size of the scan area
1034  * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1035  *
1036  * This function is used when it is known that only certain parts of an object
1037  * contain references to other objects. Kmemleak will only scan these areas
1038  * reducing the number of false negatives.
1039  */
1040 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1041 {
1042 	pr_debug("%s(0x%p)\n", __func__, ptr);
1043 
1044 	if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
1045 		add_scan_area((unsigned long)ptr, size, gfp);
1046 	else if (atomic_read(&kmemleak_early_log))
1047 		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
1048 }
1049 EXPORT_SYMBOL(kmemleak_scan_area);
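
/*
 * Illustrative sketch (hypothetical structure and helper): if only the
 * ->next field can hold references to other allocations, scanning can be
 * narrowed to it. Passing SIZE_MAX instead would extend the area to the
 * end of the object. The node must already be tracked, e.g. kmalloc'ed.
 */
struct example_node {
	unsigned long payload[32];	/* carries no pointers */
	struct example_node *next;
};

static void __maybe_unused example_limit_scan(struct example_node *n)
{
	kmemleak_scan_area(&n->next, sizeof(n->next), GFP_KERNEL);
}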
1050 
1051 /**
1052  * kmemleak_no_scan - do not scan an allocated object
1053  * @ptr:	pointer to beginning of the object
1054  *
1055  * This function notifies kmemleak not to scan the given memory block. Useful
1056  * in situations where it is known that the given object does not contain any
1057  * references to other objects. Kmemleak will not scan such objects, reducing
1058  * the number of false negatives.
1059  */
1060 void __ref kmemleak_no_scan(const void *ptr)
1061 {
1062 	pr_debug("%s(0x%p)\n", __func__, ptr);
1063 
1064 	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
1065 		object_no_scan((unsigned long)ptr);
1066 	else if (atomic_read(&kmemleak_early_log))
1067 		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
1068 }
1069 EXPORT_SYMBOL(kmemleak_no_scan);
1070 
1071 /*
1072  * Update an object's checksum and return true if it was modified.
1073  */
1074 static bool update_checksum(struct kmemleak_object *object)
1075 {
1076 	u32 old_csum = object->checksum;
1077 
1078 	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
1079 		return false;
1080 
1081 	object->checksum = crc32(0, (void *)object->pointer, object->size);
1082 	return object->checksum != old_csum;
1083 }
1084 
1085 /*
1086  * Memory scanning is a long process and it needs to be interruptible. This
1087  * function checks whether such interrupt condition occurred.
1088  */
1089 static int scan_should_stop(void)
1090 {
1091 	if (!atomic_read(&kmemleak_enabled))
1092 		return 1;
1093 
1094 	/*
1095 	 * This function may be called from either process or kthread context,
1096 	 * hence the need to check for both stop conditions.
1097 	 */
1098 	if (current->mm)
1099 		return signal_pending(current);
1100 	else
1101 		return kthread_should_stop();
1104 }
1105 
1106 /*
1107  * Scan a memory block (exclusive range) for valid pointers and add those
1108  * found to the gray list.
1109  */
1110 static void scan_block(void *_start, void *_end,
1111 		       struct kmemleak_object *scanned, int allow_resched)
1112 {
1113 	unsigned long *ptr;
1114 	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1115 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1116 
1117 	for (ptr = start; ptr < end; ptr++) {
1118 		struct kmemleak_object *object;
1119 		unsigned long flags;
1120 		unsigned long pointer;
1121 
1122 		if (allow_resched)
1123 			cond_resched();
1124 		if (scan_should_stop())
1125 			break;
1126 
1127 		/* don't scan uninitialized memory */
1128 		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
1129 						  BYTES_PER_POINTER))
1130 			continue;
1131 
1132 		pointer = *ptr;
1133 
1134 		object = find_and_get_object(pointer, 1);
1135 		if (!object)
1136 			continue;
1137 		if (object == scanned) {
1138 			/* self referenced, ignore */
1139 			put_object(object);
1140 			continue;
1141 		}
1142 
1143 		/*
1144 		 * Avoid the lockdep recursive warning on object->lock being
1145 		 * previously acquired in scan_object(). These locks are
1146 		 * enclosed by scan_mutex.
1147 		 */
1148 		spin_lock_irqsave_nested(&object->lock, flags,
1149 					 SINGLE_DEPTH_NESTING);
1150 		if (!color_white(object)) {
1151 			/* non-orphan, ignored or new */
1152 			spin_unlock_irqrestore(&object->lock, flags);
1153 			put_object(object);
1154 			continue;
1155 		}
1156 
1157 		/*
1158 		 * Increase the object's reference count (number of pointers
1159 		 * to the memory block). If this count reaches the required
1160 		 * minimum, the object's color will become gray and it will be
1161 		 * added to the gray_list.
1162 		 */
1163 		object->count++;
1164 		if (color_gray(object)) {
1165 			list_add_tail(&object->gray_list, &gray_list);
1166 			spin_unlock_irqrestore(&object->lock, flags);
1167 			continue;
1168 		}
1169 
1170 		spin_unlock_irqrestore(&object->lock, flags);
1171 		put_object(object);
1172 	}
1173 }
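
/*
 * Example of the boundary arithmetic above: on a 64-bit kernel, scanning
 * _start == 0x1003 and _end == 0x1020 aligns start up to 0x1008 and sets
 * end to 0x1019, so candidate pointers are loaded at 0x1008, 0x1010 and
 * 0x1018; the last 8-byte load ends exactly at _end and never overruns it.
 */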
1174 
1175 /*
1176  * Scan a memory block corresponding to a kmemleak_object. A condition is
1177  * that object->use_count >= 1.
1178  */
1179 static void scan_object(struct kmemleak_object *object)
1180 {
1181 	struct kmemleak_scan_area *area;
1182 	unsigned long flags;
1183 
1184 	/*
1185 	 * Once the object->lock is acquired, the corresponding memory block
1186 	 * cannot be freed (the same lock is acquired in delete_object).
1187 	 */
1188 	spin_lock_irqsave(&object->lock, flags);
1189 	if (object->flags & OBJECT_NO_SCAN)
1190 		goto out;
1191 	if (!(object->flags & OBJECT_ALLOCATED))
1192 		/* already freed object */
1193 		goto out;
1194 	if (hlist_empty(&object->area_list)) {
1195 		void *start = (void *)object->pointer;
1196 		void *end = (void *)(object->pointer + object->size);
1197 
1198 		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
1199 		       !(object->flags & OBJECT_NO_SCAN)) {
1200 			scan_block(start, min(start + MAX_SCAN_SIZE, end),
1201 				   object, 0);
1202 			start += MAX_SCAN_SIZE;
1203 
1204 			spin_unlock_irqrestore(&object->lock, flags);
1205 			cond_resched();
1206 			spin_lock_irqsave(&object->lock, flags);
1207 		}
1208 	} else
1209 		hlist_for_each_entry(area, &object->area_list, node)
1210 			scan_block((void *)area->start,
1211 				   (void *)(area->start + area->size),
1212 				   object, 0);
1213 out:
1214 	spin_unlock_irqrestore(&object->lock, flags);
1215 }
1216 
1217 /*
1218  * Scan the objects already referenced (gray objects). More objects will be
1219  * referenced and, if there are no memory leaks, all the objects are scanned.
1220  */
1221 static void scan_gray_list(void)
1222 {
1223 	struct kmemleak_object *object, *tmp;
1224 
1225 	/*
1226 	 * The list traversal is safe for both tail additions and removals
1227 	 * from inside the loop. The kmemleak objects cannot be freed from
1228 	 * outside the loop because their use_count was incremented.
1229 	 */
1230 	object = list_entry(gray_list.next, typeof(*object), gray_list);
1231 	while (&object->gray_list != &gray_list) {
1232 		cond_resched();
1233 
1234 		/* may add new objects to the list */
1235 		if (!scan_should_stop())
1236 			scan_object(object);
1237 
1238 		tmp = list_entry(object->gray_list.next, typeof(*object),
1239 				 gray_list);
1240 
1241 		/* remove the object from the list and release it */
1242 		list_del(&object->gray_list);
1243 		put_object(object);
1244 
1245 		object = tmp;
1246 	}
1247 	WARN_ON(!list_empty(&gray_list));
1248 }
1249 
1250 /*
1251  * Scan data sections and all the referenced memory blocks allocated via the
1252  * kernel's standard allocators. This function must be called with the
1253  * scan_mutex held.
1254  */
1255 static void kmemleak_scan(void)
1256 {
1257 	unsigned long flags;
1258 	struct kmemleak_object *object;
1259 	int i;
1260 	int new_leaks = 0;
1261 
1262 	jiffies_last_scan = jiffies;
1263 
1264 	/* prepare the kmemleak_object's */
1265 	rcu_read_lock();
1266 	list_for_each_entry_rcu(object, &object_list, object_list) {
1267 		spin_lock_irqsave(&object->lock, flags);
1268 #ifdef DEBUG
1269 		/*
1270 		 * With a few exceptions there should be a maximum of
1271 		 * 1 reference to any object at this point.
1272 		 */
1273 		if (atomic_read(&object->use_count) > 1) {
1274 			pr_debug("object->use_count = %d\n",
1275 				 atomic_read(&object->use_count));
1276 			dump_object_info(object);
1277 		}
1278 #endif
1279 		/* reset the reference count (whiten the object) */
1280 		object->count = 0;
1281 		if (color_gray(object) && get_object(object))
1282 			list_add_tail(&object->gray_list, &gray_list);
1283 
1284 		spin_unlock_irqrestore(&object->lock, flags);
1285 	}
1286 	rcu_read_unlock();
1287 
1288 	/* data/bss scanning */
1289 	scan_block(_sdata, _edata, NULL, 1);
1290 	scan_block(__bss_start, __bss_stop, NULL, 1);
1291 
1292 #ifdef CONFIG_SMP
1293 	/* per-cpu sections scanning */
1294 	for_each_possible_cpu(i)
1295 		scan_block(__per_cpu_start + per_cpu_offset(i),
1296 			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
1297 #endif
1298 
1299 	/*
1300 	 * Struct page scanning for each node.
1301 	 */
1302 	lock_memory_hotplug();
1303 	for_each_online_node(i) {
1304 		unsigned long start_pfn = node_start_pfn(i);
1305 		unsigned long end_pfn = node_end_pfn(i);
1306 		unsigned long pfn;
1307 
1308 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1309 			struct page *page;
1310 
1311 			if (!pfn_valid(pfn))
1312 				continue;
1313 			page = pfn_to_page(pfn);
1314 			/* only scan if page is in use */
1315 			if (page_count(page) == 0)
1316 				continue;
1317 			scan_block(page, page + 1, NULL, 1);
1318 		}
1319 	}
1320 	unlock_memory_hotplug();
1321 
1322 	/*
1323 	 * Scanning the task stacks (may introduce false negatives).
1324 	 */
1325 	if (kmemleak_stack_scan) {
1326 		struct task_struct *p, *g;
1327 
1328 		read_lock(&tasklist_lock);
1329 		do_each_thread(g, p) {
1330 			scan_block(task_stack_page(p), task_stack_page(p) +
1331 				   THREAD_SIZE, NULL, 0);
1332 		} while_each_thread(g, p);
1333 		read_unlock(&tasklist_lock);
1334 	}
1335 
1336 	/*
1337 	 * Scan the objects already referenced from the sections scanned
1338 	 * above.
1339 	 */
1340 	scan_gray_list();
1341 
1342 	/*
1343 	 * Check for new or unreferenced objects modified since the previous
1344 	 * scan and color them gray until the next scan.
1345 	 */
1346 	rcu_read_lock();
1347 	list_for_each_entry_rcu(object, &object_list, object_list) {
1348 		spin_lock_irqsave(&object->lock, flags);
1349 		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1350 		    && update_checksum(object) && get_object(object)) {
1351 			/* color it gray temporarily */
1352 			object->count = object->min_count;
1353 			list_add_tail(&object->gray_list, &gray_list);
1354 		}
1355 		spin_unlock_irqrestore(&object->lock, flags);
1356 	}
1357 	rcu_read_unlock();
1358 
1359 	/*
1360 	 * Re-scan the gray list for modified unreferenced objects.
1361 	 */
1362 	scan_gray_list();
1363 
1364 	/*
1365 	 * If scanning was stopped do not report any new unreferenced objects.
1366 	 */
1367 	if (scan_should_stop())
1368 		return;
1369 
1370 	/*
1371 	 * Scanning result reporting.
1372 	 */
1373 	rcu_read_lock();
1374 	list_for_each_entry_rcu(object, &object_list, object_list) {
1375 		spin_lock_irqsave(&object->lock, flags);
1376 		if (unreferenced_object(object) &&
1377 		    !(object->flags & OBJECT_REPORTED)) {
1378 			object->flags |= OBJECT_REPORTED;
1379 			new_leaks++;
1380 		}
1381 		spin_unlock_irqrestore(&object->lock, flags);
1382 	}
1383 	rcu_read_unlock();
1384 
1385 	if (new_leaks)
1386 		pr_info("%d new suspected memory leaks (see "
1387 			"/sys/kernel/debug/kmemleak)\n", new_leaks);
1388 
1389 }
1390 
1391 /*
1392  * Thread function performing automatic memory scanning. Unreferenced objects
1393  * at the end of a memory scan are reported, but only the first time.
1394  */
1395 static int kmemleak_scan_thread(void *arg)
1396 {
1397 	static int first_run = 1;
1398 
1399 	pr_info("Automatic memory scanning thread started\n");
1400 	set_user_nice(current, 10);
1401 
1402 	/*
1403 	 * Wait before the first scan to allow the system to fully initialize.
1404 	 */
1405 	if (first_run) {
1406 		first_run = 0;
1407 		ssleep(SECS_FIRST_SCAN);
1408 	}
1409 
1410 	while (!kthread_should_stop()) {
1411 		signed long timeout = jiffies_scan_wait;
1412 
1413 		mutex_lock(&scan_mutex);
1414 		kmemleak_scan();
1415 		mutex_unlock(&scan_mutex);
1416 
1417 		/* wait before the next scan */
1418 		while (timeout && !kthread_should_stop())
1419 			timeout = schedule_timeout_interruptible(timeout);
1420 	}
1421 
1422 	pr_info("Automatic memory scanning thread ended\n");
1423 
1424 	return 0;
1425 }
1426 
1427 /*
1428  * Start the automatic memory scanning thread. This function must be called
1429  * with the scan_mutex held.
1430  */
1431 static void start_scan_thread(void)
1432 {
1433 	if (scan_thread)
1434 		return;
1435 	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1436 	if (IS_ERR(scan_thread)) {
1437 		pr_warning("Failed to create the scan thread\n");
1438 		scan_thread = NULL;
1439 	}
1440 }
1441 
1442 /*
1443  * Stop the automatic memory scanning thread. This function must be called
1444  * with the scan_mutex held.
1445  */
1446 static void stop_scan_thread(void)
1447 {
1448 	if (scan_thread) {
1449 		kthread_stop(scan_thread);
1450 		scan_thread = NULL;
1451 	}
1452 }
1453 
1454 /*
1455  * Iterate over the object_list and return the first valid object at or after
1456  * the required position with its use_count incremented.
1458  */
1459 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1460 {
1461 	struct kmemleak_object *object;
1462 	loff_t n = *pos;
1463 	int err;
1464 
1465 	err = mutex_lock_interruptible(&scan_mutex);
1466 	if (err < 0)
1467 		return ERR_PTR(err);
1468 
1469 	rcu_read_lock();
1470 	list_for_each_entry_rcu(object, &object_list, object_list) {
1471 		if (n-- > 0)
1472 			continue;
1473 		if (get_object(object))
1474 			goto out;
1475 	}
1476 	object = NULL;
1477 out:
1478 	return object;
1479 }
1480 
1481 /*
1482  * Return the next object in the object_list. The function decrements the
1483  * use_count of the previous object and increases that of the next one.
1484  */
1485 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1486 {
1487 	struct kmemleak_object *prev_obj = v;
1488 	struct kmemleak_object *next_obj = NULL;
1489 	struct kmemleak_object *obj = prev_obj;
1490 
1491 	++(*pos);
1492 
1493 	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1494 		if (get_object(obj)) {
1495 			next_obj = obj;
1496 			break;
1497 		}
1498 	}
1499 
1500 	put_object(prev_obj);
1501 	return next_obj;
1502 }
1503 
1504 /*
1505  * Decrement the use_count of the last object returned, if any.
1506  */
1507 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1508 {
1509 	if (!IS_ERR(v)) {
1510 		/*
1511 		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1512 		 * waiting was interrupted, so only release it if !IS_ERR.
1513 		 */
1514 		rcu_read_unlock();
1515 		mutex_unlock(&scan_mutex);
1516 		if (v)
1517 			put_object(v);
1518 	}
1519 }
1520 
1521 /*
1522  * Print the information for an unreferenced object to the seq file.
1523  */
1524 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1525 {
1526 	struct kmemleak_object *object = v;
1527 	unsigned long flags;
1528 
1529 	spin_lock_irqsave(&object->lock, flags);
1530 	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1531 		print_unreferenced(seq, object);
1532 	spin_unlock_irqrestore(&object->lock, flags);
1533 	return 0;
1534 }
1535 
1536 static const struct seq_operations kmemleak_seq_ops = {
1537 	.start = kmemleak_seq_start,
1538 	.next  = kmemleak_seq_next,
1539 	.stop  = kmemleak_seq_stop,
1540 	.show  = kmemleak_seq_show,
1541 };
1542 
1543 static int kmemleak_open(struct inode *inode, struct file *file)
1544 {
1545 	return seq_open(file, &kmemleak_seq_ops);
1546 }
1547 
1548 static int kmemleak_release(struct inode *inode, struct file *file)
1549 {
1550 	return seq_release(inode, file);
1551 }
1552 
1553 static int dump_str_object_info(const char *str)
1554 {
1555 	unsigned long flags;
1556 	struct kmemleak_object *object;
1557 	unsigned long addr;
1558 
1559 	if (kstrtoul(str, 0, &addr))
1560 		return -EINVAL;
1561 	object = find_and_get_object(addr, 0);
1562 	if (!object) {
1563 		pr_info("Unknown object at 0x%08lx\n", addr);
1564 		return -EINVAL;
1565 	}
1566 
1567 	spin_lock_irqsave(&object->lock, flags);
1568 	dump_object_info(object);
1569 	spin_unlock_irqrestore(&object->lock, flags);
1570 
1571 	put_object(object);
1572 	return 0;
1573 }
1574 
1575 /*
1576  * We use grey instead of black to ensure we can do future scans on the same
1577  * objects. If we did not do future scans these black objects could
1578  * potentially contain references to newly allocated objects in the future and
1579  * we'd end up with false positives.
1580  */
1581 static void kmemleak_clear(void)
1582 {
1583 	struct kmemleak_object *object;
1584 	unsigned long flags;
1585 
1586 	rcu_read_lock();
1587 	list_for_each_entry_rcu(object, &object_list, object_list) {
1588 		spin_lock_irqsave(&object->lock, flags);
1589 		if ((object->flags & OBJECT_REPORTED) &&
1590 		    unreferenced_object(object))
1591 			__paint_it(object, KMEMLEAK_GREY);
1592 		spin_unlock_irqrestore(&object->lock, flags);
1593 	}
1594 	rcu_read_unlock();
1595 }
1596 
1597 /*
1598  * File write operation to configure kmemleak at run-time. The following
1599  * commands can be written to the /sys/kernel/debug/kmemleak file:
1600  *   off	- disable kmemleak (irreversible)
1601  *   stack=on	- enable the task stacks scanning
1602  *   stack=off	- disable the task stacks scanning
1603  *   scan=on	- start the automatic memory scanning thread
1604  *   scan=off	- stop the automatic memory scanning thread
1605  *   scan=...	- set the automatic memory scanning period in seconds (0 to
1606  *		  disable it)
1607  *   scan	- trigger a memory scan
1608  *   clear	- mark all currently reported unreferenced kmemleak objects as
1609  *		  grey to ignore printing them
1610  *   dump=...	- dump information about the object found at the given address
1611  */
1612 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1613 			      size_t size, loff_t *ppos)
1614 {
1615 	char buf[64];
1616 	int buf_size;
1617 	int ret;
1618 
1619 	if (!atomic_read(&kmemleak_enabled))
1620 		return -EBUSY;
1621 
1622 	buf_size = min(size, (sizeof(buf) - 1));
1623 	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1624 		return -EFAULT;
1625 	buf[buf_size] = 0;
1626 
1627 	ret = mutex_lock_interruptible(&scan_mutex);
1628 	if (ret < 0)
1629 		return ret;
1630 
1631 	if (strncmp(buf, "off", 3) == 0)
1632 		kmemleak_disable();
1633 	else if (strncmp(buf, "stack=on", 8) == 0)
1634 		kmemleak_stack_scan = 1;
1635 	else if (strncmp(buf, "stack=off", 9) == 0)
1636 		kmemleak_stack_scan = 0;
1637 	else if (strncmp(buf, "scan=on", 7) == 0)
1638 		start_scan_thread();
1639 	else if (strncmp(buf, "scan=off", 8) == 0)
1640 		stop_scan_thread();
1641 	else if (strncmp(buf, "scan=", 5) == 0) {
1642 		unsigned long secs;
1643 
1644 		ret = kstrtoul(buf + 5, 0, &secs);
1645 		if (ret < 0)
1646 			goto out;
1647 		stop_scan_thread();
1648 		if (secs) {
1649 			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1650 			start_scan_thread();
1651 		}
1652 	} else if (strncmp(buf, "scan", 4) == 0)
1653 		kmemleak_scan();
1654 	else if (strncmp(buf, "clear", 5) == 0)
1655 		kmemleak_clear();
1656 	else if (strncmp(buf, "dump=", 5) == 0)
1657 		ret = dump_str_object_info(buf + 5);
1658 	else
1659 		ret = -EINVAL;
1660 
1661 out:
1662 	mutex_unlock(&scan_mutex);
1663 	if (ret < 0)
1664 		return ret;
1665 
1666 	/* ignore the rest of the buffer, only one command at a time */
1667 	*ppos += size;
1668 	return size;
1669 }
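
/*
 * Example session (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug and the commands are run as root):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 *	# echo clear > /sys/kernel/debug/kmemleak
 *	# echo scan=300 > /sys/kernel/debug/kmemleak
 */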
1670 
1671 static const struct file_operations kmemleak_fops = {
1672 	.owner		= THIS_MODULE,
1673 	.open		= kmemleak_open,
1674 	.read		= seq_read,
1675 	.write		= kmemleak_write,
1676 	.llseek		= seq_lseek,
1677 	.release	= kmemleak_release,
1678 };
1679 
1680 /*
1681  * Stop the memory scanning thread and free the kmemleak internal objects if
1682  * no scan thread was previously running (otherwise, kmemleak may still have
1683  * some useful information on memory leaks).
1684  */
1685 static void kmemleak_do_cleanup(struct work_struct *work)
1686 {
1687 	struct kmemleak_object *object;
1688 	bool cleanup = scan_thread == NULL;
1689 
1690 	mutex_lock(&scan_mutex);
1691 	stop_scan_thread();
1692 
1693 	if (cleanup) {
1694 		rcu_read_lock();
1695 		list_for_each_entry_rcu(object, &object_list, object_list)
1696 			delete_object_full(object->pointer);
1697 		rcu_read_unlock();
1698 	}
1699 	mutex_unlock(&scan_mutex);
1700 }
1701 
1702 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1703 
1704 /*
1705  * Disable kmemleak. No memory allocation/freeing will be traced once this
1706  * function is called. Disabling kmemleak is an irreversible operation.
1707  */
1708 static void kmemleak_disable(void)
1709 {
1710 	/* atomically check whether it was already invoked */
1711 	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
1712 		return;
1713 
1714 	/* stop any memory operation tracing */
1715 	atomic_set(&kmemleak_enabled, 0);
1716 
1717 	/* check whether it is too early for a kernel thread */
1718 	if (atomic_read(&kmemleak_initialized))
1719 		schedule_work(&cleanup_work);
1720 
1721 	pr_info("Kernel memory leak detector disabled\n");
1722 }
1723 
1724 /*
1725  * Allow boot-time kmemleak disabling (enabled by default).
1726  */
1727 static int kmemleak_boot_config(char *str)
1728 {
1729 	if (!str)
1730 		return -EINVAL;
1731 	if (strcmp(str, "off") == 0)
1732 		kmemleak_disable();
1733 	else if (strcmp(str, "on") == 0)
1734 		kmemleak_skip_disable = 1;
1735 	else
1736 		return -EINVAL;
1737 	return 0;
1738 }
1739 early_param("kmemleak", kmemleak_boot_config);
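
/*
 * E.g. booting with "kmemleak=off" on the kernel command line disables the
 * detector early, while "kmemleak=on" skips the default-off behaviour when
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is set.
 */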
1740 
1741 static void __init print_log_trace(struct early_log *log)
1742 {
1743 	struct stack_trace trace;
1744 
1745 	trace.nr_entries = log->trace_len;
1746 	trace.entries = log->trace;
1747 
1748 	pr_notice("Early log backtrace:\n");
1749 	print_stack_trace(&trace, 2);
1750 }
1751 
1752 /*
1753  * Kmemleak initialization.
1754  */
1755 void __init kmemleak_init(void)
1756 {
1757 	int i;
1758 	unsigned long flags;
1759 
1760 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1761 	if (!kmemleak_skip_disable) {
1762 		atomic_set(&kmemleak_early_log, 0);
1763 		kmemleak_disable();
1764 		return;
1765 	}
1766 #endif
1767 
1768 	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1769 	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1770 
1771 	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1772 	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1773 
1774 	if (crt_early_log >= ARRAY_SIZE(early_log))
1775 		pr_warning("Early log buffer exceeded (%d), please increase "
1776 			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
1777 
1778 	/* the kernel is still in UP mode, so disabling the IRQs is enough */
1779 	local_irq_save(flags);
1780 	atomic_set(&kmemleak_early_log, 0);
1781 	if (atomic_read(&kmemleak_error)) {
1782 		local_irq_restore(flags);
1783 		return;
1784 	} else
1785 		atomic_set(&kmemleak_enabled, 1);
1786 	local_irq_restore(flags);
1787 
1788 	/*
1789 	 * This is the point where tracking allocations is safe. Automatic
1790 	 * scanning is started during the late initcall. Add the early logged
1791 	 * callbacks to the kmemleak infrastructure.
1792 	 */
1793 	for (i = 0; i < crt_early_log; i++) {
1794 		struct early_log *log = &early_log[i];
1795 
1796 		switch (log->op_type) {
1797 		case KMEMLEAK_ALLOC:
1798 			early_alloc(log);
1799 			break;
1800 		case KMEMLEAK_ALLOC_PERCPU:
1801 			early_alloc_percpu(log);
1802 			break;
1803 		case KMEMLEAK_FREE:
1804 			kmemleak_free(log->ptr);
1805 			break;
1806 		case KMEMLEAK_FREE_PART:
1807 			kmemleak_free_part(log->ptr, log->size);
1808 			break;
1809 		case KMEMLEAK_FREE_PERCPU:
1810 			kmemleak_free_percpu(log->ptr);
1811 			break;
1812 		case KMEMLEAK_NOT_LEAK:
1813 			kmemleak_not_leak(log->ptr);
1814 			break;
1815 		case KMEMLEAK_IGNORE:
1816 			kmemleak_ignore(log->ptr);
1817 			break;
1818 		case KMEMLEAK_SCAN_AREA:
1819 			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
1820 			break;
1821 		case KMEMLEAK_NO_SCAN:
1822 			kmemleak_no_scan(log->ptr);
1823 			break;
1824 		default:
1825 			kmemleak_warn("Unknown early log operation: %d\n",
1826 				      log->op_type);
1827 		}
1828 
1829 		if (atomic_read(&kmemleak_warning)) {
1830 			print_log_trace(log);
1831 			atomic_set(&kmemleak_warning, 0);
1832 		}
1833 	}
1834 }
1835 
1836 /*
1837  * Late initialization function.
1838  */
1839 static int __init kmemleak_late_init(void)
1840 {
1841 	struct dentry *dentry;
1842 
1843 	atomic_set(&kmemleak_initialized, 1);
1844 
1845 	if (atomic_read(&kmemleak_error)) {
1846 		/*
1847 		 * Some error occurred and kmemleak was disabled. There is a
1848 		 * small chance that kmemleak_disable() was called immediately
1849 		 * after setting kmemleak_initialized and we may end up with
1850 		 * two clean-up threads but serialized by scan_mutex.
1851 		 */
1852 		schedule_work(&cleanup_work);
1853 		return -ENOMEM;
1854 	}
1855 
1856 	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1857 				     &kmemleak_fops);
1858 	if (!dentry)
1859 		pr_warning("Failed to create the debugfs kmemleak file\n");
1860 	mutex_lock(&scan_mutex);
1861 	start_scan_thread();
1862 	mutex_unlock(&scan_mutex);
1863 
1864 	pr_info("Kernel memory leak detector initialized\n");
1865 
1866 	return 0;
1867 }
1868 late_initcall(kmemleak_late_init);
1869