// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

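/* Human-readable names for enum migrate_reason, indexed by MR_* value. */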
const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};

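/*
 * Flag-name tables consumed by the %pGp, %pGg and %pGv printk format
 * specifiers; each list is terminated by a {0, NULL} sentinel.
 */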
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

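/*
 * Print the state of @page: refcount, mapcount, mapping, flags and a raw
 * hex dump of the struct page itself. @reason, if non-NULL, is printed as
 * the cause of the dump.
 */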
void __dump_page(struct page *page, const char *reason)
{
	struct address_space *mapping;
	bool page_poisoned = PagePoisoned(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	/*
	 * If the struct page is poisoned, don't call Page*() functions, as
	 * that leads to a recursive loop: Page*() checks for poisoned pages
	 * and calls dump_page() when one is detected.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned\n", page);
		goto hex_only;
	}

	mapping = page_mapping(page);

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * The page->_mapcount space in struct page is used by sl[aou]b pages
	 * to encode their own info.
	 */
	mapcount = PageSlab(page) ? 0 : page_mapcount(page);

	if (PageCompound(page))
		pr_warn("page:%px refcount:%d mapcount:%d mapping:%px "
			"index:%#lx compound_mapcount: %d\n",
			page, page_ref_count(page), mapcount,
			page->mapping, page_to_pgoff(page),
			compound_mapcount(page));
	else
		pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx\n",
			page, page_ref_count(page), mapcount,
			page->mapping, page_to_pgoff(page));
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping) {
		if (mapping->host && mapping->host->i_dentry.first) {
			struct dentry *dentry;

			dentry = container_of(mapping->host->i_dentry.first,
					      struct dentry, d_u.d_alias);
			pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
		} else
			pr_warn("%ps\n", mapping->a_ops);
	}
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %#lx(%pGp)%s\n", type, page->flags, &page->flags,
		page_cma ? " CMA" : "");

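	/*
	 * Poisoned pages jump straight here: only a raw dump of the struct
	 * page contents is safe.
	 */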
hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}

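/*
 * dump_page() is the exported entry point: it prints the page state via
 * __dump_page() and, when CONFIG_PAGE_OWNER is enabled, the allocation
 * info recorded by page_owner. Illustrative (hypothetical) call site:
 *
 *	if (WARN_ON(!PageLocked(page)))
 *		dump_page(page, "expected locked page");
 */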
void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

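/*
 * Dump the fields of @vma; the %pGv printk specifier decodes vm_flags
 * using vmaflag_names above.
 */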
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

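/*
 * Dump the fields of @mm. The lines printed depend on the kernel
 * configuration, so the format string and the argument list below must
 * be kept in sync under the same #ifdefs.
 */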
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}

static bool page_init_poisoning __read_mostly = true;

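/*
 * Parse the vm_debug[=options] kernel parameter:
 *
 *	vm_debug	enable every option we control (currently only 'p')
 *	vm_debug=p	enable page struct init-time poisoning
 *	vm_debug=-	disable all options
 */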
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

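/*
 * Fill freshly allocated struct pages with PAGE_POISON_PATTERN so that
 * any use before proper initialization trips PagePoisoned(), which in
 * turn ends up in __dump_page() above.
 */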
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
#endif		/* CONFIG_DEBUG_VM */