xref: /openbmc/linux/mm/debug.c (revision ee7da21a)
// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};
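
/*
 * Illustrative sketch (not part of the original file): consumers index
 * this table with an enum migrate_reason value, e.g. the page owner code
 * prints why a page was last migrated. The helper name below is
 * hypothetical.
 */
#if 0
static void example_print_migrate_reason(int reason)
{
	/* Range-check first: the value may come from stale page state. */
	if (reason >= 0 && reason < MR_TYPES)
		pr_info("last migrate reason: %s\n",
			migrate_reason_names[reason]);
}
#endif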

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
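
/*
 * Illustrative sketch (not part of the original file): these tables back
 * the %pGp, %pGg and %pGv printk extensions, which decode flag words
 * symbolically. Hypothetical helper for demonstration only.
 */
#if 0
static void example_print_flags(struct page *page,
				struct vm_area_struct *vma, gfp_t gfp)
{
	/* Each %pG* specifier takes a pointer to the flags word. */
	pr_info("page flags: %pGp\n", &page->flags);
	pr_info("gfp flags:  %pGg\n", &gfp);
	pr_info("vma flags:  %pGv\n", &vma->vm_flags);
}
#endif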

static void __dump_page(struct page *page)
{
	struct page *head = compound_head(page);
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * The page->_mapcount space in struct page is reused by sl[aou]b
	 * pages to encode their own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		if (hpage_pincount_available(page)) {
			pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
					head, compound_order(head),
					head_compound_mapcount(head),
					head_compound_pincount(head));
		} else {
			pr_warn("head:%p order:%u compound_mapcount:%d\n",
					head, compound_order(head),
					head_compound_mapcount(head));
		}
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping) {
		struct inode *host;
		const struct address_space_operations *a_ops;
		struct hlist_node *dentry_first;
		struct dentry *dentry_ptr;
		struct dentry dentry;
		unsigned long ino;

		/*
		 * The mapping can be an invalid pointer and we don't want to
		 * crash accessing it, so carefully probe everything that
		 * depends on it.
		 */
		if (get_kernel_nofault(host, &mapping->host) ||
		    get_kernel_nofault(a_ops, &mapping->a_ops)) {
			pr_warn("failed to read mapping contents, not a valid kernel address?\n");
			goto out_mapping;
		}

		if (!host) {
			pr_warn("aops:%ps\n", a_ops);
			goto out_mapping;
		}

		if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
		    get_kernel_nofault(ino, &host->i_ino)) {
			pr_warn("aops:%ps with invalid host inode %px\n",
					a_ops, host);
			goto out_mapping;
		}

		if (!dentry_first) {
			pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
			goto out_mapping;
		}

		dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
		if (get_kernel_nofault(dentry, dentry_ptr)) {
			pr_warn("aops:%ps ino:%lx with invalid dentry %px\n",
					a_ops, ino, dentry_ptr);
		} else {
			/*
			 * If the dentry is corrupted, the %pd handler may
			 * still crash, but it's unlikely that we reach here
			 * with a corrupted struct page.
			 */
			pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n",
					a_ops, ino, &dentry);
		}
	}
out_mapping:
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %#lx(%pGp)%s\n", type, head->flags, &head->flags,
		page_cma ? " CMA" : "");
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), head,
			sizeof(struct page), false);
}
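
/*
 * Illustrative sketch (not part of the original file) of the probing
 * pattern used in __dump_page() above: get_kernel_nofault() copies a
 * value out of a possibly-bogus kernel address and fails gracefully
 * instead of oopsing. Hypothetical helper name.
 */
#if 0
static bool example_probe_ino(struct inode *maybe_bad, unsigned long *ino)
{
	/* get_kernel_nofault() returns nonzero on fault. */
	return !get_kernel_nofault(*ino, &maybe_bad->i_ino);
}
#endif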

void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned\n", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
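
/*
 * Illustrative sketch (not part of the original file): callers typically
 * dump the page right before asserting, so the splat carries the full
 * page state; VM_BUG_ON_PAGE() wraps the same pattern. Hypothetical
 * helper name.
 */
#if 0
static void example_check_page(struct page *page)
{
	if (unlikely(page_ref_count(page) < 0)) {
		dump_page(page, "negative page refcount");
		BUG();
	}
}
#endif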

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);
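
/*
 * Illustrative sketch (not part of the original file): VM_BUG_ON_VMA()
 * dumps the vma like this before crashing; a hand-rolled sanity check
 * can do the same. Hypothetical helper name.
 */
#if 0
static void example_check_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma->vm_start >= vma->vm_end)) {
		dump_vma(vma);
		BUG();
	}
}
#endif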

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
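
/*
 * Note: the format string and argument list above are assembled under
 * identical #ifdef blocks, so both must be updated together when a field
 * is added. Illustrative sketch (not part of the original file) of a
 * VM_BUG_ON_MM()-style caller; hypothetical helper name.
 */
#if 0
static void example_check_mm(struct mm_struct *mm)
{
	if (unlikely(atomic_read(&mm->mm_users) < 0)) {
		dump_mm(mm);
		BUG();
	}
}
#endif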

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);
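
/*
 * Boot command line examples, derived from the parser above:
 *
 *	vm_debug	enable every option we control (currently only 'p')
 *	vm_debug=-	disable all options
 *	vm_debug=P	enable struct page init-time poisoning only
 *
 * Unknown option characters are reported via pr_err() and skipped.
 */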

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
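
/*
 * Illustrative sketch (not part of the original file): a memory
 * hot-remove style caller can poison the struct pages it is releasing so
 * a later use trips the PagePoisoned() check in dump_page().
 * Hypothetical helper name.
 */
#if 0
static void example_release_memmap(struct page *memmap, unsigned long nr_pages)
{
	page_init_poison(memmap, sizeof(struct page) * nr_pages);
}
#endif
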
#endif		/* CONFIG_DEBUG_VM */