// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
	MIGRATE_REASON
};

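/*
 * For illustration: MIGRATE_REASON is a list of EM()/EMe() pairs, so with
 * the definitions above it expands to just the string half of each pair.
 * A sketch of the result (entries abridged; see trace/events/migrate.h
 * for the authoritative list):
 *
 *	const char *migrate_reason_names[MR_TYPES] = {
 *		"compaction",
 *		...
 *		"demotion"
 *	};
 */
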
const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags pagetype_names[] = {
	__def_pagetype_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};

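/*
 * The NULL-terminated tables above back the vsprintf() "%pG" formats used
 * below and elsewhere: %pGp (page flags), %pGt (page type), %pGg (gfp
 * flags) and %pGv (vma flags). Each decodes a flags word into the symbolic
 * names defined in trace/events/mmflags.h.
 */
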
static void __dump_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct address_space *mapping;
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/*
		 * Corrupt page, so we cannot call page_mapping. Instead, do a
		 * safe subset of the steps that page_mapping() does. Caution:
		 * this will be misleading for tail pages, PageSwapCache pages,
		 * and potentially other situations. (See the page_mapping()
		 * implementation for what's missing here.)
		 */
		unsigned long tmp = (unsigned long)page->mapping;

		if (tmp & PAGE_MAPPING_ANON)
			mapping = NULL;
		else
			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
		head = page;
		folio = (struct folio *)page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid the VM_BUG_ON() in page_mapcount(): the page->_mapcount
	 * space in struct page is used by sl[aou]b pages to encode their
	 * own information, so it must not be read as a mapcount here.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
			page, page_ref_count(head), mapcount, mapping,
			page_to_pgoff(page), page_to_pfn(page));
	if (compound) {
		pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
				head, compound_order(head),
				folio_entire_mapcount(folio),
				folio_nr_pages_mapped(folio),
				atomic_read(&folio->_pincount));
	}

#ifdef CONFIG_MEMCG
	if (head->memcg_data)
		pr_warn("memcg:%lx\n", head->memcg_data);
#endif
	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping)
		dump_mapping(mapping);
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
		page_cma ? " CMA" : "");
	pr_warn("page_type: %pGt\n", &head->page_type);

	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), head,
			sizeof(struct page), false);
}

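/**
 * dump_page - print diagnostic state of a struct page to the kernel log
 * @page:   the page to report on
 * @reason: optional message explaining why the page is dumped; printed
 *          after the page state, or skipped when NULL
 *
 * Poisoned (uninitialized) pages are detected and reported without being
 * dereferenced further; otherwise the full state, including page owner
 * information when available, is printed at warning level.
 */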
void dump_page(struct page *page, const char *reason)
{
	if (PagePoisoned(page))
		pr_warn("page:%p is uninitialized and poisoned", page);
	else
		__dump_page(page);
	if (reason)
		pr_warn("page dumped because: %s\n", reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

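/*
 * Illustrative use only (the check, pointer values and reason string
 * below are made up for the sketch): a caller that trips over a page in
 * an unexpected state might do
 *
 *	if (unlikely(page_ref_count(page) < 0))
 *		dump_page(page, "negative refcount");
 *
 * which, given the format strings in __dump_page(), produces log lines
 * along the lines of
 *
 *	page:00000000a3f1c2d4 refcount:-1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x102a00
 *	flags: 0x2ffff0000000000(node=0|zone=2)
 *	page dumped because: negative refcount
 */
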
#ifdef CONFIG_DEBUG_VM

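/**
 * dump_vma - print the state of a vm_area_struct to the kernel log
 * @vma: the VMA to report on
 *
 * Prints the VMA's range, owning mm, protection bits, backing objects and
 * decoded flags at emergency level. Only built when CONFIG_DEBUG_VM is
 * enabled.
 */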
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

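/**
 * dump_mm - print the state of an mm_struct to the kernel log
 * @mm: the mm to report on
 *
 * Prints reference counts, VM accounting, the code/data/brk/stack layout
 * markers and decoded default flags at emergency level, with
 * config-dependent fields included as built.
 */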
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
EXPORT_SYMBOL(dump_mm);

static bool page_init_poisoning __read_mostly = true;

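/*
 * Parse the "vm_debug" kernel command-line option. Accepted forms, as
 * implemented below:
 *
 *	vm_debug	enable all debugging options this code controls
 *	vm_debug=-	disable all of them
 *	vm_debug=P	enable page struct init poisoning (case-insensitive)
 *
 * Unknown option characters are reported and skipped.
 */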
static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

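/**
 * page_init_poison - poison a range of struct pages with a known pattern
 * @page: first struct page to poison
 * @size: number of bytes of struct page memory to fill
 *
 * Fills the memory with PAGE_POISON_PATTERN so that use of a struct page
 * before it is properly initialized can be caught. A no-op when poisoning
 * has been disabled via the "vm_debug" option.
 */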
void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}

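/**
 * vma_iter_dump_tree - dump the maple tree backing a VMA iterator
 * @vmi: the VMA iterator whose tree state should be dumped
 *
 * Dumps both the iterator's maple state and the full tree in hex form.
 * A no-op unless CONFIG_DEBUG_VM_MAPLE_TREE is enabled.
 */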
void vma_iter_dump_tree(const struct vma_iterator *vmi)
{
#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	mas_dump(&vmi->mas);
	mt_dump(vmi->mas.tree, mt_dump_hex);
#endif	/* CONFIG_DEBUG_VM_MAPLE_TREE */
}

28082742a3aSSasha Levin #endif		/* CONFIG_DEBUG_VM */
281