1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * mm/debug.c 4 * 5 * mm/ specific debug routines. 6 * 7 */ 8 9 #include <linux/kernel.h> 10 #include <linux/mm.h> 11 #include <linux/trace_events.h> 12 #include <linux/memcontrol.h> 13 #include <trace/events/mmflags.h> 14 #include <linux/migrate.h> 15 #include <linux/page_owner.h> 16 #include <linux/ctype.h> 17 18 #include "internal.h" 19 #include <trace/events/migrate.h> 20 21 /* 22 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can 23 * be used to populate migrate_reason_names[]. 24 */ 25 #undef EM 26 #undef EMe 27 #define EM(a, b) b, 28 #define EMe(a, b) b 29 30 const char *migrate_reason_names[MR_TYPES] = { 31 MIGRATE_REASON 32 }; 33 34 const struct trace_print_flags pageflag_names[] = { 35 __def_pageflag_names, 36 {0, NULL} 37 }; 38 39 const struct trace_print_flags gfpflag_names[] = { 40 __def_gfpflag_names, 41 {0, NULL} 42 }; 43 44 const struct trace_print_flags vmaflag_names[] = { 45 __def_vmaflag_names, 46 {0, NULL} 47 }; 48 49 static void __dump_page(struct page *page) 50 { 51 struct page *head = compound_head(page); 52 struct address_space *mapping; 53 bool compound = PageCompound(page); 54 /* 55 * Accessing the pageblock without the zone lock. It could change to 56 * "isolate" again in the meantime, but since we are just dumping the 57 * state for debugging, it should be fine to accept a bit of 58 * inaccuracy here due to racing. 59 */ 60 bool page_cma = is_migrate_cma_page(page); 61 int mapcount; 62 char *type = ""; 63 64 if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) { 65 /* 66 * Corrupt page, so we cannot call page_mapping. Instead, do a 67 * safe subset of the steps that page_mapping() does. Caution: 68 * this will be misleading for tail pages, PageSwapCache pages, 69 * and potentially other situations. (See the page_mapping() 70 * implementation for what's missing here.) 71 */ 72 unsigned long tmp = (unsigned long)page->mapping; 73 74 if (tmp & PAGE_MAPPING_ANON) 75 mapping = NULL; 76 else 77 mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS); 78 head = page; 79 compound = false; 80 } else { 81 mapping = page_mapping(page); 82 } 83 84 /* 85 * Avoid VM_BUG_ON() in page_mapcount(). 86 * page->_mapcount space in struct page is used by sl[aou]b pages to 87 * encode own info. 88 */ 89 mapcount = PageSlab(head) ? 0 : page_mapcount(page); 90 91 pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n", 92 page, page_ref_count(head), mapcount, mapping, 93 page_to_pgoff(page), page_to_pfn(page)); 94 if (compound) { 95 if (hpage_pincount_available(page)) { 96 pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n", 97 head, compound_order(head), 98 head_compound_mapcount(head), 99 head_compound_pincount(head)); 100 } else { 101 pr_warn("head:%p order:%u compound_mapcount:%d\n", 102 head, compound_order(head), 103 head_compound_mapcount(head)); 104 } 105 } 106 107 #ifdef CONFIG_MEMCG 108 if (head->memcg_data) 109 pr_warn("memcg:%lx\n", head->memcg_data); 110 #endif 111 if (PageKsm(page)) 112 type = "ksm "; 113 else if (PageAnon(page)) 114 type = "anon "; 115 else if (mapping) 116 dump_mapping(mapping); 117 BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1); 118 119 pr_warn("%sflags: %pGp%s\n", type, &head->flags, 120 page_cma ? " CMA" : ""); 121 print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32, 122 sizeof(unsigned long), page, 123 sizeof(struct page), false); 124 if (head != page) 125 print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32, 126 sizeof(unsigned long), head, 127 sizeof(struct page), false); 128 } 129 130 void dump_page(struct page *page, const char *reason) 131 { 132 if (PagePoisoned(page)) 133 pr_warn("page:%p is uninitialized and poisoned", page); 134 else 135 __dump_page(page); 136 if (reason) 137 pr_warn("page dumped because: %s\n", reason); 138 dump_page_owner(page); 139 } 140 EXPORT_SYMBOL(dump_page); 141 142 #ifdef CONFIG_DEBUG_VM 143 144 void dump_vma(const struct vm_area_struct *vma) 145 { 146 pr_emerg("vma %px start %px end %px\n" 147 "next %px prev %px mm %px\n" 148 "prot %lx anon_vma %px vm_ops %px\n" 149 "pgoff %lx file %px private_data %px\n" 150 "flags: %#lx(%pGv)\n", 151 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, 152 vma->vm_prev, vma->vm_mm, 153 (unsigned long)pgprot_val(vma->vm_page_prot), 154 vma->anon_vma, vma->vm_ops, vma->vm_pgoff, 155 vma->vm_file, vma->vm_private_data, 156 vma->vm_flags, &vma->vm_flags); 157 } 158 EXPORT_SYMBOL(dump_vma); 159 160 void dump_mm(const struct mm_struct *mm) 161 { 162 pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n" 163 #ifdef CONFIG_MMU 164 "get_unmapped_area %px\n" 165 #endif 166 "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" 167 "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" 168 "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" 169 "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n" 170 "start_code %lx end_code %lx start_data %lx end_data %lx\n" 171 "start_brk %lx brk %lx start_stack %lx\n" 172 "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" 173 "binfmt %px flags %lx\n" 174 #ifdef CONFIG_AIO 175 "ioctx_table %px\n" 176 #endif 177 #ifdef CONFIG_MEMCG 178 "owner %px " 179 #endif 180 "exe_file %px\n" 181 #ifdef CONFIG_MMU_NOTIFIER 182 "notifier_subscriptions %px\n" 183 #endif 184 #ifdef CONFIG_NUMA_BALANCING 185 "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" 186 #endif 187 "tlb_flush_pending %d\n" 188 "def_flags: %#lx(%pGv)\n", 189 190 mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size, 191 #ifdef CONFIG_MMU 192 mm->get_unmapped_area, 193 #endif 194 mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, 195 mm->pgd, atomic_read(&mm->mm_users), 196 atomic_read(&mm->mm_count), 197 mm_pgtables_bytes(mm), 198 mm->map_count, 199 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, 200 (u64)atomic64_read(&mm->pinned_vm), 201 mm->data_vm, mm->exec_vm, mm->stack_vm, 202 mm->start_code, mm->end_code, mm->start_data, mm->end_data, 203 mm->start_brk, mm->brk, mm->start_stack, 204 mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, 205 mm->binfmt, mm->flags, 206 #ifdef CONFIG_AIO 207 mm->ioctx_table, 208 #endif 209 #ifdef CONFIG_MEMCG 210 mm->owner, 211 #endif 212 mm->exe_file, 213 #ifdef CONFIG_MMU_NOTIFIER 214 mm->notifier_subscriptions, 215 #endif 216 #ifdef CONFIG_NUMA_BALANCING 217 mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, 218 #endif 219 atomic_read(&mm->tlb_flush_pending), 220 mm->def_flags, &mm->def_flags 221 ); 222 } 223 224 static bool page_init_poisoning __read_mostly = true; 225 226 static int __init setup_vm_debug(char *str) 227 { 228 bool __page_init_poisoning = true; 229 230 /* 231 * Calling vm_debug with no arguments is equivalent to requesting 232 * to enable all debugging options we can control. 233 */ 234 if (*str++ != '=' || !*str) 235 goto out; 236 237 __page_init_poisoning = false; 238 if (*str == '-') 239 goto out; 240 241 while (*str) { 242 switch (tolower(*str)) { 243 case'p': 244 __page_init_poisoning = true; 245 break; 246 default: 247 pr_err("vm_debug option '%c' unknown. skipped\n", 248 *str); 249 } 250 251 str++; 252 } 253 out: 254 if (page_init_poisoning && !__page_init_poisoning) 255 pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n"); 256 257 page_init_poisoning = __page_init_poisoning; 258 259 return 1; 260 } 261 __setup("vm_debug", setup_vm_debug); 262 263 void page_init_poison(struct page *page, size_t size) 264 { 265 if (page_init_poisoning) 266 memset(page, PAGE_POISON_PATTERN, size); 267 } 268 EXPORT_SYMBOL_GPL(page_init_poison); 269 #endif /* CONFIG_DEBUG_VM */ 270