// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"

const char *migrate_reason_names[MR_TYPES] = {
	"compaction",
	"memory_failure",
	"memory_hotplug",
	"syscall_or_cpuset",
	"mempolicy_mbind",
	"numa_misplaced",
	"cma",
};

const struct trace_print_flags pageflag_names[] = {
	__def_pageflag_names,
	{0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
	__def_gfpflag_names,
	{0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
	__def_vmaflag_names,
	{0, NULL}
};
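/*
 * __dump_page() - dump the state of one struct page for debugging.
 *
 * Best effort only: the page may be corrupted, so everything reached
 * through page->mapping (host inode, first aliasing dentry) is probed
 * with copy_from_kernel_nofault() rather than dereferenced directly.
 * The usual entry point is dump_page() below; an illustrative caller
 * (a sketch, not taken from this file) would be:
 *
 *	if (WARN_ON(page_count(page) == 0))
 *		dump_page(page, "zero refcount");
 */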
void __dump_page(struct page *page, const char *reason)
{
	struct page *head = compound_head(page);
	struct address_space *mapping;
	bool page_poisoned = PagePoisoned(page);
	bool compound = PageCompound(page);
	/*
	 * Accessing the pageblock without the zone lock. It could change to
	 * "isolate" again in the meantime, but since we are just dumping the
	 * state for debugging, it should be fine to accept a bit of
	 * inaccuracy here due to racing.
	 */
	bool page_cma = is_migrate_cma_page(page);
	int mapcount;
	char *type = "";

	/*
	 * If struct page is poisoned don't access Page*() functions as that
	 * leads to recursive loop. Page*() check for poisoned pages, and call
	 * dump_page() when detected.
	 */
	if (page_poisoned) {
		pr_warn("page:%px is uninitialized and poisoned\n", page);
		goto hex_only;
	}

	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
		/* Corrupt page, cannot call page_mapping */
		mapping = page->mapping;
		head = page;
		compound = false;
	} else {
		mapping = page_mapping(page);
	}

	/*
	 * Avoid VM_BUG_ON() in page_mapcount().
	 * page->_mapcount space in struct page is used by sl[aou]b pages to
	 * encode their own info.
	 */
	mapcount = PageSlab(head) ? 0 : page_mapcount(page);

	if (compound) {
		if (hpage_pincount_available(page))
			pr_warn("page:%px refcount:%d mapcount:%d mapping:%p "
				"index:%#lx head:%px order:%u "
				"compound_mapcount:%d compound_pincount:%d\n",
				page, page_ref_count(head), mapcount,
				mapping, page_to_pgoff(page), head,
				compound_order(head), compound_mapcount(page),
				compound_pincount(page));
		else
			pr_warn("page:%px refcount:%d mapcount:%d mapping:%p "
				"index:%#lx head:%px order:%u "
				"compound_mapcount:%d\n",
				page, page_ref_count(head), mapcount,
				mapping, page_to_pgoff(page), head,
				compound_order(head), compound_mapcount(page));
	} else {
		pr_warn("page:%px refcount:%d mapcount:%d mapping:%p index:%#lx\n",
			page, page_ref_count(page), mapcount,
			mapping, page_to_pgoff(page));
	}

	if (PageKsm(page))
		type = "ksm ";
	else if (PageAnon(page))
		type = "anon ";
	else if (mapping) {
		const struct inode *host;
		const struct address_space_operations *a_ops;
		const struct hlist_node *dentry_first;
		const struct dentry *dentry_ptr;
		struct dentry dentry;

		/*
		 * mapping can be an invalid pointer and we don't want to crash
		 * accessing it, so probe everything depending on it carefully.
		 */
		if (copy_from_kernel_nofault(&host, &mapping->host,
					sizeof(struct inode *)) ||
		    copy_from_kernel_nofault(&a_ops, &mapping->a_ops,
				sizeof(struct address_space_operations *))) {
			pr_warn("failed to read mapping->host or a_ops, mapping not a valid kernel address?\n");
			goto out_mapping;
		}

		if (!host) {
			pr_warn("mapping->a_ops:%ps\n", a_ops);
			goto out_mapping;
		}

		if (copy_from_kernel_nofault(&dentry_first,
			&host->i_dentry.first, sizeof(struct hlist_node *))) {
			pr_warn("mapping->a_ops:%ps with invalid mapping->host inode address %px\n",
				a_ops, host);
			goto out_mapping;
		}

		if (!dentry_first) {
			pr_warn("mapping->a_ops:%ps\n", a_ops);
			goto out_mapping;
		}

		dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
		if (copy_from_kernel_nofault(&dentry, dentry_ptr,
						sizeof(struct dentry))) {
			pr_warn("mapping->a_ops:%ps with invalid mapping->host->i_dentry.first %px\n",
				a_ops, dentry_ptr);
		} else {
			/*
			 * if dentry is corrupted, the %pd handler may still
			 * crash, but it's unlikely that we reach here with a
			 * corrupted struct page
			 */
			pr_warn("mapping->a_ops:%ps dentry name:\"%pd\"\n",
				a_ops, &dentry);
		}
	}
out_mapping:
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

	pr_warn("%sflags: %#lx(%pGp)%s\n", type, page->flags, &page->flags,
		page_cma ? " CMA" : "");

hex_only:
	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
			sizeof(unsigned long), page,
			sizeof(struct page), false);
	if (head != page)
		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
				sizeof(unsigned long), head,
				sizeof(struct page), false);

	if (reason)
		pr_warn("page dumped because: %s\n", reason);

#ifdef CONFIG_MEMCG
	if (!page_poisoned && page->mem_cgroup)
		pr_warn("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}

void dump_page(struct page *page, const char *reason)
{
	__dump_page(page, reason);
	dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %px start %px end %px\n"
		"next %px prev %px mm %px\n"
		"prot %lx anon_vma %px vm_ops %px\n"
		"pgoff %lx file %px private_data %px\n"
		"flags: %#lx(%pGv)\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data,
		vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %px\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %px flags %lx core_state %px\n"
#ifdef CONFIG_AIO
		"ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %px "
#endif
		"exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
		"notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
		"tlb_flush_pending %d\n"
		"def_flags: %#lx(%pGv)\n",

		mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		mm_pgtables_bytes(mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		(u64)atomic64_read(&mm->pinned_vm),
		mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
		atomic_read(&mm->tlb_flush_pending),
		mm->def_flags, &mm->def_flags
	);
}
static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
	bool __page_init_poisoning = true;

	/*
	 * Calling vm_debug with no arguments is equivalent to requesting
	 * to enable all debugging options we can control.
	 */
	if (*str++ != '=' || !*str)
		goto out;

	__page_init_poisoning = false;
	if (*str == '-')
		goto out;

	while (*str) {
		switch (tolower(*str)) {
		case 'p':
			__page_init_poisoning = true;
			break;
		default:
			pr_err("vm_debug option '%c' unknown. skipped\n",
			       *str);
		}

		str++;
	}
out:
	if (page_init_poisoning && !__page_init_poisoning)
		pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

	page_init_poisoning = __page_init_poisoning;

	return 1;
}
__setup("vm_debug", setup_vm_debug);

void page_init_poison(struct page *page, size_t size)
{
	if (page_init_poisoning)
		memset(page, PAGE_POISON_PATTERN, size);
}
EXPORT_SYMBOL_GPL(page_init_poison);
#endif /* CONFIG_DEBUG_VM */
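/*
 * page_init_poison() usage sketch (illustrative; memory hotplug is one
 * real caller): poison a freshly added memmap range so that any access
 * before the struct pages are initialized trips the PagePoisoned()
 * check and ends up in __dump_page() above:
 *
 *	page_init_poison(memmap, sizeof(struct page) * nr_pages);
 */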