// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel page tables and dumps information about the
 * used sections of memory to /sys/kernel/debug/kernel_page_tables.
 *
 * Derived from the arm64 implementation:
 * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
 * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/const.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

#include "ptdump.h"

/*
 * To visualise what is happening:
 *
 *  - PTRS_PER_P** = how many entries there are in the corresponding P**
 *  - P**_SHIFT = how many bits of the address we use to index into the
 *    corresponding P**
 *  - P**_SIZE is how much memory we can access through the table - not the
 *    size of the table itself.
 *  P** = {PGD, PUD, PMD, PTE}
 *
 * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
 * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
 * a page.
 *
 * In the case where there are only 3 levels, the PUD is folded into the
 * PGD: every PUD has only one entry, which points to the PMD.
 *
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the PTE entries. When the continuity is broken it dumps
 * out a description of the range - i.e. PTEs that are virtually contiguous
 * with the same PTE flags are chunked together. This makes it clear how
 * different areas of the kernel virtual memory are used.
 */
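/*
 * A worked example, assuming a hypothetical 64-bit layout with 4K pages
 * and 9 address bits resolved per level: PAGE_SHIFT = 12 and
 * PTRS_PER_PTE = 512, so one PTE page maps 512 * 4K = 2M of address
 * space; correspondingly PMD_SHIFT = 12 + 9 = 21 and
 * PMD_SIZE = 1UL << PMD_SHIFT = 2M, i.e. each PMD entry spans exactly one
 * PTE page's worth of memory. Actual powerpc configurations differ (e.g.
 * 64K pages), but the SHIFT/SIZE relationship is always the same.
 */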
struct pg_state {
        struct seq_file *seq;
        const struct addr_marker *marker;
        unsigned long start_address;
        unsigned long start_pa;
        unsigned long last_pa;
        unsigned int level;
        u64 current_flags;
        bool check_wx;
        unsigned long wx_pages;
};

struct addr_marker {
        unsigned long start_address;
        const char *name;
};

static struct addr_marker address_markers[] = {
        { 0, "Start of kernel VM" },
        { 0, "vmalloc() Area" },
        { 0, "vmalloc() End" },
#ifdef CONFIG_PPC64
        { 0, "isa I/O start" },
        { 0, "isa I/O end" },
        { 0, "phb I/O start" },
        { 0, "phb I/O end" },
        { 0, "I/O remap start" },
        { 0, "I/O remap end" },
        { 0, "vmemmap start" },
#else
        { 0, "Early I/O remap start" },
        { 0, "Early I/O remap end" },
#ifdef CONFIG_HIGHMEM
        { 0, "Highmem PTEs start" },
        { 0, "Highmem PTEs end" },
#endif
        { 0, "Fixmap start" },
        { 0, "Fixmap end" },
#endif
#ifdef CONFIG_KASAN
        { 0, "kasan shadow mem start" },
        { 0, "kasan shadow mem end" },
#endif
        { -1, NULL },
};

/*
 * These helpers tolerate a NULL seq_file so that the same walker can be
 * driven by ptdump_check_wx(), which wants the W+X accounting done in
 * note_page() but produces no debugfs output (it passes .seq = NULL).
 */
#define pt_dump_seq_printf(m, fmt, args...)     \
({                                              \
        if (m)                                  \
                seq_printf(m, fmt, ##args);     \
})

#define pt_dump_seq_putc(m, c)                  \
({                                              \
        if (m)                                  \
                seq_putc(m, c);                 \
})

static void dump_flag_info(struct pg_state *st, const struct flag_info *flag,
                           u64 pte, int num)
{
        unsigned int i;

        for (i = 0; i < num; i++, flag++) {
                const char *s = NULL;
                u64 val;

                /* flag not defined so don't check it */
                if (flag->mask == 0)
                        continue;
                /* Some 'flags' are actually values */
                if (flag->is_val) {
                        val = pte & flag->val;
                        if (flag->shift)
                                val = val >> flag->shift;
                        pt_dump_seq_printf(st->seq, " %s:%llx", flag->set, val);
                } else {
                        if ((pte & flag->mask) == flag->val)
                                s = flag->set;
                        else
                                s = flag->clear;
                        if (s)
                                pt_dump_seq_printf(st->seq, " %s", s);
                }
                st->current_flags &= ~flag->mask;
        }
        if (st->current_flags != 0)
                pt_dump_seq_printf(st->seq, " unknown flags:%llx", st->current_flags);
}

static void dump_addr(struct pg_state *st, unsigned long addr)
{
        static const char units[] = "KMGTPE";
        const char *unit = units;
        unsigned long delta;

#ifdef CONFIG_PPC64
#define REG "0x%016lx"
#else
#define REG "0x%08lx"
#endif

        pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
        if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) {
                pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
                delta = PAGE_SIZE >> 10;
        } else {
                pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
                delta = (addr - st->start_address) >> 10;
        }
        /* Work out the appropriate unit to use */
        while (!(delta & 1023) && unit[1]) {
                delta >>= 10;
                unit++;
        }
        pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
}

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
        if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
                return;

        if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X)))
                return;

        WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
                  (void *)st->start_address, (void *)st->start_address);

        st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
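/*
 * An illustrative (hypothetical) fragment of the resulting dump: a run of
 * PTEs with identical flags and adjacent physical pages collapses into a
 * single line, roughly of the form
 *
 *   0xc000000000000000-0xc0000000003fffff  0x0000000000000000      4M  ...
 *
 * where the trailing flag strings come from the platform's pg_level[]
 * tables, so the exact output differs between MMU families.
 */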
static void note_page(struct pg_state *st, unsigned long addr,
                      unsigned int level, u64 val)
{
        u64 flag = val & pg_level[level].mask;
        u64 pa = val & PTE_RPN_MASK;

        /* At first no level is set */
        if (!st->level) {
                st->level = level;
                st->current_flags = flag;
                st->start_address = addr;
                st->start_pa = pa;
                st->last_pa = pa;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        /*
         * Dump the section of virtual memory when:
         *   - the PTE flags from one entry to the next differ.
         *   - we change levels in the tree.
         *   - the address is in a different section of memory and is thus
         *     used for a different purpose, regardless of the flags.
         *   - the pa of this page is not adjacent to the last inspected page.
         */
        } else if (flag != st->current_flags || level != st->level ||
                   addr >= st->marker[1].start_address ||
                   (pa != st->last_pa + PAGE_SIZE &&
                    (pa != st->start_pa || st->start_pa != st->last_pa))) {

                /* Check the PTE flags */
                if (st->current_flags) {
                        note_prot_wx(st, addr);
                        dump_addr(st, addr);

                        /* Dump all the flags */
                        if (pg_level[st->level].flag)
                                dump_flag_info(st, pg_level[st->level].flag,
                                               st->current_flags,
                                               pg_level[st->level].num);

                        pt_dump_seq_putc(st->seq, '\n');
                }

                /*
                 * Address indicates we have passed the end of the
                 * current section of virtual memory
                 */
                while (addr >= st->marker[1].start_address) {
                        st->marker++;
                        pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
                }
                st->start_address = addr;
                st->start_pa = pa;
                st->last_pa = pa;
                st->current_flags = flag;
                st->level = level;
        } else {
                st->last_pa = pa;
        }
}

static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
        pte_t *pte = pte_offset_kernel(pmd, 0);
        unsigned long addr;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
                addr = start + i * PAGE_SIZE;
                note_page(st, addr, 4, pte_val(*pte));
        }
}

static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long addr;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
                addr = start + i * PMD_SIZE;
                if (!pmd_none(*pmd) && !pmd_is_leaf(*pmd))
                        /* pmd exists */
                        walk_pte(st, pmd, addr);
                else
                        note_page(st, addr, 3, pmd_val(*pmd));
        }
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
        pud_t *pud = pud_offset(pgd, 0);
        unsigned long addr;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
                addr = start + i * PUD_SIZE;
                if (!pud_none(*pud) && !pud_is_leaf(*pud))
                        /* pud exists */
                        walk_pmd(st, pud, addr);
                else
                        note_page(st, addr, 2, pud_val(*pud));
        }
}

static void walk_pagetables(struct pg_state *st)
{
        unsigned int i;
        unsigned long addr = st->start_address & PGDIR_MASK;
        pgd_t *pgd = pgd_offset_k(addr);

        /*
         * Traverse the Linux page table structure, descending into tables
         * that exist and noting every leaf entry (or empty slot) we pass.
         */
        for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
                if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
                        /* pgd exists */
                        walk_pud(st, pgd, addr);
                else
                        note_page(st, addr, 1, pgd_val(*pgd));
        }
}

static void populate_markers(void)
{
        int i = 0;

        address_markers[i++].start_address = PAGE_OFFSET;
        address_markers[i++].start_address = VMALLOC_START;
        address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
        address_markers[i++].start_address = ISA_IO_BASE;
        address_markers[i++].start_address = ISA_IO_END;
        address_markers[i++].start_address = PHB_IO_BASE;
        address_markers[i++].start_address = PHB_IO_END;
        address_markers[i++].start_address = IOREMAP_BASE;
        address_markers[i++].start_address = IOREMAP_END;
        /* What is the ifdef about? */
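        /*
         * Presumably: Book3S_64 kernels can run with either hash or radix
         * translation, and the vmemmap region begins at a different address
         * under each; H_VMEMMAP_START is the hash-MMU value, whereas other
         * 64-bit platforms have a single compile-time VMEMMAP_BASE.
         */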
#ifdef CONFIG_PPC_BOOK3S_64
        address_markers[i++].start_address = H_VMEMMAP_START;
#else
        address_markers[i++].start_address = VMEMMAP_BASE;
#endif
#else /* !CONFIG_PPC64 */
        address_markers[i++].start_address = ioremap_bot;
        address_markers[i++].start_address = IOREMAP_TOP;
#ifdef CONFIG_HIGHMEM
        address_markers[i++].start_address = PKMAP_BASE;
        address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
#endif
        address_markers[i++].start_address = FIXADDR_START;
        address_markers[i++].start_address = FIXADDR_TOP;
#ifdef CONFIG_KASAN
        address_markers[i++].start_address = KASAN_SHADOW_START;
        address_markers[i++].start_address = KASAN_SHADOW_END;
#endif
#endif /* CONFIG_PPC64 */
}

static int ptdump_show(struct seq_file *m, void *v)
{
        struct pg_state st = {
                .seq = m,
                .marker = address_markers,
                .start_address = PAGE_OFFSET,
        };

#ifdef CONFIG_PPC64
        if (!radix_enabled())
                st.start_address = KERN_VIRT_START;
#endif

        /* Traverse kernel page tables */
        walk_pagetables(&st);
        /* Flush out the last accumulated range */
        note_page(&st, 0, 0, 0);
        return 0;
}

static int ptdump_open(struct inode *inode, struct file *file)
{
        return single_open(file, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
        .open           = ptdump_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void build_pgtable_complete_mask(void)
{
        unsigned int i, j;

        for (i = 0; i < ARRAY_SIZE(pg_level); i++)
                if (pg_level[i].flag)
                        for (j = 0; j < pg_level[i].num; j++)
                                pg_level[i].mask |= pg_level[i].flag[j].mask;
}

#ifdef CONFIG_PPC_DEBUG_WX
void ptdump_check_wx(void)
{
        struct pg_state st = {
                .seq = NULL,
                .marker = address_markers,
                .check_wx = true,
                .start_address = PAGE_OFFSET,
        };

#ifdef CONFIG_PPC64
        if (!radix_enabled())
                st.start_address = KERN_VIRT_START;
#endif

        walk_pagetables(&st);

        if (st.wx_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
                        st.wx_pages);
        else
                pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
#endif

static int ptdump_init(void)
{
        struct dentry *debugfs_file;

        populate_markers();
        build_pgtable_complete_mask();
        debugfs_file = debugfs_create_file("kernel_page_tables", 0400, NULL,
                                           NULL, &ptdump_fops);
        return debugfs_file ? 0 : -ENOMEM;
}
device_initcall(ptdump_init);
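/*
 * Usage sketch, assuming the kernel was built with this dumper enabled
 * (CONFIG_PPC_PTDUMP=y) and debugfs mounted at the usual location:
 *
 *   # cat /sys/kernel/debug/kernel_page_tables
 *
 * With CONFIG_PPC_DEBUG_WX=y, ptdump_check_wx() is additionally available;
 * the architecture code typically calls it after marking rodata read-only,
 * and it reports any writable+executable mappings via the pr_warn() above.
 */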