/*
 * Copyright 2016, Rashmica Gupta, IBM Corp.
 *
 * This traverses the kernel pagetables and dumps the
 * information about the used sections of memory to
 * /sys/kernel/debug/kernel_page_tables.
 *
 * Derived from the arm64 implementation:
 * Copyright (c) 2014, The Linux Foundation, Laura Abbott.
 * (C) Copyright 2008 Intel Corporation, Arjan van de Ven.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <linux/const.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

#include "ptdump.h"

#ifdef CONFIG_PPC32
#define KERN_VIRT_START PAGE_OFFSET
#endif

/*
 * To visualise what is happening,
 *
 *  - PTRS_PER_P** = how many entries there are in the corresponding P**
 *  - P**_SHIFT = how many bits of the address we use to index into the
 *    corresponding P**
 *  - P**_SIZE is how much memory we can access through the table - not the
 *    size of the table itself.
 *  P**={PGD, PUD, PMD, PTE}
 *
 *
 * Each entry of the PGD points to a PUD. Each entry of a PUD points to a
 * PMD. Each entry of a PMD points to a PTE. And every PTE entry points to
 * a page.
 *
 * In the case where there are only 3 levels, the PUD is folded into the
 * PGD: every PUD has only one entry which points to the PMD.
 *
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the PTE entries. When the continuity is broken it then
 * dumps out a description of the range - ie PTEs that are virtually contiguous
 * with the same PTE flags are chunked together. This is to make it clear how
 * different areas of the kernel virtual memory are used.
 *
 */
struct pg_state {
        struct seq_file *seq;
        const struct addr_marker *marker;
        unsigned long start_address;
        unsigned long start_pa;
        unsigned long last_pa;
        unsigned int level;
        u64 current_flags;
        bool check_wx;
        unsigned long wx_pages;
};

struct addr_marker {
        unsigned long start_address;
        const char *name;
};

static struct addr_marker address_markers[] = {
        { 0, "Start of kernel VM" },
        { 0, "vmalloc() Area" },
        { 0, "vmalloc() End" },
#ifdef CONFIG_PPC64
        { 0, "isa I/O start" },
        { 0, "isa I/O end" },
        { 0, "phb I/O start" },
        { 0, "phb I/O end" },
        { 0, "I/O remap start" },
        { 0, "I/O remap end" },
        { 0, "vmemmap start" },
#else
        { 0, "Early I/O remap start" },
        { 0, "Early I/O remap end" },
#ifdef CONFIG_NOT_COHERENT_CACHE
        { 0, "Consistent mem start" },
        { 0, "Consistent mem end" },
#endif
#ifdef CONFIG_HIGHMEM
        { 0, "Highmem PTEs start" },
        { 0, "Highmem PTEs end" },
#endif
        { 0, "Fixmap start" },
        { 0, "Fixmap end" },
#endif
#ifdef CONFIG_KASAN
        { 0, "kasan shadow mem start" },
        { 0, "kasan shadow mem end" },
#endif
        { -1, NULL },
};

#define pt_dump_seq_printf(m, fmt, args...)     \
({                                              \
        if (m)                                  \
                seq_printf(m, fmt, ##args);     \
})

#define pt_dump_seq_putc(m, c)                  \
({                                              \
        if (m)                                  \
                seq_putc(m, c);                 \
})

static void dump_flag_info(struct pg_state *st, const struct flag_info
                *flag, u64 pte, int num)
{
        unsigned int i;

        for (i = 0; i < num; i++, flag++) {
                const char *s = NULL;
                u64 val;

                /* flag not defined so don't check it */
                if (flag->mask == 0)
                        continue;
                /* Some 'flags' are actually values */
                if (flag->is_val) {
                        val = pte & flag->val;
                        if (flag->shift)
                                val = val >> flag->shift;
                        pt_dump_seq_printf(st->seq, " %s:%llx", flag->set, val);
                } else {
                        if ((pte & flag->mask) == flag->val)
                                s = flag->set;
                        else
                                s = flag->clear;
                        if (s)
                                pt_dump_seq_printf(st->seq, " %s", s);
                }
                st->current_flags &= ~flag->mask;
        }
        if (st->current_flags != 0)
                pt_dump_seq_printf(st->seq, " unknown flags:%llx", st->current_flags);
}

static void dump_addr(struct pg_state *st, unsigned long addr)
{
        static const char units[] = "KMGTPE";
        const char *unit = units;
        unsigned long delta;

#ifdef CONFIG_PPC64
#define REG     "0x%016lx"
#else
#define REG     "0x%08lx"
#endif

        pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
        if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) {
                pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
                delta = PAGE_SIZE >> 10;
        } else {
                pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
                delta = (addr - st->start_address) >> 10;
        }
        /* Work out what appropriate unit to use */
        while (!(delta & 1023) && unit[1]) {
                delta >>= 10;
                unit++;
        }
        pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
}

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
        if (!st->check_wx)
                return;

        if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X)))
                return;

        WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
                  (void *)st->start_address, (void *)st->start_address);

        st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_page(struct pg_state *st, unsigned long addr,
                      unsigned int level, u64 val)
{
        u64 flag = val & pg_level[level].mask;
        u64 pa = val & PTE_RPN_MASK;

        /* At first no level is set */
        if (!st->level) {
                st->level = level;
                st->current_flags = flag;
                st->start_address = addr;
                st->start_pa = pa;
                st->last_pa = pa;
                pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
        /*
         * Dump the section of virtual memory when:
         *   - the PTE flags from one entry to the next differ.
         *   - we change levels in the tree.
         *   - the address is in a different section of memory and is thus
         *     used for a different purpose, regardless of the flags.
         *   - the pa of this page is not adjacent to the last inspected page
         */
        } else if (flag != st->current_flags || level != st->level ||
                   addr >= st->marker[1].start_address ||
                   (pa != st->last_pa + PAGE_SIZE &&
                    (pa != st->start_pa || st->start_pa != st->last_pa))) {

                /* Check the PTE flags */
                if (st->current_flags) {
                        note_prot_wx(st, addr);
                        dump_addr(st, addr);

                        /* Dump all the flags */
                        if (pg_level[st->level].flag)
                                dump_flag_info(st, pg_level[st->level].flag,
                                               st->current_flags,
                                               pg_level[st->level].num);

                        pt_dump_seq_putc(st->seq, '\n');
                }

                /*
                 * Address indicates we have passed the end of the
                 * current section of virtual memory
                 */
                while (addr >= st->marker[1].start_address) {
                        st->marker++;
                        pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
                }
                st->start_address = addr;
                st->start_pa = pa;
                st->last_pa = pa;
                st->current_flags = flag;
                st->level = level;
        } else {
                st->last_pa = pa;
        }
}

static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
{
        pte_t *pte = pte_offset_kernel(pmd, 0);
        unsigned long addr;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
                addr = start + i * PAGE_SIZE;
                note_page(st, addr, 4, pte_val(*pte));
        }
}

static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long addr;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
                addr = start + i * PMD_SIZE;
                if (!pmd_none(*pmd) && !pmd_huge(*pmd))
                        /* pmd exists */
                        walk_pte(st, pmd, addr);
                else
                        note_page(st, addr, 3, pmd_val(*pmd));
        }
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
        pud_t *pud = pud_offset(pgd, 0);
        unsigned long addr;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
                addr = start + i * PUD_SIZE;
                if (!pud_none(*pud) && !pud_huge(*pud))
                        /* pud exists */
                        walk_pmd(st, pud, addr);
                else
                        note_page(st, addr, 2, pud_val(*pud));
        }
}

static void walk_pagetables(struct pg_state *st)
{
        pgd_t *pgd = pgd_offset_k(0UL);
        unsigned int i;
        unsigned long addr;

        addr = st->start_address;

        /*
         * Traverse the linux pagetable structure and dump pages that are in
         * the hash pagetable.
         */
        for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
                if (!pgd_none(*pgd) && !pgd_huge(*pgd))
                        /* pgd exists */
                        walk_pud(st, pgd, addr);
                else
                        note_page(st, addr, 1, pgd_val(*pgd));
        }
}

static void populate_markers(void)
{
        int i = 0;

        address_markers[i++].start_address = PAGE_OFFSET;
        address_markers[i++].start_address = VMALLOC_START;
        address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
        address_markers[i++].start_address = ISA_IO_BASE;
        address_markers[i++].start_address = ISA_IO_END;
        address_markers[i++].start_address = PHB_IO_BASE;
        address_markers[i++].start_address = PHB_IO_END;
        address_markers[i++].start_address = IOREMAP_BASE;
        address_markers[i++].start_address = IOREMAP_END;
        /* Book3S 64 defines the vmemmap start separately (H_VMEMMAP_START) */
#ifdef CONFIG_PPC_BOOK3S_64
        address_markers[i++].start_address = H_VMEMMAP_START;
#else
        address_markers[i++].start_address = VMEMMAP_BASE;
#endif
#else /* !CONFIG_PPC64 */
        address_markers[i++].start_address = ioremap_bot;
        address_markers[i++].start_address = IOREMAP_TOP;
#ifdef CONFIG_NOT_COHERENT_CACHE
        address_markers[i++].start_address = IOREMAP_TOP;
        address_markers[i++].start_address = IOREMAP_TOP +
                                             CONFIG_CONSISTENT_SIZE;
#endif
#ifdef CONFIG_HIGHMEM
        address_markers[i++].start_address = PKMAP_BASE;
        address_markers[i++].start_address = PKMAP_ADDR(LAST_PKMAP);
#endif
        address_markers[i++].start_address = FIXADDR_START;
        address_markers[i++].start_address = FIXADDR_TOP;
#ifdef CONFIG_KASAN
        address_markers[i++].start_address = KASAN_SHADOW_START;
        address_markers[i++].start_address = KASAN_SHADOW_END;
#endif
#endif /* CONFIG_PPC64 */
}

static int ptdump_show(struct seq_file *m, void *v)
{
        struct pg_state st = {
                .seq = m,
                .marker = address_markers,
        };

        if (radix_enabled())
                st.start_address = PAGE_OFFSET;
        else
                st.start_address = KERN_VIRT_START;

        /* Traverse kernel page tables */
        walk_pagetables(&st);
        note_page(&st, 0, 0, 0);
        return 0;
}

static int ptdump_open(struct inode *inode, struct file *file)
{
        return single_open(file, ptdump_show, NULL);
}

static const struct file_operations ptdump_fops = {
        .open    = ptdump_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static void build_pgtable_complete_mask(void)
{
        unsigned int i, j;

        for (i = 0; i < ARRAY_SIZE(pg_level); i++)
                if (pg_level[i].flag)
                        for (j = 0; j < pg_level[i].num; j++)
                                pg_level[i].mask |= pg_level[i].flag[j].mask;
}

#ifdef CONFIG_PPC_DEBUG_WX
void ptdump_check_wx(void)
{
        struct pg_state st = {
                .seq = NULL,
                .marker = address_markers,
                .check_wx = true,
        };

        if (radix_enabled())
                st.start_address = PAGE_OFFSET;
        else
                st.start_address = KERN_VIRT_START;

        walk_pagetables(&st);

        if (st.wx_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
                        st.wx_pages);
        else
                pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
#endif

static int ptdump_init(void)
{
        struct dentry *debugfs_file;

        populate_markers();
        build_pgtable_complete_mask();
        debugfs_file = debugfs_create_file("kernel_page_tables", 0400, NULL,
                                           NULL, &ptdump_fops);
        return debugfs_file ? 0 : -ENOMEM;
}
device_initcall(ptdump_init);
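
/*
 * Illustrative sketch of the resulting debugfs output (the addresses, sizes
 * and flag columns below are hypothetical and vary with the platform and
 * kernel configuration):
 *
 *   # cat /sys/kernel/debug/kernel_page_tables
 *   ---[ Start of kernel VM ]---
 *   0xc000000000000000-0xc00000000fffffff  0x0000000000000000       256M ...
 *   ---[ vmalloc() Area ]---
 *   ...
 *
 * Each line describes one run of virtually contiguous entries that share the
 * same level and flags, as grouped by note_page() above. The physical address
 * is printed in square brackets when every page of a multi-page range maps
 * the same physical address; otherwise the starting physical address and the
 * size of the range are shown.
 */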