1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2014, The Linux Foundation. All rights reserved. 4 * Debug helper to dump the current kernel pagetables of the system 5 * so that we can see what the various memory ranges are set to. 6 * 7 * Derived from x86 and arm implementation: 8 * (C) Copyright 2008 Intel Corporation 9 * 10 * Author: Arjan van de Ven <arjan@linux.intel.com> 11 */ 12 #include <linux/debugfs.h> 13 #include <linux/errno.h> 14 #include <linux/fs.h> 15 #include <linux/io.h> 16 #include <linux/init.h> 17 #include <linux/mm.h> 18 #include <linux/ptdump.h> 19 #include <linux/sched.h> 20 #include <linux/seq_file.h> 21 22 #include <asm/fixmap.h> 23 #include <asm/kasan.h> 24 #include <asm/memory.h> 25 #include <asm/pgtable-hwdef.h> 26 #include <asm/ptdump.h> 27 28 29 enum address_markers_idx { 30 PAGE_OFFSET_NR = 0, 31 PAGE_END_NR, 32 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 33 KASAN_START_NR, 34 #endif 35 }; 36 37 static struct addr_marker address_markers[] = { 38 { PAGE_OFFSET, "Linear Mapping start" }, 39 { 0 /* PAGE_END */, "Linear Mapping end" }, 40 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 41 { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, 42 { KASAN_SHADOW_END, "Kasan shadow end" }, 43 #endif 44 { BPF_JIT_REGION_START, "BPF start" }, 45 { BPF_JIT_REGION_END, "BPF end" }, 46 { MODULES_VADDR, "Modules start" }, 47 { MODULES_END, "Modules end" }, 48 { VMALLOC_START, "vmalloc() area" }, 49 { VMALLOC_END, "vmalloc() end" }, 50 { FIXADDR_START, "Fixmap start" }, 51 { FIXADDR_TOP, "Fixmap end" }, 52 { PCI_IO_START, "PCI I/O start" }, 53 { PCI_IO_END, "PCI I/O end" }, 54 #ifdef CONFIG_SPARSEMEM_VMEMMAP 55 { VMEMMAP_START, "vmemmap start" }, 56 { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" }, 57 #endif 58 { -1, NULL }, 59 }; 60 61 #define pt_dump_seq_printf(m, fmt, args...) \ 62 ({ \ 63 if (m) \ 64 seq_printf(m, fmt, ##args); \ 65 }) 66 67 #define pt_dump_seq_puts(m, fmt) \ 68 ({ \ 69 if (m) \ 70 seq_printf(m, fmt); \ 71 }) 72 73 /* 74 * The page dumper groups page table entries of the same type into a single 75 * description. It uses pg_state to track the range information while 76 * iterating over the pte entries. When the continuity is broken it then 77 * dumps out a description of the range. 78 */ 79 struct pg_state { 80 struct ptdump_state ptdump; 81 struct seq_file *seq; 82 const struct addr_marker *marker; 83 unsigned long start_address; 84 int level; 85 u64 current_prot; 86 bool check_wx; 87 unsigned long wx_pages; 88 unsigned long uxn_pages; 89 }; 90 91 struct prot_bits { 92 u64 mask; 93 u64 val; 94 const char *set; 95 const char *clear; 96 }; 97 98 static const struct prot_bits pte_bits[] = { 99 { 100 .mask = PTE_VALID, 101 .val = PTE_VALID, 102 .set = " ", 103 .clear = "F", 104 }, { 105 .mask = PTE_USER, 106 .val = PTE_USER, 107 .set = "USR", 108 .clear = " ", 109 }, { 110 .mask = PTE_RDONLY, 111 .val = PTE_RDONLY, 112 .set = "ro", 113 .clear = "RW", 114 }, { 115 .mask = PTE_PXN, 116 .val = PTE_PXN, 117 .set = "NX", 118 .clear = "x ", 119 }, { 120 .mask = PTE_SHARED, 121 .val = PTE_SHARED, 122 .set = "SHD", 123 .clear = " ", 124 }, { 125 .mask = PTE_AF, 126 .val = PTE_AF, 127 .set = "AF", 128 .clear = " ", 129 }, { 130 .mask = PTE_NG, 131 .val = PTE_NG, 132 .set = "NG", 133 .clear = " ", 134 }, { 135 .mask = PTE_CONT, 136 .val = PTE_CONT, 137 .set = "CON", 138 .clear = " ", 139 }, { 140 .mask = PTE_TABLE_BIT, 141 .val = PTE_TABLE_BIT, 142 .set = " ", 143 .clear = "BLK", 144 }, { 145 .mask = PTE_UXN, 146 .val = PTE_UXN, 147 .set = "UXN", 148 .clear = " ", 149 }, { 150 .mask = PTE_GP, 151 .val = PTE_GP, 152 .set = "GP", 153 .clear = " ", 154 }, { 155 .mask = PTE_ATTRINDX_MASK, 156 .val = PTE_ATTRINDX(MT_DEVICE_nGnRnE), 157 .set = "DEVICE/nGnRnE", 158 }, { 159 .mask = PTE_ATTRINDX_MASK, 160 .val = PTE_ATTRINDX(MT_DEVICE_nGnRE), 161 .set = "DEVICE/nGnRE", 162 }, { 163 .mask = PTE_ATTRINDX_MASK, 164 .val = PTE_ATTRINDX(MT_DEVICE_GRE), 165 .set = "DEVICE/GRE", 166 }, { 167 .mask = PTE_ATTRINDX_MASK, 168 .val = PTE_ATTRINDX(MT_NORMAL_NC), 169 .set = "MEM/NORMAL-NC", 170 }, { 171 .mask = PTE_ATTRINDX_MASK, 172 .val = PTE_ATTRINDX(MT_NORMAL), 173 .set = "MEM/NORMAL", 174 }, { 175 .mask = PTE_ATTRINDX_MASK, 176 .val = PTE_ATTRINDX(MT_NORMAL_TAGGED), 177 .set = "MEM/NORMAL-TAGGED", 178 } 179 }; 180 181 struct pg_level { 182 const struct prot_bits *bits; 183 const char *name; 184 size_t num; 185 u64 mask; 186 }; 187 188 static struct pg_level pg_level[] = { 189 { /* pgd */ 190 .name = "PGD", 191 .bits = pte_bits, 192 .num = ARRAY_SIZE(pte_bits), 193 }, { /* p4d */ 194 .name = "P4D", 195 .bits = pte_bits, 196 .num = ARRAY_SIZE(pte_bits), 197 }, { /* pud */ 198 .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD", 199 .bits = pte_bits, 200 .num = ARRAY_SIZE(pte_bits), 201 }, { /* pmd */ 202 .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD", 203 .bits = pte_bits, 204 .num = ARRAY_SIZE(pte_bits), 205 }, { /* pte */ 206 .name = "PTE", 207 .bits = pte_bits, 208 .num = ARRAY_SIZE(pte_bits), 209 }, 210 }; 211 212 static void dump_prot(struct pg_state *st, const struct prot_bits *bits, 213 size_t num) 214 { 215 unsigned i; 216 217 for (i = 0; i < num; i++, bits++) { 218 const char *s; 219 220 if ((st->current_prot & bits->mask) == bits->val) 221 s = bits->set; 222 else 223 s = bits->clear; 224 225 if (s) 226 pt_dump_seq_printf(st->seq, " %s", s); 227 } 228 } 229 230 static void note_prot_uxn(struct pg_state *st, unsigned long addr) 231 { 232 if (!st->check_wx) 233 return; 234 235 if ((st->current_prot & PTE_UXN) == PTE_UXN) 236 return; 237 238 WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n", 239 (void *)st->start_address, (void *)st->start_address); 240 241 st->uxn_pages += (addr - st->start_address) / PAGE_SIZE; 242 } 243 244 static void note_prot_wx(struct pg_state *st, unsigned long addr) 245 { 246 if (!st->check_wx) 247 return; 248 if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY) 249 return; 250 if ((st->current_prot & PTE_PXN) == PTE_PXN) 251 return; 252 253 WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n", 254 (void *)st->start_address, (void *)st->start_address); 255 256 st->wx_pages += (addr - st->start_address) / PAGE_SIZE; 257 } 258 259 static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, 260 u64 val) 261 { 262 struct pg_state *st = container_of(pt_st, struct pg_state, ptdump); 263 static const char units[] = "KMGTPE"; 264 u64 prot = 0; 265 266 if (level >= 0) 267 prot = val & pg_level[level].mask; 268 269 if (st->level == -1) { 270 st->level = level; 271 st->current_prot = prot; 272 st->start_address = addr; 273 pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); 274 } else if (prot != st->current_prot || level != st->level || 275 addr >= st->marker[1].start_address) { 276 const char *unit = units; 277 unsigned long delta; 278 279 if (st->current_prot) { 280 note_prot_uxn(st, addr); 281 note_prot_wx(st, addr); 282 } 283 284 pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ", 285 st->start_address, addr); 286 287 delta = (addr - st->start_address) >> 10; 288 while (!(delta & 1023) && unit[1]) { 289 delta >>= 10; 290 unit++; 291 } 292 pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit, 293 pg_level[st->level].name); 294 if (st->current_prot && pg_level[st->level].bits) 295 dump_prot(st, pg_level[st->level].bits, 296 pg_level[st->level].num); 297 pt_dump_seq_puts(st->seq, "\n"); 298 299 if (addr >= st->marker[1].start_address) { 300 st->marker++; 301 pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); 302 } 303 304 st->start_address = addr; 305 st->current_prot = prot; 306 st->level = level; 307 } 308 309 if (addr >= st->marker[1].start_address) { 310 st->marker++; 311 pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); 312 } 313 314 } 315 316 void ptdump_walk(struct seq_file *s, struct ptdump_info *info) 317 { 318 unsigned long end = ~0UL; 319 struct pg_state st; 320 321 if (info->base_addr < TASK_SIZE_64) 322 end = TASK_SIZE_64; 323 324 st = (struct pg_state){ 325 .seq = s, 326 .marker = info->markers, 327 .ptdump = { 328 .note_page = note_page, 329 .range = (struct ptdump_range[]){ 330 {info->base_addr, end}, 331 {0, 0} 332 } 333 } 334 }; 335 336 ptdump_walk_pgd(&st.ptdump, info->mm, NULL); 337 } 338 339 static void ptdump_initialize(void) 340 { 341 unsigned i, j; 342 343 for (i = 0; i < ARRAY_SIZE(pg_level); i++) 344 if (pg_level[i].bits) 345 for (j = 0; j < pg_level[i].num; j++) 346 pg_level[i].mask |= pg_level[i].bits[j].mask; 347 } 348 349 static struct ptdump_info kernel_ptdump_info = { 350 .mm = &init_mm, 351 .markers = address_markers, 352 .base_addr = PAGE_OFFSET, 353 }; 354 355 void ptdump_check_wx(void) 356 { 357 struct pg_state st = { 358 .seq = NULL, 359 .marker = (struct addr_marker[]) { 360 { 0, NULL}, 361 { -1, NULL}, 362 }, 363 .level = -1, 364 .check_wx = true, 365 .ptdump = { 366 .note_page = note_page, 367 .range = (struct ptdump_range[]) { 368 {PAGE_OFFSET, ~0UL}, 369 {0, 0} 370 } 371 } 372 }; 373 374 ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); 375 376 if (st.wx_pages || st.uxn_pages) 377 pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", 378 st.wx_pages, st.uxn_pages); 379 else 380 pr_info("Checked W+X mappings: passed, no W+X pages found\n"); 381 } 382 383 static int ptdump_init(void) 384 { 385 address_markers[PAGE_END_NR].start_address = PAGE_END; 386 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 387 address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START; 388 #endif 389 ptdump_initialize(); 390 ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); 391 return 0; 392 } 393 device_initcall(ptdump_init); 394