/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pci.h>

#include <asm/e820/types.h>
#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
        int level;                      /* page-table level of the current run */
        pgprot_t current_prot;          /* protections of the run being accumulated */
        pgprotval_t effective_prot;     /* protections combined across all levels */
        unsigned long start_address;    /* first address of the current run */
        unsigned long current_address;  /* address currently being visited */
        const struct addr_marker *marker; /* address-space region being dumped */
        unsigned long lines;            /* lines printed for the current marker */
        bool to_dmesg;                  /* print via printk() instead of seq_file */
        bool check_wx;                  /* report pages that are both W and X */
        unsigned long wx_pages;         /* number of W+X pages found so far */
};

struct addr_marker {
        unsigned long start_address;    /* first address of the region */
        const char *name;               /* label for the "---[ ]---" banner */
        unsigned long max_lines;        /* 0 = unlimited; else clamp output */
};
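
/*
 * Illustrative sketch (not verbatim output): the walker coalesces runs
 * of identically-mapped pages into one line, and prints a banner each
 * time an addr_marker boundary is crossed, e.g.:
 *
 *   ---[ High Kernel Mapping ]---
 *   0xffffffff81000000-0xffffffff81a00000   10M  ro  PSE  GLB  x  pmd
 *
 * The exact column layout depends on the flags emitted by printk_prot().
 */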

/* Address space marker hints */

#ifdef CONFIG_X86_64

enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        LOW_KERNEL_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
        LDT_NR,
#endif
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
        KASAN_SHADOW_START_NR,
        KASAN_SHADOW_END_NR,
#endif
        CPU_ENTRY_AREA_NR,
#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
        LDT_NR,
#endif
#ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
        EFI_END_NR,
#endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                  "User Space" },
        [KERNEL_SPACE_NR]       = { (1UL << 63),        "Kernel Space" },
        [LOW_KERNEL_NR]         = { 0UL,                "Low Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMEMMAP_START_NR]      = { 0UL,                "Vmemmap" },
#ifdef CONFIG_KASAN
        /*
         * These fields get initialized with the (dynamic)
         * KASAN_SHADOW_{START,END} values in pt_dump_init().
         */
        [KASAN_SHADOW_START_NR] = { 0UL,                "KASAN shadow" },
        [KASAN_SHADOW_END_NR]   = { 0UL,                "KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        [LDT_NR]                = { 0UL,                "LDT remap" },
#endif
        [CPU_ENTRY_AREA_NR]     = { CPU_ENTRY_AREA_BASE, "CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
        [ESPFIX_START_NR]       = { ESPFIX_BASE_ADDR,   "ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
        [EFI_END_NR]            = { EFI_VA_END,         "EFI Runtime Services" },
#endif
        [HIGH_KERNEL_NR]        = { __START_KERNEL_map, "High Kernel Mapping" },
        [MODULES_VADDR_NR]      = { MODULES_VADDR,      "Modules" },
        [MODULES_END_NR]        = { MODULES_END,        "End Modules" },
        [FIXADDR_START_NR]      = { FIXADDR_START,      "Fixmap Area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
};

#define INIT_PGD        ((pgd_t *) &init_top_pgt)

#else /* CONFIG_X86_64 */

enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        VMALLOC_START_NR,
        VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
        PKMAP_BASE_NR,
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        LDT_NR,
#endif
        CPU_ENTRY_AREA_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                  "User Space" },
        [KERNEL_SPACE_NR]       = { PAGE_OFFSET,        "Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMALLOC_END_NR]        = { 0UL,                "vmalloc() End" },
#ifdef CONFIG_HIGHMEM
        [PKMAP_BASE_NR]         = { 0UL,                "Persistent kmap() Area" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        [LDT_NR]                = { 0UL,                "LDT remap" },
#endif
        [CPU_ENTRY_AREA_NR]     = { 0UL,                "CPU entry area" },
        [FIXADDR_START_NR]      = { 0UL,                "Fixmap area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
};

#define INIT_PGD        (swapper_pg_dir)

#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
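
/*
 * Worked example, assuming x86_64 with 4K pages and 512 entries per
 * table: PTE_LEVEL_MULT = 4K, PMD_LEVEL_MULT = 512 * 4K = 2M,
 * PUD_LEVEL_MULT = 512 * 2M = 1G, P4D_LEVEL_MULT = 512 * 1G = 512G.
 * With 4-level paging the p4d is folded (PTRS_PER_P4D == 1), so
 * PGD_LEVEL_MULT = 512G; with 5-level paging it is 512 * 512G = 256T.
 */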

#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)           \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_INFO fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)          \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_CONT fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})

/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
        pgprotval_t pr = pgprot_val(prot);
        static const char * const level_name[] =
                { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

        if (!(pr & _PAGE_PRESENT)) {
                /* Not present */
                pt_dump_cont_printf(m, dmsg, "                              ");
        } else {
                if (pr & _PAGE_USER)
                        pt_dump_cont_printf(m, dmsg, "USR ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_RW)
                        pt_dump_cont_printf(m, dmsg, "RW ");
                else
                        pt_dump_cont_printf(m, dmsg, "ro ");
                if (pr & _PAGE_PWT)
                        pt_dump_cont_printf(m, dmsg, "PWT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_PCD)
                        pt_dump_cont_printf(m, dmsg, "PCD ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");

                /* Bit 7 has a different meaning on level 3 vs 4 */
                if (level <= 4 && pr & _PAGE_PSE)
                        pt_dump_cont_printf(m, dmsg, "PSE ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if ((level == 5 && pr & _PAGE_PAT) ||
                    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
                        pt_dump_cont_printf(m, dmsg, "PAT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_GLOBAL)
                        pt_dump_cont_printf(m, dmsg, "GLB ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_NX)
                        pt_dump_cont_printf(m, dmsg, "NX ");
                else
                        pt_dump_cont_printf(m, dmsg, "x  ");
        }
        pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}

/*
 * On 64 bits, sign-extend the 48 bit address (57 bit with 5-level
 * paging) to a canonical 64 bit address
 */
static unsigned long normalize_addr(unsigned long u)
{
        int shift;

        if (!IS_ENABLED(CONFIG_X86_64))
                return u;

        shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
        return (signed long)(u << shift) >> shift;
}
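
/*
 * Example, assuming 4-level paging (__VIRTUAL_MASK_SHIFT == 47, so
 * shift == 16):
 *
 *   normalize_addr(0x0000800000000000UL) == 0xffff800000000000UL
 *
 * i.e. the first index past the canonical low half maps to the start
 * of the negative (kernel) half of the address space, which is what
 * the walkers rely on when computing i * PGD_LEVEL_MULT below.
 */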

static void note_wx(struct pg_state *st)
{
        unsigned long npages;

        npages = (st->current_address - st->start_address) / PAGE_SIZE;

#ifdef CONFIG_PCI_BIOS
        /*
         * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
         * Inform about it, but avoid the warning.
         */
        if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
            st->current_address <= PAGE_OFFSET + BIOS_END) {
                pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
                return;
        }
#endif
        /* Account the WX pages */
        st->wx_pages += npages;
        WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
                  (void *)st->start_address);
}

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
                      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
        pgprotval_t prot, cur, eff;
        static const char units[] = "BKMGTPE";

        /*
         * If we have a "break" in the series, we need to flush the state
         * that we have now. A "break" is a change of permissions or level,
         * or crossing an address space marker.
         */
        prot = pgprot_val(new_prot);
        cur = pgprot_val(st->current_prot);
        eff = st->effective_prot;

        if (!st->level) {
                /* First entry */
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
                st->marker = address_markers;
                st->lines = 0;
                pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                   st->marker->name);
        } else if (prot != cur || new_eff != eff || level != st->level ||
                   st->current_address >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;
                int width = sizeof(unsigned long) * 2;

                if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
                        note_wx(st);

                /*
                 * Now print the actual finished series
                 */
                if (!st->marker->max_lines ||
                    st->lines < st->marker->max_lines) {
                        pt_dump_seq_printf(m, st->to_dmesg,
                                           "0x%0*lx-0x%0*lx   ",
                                           width, st->start_address,
                                           width, st->current_address);

                        delta = st->current_address - st->start_address;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
                                            delta, *unit);
                        printk_prot(m, st->current_prot, st->level,
                                    st->to_dmesg);
                }
                st->lines++;

                /*
                 * We print markers for special areas of address space,
                 * such as the start of vmalloc space etc.
                 * This helps in the interpretation.
                 */
                if (st->current_address >= st->marker[1].start_address) {
                        if (st->marker->max_lines &&
                            st->lines > st->marker->max_lines) {
                                unsigned long nskip =
                                        st->lines - st->marker->max_lines;
                                pt_dump_seq_printf(m, st->to_dmesg,
                                                   "... %lu entr%s skipped ... \n",
                                                   nskip,
                                                   nskip == 1 ? "y" : "ies");
                        }
                        st->marker++;
                        st->lines = 0;
                        pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                           st->marker->name);
                }

                st->start_address = st->current_address;
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
        }
}

static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
        return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
               ((prot1 | prot2) & _PAGE_NX);
}
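
/*
 * Example: effective_prot() ANDs the grant bits and ORs the deny bit.
 * If the upper level grants USER|RW but the lower entry is read-only
 * with NX set, the combined result keeps only USER, plus NX:
 *
 *   effective_prot(_PAGE_USER | _PAGE_RW, _PAGE_USER | _PAGE_NX)
 *           == _PAGE_USER | _PAGE_NX
 *
 * so a range only counts as W+X if every level grants RW and no level
 * sets NX, which is exactly what the check in note_page() tests.
 */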

static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pte_t *pte;
        pgprotval_t prot, eff;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
                pte = pte_offset_map(&addr, st->current_address);
                prot = pte_flags(*pte);
                eff = effective_prot(eff_in, prot);
                note_page(m, st, __pgprot(prot), eff, 5);
                pte_unmap(pte);
        }
}

#ifdef CONFIG_KASAN

/*
 * This is an optimization for the KASAN=y case. Since all KASAN page
 * tables eventually point to the kasan_zero_page, we could call
 * note_page() right away without walking through the lower level page
 * tables. This saves us dozens of seconds (minutes for a 5-level
 * config) while checking for W+X mappings or reading the
 * kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                    void *pt)
{
        if (__pa(pt) == __pa(kasan_zero_pmd) ||
            (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
            __pa(pt) == __pa(kasan_zero_pud)) {
                pgprotval_t prot = pte_flags(kasan_zero_pte[0]);

                note_page(m, st, __pgprot(prot), 0, 5);
                return true;
        }
        return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                    void *pt)
{
        return false;
}
#endif

#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pmd_t *start, *pmd_start;
        pgprotval_t prot, eff;

        pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
                if (!pmd_none(*start)) {
                        prot = pmd_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pmd_large(*start) || !pmd_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 4);
                        } else if (!kasan_page_table(m, st, pmd_start)) {
                                walk_pte_level(m, st, *start, eff,
                                               P + i * PMD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 4);
                start++;
        }
}

#else
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif

#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pud_t *start, *pud_start;
        pgprotval_t prot, eff;

        pud_start = start = (pud_t *)p4d_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_PUD; i++) {
                st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
                if (!pud_none(*start)) {
                        prot = pud_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pud_large(*start) || !pud_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 3);
                        } else if (!kasan_page_table(m, st, pud_start)) {
                                walk_pmd_level(m, st, *start, eff,
                                               P + i * PUD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 3);

                start++;
        }
}

#else
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif

static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        p4d_t *start, *p4d_start;
        pgprotval_t prot, eff;

        if (PTRS_PER_P4D == 1)
                return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

        p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_P4D; i++) {
                st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
                if (!p4d_none(*start)) {
                        prot = p4d_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (p4d_large(*start) || !p4d_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 2);
                        } else if (!kasan_page_table(m, st, p4d_start)) {
                                walk_pud_level(m, st, *start, eff,
                                               P + i * P4D_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 2);

                start++;
        }
}
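
/*
 * The walk descends PGD -> P4D -> PUD -> PMD -> PTE, and note_page()
 * is invoked at whichever level a mapping terminates: a large page, a
 * non-present entry, or a 4K PTE. When a level is folded at compile
 * time (PTRS_PER_* == 1), the corresponding walk_*_level() collapses
 * into the next level via the #define shims in the #else branches
 * above, and the pgd_large()/pgd_none() wrappers just below handle
 * the boot-time choice between 4- and 5-level paging.
 */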
#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))

static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
        /*
         * ffff800000000000 - ffff87ffffffffff is reserved for
         * the hypervisor.
         */
        return  (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
                (idx <  pgd_index(__PAGE_OFFSET));
#else
        return false;
#endif
}

static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                                       bool checkwx, bool dmesg)
{
        pgd_t *start = INIT_PGD;
        pgprotval_t prot, eff;
        int i;
        struct pg_state st = {};

        if (pgd) {
                start = pgd;
                st.to_dmesg = dmesg;
        }

        st.check_wx = checkwx;
        if (checkwx)
                st.wx_pages = 0;

        for (i = 0; i < PTRS_PER_PGD; i++) {
                st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
                if (!pgd_none(*start) && !is_hypervisor_range(i)) {
                        prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
                        eff = _PAGE_USER | _PAGE_RW;
#else
                        eff = prot;
#endif
                        if (pgd_large(*start) || !pgd_present(*start)) {
                                note_page(m, &st, __pgprot(prot), eff, 1);
                        } else {
                                walk_p4d_level(m, &st, *start, eff,
                                               i * PGD_LEVEL_MULT);
                        }
                } else
                        note_page(m, &st, __pgprot(0), 0, 1);

                cond_resched();
                start++;
        }

        /* Flush out the last page */
        st.current_address = normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT);
        note_page(m, &st, __pgprot(0), 0, 0);
        if (!checkwx)
                return;
        if (st.wx_pages)
                pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
                        st.wx_pages);
        else
                pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
        ptdump_walk_pgd_level_core(m, pgd, false, true);
}

void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        if (user && static_cpu_has(X86_FEATURE_PTI))
                pgd = kernel_to_user_pgdp(pgd);
#endif
        ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);

void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        pgd_t *pgd = INIT_PGD;

        if (!(__supported_pte_mask & _PAGE_NX) ||
            !static_cpu_has(X86_FEATURE_PTI))
                return;

        pr_info("x86/mm: Checking user space page tables\n");
        pgd = kernel_to_user_pgdp(pgd);
        ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
        ptdump_walk_pgd_level_core(NULL, NULL, true, false);
}
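
/*
 * Sketch of how these entry points are typically wired up elsewhere
 * (an assumption about the callers, not defined in this file):
 * ptdump_walk_pgd_level_debugfs() backs the kernel_page_tables debugfs
 * interface mentioned above, while the checkwx variants are meant to
 * run once at boot (e.g. from mark_rodata_ro() when CONFIG_DEBUG_WX is
 * enabled) to verify that no page is both writable and executable.
 */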
static int __init pt_dump_init(void)
{
        /*
         * Various markers are not compile-time constants, so assign them
         * here.
         */
#ifdef CONFIG_X86_64
        address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
        address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
        address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
        address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
        address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
# ifdef CONFIG_MODIFY_LDT_SYSCALL
        address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
# endif
#endif
        return 0;
}
__initcall(pt_dump_init);
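
/*
 * Note: pt_dump_init() runs as a plain initcall, so the dynamic markers
 * above are populated before the first dump can be requested. When the
 * W+X check passes, ptdump_walk_pgd_level_core() reports, verbatim:
 *
 *   x86/mm: Checked W+X mappings: passed, no W+X pages found.
 */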