/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pci.h>

#include <asm/e820/types.h>
#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
	int level;
	pgprot_t current_prot;
	pgprotval_t effective_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
	unsigned long lines;
	bool to_dmesg;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
	unsigned long max_lines;
};

/* Address space markers hints */

#ifdef CONFIG_X86_64

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	LOW_KERNEL_NR,
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	CPU_ENTRY_AREA_NR,
#ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
	EFI_END_NR,
#endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { (1UL << 63),	"Kernel Space" },
	[LOW_KERNEL_NR]		= { 0UL,		"Low Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
#ifdef CONFIG_KASAN
	/*
	 * These fields get initialized with the (dynamic)
	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
	 */
	[KASAN_SHADOW_START_NR]	= { 0UL,		"KASAN shadow" },
	[KASAN_SHADOW_END_NR]	= { 0UL,		"KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
	[ESPFIX_START_NR]	= { ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
	[EFI_END_NR]		= { EFI_VA_END,		"EFI Runtime Services" },
#endif
	[HIGH_KERNEL_NR]	= { __START_KERNEL_map,	"High Kernel Mapping" },
	[MODULES_VADDR_NR]	= { MODULES_VADDR,	"Modules" },
	[MODULES_END_NR]	= { MODULES_END,	"End Modules" },
	[FIXADDR_START_NR]	= { FIXADDR_START,	"Fixmap Area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#define INIT_PGD	((pgd_t *) &init_top_pgt)

#else /* CONFIG_X86_64 */

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	CPU_ENTRY_AREA_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { PAGE_OFFSET,	"Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMALLOC_END_NR]	= { 0UL,		"vmalloc() End" },
#ifdef CONFIG_HIGHMEM
	[PKMAP_BASE_NR]		= { 0UL,		"Persistent kmap() Area" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { 0UL,		"CPU entry area" },
	[FIXADDR_START_NR]	= { 0UL,		"Fixmap area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#define INIT_PGD	(swapper_pg_dir)

#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)

#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

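/*
 * Like pt_dump_seq_printf(), but continues the current output line
 * (KERN_CONT) instead of starting a new one.
 */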
#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!(pr & _PAGE_PRESENT)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 4 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if ((level == 5 && pr & _PAGE_PAT) ||
		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}

/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
	int shift;
	if (!IS_ENABLED(CONFIG_X86_64))
		return u;

	shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	return (signed long)(u << shift) >> shift;
}

/*
 * Record a W+X mapping found by the walk. The PCI BIOS region is known
 * to be W+X and only gets a one-time notice; everything else is added to
 * the wx_pages count and triggers a one-time warning when NX is supported.
 */
static void note_wx(struct pg_state *st)
{
	unsigned long npages;

	npages = (st->current_address - st->start_address) / PAGE_SIZE;

#ifdef CONFIG_PCI_BIOS
	/*
	 * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
	 * Inform about it, but avoid the warning.
	 */
	if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
	    st->current_address <= PAGE_OFFSET + BIOS_END) {
		pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
		return;
	}
#endif
	/* Account the WX pages */
	st->wx_pages += npages;
	WARN_ONCE(__supported_pte_mask & _PAGE_NX,
		  "x86/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);
}

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
	pgprotval_t prot, cur, eff;
	static const char units[] = "BKMGTPE";

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = pgprot_val(new_prot);
	cur = pgprot_val(st->current_prot);
	eff = st->effective_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
		st->marker = address_markers;
		st->lines = 0;
		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
				   st->marker->name);
	} else if (prot != cur || new_eff != eff || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;

		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
			note_wx(st);

		/*
		 * Now print the actual finished series
		 */
		if (!st->marker->max_lines ||
		    st->lines < st->marker->max_lines) {
			pt_dump_seq_printf(m, st->to_dmesg,
					   "0x%0*lx-0x%0*lx   ",
					   width, st->start_address,
					   width, st->current_address);

			delta = st->current_address - st->start_address;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
					    delta, *unit);
			printk_prot(m, st->current_prot, st->level,
				    st->to_dmesg);
		}
		st->lines++;

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			if (st->marker->max_lines &&
			    st->lines > st->marker->max_lines) {
				unsigned long nskip =
					st->lines - st->marker->max_lines;
				pt_dump_seq_printf(m, st->to_dmesg,
						   "... %lu entr%s skipped ... \n",
						   nskip,
						   nskip == 1 ? "y" : "ies");
			}
			st->marker++;
			st->lines = 0;
			pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
	}
}

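/*
 * Combine the protection bits of two levels: USER and RW only apply if
 * they are set at both levels, while NX applies if it is set at either.
 */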
static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
	return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
	       ((prot1 | prot2) & _PAGE_NX);
}

/*
 * Lowest level of the walk: visit every PTE in one page table page and
 * let note_page() coalesce runs of identical protections.
 */
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pte_t *pte;
	pgprotval_t prot, eff;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		pte = pte_offset_map(&addr, st->current_address);
		prot = pte_flags(*pte);
		eff = effective_prot(eff_in, prot);
		note_page(m, st, __pgprot(prot), eff, 5);
		pte_unmap(pte);
	}
}
#ifdef CONFIG_KASAN

/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_early_shadow_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
	    (pgtable_l5_enabled() &&
		    __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
	    __pa(pt) == __pa(kasan_early_shadow_pud)) {
		pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
		note_page(m, st, __pgprot(prot), 0, 5);
		return true;
	}
	return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	return false;
}
#endif

#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pmd_t *start, *pmd_start;
	pgprotval_t prot, eff;

	pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			prot = pmd_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pmd_large(*start) || !pmd_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 4);
			} else if (!kasan_page_table(m, st, pmd_start)) {
				walk_pte_level(m, st, *start, eff,
					       P + i * PMD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 4);
		start++;
	}
}

#else
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif

#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pud_t *start, *pud_start;
	pgprotval_t prot, eff;

	pud_start = start = (pud_t *)p4d_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			prot = pud_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pud_large(*start) || !pud_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 3);
			} else if (!kasan_page_table(m, st, pud_start)) {
				walk_pmd_level(m, st, *start, eff,
					       P + i * PUD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 3);

		start++;
	}
}

#else
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif

static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	p4d_t *start, *p4d_start;
	pgprotval_t prot, eff;

	if (PTRS_PER_P4D == 1)
		return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

	p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_P4D; i++) {
		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
		if (!p4d_none(*start)) {
			prot = p4d_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (p4d_large(*start) || !p4d_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 2);
			} else if (!kasan_page_table(m, st, p4d_start)) {
				walk_pud_level(m, st, *start, eff,
					       P + i * P4D_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 2);

		start++;
	}
}

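/*
 * When 5-level paging is not enabled the p4d level is folded and a PGD
 * entry is really a P4D entry, so route the pgd_large()/pgd_none()
 * checks to the p4d helpers in that case.
 */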
#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))

static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
	/*
	 * A hole in the beginning of kernel address space reserved
	 * for a hypervisor.
	 */
	return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
		(idx < pgd_index(GUARD_HOLE_END_ADDR));
#else
	return false;
#endif
}

/*
 * Core of the dumper: walk one full set of page tables starting at the
 * PGD. A NULL @pgd means the kernel's init page tables. @checkwx makes
 * note_page() account and warn about W+X mappings; @dmesg sends the
 * output to the kernel log instead of the seq_file when a pgd is given.
 */
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
				       bool checkwx, bool dmesg)
{
	pgd_t *start = INIT_PGD;
	pgprotval_t prot, eff;
	int i;
	struct pg_state st = {};

	if (pgd) {
		start = pgd;
		st.to_dmesg = dmesg;
	}

	st.check_wx = checkwx;
	if (checkwx)
		st.wx_pages = 0;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start) && !is_hypervisor_range(i)) {
			prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
			eff = _PAGE_USER | _PAGE_RW;
#else
			eff = prot;
#endif
			if (pgd_large(*start) || !pgd_present(*start)) {
				note_page(m, &st, __pgprot(prot), eff, 1);
			} else {
				walk_p4d_level(m, &st, *start, eff,
					       i * PGD_LEVEL_MULT);
			}
		} else
			note_page(m, &st, __pgprot(0), 0, 1);

		cond_resched();
		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(m, &st, __pgprot(0), 0, 0);
	if (!checkwx)
		return;
	if (st.wx_pages)
		pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
			st.wx_pages);
	else
		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}

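/*
 * External entry points: dump a pgd to the kernel log or to the debugfs
 * seq_file (optionally its PTI user-space half), and check the kernel or
 * user page tables for W+X mappings.
 */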
void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
	ptdump_walk_pgd_level_core(m, pgd, false, true);
}

void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (user && boot_cpu_has(X86_FEATURE_PTI))
		pgd = kernel_to_user_pgdp(pgd);
#endif
	ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);

void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pgd_t *pgd = INIT_PGD;

	if (!(__supported_pte_mask & _PAGE_NX) ||
	    !boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("x86/mm: Checking user space page tables\n");
	pgd = kernel_to_user_pgdp(pgd);
	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
	ptdump_walk_pgd_level_core(NULL, NULL, true, false);
}

static int __init pt_dump_init(void)
{
	/*
	 * Various markers are not compile-time constants, so assign them
	 * here.
	 */
#ifdef CONFIG_X86_64
	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
# ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
# endif
#endif
	return 0;
}
__initcall(pt_dump_init);