/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pci.h>

#include <asm/e820/types.h>
#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
	int level;
	pgprot_t current_prot;
	pgprotval_t effective_prot;
	unsigned long start_address;
	unsigned long current_address;
	const struct addr_marker *marker;
	unsigned long lines;
	bool to_dmesg;
	bool check_wx;
	unsigned long wx_pages;
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
	unsigned long max_lines;
};

/* Address space markers hints */
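/*
 * The markers below are walked in order by note_page(), so they are
 * expected to stay sorted by ascending start_address.  Entries whose
 * start is not a compile-time constant are left 0 here and filled in
 * at boot by pt_dump_init().
 */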

#ifdef CONFIG_X86_64

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	LOW_KERNEL_NR,
	VMALLOC_START_NR,
	VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
	CPU_ENTRY_AREA_NR,
#ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
	EFI_END_NR,
#endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { (1UL << 63),	"Kernel Space" },
	[LOW_KERNEL_NR]		= { 0UL,		"Low Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
#ifdef CONFIG_KASAN
	/*
	 * These fields get initialized with the (dynamic)
	 * KASAN_SHADOW_{START,END} values in pt_dump_init().
	 */
	[KASAN_SHADOW_START_NR]	= { 0UL,		"KASAN shadow" },
	[KASAN_SHADOW_END_NR]	= { 0UL,		"KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
	[ESPFIX_START_NR]	= { ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
	[EFI_END_NR]		= { EFI_VA_END,		"EFI Runtime Services" },
#endif
	[HIGH_KERNEL_NR]	= { __START_KERNEL_map,	"High Kernel Mapping" },
	[MODULES_VADDR_NR]	= { MODULES_VADDR,	"Modules" },
	[MODULES_END_NR]	= { MODULES_END,	"End Modules" },
	[FIXADDR_START_NR]	= { FIXADDR_START,	"Fixmap Area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#define INIT_PGD	((pgd_t *) &init_top_pgt)

#else /* CONFIG_X86_64 */

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	LDT_NR,
#endif
	CPU_ENTRY_AREA_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { PAGE_OFFSET,	"Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMALLOC_END_NR]	= { 0UL,		"vmalloc() End" },
#ifdef CONFIG_HIGHMEM
	[PKMAP_BASE_NR]		= { 0UL,		"Persistent kmap() Area" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	[LDT_NR]		= { 0UL,		"LDT remap" },
#endif
	[CPU_ENTRY_AREA_NR]	= { 0UL,		"CPU entry area" },
	[FIXADDR_START_NR]	= { 0UL,		"Fixmap area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#define INIT_PGD	(swapper_pg_dir)

#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)

#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_INFO fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)		\
({								\
	if (to_dmesg)						\
		printk(KERN_CONT fmt, ##args);			\
	else							\
		if (m)						\
			seq_printf(m, fmt, ##args);		\
})
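
/*
 * Each dumped line covers one contiguous range with identical protections,
 * roughly like this (illustrative 64-bit output, column widths approximate):
 *
 *   ---[ High Kernel Mapping ]---
 *   0xffffffff81000000-0xffffffff81e00000      14M     ro   PSE     GLB x  pmd
 *
 * i.e. the virtual range, its size, the decoded protection bits and the
 * page table level the mapping was found at.
 */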

/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
	pgprotval_t pr = pgprot_val(prot);
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!(pr & _PAGE_PRESENT)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, "                              ");
	} else {
		if (pr & _PAGE_USER)
			pt_dump_cont_printf(m, dmsg, "USR ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_RW)
			pt_dump_cont_printf(m, dmsg, "RW ");
		else
			pt_dump_cont_printf(m, dmsg, "ro ");
		if (pr & _PAGE_PWT)
			pt_dump_cont_printf(m, dmsg, "PWT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_PCD)
			pt_dump_cont_printf(m, dmsg, "PCD ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");

		/* Bit 7 has a different meaning on level 3 vs 4 */
		if (level <= 4 && pr & _PAGE_PSE)
			pt_dump_cont_printf(m, dmsg, "PSE ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if ((level == 5 && pr & _PAGE_PAT) ||
		    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
			pt_dump_cont_printf(m, dmsg, "PAT ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_GLOBAL)
			pt_dump_cont_printf(m, dmsg, "GLB ");
		else
			pt_dump_cont_printf(m, dmsg, "    ");
		if (pr & _PAGE_NX)
			pt_dump_cont_printf(m, dmsg, "NX ");
		else
			pt_dump_cont_printf(m, dmsg, "x  ");
	}
	pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}

/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
	int shift;
	if (!IS_ENABLED(CONFIG_X86_64))
		return u;

	shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	return (signed long)(u << shift) >> shift;
}
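
/*
 * Record a just-completed range that is mapped both writable and
 * executable.  The PCI BIOS region is known to be forced W+X, so it is
 * reported once but not counted as a failure.
 */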
static void note_wx(struct pg_state *st)
{
	unsigned long npages;

	npages = (st->current_address - st->start_address) / PAGE_SIZE;

#ifdef CONFIG_PCI_BIOS
	/*
	 * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
	 * Inform about it, but avoid the warning.
	 */
	if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
	    st->current_address <= PAGE_OFFSET + BIOS_END) {
		pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
		return;
	}
#endif
	/* Account the WX pages */
	st->wx_pages += npages;
	WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);
}

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
	pgprotval_t prot, cur, eff;
	static const char units[] = "BKMGTPE";

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	prot = pgprot_val(new_prot);
	cur = pgprot_val(st->current_prot);
	eff = st->effective_prot;

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
		st->marker = address_markers;
		st->lines = 0;
		pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
				   st->marker->name);
	} else if (prot != cur || new_eff != eff || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;
		int width = sizeof(unsigned long) * 2;

		if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
			note_wx(st);

		/*
		 * Now print the actual finished series
		 */
		if (!st->marker->max_lines ||
		    st->lines < st->marker->max_lines) {
			pt_dump_seq_printf(m, st->to_dmesg,
					   "0x%0*lx-0x%0*lx   ",
					   width, st->start_address,
					   width, st->current_address);

			delta = st->current_address - st->start_address;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
					    delta, *unit);
			printk_prot(m, st->current_prot, st->level,
				    st->to_dmesg);
		}
		st->lines++;

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			if (st->marker->max_lines &&
			    st->lines > st->marker->max_lines) {
				unsigned long nskip =
					st->lines - st->marker->max_lines;
				pt_dump_seq_printf(m, st->to_dmesg,
						   "... %lu entr%s skipped ... \n",
						   nskip,
						   nskip == 1 ? "y" : "ies");
			}
			st->marker++;
			st->lines = 0;
			pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->effective_prot = new_eff;
		st->level = level;
	}
}
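
/*
 * The effective protection of a mapping folds in the levels above it:
 * USER and RW are only effective if every level grants them (AND),
 * while NX at any level makes the mapping non-executable (OR).
 */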
static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
	return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
	       ((prot1 | prot2) & _PAGE_NX);
}
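
/*
 * walk_pte_level() is the innermost step of the table walk; the pmd, pud
 * and p4d variants below follow the same pattern one level further up,
 * folding the parent's permissions into the effective protection and
 * reporting every entry (mapped, large or empty) to note_page() together
 * with the level it was found at.
 */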
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pte_t *pte;
	pgprotval_t prot, eff;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
		pte = pte_offset_map(&addr, st->current_address);
		prot = pte_flags(*pte);
		eff = effective_prot(eff_in, prot);
		note_page(m, st, __pgprot(prot), eff, 5);
		pte_unmap(pte);
	}
}

#ifdef CONFIG_KASAN

/*
 * This is an optimization for KASAN=y case. Since all kasan page tables
 * eventually point to the kasan_early_shadow_page we could call note_page()
 * right away without walking through lower level page tables. This saves
 * us dozens of seconds (minutes for 5-level config) while checking for
 * W+X mapping or reading kernel_page_tables debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
	    (pgtable_l5_enabled() &&
	     __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
	    __pa(pt) == __pa(kasan_early_shadow_pud)) {
		pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
		note_page(m, st, __pgprot(prot), 0, 5);
		return true;
	}
	return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				void *pt)
{
	return false;
}
#endif

#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pmd_t *start, *pmd_start;
	pgprotval_t prot, eff;

	pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
		if (!pmd_none(*start)) {
			prot = pmd_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pmd_large(*start) || !pmd_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 4);
			} else if (!kasan_page_table(m, st, pmd_start)) {
				walk_pte_level(m, st, *start, eff,
					       P + i * PMD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 4);
		start++;
	}
}

#else
#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif

#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	pud_t *start, *pud_start;
	pgprotval_t prot, eff;

	pud_start = start = (pud_t *)p4d_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
		if (!pud_none(*start)) {
			prot = pud_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (pud_large(*start) || !pud_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 3);
			} else if (!kasan_page_table(m, st, pud_start)) {
				walk_pmd_level(m, st, *start, eff,
					       P + i * PUD_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 3);

		start++;
	}
}

#else
#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif

static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
			   pgprotval_t eff_in, unsigned long P)
{
	int i;
	p4d_t *start, *p4d_start;
	pgprotval_t prot, eff;

	if (PTRS_PER_P4D == 1)
		return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

	p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_P4D; i++) {
		st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
		if (!p4d_none(*start)) {
			prot = p4d_flags(*start);
			eff = effective_prot(eff_in, prot);
			if (p4d_large(*start) || !p4d_present(*start)) {
				note_page(m, st, __pgprot(prot), eff, 2);
			} else if (!kasan_page_table(m, st, p4d_start)) {
				walk_pud_level(m, st, *start, eff,
					       P + i * P4D_LEVEL_MULT);
			}
		} else
			note_page(m, st, __pgprot(0), 0, 2);

		start++;
	}
}

#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))

static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
	/*
	 * A hole in the beginning of kernel address space reserved
	 * for a hypervisor.
	 */
	return	(idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
		(idx <  pgd_index(GUARD_HOLE_END_ADDR));
#else
	return false;
#endif
}
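
/*
 * Common core for the dumpers: walk each PGD entry of the given top level
 * table (INIT_PGD when none is supplied), descend through the lower levels
 * and let note_page() coalesce and print the mappings.  When a pgd is
 * supplied, 'dmesg' routes the output to printk instead of the seq_file;
 * 'checkwx' additionally counts and warns about insecure W+X mappings.
 */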
static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
				       bool checkwx, bool dmesg)
{
	pgd_t *start = INIT_PGD;
	pgprotval_t prot, eff;
	int i;
	struct pg_state st = {};

	if (pgd) {
		start = pgd;
		st.to_dmesg = dmesg;
	}

	st.check_wx = checkwx;
	if (checkwx)
		st.wx_pages = 0;

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start) && !is_hypervisor_range(i)) {
			prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
			eff = _PAGE_USER | _PAGE_RW;
#else
			eff = prot;
#endif
			if (pgd_large(*start) || !pgd_present(*start)) {
				note_page(m, &st, __pgprot(prot), eff, 1);
			} else {
				walk_p4d_level(m, &st, *start, eff,
					       i * PGD_LEVEL_MULT);
			}
		} else
			note_page(m, &st, __pgprot(0), 0, 1);

		cond_resched();
		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(m, &st, __pgprot(0), 0, 0);
	if (!checkwx)
		return;
	if (st.wx_pages)
		pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
			st.wx_pages);
	else
		pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
	ptdump_walk_pgd_level_core(m, pgd, false, true);
}

void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (user && static_cpu_has(X86_FEATURE_PTI))
		pgd = kernel_to_user_pgdp(pgd);
#endif
	ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);

void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	pgd_t *pgd = INIT_PGD;

	if (!(__supported_pte_mask & _PAGE_NX) ||
	    !static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("x86/mm: Checking user space page tables\n");
	pgd = kernel_to_user_pgdp(pgd);
	ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
	ptdump_walk_pgd_level_core(NULL, NULL, true, false);
}

static int __init pt_dump_init(void)
{
	/*
	 * Various markers are not compile-time constants, so assign them
	 * here.
	 */
#ifdef CONFIG_X86_64
	address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
# ifdef CONFIG_MODIFY_LDT_SYSCALL
	address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
# endif
#endif
	return 0;
}
__initcall(pt_dump_init);