1 /* 2 * linux/arch/x86_64/mm/init.c 3 * 4 * Copyright (C) 1995 Linus Torvalds 5 * Copyright (C) 2000 Pavel Machek <pavel@ucw.cz> 6 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de> 7 */ 8 9 #include <linux/signal.h> 10 #include <linux/sched.h> 11 #include <linux/kernel.h> 12 #include <linux/errno.h> 13 #include <linux/string.h> 14 #include <linux/types.h> 15 #include <linux/ptrace.h> 16 #include <linux/mman.h> 17 #include <linux/mm.h> 18 #include <linux/swap.h> 19 #include <linux/smp.h> 20 #include <linux/init.h> 21 #include <linux/initrd.h> 22 #include <linux/pagemap.h> 23 #include <linux/bootmem.h> 24 #include <linux/memblock.h> 25 #include <linux/proc_fs.h> 26 #include <linux/pci.h> 27 #include <linux/pfn.h> 28 #include <linux/poison.h> 29 #include <linux/dma-mapping.h> 30 #include <linux/module.h> 31 #include <linux/memory.h> 32 #include <linux/memory_hotplug.h> 33 #include <linux/nmi.h> 34 #include <linux/gfp.h> 35 #include <linux/kcore.h> 36 37 #include <asm/processor.h> 38 #include <asm/bios_ebda.h> 39 #include <asm/uaccess.h> 40 #include <asm/pgtable.h> 41 #include <asm/pgalloc.h> 42 #include <asm/dma.h> 43 #include <asm/fixmap.h> 44 #include <asm/e820.h> 45 #include <asm/apic.h> 46 #include <asm/tlb.h> 47 #include <asm/mmu_context.h> 48 #include <asm/proto.h> 49 #include <asm/smp.h> 50 #include <asm/sections.h> 51 #include <asm/kdebug.h> 52 #include <asm/numa.h> 53 #include <asm/cacheflush.h> 54 #include <asm/init.h> 55 #include <asm/uv/uv.h> 56 #include <asm/setup.h> 57 58 #include "mm_internal.h" 59 60 static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page, 61 unsigned long addr, unsigned long end) 62 { 63 addr &= PMD_MASK; 64 for (; addr < end; addr += PMD_SIZE) { 65 pmd_t *pmd = pmd_page + pmd_index(addr); 66 67 if (!pmd_present(*pmd)) 68 set_pmd(pmd, __pmd(addr | pmd_flag)); 69 } 70 } 71 static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page, 72 unsigned long addr, unsigned long end) 73 { 74 unsigned long next; 75 76 for (; addr < end; addr = next) { 77 pud_t *pud = pud_page + pud_index(addr); 78 pmd_t *pmd; 79 80 next = (addr & PUD_MASK) + PUD_SIZE; 81 if (next > end) 82 next = end; 83 84 if (pud_present(*pud)) { 85 pmd = pmd_offset(pud, 0); 86 ident_pmd_init(info->pmd_flag, pmd, addr, next); 87 continue; 88 } 89 pmd = (pmd_t *)info->alloc_pgt_page(info->context); 90 if (!pmd) 91 return -ENOMEM; 92 ident_pmd_init(info->pmd_flag, pmd, addr, next); 93 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); 94 } 95 96 return 0; 97 } 98 99 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, 100 unsigned long addr, unsigned long end) 101 { 102 unsigned long next; 103 int result; 104 int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0; 105 106 for (; addr < end; addr = next) { 107 pgd_t *pgd = pgd_page + pgd_index(addr) + off; 108 pud_t *pud; 109 110 next = (addr & PGDIR_MASK) + PGDIR_SIZE; 111 if (next > end) 112 next = end; 113 114 if (pgd_present(*pgd)) { 115 pud = pud_offset(pgd, 0); 116 result = ident_pud_init(info, pud, addr, next); 117 if (result) 118 return result; 119 continue; 120 } 121 122 pud = (pud_t *)info->alloc_pgt_page(info->context); 123 if (!pud) 124 return -ENOMEM; 125 result = ident_pud_init(info, pud, addr, next); 126 if (result) 127 return result; 128 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE)); 129 } 130 131 return 0; 132 } 133 134 static int __init parse_direct_gbpages_off(char *arg) 135 { 136 direct_gbpages = 0; 137 return 0; 138 } 139 early_param("nogbpages", parse_direct_gbpages_off); 140 141 static int __init parse_direct_gbpages_on(char *arg) 142 { 143 direct_gbpages = 1; 144 return 0; 145 } 146 early_param("gbpages", parse_direct_gbpages_on); 147 148 /* 149 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the 150 * physical space so we can cache the place of the first one and move 151 * around without checking the pgd every time. 152 */ 153 154 pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP; 155 EXPORT_SYMBOL_GPL(__supported_pte_mask); 156 157 int force_personality32; 158 159 /* 160 * noexec32=on|off 161 * Control non executable heap for 32bit processes. 162 * To control the stack too use noexec=off 163 * 164 * on PROT_READ does not imply PROT_EXEC for 32-bit processes (default) 165 * off PROT_READ implies PROT_EXEC 166 */ 167 static int __init nonx32_setup(char *str) 168 { 169 if (!strcmp(str, "on")) 170 force_personality32 &= ~READ_IMPLIES_EXEC; 171 else if (!strcmp(str, "off")) 172 force_personality32 |= READ_IMPLIES_EXEC; 173 return 1; 174 } 175 __setup("noexec32=", nonx32_setup); 176 177 /* 178 * When memory was added/removed make sure all the processes MM have 179 * suitable PGD entries in the local PGD level page. 180 */ 181 void sync_global_pgds(unsigned long start, unsigned long end) 182 { 183 unsigned long address; 184 185 for (address = start; address <= end; address += PGDIR_SIZE) { 186 const pgd_t *pgd_ref = pgd_offset_k(address); 187 struct page *page; 188 189 if (pgd_none(*pgd_ref)) 190 continue; 191 192 spin_lock(&pgd_lock); 193 list_for_each_entry(page, &pgd_list, lru) { 194 pgd_t *pgd; 195 spinlock_t *pgt_lock; 196 197 pgd = (pgd_t *)page_address(page) + pgd_index(address); 198 /* the pgt_lock only for Xen */ 199 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; 200 spin_lock(pgt_lock); 201 202 if (pgd_none(*pgd)) 203 set_pgd(pgd, *pgd_ref); 204 else 205 BUG_ON(pgd_page_vaddr(*pgd) 206 != pgd_page_vaddr(*pgd_ref)); 207 208 spin_unlock(pgt_lock); 209 } 210 spin_unlock(&pgd_lock); 211 } 212 } 213 214 /* 215 * NOTE: This function is marked __ref because it calls __init function 216 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. 217 */ 218 static __ref void *spp_getpage(void) 219 { 220 void *ptr; 221 222 if (after_bootmem) 223 ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); 224 else 225 ptr = alloc_bootmem_pages(PAGE_SIZE); 226 227 if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) { 228 panic("set_pte_phys: cannot allocate page data %s\n", 229 after_bootmem ? "after bootmem" : ""); 230 } 231 232 pr_debug("spp_getpage %p\n", ptr); 233 234 return ptr; 235 } 236 237 static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr) 238 { 239 if (pgd_none(*pgd)) { 240 pud_t *pud = (pud_t *)spp_getpage(); 241 pgd_populate(&init_mm, pgd, pud); 242 if (pud != pud_offset(pgd, 0)) 243 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", 244 pud, pud_offset(pgd, 0)); 245 } 246 return pud_offset(pgd, vaddr); 247 } 248 249 static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr) 250 { 251 if (pud_none(*pud)) { 252 pmd_t *pmd = (pmd_t *) spp_getpage(); 253 pud_populate(&init_mm, pud, pmd); 254 if (pmd != pmd_offset(pud, 0)) 255 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n", 256 pmd, pmd_offset(pud, 0)); 257 } 258 return pmd_offset(pud, vaddr); 259 } 260 261 static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) 262 { 263 if (pmd_none(*pmd)) { 264 pte_t *pte = (pte_t *) spp_getpage(); 265 pmd_populate_kernel(&init_mm, pmd, pte); 266 if (pte != pte_offset_kernel(pmd, 0)) 267 printk(KERN_ERR "PAGETABLE BUG #02!\n"); 268 } 269 return pte_offset_kernel(pmd, vaddr); 270 } 271 272 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) 273 { 274 pud_t *pud; 275 pmd_t *pmd; 276 pte_t *pte; 277 278 pud = pud_page + pud_index(vaddr); 279 pmd = fill_pmd(pud, vaddr); 280 pte = fill_pte(pmd, vaddr); 281 282 set_pte(pte, new_pte); 283 284 /* 285 * It's enough to flush this one mapping. 286 * (PGE mappings get flushed as well) 287 */ 288 __flush_tlb_one(vaddr); 289 } 290 291 void set_pte_vaddr(unsigned long vaddr, pte_t pteval) 292 { 293 pgd_t *pgd; 294 pud_t *pud_page; 295 296 pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval)); 297 298 pgd = pgd_offset_k(vaddr); 299 if (pgd_none(*pgd)) { 300 printk(KERN_ERR 301 "PGD FIXMAP MISSING, it should be setup in head.S!\n"); 302 return; 303 } 304 pud_page = (pud_t*)pgd_page_vaddr(*pgd); 305 set_pte_vaddr_pud(pud_page, vaddr, pteval); 306 } 307 308 pmd_t * __init populate_extra_pmd(unsigned long vaddr) 309 { 310 pgd_t *pgd; 311 pud_t *pud; 312 313 pgd = pgd_offset_k(vaddr); 314 pud = fill_pud(pgd, vaddr); 315 return fill_pmd(pud, vaddr); 316 } 317 318 pte_t * __init populate_extra_pte(unsigned long vaddr) 319 { 320 pmd_t *pmd; 321 322 pmd = populate_extra_pmd(vaddr); 323 return fill_pte(pmd, vaddr); 324 } 325 326 /* 327 * Create large page table mappings for a range of physical addresses. 328 */ 329 static void __init __init_extra_mapping(unsigned long phys, unsigned long size, 330 pgprot_t prot) 331 { 332 pgd_t *pgd; 333 pud_t *pud; 334 pmd_t *pmd; 335 336 BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK)); 337 for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { 338 pgd = pgd_offset_k((unsigned long)__va(phys)); 339 if (pgd_none(*pgd)) { 340 pud = (pud_t *) spp_getpage(); 341 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE | 342 _PAGE_USER)); 343 } 344 pud = pud_offset(pgd, (unsigned long)__va(phys)); 345 if (pud_none(*pud)) { 346 pmd = (pmd_t *) spp_getpage(); 347 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | 348 _PAGE_USER)); 349 } 350 pmd = pmd_offset(pud, phys); 351 BUG_ON(!pmd_none(*pmd)); 352 set_pmd(pmd, __pmd(phys | pgprot_val(prot))); 353 } 354 } 355 356 void __init init_extra_mapping_wb(unsigned long phys, unsigned long size) 357 { 358 __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE); 359 } 360 361 void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) 362 { 363 __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE); 364 } 365 366 /* 367 * The head.S code sets up the kernel high mapping: 368 * 369 * from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text) 370 * 371 * phys_addr holds the negative offset to the kernel, which is added 372 * to the compile time generated pmds. This results in invalid pmds up 373 * to the point where we hit the physaddr 0 mapping. 374 * 375 * We limit the mappings to the region from _text to _brk_end. _brk_end 376 * is rounded up to the 2MB boundary. This catches the invalid pmds as 377 * well, as they are located before _text: 378 */ 379 void __init cleanup_highmap(void) 380 { 381 unsigned long vaddr = __START_KERNEL_map; 382 unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE; 383 unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; 384 pmd_t *pmd = level2_kernel_pgt; 385 386 /* 387 * Native path, max_pfn_mapped is not set yet. 388 * Xen has valid max_pfn_mapped set in 389 * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable(). 390 */ 391 if (max_pfn_mapped) 392 vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); 393 394 for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { 395 if (pmd_none(*pmd)) 396 continue; 397 if (vaddr < (unsigned long) _text || vaddr > end) 398 set_pmd(pmd, __pmd(0)); 399 } 400 } 401 402 static unsigned long __meminit 403 phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end, 404 pgprot_t prot) 405 { 406 unsigned long pages = 0, next; 407 unsigned long last_map_addr = end; 408 int i; 409 410 pte_t *pte = pte_page + pte_index(addr); 411 412 for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) { 413 next = (addr & PAGE_MASK) + PAGE_SIZE; 414 if (addr >= end) { 415 if (!after_bootmem && 416 !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) && 417 !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN)) 418 set_pte(pte, __pte(0)); 419 continue; 420 } 421 422 /* 423 * We will re-use the existing mapping. 424 * Xen for example has some special requirements, like mapping 425 * pagetable pages as RO. So assume someone who pre-setup 426 * these mappings are more intelligent. 427 */ 428 if (pte_val(*pte)) { 429 if (!after_bootmem) 430 pages++; 431 continue; 432 } 433 434 if (0) 435 printk(" pte=%p addr=%lx pte=%016lx\n", 436 pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte); 437 pages++; 438 set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot)); 439 last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE; 440 } 441 442 update_page_count(PG_LEVEL_4K, pages); 443 444 return last_map_addr; 445 } 446 447 static unsigned long __meminit 448 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, 449 unsigned long page_size_mask, pgprot_t prot) 450 { 451 unsigned long pages = 0, next; 452 unsigned long last_map_addr = end; 453 454 int i = pmd_index(address); 455 456 for (; i < PTRS_PER_PMD; i++, address = next) { 457 pmd_t *pmd = pmd_page + pmd_index(address); 458 pte_t *pte; 459 pgprot_t new_prot = prot; 460 461 next = (address & PMD_MASK) + PMD_SIZE; 462 if (address >= end) { 463 if (!after_bootmem && 464 !e820_any_mapped(address & PMD_MASK, next, E820_RAM) && 465 !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN)) 466 set_pmd(pmd, __pmd(0)); 467 continue; 468 } 469 470 if (pmd_val(*pmd)) { 471 if (!pmd_large(*pmd)) { 472 spin_lock(&init_mm.page_table_lock); 473 pte = (pte_t *)pmd_page_vaddr(*pmd); 474 last_map_addr = phys_pte_init(pte, address, 475 end, prot); 476 spin_unlock(&init_mm.page_table_lock); 477 continue; 478 } 479 /* 480 * If we are ok with PG_LEVEL_2M mapping, then we will 481 * use the existing mapping, 482 * 483 * Otherwise, we will split the large page mapping but 484 * use the same existing protection bits except for 485 * large page, so that we don't violate Intel's TLB 486 * Application note (317080) which says, while changing 487 * the page sizes, new and old translations should 488 * not differ with respect to page frame and 489 * attributes. 490 */ 491 if (page_size_mask & (1 << PG_LEVEL_2M)) { 492 if (!after_bootmem) 493 pages++; 494 last_map_addr = next; 495 continue; 496 } 497 new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); 498 } 499 500 if (page_size_mask & (1<<PG_LEVEL_2M)) { 501 pages++; 502 spin_lock(&init_mm.page_table_lock); 503 set_pte((pte_t *)pmd, 504 pfn_pte((address & PMD_MASK) >> PAGE_SHIFT, 505 __pgprot(pgprot_val(prot) | _PAGE_PSE))); 506 spin_unlock(&init_mm.page_table_lock); 507 last_map_addr = next; 508 continue; 509 } 510 511 pte = alloc_low_page(); 512 last_map_addr = phys_pte_init(pte, address, end, new_prot); 513 514 spin_lock(&init_mm.page_table_lock); 515 pmd_populate_kernel(&init_mm, pmd, pte); 516 spin_unlock(&init_mm.page_table_lock); 517 } 518 update_page_count(PG_LEVEL_2M, pages); 519 return last_map_addr; 520 } 521 522 static unsigned long __meminit 523 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, 524 unsigned long page_size_mask) 525 { 526 unsigned long pages = 0, next; 527 unsigned long last_map_addr = end; 528 int i = pud_index(addr); 529 530 for (; i < PTRS_PER_PUD; i++, addr = next) { 531 pud_t *pud = pud_page + pud_index(addr); 532 pmd_t *pmd; 533 pgprot_t prot = PAGE_KERNEL; 534 535 next = (addr & PUD_MASK) + PUD_SIZE; 536 if (addr >= end) { 537 if (!after_bootmem && 538 !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) && 539 !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN)) 540 set_pud(pud, __pud(0)); 541 continue; 542 } 543 544 if (pud_val(*pud)) { 545 if (!pud_large(*pud)) { 546 pmd = pmd_offset(pud, 0); 547 last_map_addr = phys_pmd_init(pmd, addr, end, 548 page_size_mask, prot); 549 __flush_tlb_all(); 550 continue; 551 } 552 /* 553 * If we are ok with PG_LEVEL_1G mapping, then we will 554 * use the existing mapping. 555 * 556 * Otherwise, we will split the gbpage mapping but use 557 * the same existing protection bits except for large 558 * page, so that we don't violate Intel's TLB 559 * Application note (317080) which says, while changing 560 * the page sizes, new and old translations should 561 * not differ with respect to page frame and 562 * attributes. 563 */ 564 if (page_size_mask & (1 << PG_LEVEL_1G)) { 565 if (!after_bootmem) 566 pages++; 567 last_map_addr = next; 568 continue; 569 } 570 prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); 571 } 572 573 if (page_size_mask & (1<<PG_LEVEL_1G)) { 574 pages++; 575 spin_lock(&init_mm.page_table_lock); 576 set_pte((pte_t *)pud, 577 pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT, 578 PAGE_KERNEL_LARGE)); 579 spin_unlock(&init_mm.page_table_lock); 580 last_map_addr = next; 581 continue; 582 } 583 584 pmd = alloc_low_page(); 585 last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask, 586 prot); 587 588 spin_lock(&init_mm.page_table_lock); 589 pud_populate(&init_mm, pud, pmd); 590 spin_unlock(&init_mm.page_table_lock); 591 } 592 __flush_tlb_all(); 593 594 update_page_count(PG_LEVEL_1G, pages); 595 596 return last_map_addr; 597 } 598 599 unsigned long __meminit 600 kernel_physical_mapping_init(unsigned long start, 601 unsigned long end, 602 unsigned long page_size_mask) 603 { 604 bool pgd_changed = false; 605 unsigned long next, last_map_addr = end; 606 unsigned long addr; 607 608 start = (unsigned long)__va(start); 609 end = (unsigned long)__va(end); 610 addr = start; 611 612 for (; start < end; start = next) { 613 pgd_t *pgd = pgd_offset_k(start); 614 pud_t *pud; 615 616 next = (start & PGDIR_MASK) + PGDIR_SIZE; 617 618 if (pgd_val(*pgd)) { 619 pud = (pud_t *)pgd_page_vaddr(*pgd); 620 last_map_addr = phys_pud_init(pud, __pa(start), 621 __pa(end), page_size_mask); 622 continue; 623 } 624 625 pud = alloc_low_page(); 626 last_map_addr = phys_pud_init(pud, __pa(start), __pa(end), 627 page_size_mask); 628 629 spin_lock(&init_mm.page_table_lock); 630 pgd_populate(&init_mm, pgd, pud); 631 spin_unlock(&init_mm.page_table_lock); 632 pgd_changed = true; 633 } 634 635 if (pgd_changed) 636 sync_global_pgds(addr, end - 1); 637 638 __flush_tlb_all(); 639 640 return last_map_addr; 641 } 642 643 #ifndef CONFIG_NUMA 644 void __init initmem_init(void) 645 { 646 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); 647 } 648 #endif 649 650 void __init paging_init(void) 651 { 652 sparse_memory_present_with_active_regions(MAX_NUMNODES); 653 sparse_init(); 654 655 /* 656 * clear the default setting with node 0 657 * note: don't use nodes_clear here, that is really clearing when 658 * numa support is not compiled in, and later node_set_state 659 * will not set it back. 660 */ 661 node_clear_state(0, N_MEMORY); 662 if (N_MEMORY != N_NORMAL_MEMORY) 663 node_clear_state(0, N_NORMAL_MEMORY); 664 665 zone_sizes_init(); 666 } 667 668 /* 669 * Memory hotplug specific functions 670 */ 671 #ifdef CONFIG_MEMORY_HOTPLUG 672 /* 673 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need 674 * updating. 675 */ 676 static void update_end_of_memory_vars(u64 start, u64 size) 677 { 678 unsigned long end_pfn = PFN_UP(start + size); 679 680 if (end_pfn > max_pfn) { 681 max_pfn = end_pfn; 682 max_low_pfn = end_pfn; 683 high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; 684 } 685 } 686 687 /* 688 * Memory is added always to NORMAL zone. This means you will never get 689 * additional DMA/DMA32 memory. 690 */ 691 int arch_add_memory(int nid, u64 start, u64 size) 692 { 693 struct pglist_data *pgdat = NODE_DATA(nid); 694 struct zone *zone = pgdat->node_zones + ZONE_NORMAL; 695 unsigned long start_pfn = start >> PAGE_SHIFT; 696 unsigned long nr_pages = size >> PAGE_SHIFT; 697 int ret; 698 699 init_memory_mapping(start, start + size); 700 701 ret = __add_pages(nid, zone, start_pfn, nr_pages); 702 WARN_ON_ONCE(ret); 703 704 /* update max_pfn, max_low_pfn and high_memory */ 705 update_end_of_memory_vars(start, size); 706 707 return ret; 708 } 709 EXPORT_SYMBOL_GPL(arch_add_memory); 710 711 #define PAGE_INUSE 0xFD 712 713 static void __meminit free_pagetable(struct page *page, int order) 714 { 715 struct zone *zone; 716 bool bootmem = false; 717 unsigned long magic; 718 unsigned int nr_pages = 1 << order; 719 720 /* bootmem page has reserved flag */ 721 if (PageReserved(page)) { 722 __ClearPageReserved(page); 723 bootmem = true; 724 725 magic = (unsigned long)page->lru.next; 726 if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) { 727 while (nr_pages--) 728 put_page_bootmem(page++); 729 } else 730 __free_pages_bootmem(page, order); 731 } else 732 free_pages((unsigned long)page_address(page), order); 733 734 /* 735 * SECTION_INFO pages and MIX_SECTION_INFO pages 736 * are all allocated by bootmem. 737 */ 738 if (bootmem) { 739 zone = page_zone(page); 740 zone_span_writelock(zone); 741 zone->present_pages += nr_pages; 742 zone_span_writeunlock(zone); 743 totalram_pages += nr_pages; 744 } 745 } 746 747 static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) 748 { 749 pte_t *pte; 750 int i; 751 752 for (i = 0; i < PTRS_PER_PTE; i++) { 753 pte = pte_start + i; 754 if (pte_val(*pte)) 755 return; 756 } 757 758 /* free a pte talbe */ 759 free_pagetable(pmd_page(*pmd), 0); 760 spin_lock(&init_mm.page_table_lock); 761 pmd_clear(pmd); 762 spin_unlock(&init_mm.page_table_lock); 763 } 764 765 static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud) 766 { 767 pmd_t *pmd; 768 int i; 769 770 for (i = 0; i < PTRS_PER_PMD; i++) { 771 pmd = pmd_start + i; 772 if (pmd_val(*pmd)) 773 return; 774 } 775 776 /* free a pmd talbe */ 777 free_pagetable(pud_page(*pud), 0); 778 spin_lock(&init_mm.page_table_lock); 779 pud_clear(pud); 780 spin_unlock(&init_mm.page_table_lock); 781 } 782 783 /* Return true if pgd is changed, otherwise return false. */ 784 static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd) 785 { 786 pud_t *pud; 787 int i; 788 789 for (i = 0; i < PTRS_PER_PUD; i++) { 790 pud = pud_start + i; 791 if (pud_val(*pud)) 792 return false; 793 } 794 795 /* free a pud table */ 796 free_pagetable(pgd_page(*pgd), 0); 797 spin_lock(&init_mm.page_table_lock); 798 pgd_clear(pgd); 799 spin_unlock(&init_mm.page_table_lock); 800 801 return true; 802 } 803 804 static void __meminit 805 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, 806 bool direct) 807 { 808 unsigned long next, pages = 0; 809 pte_t *pte; 810 void *page_addr; 811 phys_addr_t phys_addr; 812 813 pte = pte_start + pte_index(addr); 814 for (; addr < end; addr = next, pte++) { 815 next = (addr + PAGE_SIZE) & PAGE_MASK; 816 if (next > end) 817 next = end; 818 819 if (!pte_present(*pte)) 820 continue; 821 822 /* 823 * We mapped [0,1G) memory as identity mapping when 824 * initializing, in arch/x86/kernel/head_64.S. These 825 * pagetables cannot be removed. 826 */ 827 phys_addr = pte_val(*pte) + (addr & PAGE_MASK); 828 if (phys_addr < (phys_addr_t)0x40000000) 829 return; 830 831 if (IS_ALIGNED(addr, PAGE_SIZE) && 832 IS_ALIGNED(next, PAGE_SIZE)) { 833 /* 834 * Do not free direct mapping pages since they were 835 * freed when offlining, or simplely not in use. 836 */ 837 if (!direct) 838 free_pagetable(pte_page(*pte), 0); 839 840 spin_lock(&init_mm.page_table_lock); 841 pte_clear(&init_mm, addr, pte); 842 spin_unlock(&init_mm.page_table_lock); 843 844 /* For non-direct mapping, pages means nothing. */ 845 pages++; 846 } else { 847 /* 848 * If we are here, we are freeing vmemmap pages since 849 * direct mapped memory ranges to be freed are aligned. 850 * 851 * If we are not removing the whole page, it means 852 * other page structs in this page are being used and 853 * we canot remove them. So fill the unused page_structs 854 * with 0xFD, and remove the page when it is wholly 855 * filled with 0xFD. 856 */ 857 memset((void *)addr, PAGE_INUSE, next - addr); 858 859 page_addr = page_address(pte_page(*pte)); 860 if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) { 861 free_pagetable(pte_page(*pte), 0); 862 863 spin_lock(&init_mm.page_table_lock); 864 pte_clear(&init_mm, addr, pte); 865 spin_unlock(&init_mm.page_table_lock); 866 } 867 } 868 } 869 870 /* Call free_pte_table() in remove_pmd_table(). */ 871 flush_tlb_all(); 872 if (direct) 873 update_page_count(PG_LEVEL_4K, -pages); 874 } 875 876 static void __meminit 877 remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, 878 bool direct) 879 { 880 unsigned long next, pages = 0; 881 pte_t *pte_base; 882 pmd_t *pmd; 883 void *page_addr; 884 885 pmd = pmd_start + pmd_index(addr); 886 for (; addr < end; addr = next, pmd++) { 887 next = pmd_addr_end(addr, end); 888 889 if (!pmd_present(*pmd)) 890 continue; 891 892 if (pmd_large(*pmd)) { 893 if (IS_ALIGNED(addr, PMD_SIZE) && 894 IS_ALIGNED(next, PMD_SIZE)) { 895 if (!direct) 896 free_pagetable(pmd_page(*pmd), 897 get_order(PMD_SIZE)); 898 899 spin_lock(&init_mm.page_table_lock); 900 pmd_clear(pmd); 901 spin_unlock(&init_mm.page_table_lock); 902 pages++; 903 } else { 904 /* If here, we are freeing vmemmap pages. */ 905 memset((void *)addr, PAGE_INUSE, next - addr); 906 907 page_addr = page_address(pmd_page(*pmd)); 908 if (!memchr_inv(page_addr, PAGE_INUSE, 909 PMD_SIZE)) { 910 free_pagetable(pmd_page(*pmd), 911 get_order(PMD_SIZE)); 912 913 spin_lock(&init_mm.page_table_lock); 914 pmd_clear(pmd); 915 spin_unlock(&init_mm.page_table_lock); 916 } 917 } 918 919 continue; 920 } 921 922 pte_base = (pte_t *)pmd_page_vaddr(*pmd); 923 remove_pte_table(pte_base, addr, next, direct); 924 free_pte_table(pte_base, pmd); 925 } 926 927 /* Call free_pmd_table() in remove_pud_table(). */ 928 if (direct) 929 update_page_count(PG_LEVEL_2M, -pages); 930 } 931 932 static void __meminit 933 remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, 934 bool direct) 935 { 936 unsigned long next, pages = 0; 937 pmd_t *pmd_base; 938 pud_t *pud; 939 void *page_addr; 940 941 pud = pud_start + pud_index(addr); 942 for (; addr < end; addr = next, pud++) { 943 next = pud_addr_end(addr, end); 944 945 if (!pud_present(*pud)) 946 continue; 947 948 if (pud_large(*pud)) { 949 if (IS_ALIGNED(addr, PUD_SIZE) && 950 IS_ALIGNED(next, PUD_SIZE)) { 951 if (!direct) 952 free_pagetable(pud_page(*pud), 953 get_order(PUD_SIZE)); 954 955 spin_lock(&init_mm.page_table_lock); 956 pud_clear(pud); 957 spin_unlock(&init_mm.page_table_lock); 958 pages++; 959 } else { 960 /* If here, we are freeing vmemmap pages. */ 961 memset((void *)addr, PAGE_INUSE, next - addr); 962 963 page_addr = page_address(pud_page(*pud)); 964 if (!memchr_inv(page_addr, PAGE_INUSE, 965 PUD_SIZE)) { 966 free_pagetable(pud_page(*pud), 967 get_order(PUD_SIZE)); 968 969 spin_lock(&init_mm.page_table_lock); 970 pud_clear(pud); 971 spin_unlock(&init_mm.page_table_lock); 972 } 973 } 974 975 continue; 976 } 977 978 pmd_base = (pmd_t *)pud_page_vaddr(*pud); 979 remove_pmd_table(pmd_base, addr, next, direct); 980 free_pmd_table(pmd_base, pud); 981 } 982 983 if (direct) 984 update_page_count(PG_LEVEL_1G, -pages); 985 } 986 987 /* start and end are both virtual address. */ 988 static void __meminit 989 remove_pagetable(unsigned long start, unsigned long end, bool direct) 990 { 991 unsigned long next; 992 pgd_t *pgd; 993 pud_t *pud; 994 bool pgd_changed = false; 995 996 for (; start < end; start = next) { 997 next = pgd_addr_end(start, end); 998 999 pgd = pgd_offset_k(start); 1000 if (!pgd_present(*pgd)) 1001 continue; 1002 1003 pud = (pud_t *)pgd_page_vaddr(*pgd); 1004 remove_pud_table(pud, start, next, direct); 1005 if (free_pud_table(pud, pgd)) 1006 pgd_changed = true; 1007 } 1008 1009 if (pgd_changed) 1010 sync_global_pgds(start, end - 1); 1011 1012 flush_tlb_all(); 1013 } 1014 1015 void __ref vmemmap_free(unsigned long start, unsigned long end) 1016 { 1017 remove_pagetable(start, end, false); 1018 } 1019 1020 #ifdef CONFIG_MEMORY_HOTREMOVE 1021 static void __meminit 1022 kernel_physical_mapping_remove(unsigned long start, unsigned long end) 1023 { 1024 start = (unsigned long)__va(start); 1025 end = (unsigned long)__va(end); 1026 1027 remove_pagetable(start, end, true); 1028 } 1029 1030 int __ref arch_remove_memory(u64 start, u64 size) 1031 { 1032 unsigned long start_pfn = start >> PAGE_SHIFT; 1033 unsigned long nr_pages = size >> PAGE_SHIFT; 1034 struct zone *zone; 1035 int ret; 1036 1037 zone = page_zone(pfn_to_page(start_pfn)); 1038 kernel_physical_mapping_remove(start, start + size); 1039 ret = __remove_pages(zone, start_pfn, nr_pages); 1040 WARN_ON_ONCE(ret); 1041 1042 return ret; 1043 } 1044 #endif 1045 #endif /* CONFIG_MEMORY_HOTPLUG */ 1046 1047 static struct kcore_list kcore_vsyscall; 1048 1049 static void __init register_page_bootmem_info(void) 1050 { 1051 #ifdef CONFIG_NUMA 1052 int i; 1053 1054 for_each_online_node(i) 1055 register_page_bootmem_info_node(NODE_DATA(i)); 1056 #endif 1057 } 1058 1059 void __init mem_init(void) 1060 { 1061 long codesize, reservedpages, datasize, initsize; 1062 unsigned long absent_pages; 1063 1064 pci_iommu_alloc(); 1065 1066 /* clear_bss() already clear the empty_zero_page */ 1067 1068 register_page_bootmem_info(); 1069 1070 /* this will put all memory onto the freelists */ 1071 totalram_pages = free_all_bootmem(); 1072 1073 absent_pages = absent_pages_in_range(0, max_pfn); 1074 reservedpages = max_pfn - totalram_pages - absent_pages; 1075 after_bootmem = 1; 1076 1077 codesize = (unsigned long) &_etext - (unsigned long) &_text; 1078 datasize = (unsigned long) &_edata - (unsigned long) &_etext; 1079 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; 1080 1081 /* Register memory areas for /proc/kcore */ 1082 kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, 1083 VSYSCALL_END - VSYSCALL_START, KCORE_OTHER); 1084 1085 printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " 1086 "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n", 1087 nr_free_pages() << (PAGE_SHIFT-10), 1088 max_pfn << (PAGE_SHIFT-10), 1089 codesize >> 10, 1090 absent_pages << (PAGE_SHIFT-10), 1091 reservedpages << (PAGE_SHIFT-10), 1092 datasize >> 10, 1093 initsize >> 10); 1094 } 1095 1096 #ifdef CONFIG_DEBUG_RODATA 1097 const int rodata_test_data = 0xC3; 1098 EXPORT_SYMBOL_GPL(rodata_test_data); 1099 1100 int kernel_set_to_readonly; 1101 1102 void set_kernel_text_rw(void) 1103 { 1104 unsigned long start = PFN_ALIGN(_text); 1105 unsigned long end = PFN_ALIGN(__stop___ex_table); 1106 1107 if (!kernel_set_to_readonly) 1108 return; 1109 1110 pr_debug("Set kernel text: %lx - %lx for read write\n", 1111 start, end); 1112 1113 /* 1114 * Make the kernel identity mapping for text RW. Kernel text 1115 * mapping will always be RO. Refer to the comment in 1116 * static_protections() in pageattr.c 1117 */ 1118 set_memory_rw(start, (end - start) >> PAGE_SHIFT); 1119 } 1120 1121 void set_kernel_text_ro(void) 1122 { 1123 unsigned long start = PFN_ALIGN(_text); 1124 unsigned long end = PFN_ALIGN(__stop___ex_table); 1125 1126 if (!kernel_set_to_readonly) 1127 return; 1128 1129 pr_debug("Set kernel text: %lx - %lx for read only\n", 1130 start, end); 1131 1132 /* 1133 * Set the kernel identity mapping for text RO. 1134 */ 1135 set_memory_ro(start, (end - start) >> PAGE_SHIFT); 1136 } 1137 1138 void mark_rodata_ro(void) 1139 { 1140 unsigned long start = PFN_ALIGN(_text); 1141 unsigned long rodata_start = PFN_ALIGN(__start_rodata); 1142 unsigned long end = (unsigned long) &__end_rodata_hpage_align; 1143 unsigned long text_end = PFN_ALIGN(&__stop___ex_table); 1144 unsigned long rodata_end = PFN_ALIGN(&__end_rodata); 1145 unsigned long all_end = PFN_ALIGN(&_end); 1146 1147 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", 1148 (end - start) >> 10); 1149 set_memory_ro(start, (end - start) >> PAGE_SHIFT); 1150 1151 kernel_set_to_readonly = 1; 1152 1153 /* 1154 * The rodata/data/bss/brk section (but not the kernel text!) 1155 * should also be not-executable. 1156 */ 1157 set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); 1158 1159 rodata_test(); 1160 1161 #ifdef CONFIG_CPA_DEBUG 1162 printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end); 1163 set_memory_rw(start, (end-start) >> PAGE_SHIFT); 1164 1165 printk(KERN_INFO "Testing CPA: again\n"); 1166 set_memory_ro(start, (end-start) >> PAGE_SHIFT); 1167 #endif 1168 1169 free_init_pages("unused kernel memory", 1170 (unsigned long) __va(__pa_symbol(text_end)), 1171 (unsigned long) __va(__pa_symbol(rodata_start))); 1172 1173 free_init_pages("unused kernel memory", 1174 (unsigned long) __va(__pa_symbol(rodata_end)), 1175 (unsigned long) __va(__pa_symbol(_sdata))); 1176 } 1177 1178 #endif 1179 1180 int kern_addr_valid(unsigned long addr) 1181 { 1182 unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; 1183 pgd_t *pgd; 1184 pud_t *pud; 1185 pmd_t *pmd; 1186 pte_t *pte; 1187 1188 if (above != 0 && above != -1UL) 1189 return 0; 1190 1191 pgd = pgd_offset_k(addr); 1192 if (pgd_none(*pgd)) 1193 return 0; 1194 1195 pud = pud_offset(pgd, addr); 1196 if (pud_none(*pud)) 1197 return 0; 1198 1199 if (pud_large(*pud)) 1200 return pfn_valid(pud_pfn(*pud)); 1201 1202 pmd = pmd_offset(pud, addr); 1203 if (pmd_none(*pmd)) 1204 return 0; 1205 1206 if (pmd_large(*pmd)) 1207 return pfn_valid(pmd_pfn(*pmd)); 1208 1209 pte = pte_offset_kernel(pmd, addr); 1210 if (pte_none(*pte)) 1211 return 0; 1212 1213 return pfn_valid(pte_pfn(*pte)); 1214 } 1215 1216 /* 1217 * A pseudo VMA to allow ptrace access for the vsyscall page. This only 1218 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does 1219 * not need special handling anymore: 1220 */ 1221 static struct vm_area_struct gate_vma = { 1222 .vm_start = VSYSCALL_START, 1223 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), 1224 .vm_page_prot = PAGE_READONLY_EXEC, 1225 .vm_flags = VM_READ | VM_EXEC 1226 }; 1227 1228 struct vm_area_struct *get_gate_vma(struct mm_struct *mm) 1229 { 1230 #ifdef CONFIG_IA32_EMULATION 1231 if (!mm || mm->context.ia32_compat) 1232 return NULL; 1233 #endif 1234 return &gate_vma; 1235 } 1236 1237 int in_gate_area(struct mm_struct *mm, unsigned long addr) 1238 { 1239 struct vm_area_struct *vma = get_gate_vma(mm); 1240 1241 if (!vma) 1242 return 0; 1243 1244 return (addr >= vma->vm_start) && (addr < vma->vm_end); 1245 } 1246 1247 /* 1248 * Use this when you have no reliable mm, typically from interrupt 1249 * context. It is less reliable than using a task's mm and may give 1250 * false positives. 1251 */ 1252 int in_gate_area_no_mm(unsigned long addr) 1253 { 1254 return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); 1255 } 1256 1257 const char *arch_vma_name(struct vm_area_struct *vma) 1258 { 1259 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) 1260 return "[vdso]"; 1261 if (vma == &gate_vma) 1262 return "[vsyscall]"; 1263 return NULL; 1264 } 1265 1266 #ifdef CONFIG_X86_UV 1267 unsigned long memory_block_size_bytes(void) 1268 { 1269 if (is_uv_system()) { 1270 printk(KERN_INFO "UV: memory block size 2GB\n"); 1271 return 2UL * 1024 * 1024 * 1024; 1272 } 1273 return MIN_MEMORY_BLOCK_SIZE; 1274 } 1275 #endif 1276 1277 #ifdef CONFIG_SPARSEMEM_VMEMMAP 1278 /* 1279 * Initialise the sparsemem vmemmap using huge-pages at the PMD level. 1280 */ 1281 static long __meminitdata addr_start, addr_end; 1282 static void __meminitdata *p_start, *p_end; 1283 static int __meminitdata node_start; 1284 1285 static int __meminit vmemmap_populate_hugepages(unsigned long start, 1286 unsigned long end, int node) 1287 { 1288 unsigned long addr; 1289 unsigned long next; 1290 pgd_t *pgd; 1291 pud_t *pud; 1292 pmd_t *pmd; 1293 1294 for (addr = start; addr < end; addr = next) { 1295 next = pmd_addr_end(addr, end); 1296 1297 pgd = vmemmap_pgd_populate(addr, node); 1298 if (!pgd) 1299 return -ENOMEM; 1300 1301 pud = vmemmap_pud_populate(pgd, addr, node); 1302 if (!pud) 1303 return -ENOMEM; 1304 1305 pmd = pmd_offset(pud, addr); 1306 if (pmd_none(*pmd)) { 1307 void *p; 1308 1309 p = vmemmap_alloc_block_buf(PMD_SIZE, node); 1310 if (p) { 1311 pte_t entry; 1312 1313 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, 1314 PAGE_KERNEL_LARGE); 1315 set_pmd(pmd, __pmd(pte_val(entry))); 1316 1317 /* check to see if we have contiguous blocks */ 1318 if (p_end != p || node_start != node) { 1319 if (p_start) 1320 printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n", 1321 addr_start, addr_end-1, p_start, p_end-1, node_start); 1322 addr_start = addr; 1323 node_start = node; 1324 p_start = p; 1325 } 1326 1327 addr_end = addr + PMD_SIZE; 1328 p_end = p + PMD_SIZE; 1329 continue; 1330 } 1331 } else if (pmd_large(*pmd)) { 1332 vmemmap_verify((pte_t *)pmd, node, addr, next); 1333 continue; 1334 } 1335 pr_warn_once("vmemmap: falling back to regular page backing\n"); 1336 if (vmemmap_populate_basepages(addr, next, node)) 1337 return -ENOMEM; 1338 } 1339 return 0; 1340 } 1341 1342 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) 1343 { 1344 int err; 1345 1346 if (cpu_has_pse) 1347 err = vmemmap_populate_hugepages(start, end, node); 1348 else 1349 err = vmemmap_populate_basepages(start, end, node); 1350 if (!err) 1351 sync_global_pgds(start, end - 1); 1352 return err; 1353 } 1354 1355 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE) 1356 void register_page_bootmem_memmap(unsigned long section_nr, 1357 struct page *start_page, unsigned long size) 1358 { 1359 unsigned long addr = (unsigned long)start_page; 1360 unsigned long end = (unsigned long)(start_page + size); 1361 unsigned long next; 1362 pgd_t *pgd; 1363 pud_t *pud; 1364 pmd_t *pmd; 1365 unsigned int nr_pages; 1366 struct page *page; 1367 1368 for (; addr < end; addr = next) { 1369 pte_t *pte = NULL; 1370 1371 pgd = pgd_offset_k(addr); 1372 if (pgd_none(*pgd)) { 1373 next = (addr + PAGE_SIZE) & PAGE_MASK; 1374 continue; 1375 } 1376 get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO); 1377 1378 pud = pud_offset(pgd, addr); 1379 if (pud_none(*pud)) { 1380 next = (addr + PAGE_SIZE) & PAGE_MASK; 1381 continue; 1382 } 1383 get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO); 1384 1385 if (!cpu_has_pse) { 1386 next = (addr + PAGE_SIZE) & PAGE_MASK; 1387 pmd = pmd_offset(pud, addr); 1388 if (pmd_none(*pmd)) 1389 continue; 1390 get_page_bootmem(section_nr, pmd_page(*pmd), 1391 MIX_SECTION_INFO); 1392 1393 pte = pte_offset_kernel(pmd, addr); 1394 if (pte_none(*pte)) 1395 continue; 1396 get_page_bootmem(section_nr, pte_page(*pte), 1397 SECTION_INFO); 1398 } else { 1399 next = pmd_addr_end(addr, end); 1400 1401 pmd = pmd_offset(pud, addr); 1402 if (pmd_none(*pmd)) 1403 continue; 1404 1405 nr_pages = 1 << (get_order(PMD_SIZE)); 1406 page = pmd_page(*pmd); 1407 while (nr_pages--) 1408 get_page_bootmem(section_nr, page++, 1409 SECTION_INFO); 1410 } 1411 } 1412 } 1413 #endif 1414 1415 void __meminit vmemmap_populate_print_last(void) 1416 { 1417 if (p_start) { 1418 printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n", 1419 addr_start, addr_end-1, p_start, p_end-1, node_start); 1420 p_start = NULL; 1421 p_end = NULL; 1422 node_start = 0; 1423 } 1424 } 1425 #endif 1426