// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"

#define DEFINE_POPULATE(fname, type1, type2, init)		\
static inline void fname##_init(struct mm_struct *mm,		\
		type1##_t *arg1, type2##_t *arg2, bool init)	\
{								\
	if (init)						\
		fname##_safe(mm, arg1, arg2);			\
	else							\
		fname(mm, arg1, arg2);				\
}

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)

#define DEFINE_ENTRY(type1, type2, init)			\
static inline void set_##type1##_init(type1##_t *arg1,		\
			type2##_t arg2, bool init)		\
{								\
	if (init)						\
		set_##type1##_safe(arg1, arg2);			\
	else							\
		set_##type1(arg1, arg2);			\
}

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)


/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
 * physical space, so we can cache the place of the first one and move
 * around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

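/*
 * Personality flag(s) forced onto 32-bit tasks; the noexec32= handler below
 * toggles READ_IMPLIES_EXEC in it.
 */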
int force_personality32;

/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, so we need to
		 * handle synchronization at the p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_page_vaddr(*p4d)
				       != p4d_page_vaddr(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * When memory is added, make sure all the processes' MMs have
 * suitable PGD entries in the local PGD level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}

void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	sync_global_pgds(start, end);
}

/*
 * NOTE: This function is marked __ref because it calls memblock_alloc(),
 * which is only available during early boot. It's safe to do so ONLY while
 * after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one_kernel(vaddr);
}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			p4d = (p4d_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		p4d = p4d_offset(pgd, (unsigned long)__va(phys));
		if (p4d_none(*p4d)) {
			pud = (pud_t *) spp_getpage();
			set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pud = pud_offset(p4d, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
					   _PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}

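/*
 * The phys_{pte,pmd,pud,p4d}_init() helpers below each populate one level of
 * the direct mapping and return the last physical address they mapped.  The
 * "init" argument selects the *_safe() setters and populate helpers defined
 * via DEFINE_ENTRY()/DEFINE_POPULATE() above, which refuse to silently
 * overwrite an already-present entry.
 */
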
/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	pte_t *pte;
	int i;

	pte = pte_page + pte_index(paddr);
	i = pte_index(paddr);

	for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PAGE_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pte_init(pte, __pte(0), init);
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume that whoever pre-set up
		 * these mappings knew what they were doing.
		 */
		if (!pte_none(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
				pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
		paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return paddr_last;
}

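/*
 * page_size_mask encodes which large page sizes may be used for the direct
 * mapping: bit (1 << PG_LEVEL_2M) allows 2MB PMD mappings and bit
 * (1 << PG_LEVEL_1G) allows 1GB PUD mappings, depending on CPU support.
 */
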
/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical address have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;

	int i = pmd_index(paddr);

	for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
		pmd_t *pmd = pmd_page + pmd_index(paddr);
		pte_t *pte;
		pgprot_t new_prot = prot;

		paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PMD_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pmd_init(pmd, __pmd(0), init);
			continue;
		}

		if (!pmd_none(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				paddr_last = phys_pte_init(pte, paddr,
							   paddr_end, prot,
							   init);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte_init((pte_t *)pmd,
				     pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
					     __pgprot(pgprot_val(prot) | _PAGE_PSE)),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pte = alloc_low_page();
		paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot, init);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel_init(&init_mm, pmd, pte, init);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return paddr_last;
}

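/*
 * The "pages" counts accumulated by the phys_*_init() and remove_*_table()
 * helpers are fed to update_page_count(), which backs the DirectMap4k/2M/1G
 * counters reported in /proc/meminfo.
 */
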
/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical address do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t _prot, bool init)
{
	unsigned long pages = 0, paddr_next;
	unsigned long paddr_last = paddr_end;
	unsigned long vaddr = (unsigned long)__va(paddr);
	int i = pud_index(vaddr);

	for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud;
		pmd_t *pmd;
		pgprot_t prot = _prot;

		vaddr = (unsigned long)__va(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		if (paddr >= paddr_end) {
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & PUD_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_pud_init(pud, __pud(0), init);
			continue;
		}

		if (!pud_none(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				paddr_last = phys_pmd_init(pmd, paddr,
							   paddr_end,
							   page_size_mask,
							   prot, init);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				paddr_last = paddr_next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);

			prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);

			set_pte_init((pte_t *)pud,
				     pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
					     prot),
				     init);
			spin_unlock(&init_mm.page_table_lock);
			paddr_last = paddr_next;
			continue;
		}

		pmd = alloc_low_page();
		paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		pud_populate_init(&init_mm, pud, pmd, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	update_page_count(PG_LEVEL_1G, pages);

	return paddr_last;
}

static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{
	unsigned long vaddr, vaddr_end, vaddr_next, paddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr);
	vaddr_end = (unsigned long)__va(paddr_end);

	if (!pgtable_l5_enabled())
		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end,
				     page_size_mask, prot, init);

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		p4d_t *p4d = p4d_page + p4d_index(vaddr);
		pud_t *pud;

		vaddr_next = (vaddr & P4D_MASK) + P4D_SIZE;
		paddr = __pa(vaddr);

		if (paddr >= paddr_end) {
			paddr_next = __pa(vaddr_next);
			if (!after_bootmem &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_RAM) &&
			    !e820__mapped_any(paddr & P4D_MASK, paddr_next,
					      E820_TYPE_RESERVED_KERN))
				set_p4d_init(p4d, __p4d(0), init);
			continue;
		}

		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, 0);
			paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
						   page_size_mask, prot, init);
			continue;
		}

		pud = alloc_low_page();
		paddr_last = phys_pud_init(pud, paddr, __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		p4d_populate_init(&init_mm, p4d, pud, init);
		spin_unlock(&init_mm.page_table_lock);
	}

	return paddr_last;
}

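/*
 * Common worker for kernel_physical_mapping_init() and
 * kernel_physical_mapping_change() below: it walks the kernel page tables
 * covering the direct mapping of [paddr_start, paddr_end) and builds entries
 * with the requested protections and page sizes.  With init == true the
 * *_safe() variants are used, which catch overwrites of present entries.
 */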
static unsigned long __meminit
__kernel_physical_mapping_init(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask,
			       pgprot_t prot, bool init)
{
	bool pgd_changed = false;
	unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

	paddr_last = paddr_end;
	vaddr = (unsigned long)__va(paddr_start);
	vaddr_end = (unsigned long)__va(paddr_end);
	vaddr_start = vaddr;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d;

		vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			p4d = (p4d_t *)pgd_page_vaddr(*pgd);
			paddr_last = phys_p4d_init(p4d, __pa(vaddr),
						   __pa(vaddr_end),
						   page_size_mask,
						   prot, init);
			continue;
		}

		p4d = alloc_low_page();
		paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end),
					   page_size_mask, prot, init);

		spin_lock(&init_mm.page_table_lock);
		if (pgtable_l5_enabled())
			pgd_populate_init(&init_mm, pgd, p4d, init);
		else
			p4d_populate_init(&init_mm, p4d_offset(pgd, vaddr),
					  (pud_t *) p4d, init);

		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(vaddr_start, vaddr_end - 1);

	return paddr_last;
}


/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. Note that it can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned on PMD level
 * down. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask, pgprot_t prot)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, prot, true);
}

/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 * variants when updating the mapping. The caller is responsible for flushing
 * the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, PAGE_KERNEL,
					      false);
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * clear the default setting with node 0
	 * note: don't use nodes_clear here, that is really clearing when
	 *	 numa support is not compiled in, and later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}

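/*
 * arch_add_memory() is the x86-64 memory hotplug entry point: it first
 * extends the kernel direct mapping with the protections requested in
 * params->pgprot and then creates the struct pages for the new range.
 */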
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size, params->pgprot);

	return add_pages(nid, start_pfn, nr_pages, params);
}

#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->freelist;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_hugepage_table(struct page *page,
		struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, get_order(PMD_SIZE));
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}

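/*
 * The remove_{pte,pmd,pud,p4d}_table() helpers below are the teardown
 * counterparts of phys_*_init(): they unmap a virtual range and, for
 * non-direct (vmemmap) mappings, free the backing pages.  Empty page-table
 * pages themselves are freed one level up via the free_*_table() helpers.
 */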
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_hugepage_table(pmd_page(*pmd),
							    altmap);

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = pmd_offset(pud, 0);
		remove_pmd_table(pmd_base, addr, next, direct, altmap);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{
	unsigned long next, pages = 0;
	pud_t *pud_base;
	p4d_t *p4d;

	p4d = p4d_start + p4d_index(addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		BUILD_BUG_ON(p4d_large(*p4d));

		pud_base = pud_offset(p4d, 0);
		remove_pud_table(pud_base, addr, next, altmap, direct);
		/*
		 * For 4-level page tables we do not want to free PUDs, but in the
		 * 5-level case we should free them. This code will have to change
		 * to adapt for boot-time switching between 4 and 5 level page tables.
		 */
		if (pgtable_l5_enabled())
			free_pud_table(pud_base, p4d);
	}

	if (direct)
		update_page_count(PG_LEVEL_512G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		struct vmem_altmap *altmap)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	p4d_t *p4d;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		p4d = p4d_offset(pgd, 0);
		remove_p4d_table(p4d, addr, next, altmap, direct);
	}

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	remove_pagetable(start, end, false, altmap);
}

static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true, NULL);
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	kernel_physical_mapping_remove(start, start + size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	/* this will put all memory onto the freelists */
	memblock_free_all();
	after_bootmem = 1;
	x86_init.hyper.init_after_bootmem();

	/*
	 * Must be done after boot memory is put on freelist, because here we
	 * might set fields in deferred struct pages that have not yet been
	 * initialized, and memblock_free_all() initializes all the reserved
	 * deferred pages for us.
	 */
	register_page_bootmem_info();

	/* Register memory areas for /proc/kcore */
	if (get_gate_vma(&init_mm))
		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);

	mem_init_print_info(NULL);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{
	/*
	 * More CPUs always led to greater speedups on tested systems, up to
	 * all the nodes' CPUs.  Use all since the system is otherwise idle
	 * now.
	 */
	return max_t(int, cpumask_weight(node_cpumask), 1);
}
#endif

int kernel_set_to_readonly;

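/*
 * mark_rodata_ro() runs once late in boot: it write-protects everything from
 * _text to __end_rodata_hpage_align, makes the rodata/data/bss/brk area
 * after _etext non-executable and frees the padding pages between the
 * text/rodata and rodata/data sections.
 */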
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long)__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(_etext);
	unsigned long rodata_end = PFN_ALIGN(__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

	set_ftrace_ops_ro();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_kernel_image_pages("unused kernel image (text/rodata gap)",
				(void *)text_end, (void *)rodata_start);
	free_kernel_image_pages("unused kernel image (rodata/data gap)",
				(void *)rodata_end, (void *)_sdata);

	debug_checkwx();
}

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return 0;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

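/*
 * memory_block_size_bytes() below overrides the generic weak default and is
 * used by the memory hotplug core to size the memory blocks exposed under
 * /sys/devices/system/memory/.
 */
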
/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{
	unsigned long size = 1UL << order;

	if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
		return -EINVAL;

	set_memory_block_size = size;
	return 0;
}

static unsigned long probe_memory_block_size(void)
{
	unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
	unsigned long bz;

	/* If memory block size has been set, then use it */
	bz = set_memory_block_size;
	if (bz)
		goto done;

	/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
	if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
		bz = MIN_MEMORY_BLOCK_SIZE;
		goto done;
	}

	/* Find the largest allowed block size that aligns to memory end */
	for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) {
		if (IS_ALIGNED(boot_mem_end, bz))
			break;
	}
done:
	pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
		unsigned long end, int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			if (altmap)
				p = altmap_alloc_block_buf(PMD_SIZE, altmap);
			else
				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
							 addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			} else if (altmap)
				return -ENOMEM; /* no fallback */
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}

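/*
 * vmemmap_populate() picks the mapping strategy for the struct page array:
 * base pages for ranges smaller than a section or when PSE is unavailable,
 * 2MB pages (optionally allocated from an altmap) otherwise.
 */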
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int err;

	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
		err = vmemmap_populate_basepages(start, end, node);
	else if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
				__func__);
		err = -ENOMEM;
	} else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long nr_pages)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pmd_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO);

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!boot_cpu_has(X86_FEATURE_PSE)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pmd_pages = 1 << get_order(PMD_SIZE);
			page = pmd_page(*pmd);
			while (nr_pmd_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			 addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif