#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);

	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;
	gfp_t gfp = PGALLOC_GFP;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

/*
 * Xen paravirt assumes that the pgd table is in one page, and the 64-bit
 * kernel also assumes that the pgd is in one page.
 *
 * But a kernel with PAE paging that is not running as a Xen domain only
 * needs to allocate 32 bytes for the pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
	/*
	 * When a PAE kernel is running as a Xen domain, it does not use
	 * a shared kernel pmd, and this requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * When a PAE kernel is not running as a Xen domain, it uses a
	 * shared kernel pmd, which does not require a whole page for the
	 * pgd; 32 bytes are enough. During boot, we create a 32-byte slab
	 * cache for pgd table allocations.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	if (!pgd_cache)
		return -ENOMEM;

	return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * If there is no SHARED_KERNEL_PMD, the PAE kernel is running as a
	 * Xen domain and we allocate one page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	/*
	 * Otherwise the PAE kernel is not running as a Xen domain; we can
	 * allocate a 32-byte slab object for the pgd to save memory.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_page(PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#endif /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update(vma->vm_mm, address, ptep);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed = !pud_same(*pudp, entry);

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);

	if (changed && dirty) {
		*pudp = entry;
		/*
		 * We had a write-protection fault here and changed the pud
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}

int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pud_t *pudp)
{
	int ret = 0;

	if (pud_young(*pudp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pudp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */