// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/libnvdimm.h>
#include <linux/vmstat.h>
#include <linux/kernel.h>
#include <linux/cc_platform.h>
#include <linux/set_memory.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/memtype.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>

#include "../mm_internal.h"

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgd_t		*pgd;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	unsigned long	numpages;
	unsigned long	curpage;
	unsigned long	pfn;
	unsigned int	flags;
	unsigned int	force_split		: 1,
			force_static_prot	: 1,
			force_flush_all		: 1;
	struct page	**pages;
};

enum cpa_warn {
	CPA_CONFLICT,
	CPA_PROTECT,
	CPA_DETECT,
};

static const int cpa_warn_level = CPA_PROTECT;

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock, so that we don't allow any other CPU with stale large TLB
 * entries to change a page attribute in parallel while another CPU is
 * splitting a large page entry and changing the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */

static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
	return __pgprot(cachemode2protval(pcm));
}

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	if (system_state == SYSTEM_RUNNING) {
		if (level == PG_LEVEL_2M)
			count_vm_event(DIRECT_MAP_LEVEL2_SPLIT);
		else if (level == PG_LEVEL_1G)
			count_vm_event(DIRECT_MAP_LEVEL3_SPLIT);
	}
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_CPA_STATISTICS

static unsigned long cpa_1g_checked;
static unsigned long cpa_1g_sameprot;
static unsigned long cpa_1g_preserved;
static unsigned long cpa_2m_checked;
static unsigned long cpa_2m_sameprot;
static unsigned long cpa_2m_preserved;
static unsigned long cpa_4k_install;

static inline void cpa_inc_1g_checked(void)
{
	cpa_1g_checked++;
}

static inline void cpa_inc_2m_checked(void)
{
	cpa_2m_checked++;
}

static inline void cpa_inc_4k_install(void)
{
	data_race(cpa_4k_install++);
}

static inline void cpa_inc_lp_sameprot(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_sameprot++;
	else
		cpa_2m_sameprot++;
}

static inline void cpa_inc_lp_preserved(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_preserved++;
	else
		cpa_2m_preserved++;
}

static int cpastats_show(struct seq_file *m, void *p)
{
	seq_printf(m, "1G pages checked:     %16lu\n", cpa_1g_checked);
	seq_printf(m, "1G pages sameprot:    %16lu\n", cpa_1g_sameprot);
	seq_printf(m, "1G pages preserved:   %16lu\n", cpa_1g_preserved);
	seq_printf(m, "2M pages checked:     %16lu\n", cpa_2m_checked);
	seq_printf(m, "2M pages sameprot:    %16lu\n", cpa_2m_sameprot);
	seq_printf(m, "2M pages preserved:   %16lu\n", cpa_2m_preserved);
	seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
	return 0;
}

static int cpastats_open(struct inode *inode, struct file *file)
{
	return single_open(file, cpastats_show, NULL);
}

static const struct file_operations cpastats_fops = {
	.open		= cpastats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init cpa_stats_init(void)
{
	debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
			    &cpastats_fops);
	return 0;
}
late_initcall(cpa_stats_init);
#else
static inline void cpa_inc_1g_checked(void) { }
static inline void cpa_inc_2m_checked(void) { }
static inline void cpa_inc_4k_install(void) { }
static inline void cpa_inc_lp_sameprot(int level) { }
static inline void cpa_inc_lp_preserved(int level) { }
#endif
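
/*
 * Usage sketch (illustrative; the counter values are made up): with
 * CONFIG_X86_CPA_STATISTICS=y the counters above appear under the x86
 * arch debugfs directory, so on a typical debugfs mount:
 *
 *	# cat /sys/kernel/debug/x86/cpa_stats
 *	1G pages checked:     ...
 *	2M pages preserved:   ...
 *
 * This is read-only instrumentation; nothing resets the counters at
 * run time.
 */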

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	/* Do not reference physical address outside the kernel. */
	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/*
	 * Kernel text has an alias mapping at a high address, known
	 * here as "highmap".
	 */
	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}

#else

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/* There is no highmap on 32-bit */
	return false;
}

#endif

/*
 * See set_mce_nospec().
 *
 * Machine check recovery code needs to change cache mode of poisoned pages to
 * UC to avoid speculative access logging another error. But passing the
 * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
 * speculative access. So we cheat and flip the top bit of the address. This
 * works fine for the code that updates the page tables. But at the end of the
 * process we need to flush the TLB and cache and the non-canonical address
 * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
 *
 * But in the common case we already have a canonical address. This code
 * will fix the top bit if needed and is a no-op otherwise.
 */
static inline unsigned long fix_addr(unsigned long addr)
{
#ifdef CONFIG_X86_64
	return (long)(addr << 1) >> 1;
#else
	return addr;
#endif
}
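
/*
 * Worked example (illustrative addresses): the shift pair above
 * sign-extends bit 62 into bit 63, which repairs a decoy address whose
 * top bit was flipped and leaves a canonical address untouched:
 *
 *	fix_addr(0x7fff888012345000) == 0xffff888012345000   (decoy fixed)
 *	fix_addr(0xffff888012345000) == 0xffff888012345000   (no-op)
 */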

static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
{
	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[idx];

		if (unlikely(PageHighMem(page)))
			return 0;

		return (unsigned long)page_address(page);
	}

	if (cpa->flags & CPA_ARRAY)
		return cpa->vaddr[idx];

	return *cpa->vaddr + idx * PAGE_SIZE;
}

/*
 * Flushing functions
 */

static void clflush_cache_range_opt(void *vaddr, unsigned int size)
{
	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
	void *vend = vaddr + size;

	if (p >= vend)
		return;

	for (; p < vend; p += clflush_size)
		clflushopt(p);
}

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
 * SFENCE to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	mb();
	clflush_cache_range_opt(vaddr, size);
	mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
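
/*
 * Example (illustrative sketch; "buf" and "len" are hypothetical): a
 * caller that has written data through a cacheable mapping and needs it
 * pushed out of the CPU caches, e.g. before a non-snooping device reads
 * the memory, can simply do:
 *
 *	clflush_cache_range(buf, len);
 *
 * The fences inside the helper order the flushes against surrounding
 * stores, so no extra barriers are needed around the call.
 */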

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_tlb(void *data)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	for (i = 0; i < cpa->numpages; i++)
		flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}

static void cpa_flush(struct cpa_data *data, int cache)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		cpa_flush_all(cache);
		return;
	}

	if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
		flush_tlb_all();
	else
		on_each_cpu(__cpa_flush_tlb, cpa, 1);

	if (!cache)
		return;

	mb();
	for (i = 0; i < cpa->numpages; i++) {
		unsigned long addr = __cpa_addr(cpa, i);
		unsigned int level;

		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
	}
	mb();
}

static bool overlaps(unsigned long r1_start, unsigned long r1_end,
		     unsigned long r2_start, unsigned long r2_end)
{
	return (r1_start <= r2_end && r1_end >= r2_start) ||
		(r2_start <= r1_end && r2_end >= r1_start);
}
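
/*
 * Both range ends are inclusive, so ranges that merely touch count as
 * overlapping. A quick worked example with made-up values:
 *
 *	overlaps(0x2000, 0x5fff, 0x5000, 0x8fff) == true   (partial overlap)
 *	overlaps(0x2000, 0x4fff, 0x5000, 0x8fff) == false  (adjacent only)
 */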

#ifdef CONFIG_PCI_BIOS
/*
 * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
 * based config access (CONFIG_PCI_GOBIOS) support.
 */
#define BIOS_PFN	PFN_DOWN(BIOS_BEGIN)
#define BIOS_PFN_END	PFN_DOWN(BIOS_END - 1)

static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
		return _PAGE_NX;
	return 0;
}
#else
static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	return 0;
}
#endif

/*
 * The .rodata section needs to be read-only. Using the pfn catches all
 * aliases. This also includes __ro_after_init, so do not enforce until
 * kernel_set_to_readonly is true.
 */
static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
{
	unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));

	/*
	 * Note: __end_rodata is page aligned and not inclusive, so
	 * subtract 1 to get the last enforced PFN in the rodata area.
	 */
	epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;

	if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
		return _PAGE_RW;
	return 0;
}

/*
 * Protect kernel text against becoming non executable by forbidding
 * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext)
 * out of which the kernel actually executes. Do not protect the low
 * mapping.
 *
 * This does not cover __inittext since that is gone after boot.
 */
static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
{
	unsigned long t_end = (unsigned long)_etext - 1;
	unsigned long t_start = (unsigned long)_text;

	if (overlaps(start, end, t_start, t_end))
		return _PAGE_NX;
	return 0;
}

#if defined(CONFIG_X86_64)
/*
 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
 * the kernel text mappings for the large page aligned text and rodata
 * sections will always be read-only. The kernel identity mappings
 * covering the holes caused by this alignment can be anything that the
 * user asks for.
 *
 * This will preserve the large page mappings for kernel text/data at no
 * extra cost.
 */
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
	unsigned long t_start = (unsigned long)_text;
	unsigned int level;

	if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
		return 0;
	/*
	 * Don't enforce the !RW mapping for the kernel text mapping, if
	 * the current mapping is already using small page mapping. No
	 * need to work hard to preserve large page mappings in this case.
	 *
	 * This also fixes the Linux Xen paravirt guest boot failure caused
	 * by unexpected read-only mappings for kernel identity
	 * mappings. In this paravirt guest case, the kernel text mapping
	 * and the kernel identity mapping share the same page-table pages,
	 * so the protections for kernel text and identity mappings have to
	 * be the same.
	 */
	if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
		return _PAGE_RW;
	return 0;
}
#else
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	return 0;
}
#endif

static inline bool conflicts(pgprot_t prot, pgprotval_t val)
{
	return (pgprot_val(prot) & ~val) != pgprot_val(prot);
}

static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
				  unsigned long start, unsigned long end,
				  unsigned long pfn, const char *txt)
{
	static const char *lvltxt[] = {
		[CPA_CONFLICT]	= "conflict",
		[CPA_PROTECT]	= "protect",
		[CPA_DETECT]	= "detect",
	};

	if (warnlvl > cpa_warn_level || !conflicts(prot, val))
		return;

	pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
		lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
		(unsigned long long)val);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
					  unsigned long pfn, unsigned long npg,
					  unsigned long lpsize, int warnlvl)
{
	pgprotval_t forbidden, res;
	unsigned long end;

	/*
	 * There is no point in checking RW/NX conflicts when the requested
	 * mapping is setting the page !PRESENT.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		return prot;

	/* Operate on the virtual address */
	end = start + npg * PAGE_SIZE - 1;

	res = protect_kernel_text(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
	forbidden = res;

	/*
	 * Special case to preserve a large page. If the change spans the
	 * full large page mapping then there is no point in splitting it
	 * up. Happens with ftrace and is going to be removed once ftrace
	 * switched to text_poke().
	 */
	if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
		res = protect_kernel_text_ro(start, end);
		check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
		forbidden |= res;
	}

	/* Check the PFN directly */
	res = protect_pci_bios(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
	forbidden |= res;

	res = protect_rodata(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
	forbidden |= res;

	return __pgprot(pgprot_val(prot) & ~forbidden);
}
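
/*
 * Worked example (illustrative): a caller such as set_memory_nx() asks
 * for _PAGE_NX on a range that happens to overlap kernel text. In that
 * case protect_kernel_text() above returns _PAGE_NX as a forbidden bit,
 * check_conflict() may log a "Text NX" warning depending on the warn
 * level, and the final "prot & ~forbidden" strips _PAGE_NX from the
 * request so the text stays executable. Likewise, protect_rodata()
 * returns _PAGE_RW for rodata pfns, stripping RW from a stray
 * set_memory_rw() request.
 */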

/*
 * Validate strict W^X semantics.
 */
static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
				  unsigned long pfn, unsigned long npg)
{
	unsigned long end;

	/* Kernel text is rw at boot up */
	if (system_state == SYSTEM_BOOTING)
		return new;

	/*
	 * 32-bit has some unfixable W+X issues, like EFI code
	 * and writeable data being in the same page. Disable
	 * detection and enforcement there.
	 */
	if (IS_ENABLED(CONFIG_X86_32))
		return new;

	/* Only verify when NX is supported: */
	if (!(__supported_pte_mask & _PAGE_NX))
		return new;

	if (!((pgprot_val(old) ^ pgprot_val(new)) & (_PAGE_RW | _PAGE_NX)))
		return new;

	if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
		return new;

	end = start + npg * PAGE_SIZE - 1;
	WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
		  (unsigned long long)pgprot_val(old),
		  (unsigned long long)pgprot_val(new),
		  start, end, pfn);

	/*
	 * For now, allow all permission change attempts by returning the
	 * attempted permissions. This can 'return old' to actively
	 * refuse the permission change at a later time.
	 */
	return new;
}
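
/*
 * Worked example (illustrative): changing a page from RO+NX to RW with
 * NX cleared flips both _PAGE_RW and _PAGE_NX, and the new protection
 * ends up writable and executable ((new & (_PAGE_RW | _PAGE_NX)) ==
 * _PAGE_RW), so the WARN_ONCE above fires. A transition from RO+NX to
 * RW+NX keeps NX set and sails through without a warning.
 */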

/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
			     unsigned int *level)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	*level = PG_LEVEL_512G;
	if (p4d_large(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: the pud or pmd entry is returned either when it is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexistent mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);

static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
				  unsigned int *level)
{
	if (cpa->pgd)
		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
					     address, level);

	return lookup_address(address, level);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * or NULL if not present.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
		return NULL;

	return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems. The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);

	/*
	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
	 * make 32-bit PAE kernels work correctly.
	 */
	switch (level) {
	case PG_LEVEL_1G:
		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PUD_PAGE_MASK;
		break;
	case PG_LEVEL_2M:
		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PMD_PAGE_MASK;
		break;
	default:
		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
		offset = virt_addr & ~PAGE_MASK;
	}

	return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
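
/*
 * Example (illustrative sketch; "addr" is hypothetical): a caller that
 * only needs to know whether a kernel virtual address is currently
 * mapped, and at which page size, can do:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT))
 *		pr_info("mapped at level %u\n", level);
 *
 * To additionally translate to a physical address, including large-page
 * mappings, use slow_virt_to_phys() above rather than open-coding the
 * pfn/offset arithmetic.
 */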

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			p4d = p4d_offset(pgd, address);
			pud = pud_offset(p4d, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
{
	/*
	 * _PAGE_GLOBAL means "global page" for present PTEs.
	 * But, it is also used to indicate _PAGE_PROTNONE
	 * for non-present PTEs.
	 *
	 * This ensures that a _PAGE_GLOBAL PTE going from
	 * present to non-present is not confused as
	 * _PAGE_PROTNONE.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		pgprot_val(prot) &= ~_PAGE_GLOBAL;

	return prot;
}

static int __should_split_large_page(pte_t *kpte, unsigned long address,
				     struct cpa_data *cpa)
{
	unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
	pgprot_t old_prot, new_prot, req_prot, chk_prot;
	pte_t new_pte, *tmp;
	enum pg_level level;

	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte)
		return 1;

	switch (level) {
	case PG_LEVEL_2M:
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		old_pfn = pmd_pfn(*(pmd_t *)kpte);
		cpa_inc_2m_checked();
		break;
	case PG_LEVEL_1G:
		old_prot = pud_pgprot(*(pud_t *)kpte);
		old_pfn = pud_pfn(*(pud_t *)kpte);
		cpa_inc_1g_checked();
		break;
	default:
		return -EINVAL;
	}

	psize = page_level_size(level);
	pmask = page_level_mask(level);

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	lpaddr = (address + psize) & pmask;
	numpages = (lpaddr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 * Convert protection attributes to 4k-format, as cpa->mask* are set
	 * up accordingly.
	 */

	/* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
	req_prot = pgprot_large_2_4k(old_prot);

	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * req_prot is in format of 4k pages. It must be converted to large
	 * page format: the caching mode includes the PAT bit located at
	 * different bit positions in the two formats.
	 */
	req_prot = pgprot_4k_2_large(req_prot);
	req_prot = pgprot_clear_protnone_bits(req_prot);
	if (pgprot_val(req_prot) & _PAGE_PRESENT)
		pgprot_val(req_prot) |= _PAGE_PSE;

	/*
	 * old_pfn points to the large page base pfn. So we need to add the
	 * offset of the virtual address:
	 */
	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	/*
	 * Calculate the large page base address and the number of 4K pages
	 * in the large page
	 */
	lpaddr = address & pmask;
	numpages = psize >> PAGE_SHIFT;

	/*
	 * Sanity check that the existing mapping is correct versus the static
	 * protections. static_protections() guards against !PRESENT, so no
	 * extra conditional required here.
	 */
	chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
				      psize, CPA_CONFLICT);

	if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
		/*
		 * Split the large page and tell the split code to
		 * enforce static protections.
		 */
		cpa->force_static_prot = 1;
		return 1;
	}

	/*
	 * Optimization: If the requested pgprot is the same as the current
	 * pgprot, then the large page can be preserved and no updates are
	 * required independent of alignment and length of the requested
	 * range. The above already established that the current pgprot is
	 * correct, which in consequence makes the requested pgprot correct
	 * as well if it is the same. The static protection scan below will
	 * not come to a different conclusion.
	 */
	if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
		cpa_inc_lp_sameprot(level);
		return 0;
	}

	/*
	 * If the requested range does not cover the full page, split it up
	 */
	if (address != lpaddr || cpa->numpages != numpages)
		return 1;

	/*
	 * Check whether the requested pgprot is conflicting with a static
	 * protection requirement in the large page.
	 */
	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
				      psize, CPA_DETECT);

	new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);

	/*
	 * If there is a conflict, split the large page.
	 *
	 * There used to be a 4k wise evaluation trying really hard to
	 * preserve the large pages, but experimentation has shown that this
	 * does not help at all. There might be corner cases which would
	 * preserve one large page occasionally, but it's really not worth the
	 * extra code and cycles for the common case.
	 */
	if (pgprot_val(req_prot) != pgprot_val(new_prot))
		return 1;

	/* All checks passed. Update the large page mapping. */
	new_pte = pfn_pte(old_pfn, new_prot);
	__set_pmd_pte(kpte, address, new_pte);
	cpa->flags |= CPA_FLUSHTLB;
	cpa_inc_lp_preserved(level);
	return 0;
}

static int should_split_large_page(pte_t *kpte, unsigned long address,
				   struct cpa_data *cpa)
{
	int do_split;

	if (cpa->force_split)
		return 1;

	spin_lock(&pgd_lock);
	do_split = __should_split_large_page(kpte, address, cpa);
	spin_unlock(&pgd_lock);

	return do_split;
}
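
/*
 * Worked example for the cpa->numpages clamp in
 * __should_split_large_page() (illustrative numbers): with a 2M
 * mapping, psize = 0x200000. If the request starts 0x100000 into the
 * large page, lpaddr is the next 2M boundary and
 *
 *	numpages = (lpaddr - address) >> PAGE_SHIFT = 0x100000 >> 12 = 256
 *
 * so a request for, say, 1024 pages is clamped to the 256 pages that
 * still fit in this large page; the caller's loop handles the rest in
 * the next iteration.
 */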

static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
			  pgprot_t ref_prot, unsigned long address,
			  unsigned long size)
{
	unsigned int npg = PFN_DOWN(size);
	pgprot_t prot;

	/*
	 * If should_split_large_page() discovered an inconsistent mapping,
	 * remove the invalid protection in the split mapping.
	 */
	if (!cpa->force_static_prot)
		goto set;

	/* Hand in lpsize = 0 to enforce the protection mechanism */
	prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);

	if (pgprot_val(prot) == pgprot_val(ref_prot))
		goto set;

	/*
	 * If this is splitting a PMD, fix it up. PUD splits cannot be
	 * fixed trivially as that would require to rescan the newly
	 * installed PMD mappings after returning from split_large_page()
	 * so an eventual further split can allocate the necessary PTE
	 * pages. Warn for now and revisit it in case this actually
	 * happens.
	 */
	if (size == PAGE_SIZE)
		ref_prot = prot;
	else
		pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
set:
	set_pte(pte, pfn_pte(pfn, ref_prot));
}

static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
		   struct page *base)
{
	unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
	pte_t *pbase = (pte_t *)page_address(base);
	unsigned int i, level;
	pgprot_t ref_prot;
	pte_t *tmp;

	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte) {
		spin_unlock(&pgd_lock);
		return 1;
	}

	paravirt_alloc_pte(&init_mm, page_to_pfn(base));

	switch (level) {
	case PG_LEVEL_2M:
		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
		/*
		 * Clear PSE (aka _PAGE_PAT) and move
		 * PAT bit to correct position.
		 */
		ref_prot = pgprot_large_2_4k(ref_prot);
		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
		lpaddr = address & PMD_MASK;
		lpinc = PAGE_SIZE;
		break;

	case PG_LEVEL_1G:
		ref_prot = pud_pgprot(*(pud_t *)kpte);
		ref_pfn = pud_pfn(*(pud_t *)kpte);
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		lpaddr = address & PUD_MASK;
		lpinc = PMD_SIZE;
		/*
		 * Clear the PSE flags if the PRESENT flag is not set
		 * otherwise pmd_present/pmd_huge will return true
		 * even on a non present pmd.
		 */
		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
			pgprot_val(ref_prot) &= ~_PAGE_PSE;
		break;

	default:
		spin_unlock(&pgd_lock);
		return 1;
	}

	ref_prot = pgprot_clear_protnone_bits(ref_prot);

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = ref_pfn;
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
		split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);

	if (virt_addr_valid(address)) {
		unsigned long pfn = PFN_DOWN(__pa(address));

		if (pfn_range_is_mapped(pfn, pfn + 1))
			split_page_count(level);
	}

	/*
	 * Install the new, split up pagetable.
	 *
	 * We use the standard kernel pagetable protections for the new
	 * pagetable protections, the actual ptes set above control the
	 * primary protection behavior:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

	/*
	 * Do a global TLB flush after splitting the large page
	 * and before we do the actual change page attribute in the PTE.
	 *
	 * Without this, we violate the TLB application note, which says:
	 * "The TLBs may contain both ordinary and large-page
	 *  translations for a 4-KByte range of linear addresses. This
	 *  may occur if software modifies the paging structures so that
	 *  the page size used for the address range changes. If the two
	 *  translations differ with respect to page frame or attributes
	 *  (e.g., permissions), processor behavior is undefined and may
	 *  be implementation-specific."
	 *
	 * We do this global TLB flush inside the cpa_lock, so that we
	 * don't allow any other CPU with stale TLB entries to change a
	 * page attribute in parallel for an address that falls into the
	 * just-split large page entry.
	 */
	flush_tlb_all();
	spin_unlock(&pgd_lock);

	return 0;
}

static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
			    unsigned long address)
{
	struct page *base;

	if (!debug_pagealloc_enabled())
		spin_unlock(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	if (!debug_pagealloc_enabled())
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

	if (__split_large_page(cpa, kpte, address, base))
		__free_page(base);

	return 0;
}
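
/*
 * Split geometry, summarized (the values follow directly from the
 * constants above): a 2M split fills the new page with PTRS_PER_PTE ==
 * 512 PTEs, each advancing by one pfn (pfninc = 1) and 4K of address
 * (lpinc = PAGE_SIZE). A 1G split instead produces 512 PMD entries,
 * each still 2M-sized, so pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT = 512
 * and lpinc = PMD_SIZE.
 */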

static bool try_to_free_pte_page(pte_t *pte)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		if (!pte_none(pte[i]))
			return false;

	free_page((unsigned long)pte);
	return true;
}

static bool try_to_free_pmd_page(pmd_t *pmd)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_none(pmd[i]))
			return false;

	free_page((unsigned long)pmd);
	return true;
}

static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, start);

	while (start < end) {
		set_pte(pte, __pte(0));

		start += PAGE_SIZE;
		pte++;
	}

	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
		pmd_clear(pmd);
		return true;
	}
	return false;
}

static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
			      unsigned long start, unsigned long end)
{
	if (unmap_pte_range(pmd, start, end))
		if (try_to_free_pmd_page(pud_pgtable(*pud)))
			pud_clear(pud);
}

static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, start);

	/*
	 * Not on a 2MB page boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		__unmap_pmd_range(pud, pmd, start, pre_end);

		start = pre_end;
		pmd++;
	}

	/*
	 * Try to unmap in 2M chunks.
	 */
	while (end - start >= PMD_SIZE) {
		if (pmd_large(*pmd))
			pmd_clear(pmd);
		else
			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

		start += PMD_SIZE;
		pmd++;
	}

	/*
	 * 4K leftovers?
	 */
	if (start < end)
		return __unmap_pmd_range(pud, pmd, start, end);

	/*
	 * Try again to free the PMD page if we haven't succeeded above.
	 */
	if (!pud_none(*pud))
		if (try_to_free_pmd_page(pud_pgtable(*pud)))
			pud_clear(pud);
}

static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
{
	pud_t *pud = pud_offset(p4d, start);

	/*
	 * Not on a GB page boundary?
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		unmap_pmd_range(pud, start, pre_end);

		start = pre_end;
		pud++;
	}

	/*
	 * Try to unmap in 1G chunks.
	 */
	while (end - start >= PUD_SIZE) {

		if (pud_large(*pud))
			pud_clear(pud);
		else
			unmap_pmd_range(pud, start, start + PUD_SIZE);

		start += PUD_SIZE;
		pud++;
	}

	/*
	 * 2M leftovers?
	 */
	if (start < end)
		unmap_pmd_range(pud, start, end);

	/*
	 * No need to try to free the PUD page because we'll free it in
	 * populate_pgd's error path
	 */
}

static int alloc_pte_page(pmd_t *pmd)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
	if (!pte)
		return -1;

	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	return 0;
}

static int alloc_pmd_page(pud_t *pud)
{
	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
	if (!pmd)
		return -1;

	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	return 0;
}

static void populate_pte(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, start);

	pgprot = pgprot_clear_protnone_bits(pgprot);

	while (num_pages-- && start < end) {
		set_pte(pte, pfn_pte(cpa->pfn, pgprot));

		start += PAGE_SIZE;
		cpa->pfn++;
		pte++;
	}
}

static long populate_pmd(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
	long cur_pages = 0;
	pmd_t *pmd;
	pgprot_t pmd_pgprot;

	/*
	 * Not on a 2M boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;

		pre_end = min_t(unsigned long, pre_end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(unsigned int, num_pages, cur_pages);

		/*
		 * Need a PTE page?
		 */
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);

		start = pre_end;
	}

	/*
	 * We mapped them all?
	 */
	if (num_pages == cur_pages)
		return cur_pages;

	pmd_pgprot = pgprot_4k_2_large(pgprot);

	while (end - start >= PMD_SIZE) {

		/*
		 * We cannot use a 1G page so allocate a PMD page if needed.
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		pmd = pmd_offset(pud, start);

		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
						canon_pgprot(pmd_pgprot))));

		start += PMD_SIZE;
		cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
		cur_pages += PMD_SIZE >> PAGE_SHIFT;
	}

	/*
	 * Map trailing 4K pages.
	 */
	if (start < end) {
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, end, num_pages - cur_pages,
			     pmd, pgprot);
	}
	return num_pages;
}
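
/*
 * The head/body/tail pattern above, with illustrative numbers: mapping
 * 1536 pages (6M) starting 1M past a 2M boundary first fills 256 4K
 * PTEs up to the next 2M boundary, then installs two 2M PMD entries,
 * and finally maps the remaining 256 pages with 4K PTEs again. The
 * same structure repeats one level up in populate_pud() below.
 */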

static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
			pgprot_t pgprot)
{
	pud_t *pud;
	unsigned long end;
	long cur_pages = 0;
	pgprot_t pud_pgprot;

	end = start + (cpa->numpages << PAGE_SHIFT);

	/*
	 * Not on a Gb page boundary? => map everything up to it with
	 * smaller pages.
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long pre_end;
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

		pre_end = min_t(unsigned long, end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);

		pud = pud_offset(p4d, start);

		/*
		 * Need a PMD page?
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
					 pud, pgprot);
		if (cur_pages < 0)
			return cur_pages;

		start = pre_end;
	}

	/* We mapped them all? */
	if (cpa->numpages == cur_pages)
		return cur_pages;

	pud = pud_offset(p4d, start);
	pud_pgprot = pgprot_4k_2_large(pgprot);

	/*
	 * Map everything starting from the Gb boundary, possibly with 1G pages
	 */
	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
						canon_pgprot(pud_pgprot))));

		start += PUD_SIZE;
		cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
		cur_pages += PUD_SIZE >> PAGE_SHIFT;
		pud++;
	}

	/* Map trailing leftover */
	if (start < end) {
		long tmp;

		pud = pud_offset(p4d, start);
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
				   pud, pgprot);
		if (tmp < 0)
			return cur_pages;

		cur_pages += tmp;
	}
	return cur_pages;
}

/*
 * Restrictions for the kernel page table do not necessarily apply when
 * mapping in an alternate PGD.
 */
static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
{
	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
	pud_t *pud = NULL;	/* shut up gcc */
	p4d_t *p4d;
	pgd_t *pgd_entry;
	long ret;

	pgd_entry = cpa->pgd + pgd_index(addr);

	if (pgd_none(*pgd_entry)) {
		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
		if (!p4d)
			return -1;

		set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
	}

	/*
	 * Allocate a PUD page and hand it down for mapping.
	 */
	p4d = p4d_offset(pgd_entry, addr);
	if (p4d_none(*p4d)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			return -1;

		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}

	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);

	ret = populate_pud(cpa, addr, p4d, pgprot);
	if (ret < 0) {
		/*
		 * Leave the PUD page in place in case some other CPU or thread
		 * already found it, but remove any useless entries we just
		 * added to it.
		 */
		unmap_pud_range(p4d, addr,
				addr + (cpa->numpages << PAGE_SHIFT));
		return ret;
	}

	cpa->numpages = ret;
	return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
			       int primary)
{
	if (cpa->pgd) {
		/*
		 * Right now, we only execute this code path when mapping
		 * the EFI virtual memory map regions, no other users
		 * provide a ->pgd value. This may change in the future.
		 */
		return populate_pgd(cpa, vaddr);
	}

	/*
	 * Ignore all non primary paths.
	 */
	if (!primary) {
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
	 * to have holes.
	 * Also set numpages to '1' indicating that we processed cpa req for
	 * one virtual address page and its pfn. TBD: numpages can be set based
	 * on the initial value and the level returned by lookup_address().
	 */
	if (within(vaddr, PAGE_OFFSET,
		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
		cpa->numpages = 1;
		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
		return 0;

	} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
		/* Faults in the highmap are OK, so do not warn: */
		return -EFAULT;
	} else {
		WARN(1, KERN_WARNING "CPA: called for zero pte. "
			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
			*cpa->vaddr);

		return -EFAULT;
	}
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

	address = __cpa_addr(cpa, cpa->curpage);
repeat:
	kpte = _lookup_address_cpa(cpa, address, &level);
	if (!kpte)
		return __cpa_process_fault(cpa, address, primary);

	old_pte = *kpte;
	if (pte_none(old_pte))
		return __cpa_process_fault(cpa, address, primary);

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t old_prot = pte_pgprot(old_pte);
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		cpa_inc_4k_install();
		/* Hand in lpsize = 0 to enforce the protection mechanism */
		new_prot = static_protections(new_prot, address, pfn, 1, 0,
					      CPA_PROTECT);

		new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);

		new_prot = pgprot_clear_protnone_bits(new_prot);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, new_prot);
		cpa->pfn = pfn;
		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flags |= CPA_FLUSHTLB;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = should_split_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and the CPA_FLUSHTLB flag have been
	 * updated in __should_split_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(cpa, kpte, address);
	if (!err)
		goto repeat;

	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
	unsigned long vaddr;
	int ret;

	if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	vaddr = __cpa_addr(cpa, cpa->curpage);
	if (!(within(vaddr, PAGE_OFFSET,
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

		alias_cpa = *cpa;
		alias_cpa.vaddr = &laddr;
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
		alias_cpa.curpage = 0;

		cpa->force_flush_all = 1;

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
		if (ret)
			return ret;
	}

#ifdef CONFIG_X86_64
	/*
	 * If the primary call didn't touch the high mapping already
	 * and the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
	    __cpa_pfn_in_highmap(cpa->pfn)) {
		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
					       __START_KERNEL_map - phys_base;
		alias_cpa = *cpa;
		alias_cpa.vaddr = &temp_cpa_vaddr;
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
		alias_cpa.curpage = 0;

		cpa->force_flush_all = 1;
		/*
		 * The high mapping range is imprecise, so ignore the
		 * return value.
		 */
		__change_page_attr_set_clr(&alias_cpa, 0);
	}
#endif

	return 0;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	unsigned long numpages = cpa->numpages;
	unsigned long rempages = numpages;
	int ret = 0;

	while (rempages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = rempages;
		/* for array changes, we can't use large page */
		if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
			cpa->numpages = 1;

		if (!debug_pagealloc_enabled())
			spin_lock(&cpa_lock);
		ret = __change_page_attr(cpa, checkalias);
		if (!debug_pagealloc_enabled())
			spin_unlock(&cpa_lock);
		if (ret)
			goto out;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				goto out;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > rempages || !cpa->numpages);
		rempages -= cpa->numpages;
		cpa->curpage += cpa->numpages;
	}

out:
	/* Restore the original numpages */
	cpa->numpages = numpages;
	return ret;
}
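
/*
 * Example of the alias handling above (illustrative): changing the
 * attributes of a page mapped outside the direct map, e.g. via a
 * vmalloc address, also rewrites the direct-map alias (and, on 64-bit,
 * possibly the highmap alias) of the underlying pfn, so the change is
 * reflected in every kernel mapping of that physical page.
 */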

static int change_page_attr_set_clr(unsigned long *addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split, int in_flag,
				    struct page **pages)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;

	memset(&cpa, 0, sizeof(cpa));

	/*
	 * Check if we are requested to set an unsupported feature.
	 * Clearing non-supported features is OK.
	 */
	mask_set = canon_pgprot(mask_set);

	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (in_flag & CPA_ARRAY) {
		int i;
		for (i = 0; i < numpages; i++) {
			if (addr[i] & ~PAGE_MASK) {
				addr[i] &= PAGE_MASK;
				WARN_ON_ONCE(1);
			}
		}
	} else if (!(in_flag & CPA_PAGES_ARRAY)) {
		/*
		 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
		 * No need to check in that case
		 */
		if (*addr & ~PAGE_MASK) {
			*addr &= PAGE_MASK;
			/*
			 * People should not be passing in unaligned addresses:
			 */
			WARN_ON_ONCE(1);
		}
	}

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();

	vm_unmap_aliases();

	cpa.vaddr = addr;
	cpa.pages = pages;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flags = 0;
	cpa.curpage = 0;
	cpa.force_split = force_split;

	if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
		cpa.flags |= in_flag;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
	/* Has caller explicitly disabled alias checking? */
	if (in_flag & CPA_NO_CHECK_ALIAS)
		checkalias = 0;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!(cpa.flags & CPA_FLUSHTLB))
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = !!pgprot2cachemode(mask_set);

	/*
	 * On error; flush everything to be sure.
	 */
	if (ret) {
		cpa_flush_all(cache);
		goto out;
	}

	cpa_flush(&cpa, cache);
out:
	return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
				       pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
		(array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
					 pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
		(array ? CPA_ARRAY : 0), NULL);
}
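
/*
 * Everything below is a thin wrapper around the two helpers above. For
 * example, set_memory_ro() further down expands to
 *
 *	change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
 *
 * i.e. mask_clr = _PAGE_RW, mask_set = 0, no forced split, no array.
 */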

static inline int cpa_set_pages_array(struct page **pages, int numpages,
				      pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
		CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
					pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
		CPA_PAGES_ARRAY, pages);
}

/*
 * __set_memory_prot is an internal helper for callers that have been passed
 * a pgprot_t value from upper layers and a reservation has already been taken.
 * If you want to set the pgprot to a specific page protection, use the
 * set_memory_xx() functions.
 */
int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot)
{
	return change_page_attr_set_clr(&addr, numpages, prot,
					__pgprot(~pgprot_val(prot)), 0, 0,
					NULL);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
	/*
	 * for now UC MINUS. see comments in ioremap()
	 * If you really need strong UC use ioremap_uc(), but note
	 * that you cannot override IO areas with set_memory_*() as
	 * these helpers cannot work with IO memory.
	 */
	return change_page_attr_set(&addr, numpages,
				    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
				    0);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	int ret;

	/*
	 * for now UC MINUS. see comments in ioremap()
	 */
	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
	if (ret)
		goto out_err;

	ret = _set_memory_uc(addr, numpages);
	if (ret)
		goto out_free;

	return 0;

out_free:
	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
	return ret;
}
EXPORT_SYMBOL(set_memory_uc);

int _set_memory_wc(unsigned long addr, int numpages)
{
	int ret;

	ret = change_page_attr_set(&addr, numpages,
				   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
				   0);
	if (!ret) {
		ret = change_page_attr_set_clr(&addr, numpages,
					       cachemode2pgprot(_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, 0, NULL);
	}
	return ret;
}

int set_memory_wc(unsigned long addr, int numpages)
{
	int ret;

	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
		_PAGE_CACHE_MODE_WC, NULL);
	if (ret)
		return ret;

	ret = _set_memory_wc(addr, numpages);
	if (ret)
		memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

	return ret;
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wt(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages,
				    cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
}

int _set_memory_wb(unsigned long addr, int numpages)
{
	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	return change_page_attr_clear(&addr, numpages,
				      __pgprot(_PAGE_CACHE_MASK), 0);
}

int set_memory_wb(unsigned long addr, int numpages)
{
	int ret;

	ret = _set_memory_wb(addr, numpages);
	if (ret)
		return ret;

	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
	return 0;
}
EXPORT_SYMBOL(set_memory_wb);
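
/*
 * Typical usage sketch (illustrative; "vaddr" is a hypothetical
 * direct-map address of a buffer the caller owns): a driver that needs
 * an uncached view of ordinary RAM pairs the setter with set_memory_wb()
 * to restore the default cache mode before freeing the memory:
 *
 *	ret = set_memory_uc(vaddr, numpages);
 *	...
 *	set_memory_wb(vaddr, numpages);
 *
 * set_memory_uc()/set_memory_wc() also reserve the range in the PAT
 * memtype tree; set_memory_wb() drops that reservation again.
 */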

/* Prevent speculative access to a page by marking it not-present */
#ifdef CONFIG_X86_64
int set_mce_nospec(unsigned long pfn)
{
	unsigned long decoy_addr;
	int rc;

	/* SGX pages are not in the 1:1 map */
	if (arch_is_platform_page(pfn << PAGE_SHIFT))
		return 0;
	/*
	 * We would like to just call:
	 *	set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
	 * but doing that would radically increase the odds of a
	 * speculative access to the poison page because we'd have
	 * the virtual address of the kernel 1:1 mapping sitting
	 * around in registers.
	 * Instead we get tricky.  We create a non-canonical address
	 * that looks just like the one we want, but has bit 63 flipped.
	 * This relies on set_memory_XX() properly sanitizing any __pa()
	 * results with __PHYSICAL_MASK or PTE_PFN_MASK.
	 */
	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	rc = set_memory_np(decoy_addr, 1);
	if (rc)
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
	return rc;
}

static int set_memory_p(unsigned long *addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

/* Restore full speculative operation to the pfn. */
int clear_mce_nospec(unsigned long pfn)
{
	unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);

	return set_memory_p(&addr, 1);
}
EXPORT_SYMBOL_GPL(clear_mce_nospec);
#endif /* CONFIG_X86_64 */
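
/*
 * Worked example for the decoy trick (illustrative pfn; the direct map
 * base moves with KASLR): with the default PAGE_OFFSET of
 * 0xffff888000000000, pfn 0x12345 yields
 *
 *	decoy_addr = 0x12345000 + 0x7fff888000000000
 *	           = 0x7fff888012345000
 *
 * i.e. the 1:1 mapping address with bit 63 cleared. The page table walk
 * works on it unchanged, and fix_addr() above restores the canonical
 * form before INVLPG/CLFLUSH.
 */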
/*
 * __set_memory_enc_pgtable() is used for hypervisors that are informed
 * about the "encryption" status via page tables.
 */
static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
{
	pgprot_t empty = __pgprot(0);
	struct cpa_data cpa;
	int ret;

	/* Should not be working on unaligned addresses */
	if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
		addr &= PAGE_MASK;

	memset(&cpa, 0, sizeof(cpa));
	cpa.vaddr = &addr;
	cpa.numpages = numpages;
	cpa.mask_set = enc ? pgprot_encrypted(empty) : pgprot_decrypted(empty);
	cpa.mask_clr = enc ? pgprot_decrypted(empty) : pgprot_encrypted(empty);
	cpa.pgd = init_mm.pgd;

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();
	vm_unmap_aliases();

	/* Flush the caches as needed before changing the encryption attribute. */
	if (x86_platform.guest.enc_tlb_flush_required(enc))
		cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());

	/* Notify hypervisor that we are about to set/clr encryption attribute. */
	x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);

	ret = __change_page_attr_set_clr(&cpa, 1);

	/*
	 * After changing the encryption attribute, we need to flush TLBs again
	 * in case any speculative TLB caching occurred (but no need to flush
	 * caches again). We could just use cpa_flush_all(), but in case TLB
	 * flushing gets optimized in the cpa_flush() path use the same logic
	 * as above.
	 */
	cpa_flush(&cpa, 0);

	/* Notify hypervisor that we have successfully set/clr encryption attribute. */
	if (!ret) {
		if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
			ret = -EIO;
	}

	return ret;
}

static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
	/* Hyper-V isolation VMs flip host visibility instead of a PTE bit. */
	if (hv_is_isolation_supported())
		return hv_set_mem_host_visibility(addr, numpages, !enc);

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return __set_memory_enc_pgtable(addr, numpages, enc);

	return 0;
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
	return __set_memory_enc_dec(addr, numpages, true);
}
EXPORT_SYMBOL_GPL(set_memory_encrypted);

int set_memory_decrypted(unsigned long addr, int numpages)
{
	return __set_memory_enc_dec(addr, numpages, false);
}
EXPORT_SYMBOL_GPL(set_memory_decrypted);

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);
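/*
 * Illustrative sketch (not compiled; names are hypothetical): in a
 * confidential-computing guest, a buffer that must be visible to the
 * hypervisor (e.g. for virtio-style I/O) is switched with
 * set_memory_decrypted() and flipped back before it is reused as
 * guest-private memory.
 */
#if 0	/* example only */
static int example_share_with_host(unsigned long vaddr, int numpages)
{
	int ret;

	/* Clear the encryption attribute so the host can read the pages. */
	ret = set_memory_decrypted(vaddr, numpages);
	if (ret)
		return ret;

	/* ... exchange data with the hypervisor ... */

	/* Make the pages guest-private again. */
	return set_memory_encrypted(vaddr, numpages);
}
#endif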
static int _set_pages_array(struct page **pages, int numpages,
			    enum page_cache_mode new_type)
{
	unsigned long start;
	unsigned long end;
	enum page_cache_mode set_type;
	int i;
	int free_idx;
	int ret;

	for (i = 0; i < numpages; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		if (memtype_reserve(start, end, new_type, NULL))
			goto err_out;
	}

	/* If WC, set to UC- first and then WC */
	set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
				_PAGE_CACHE_MODE_UC_MINUS : new_type;

	ret = cpa_set_pages_array(pages, numpages,
				  cachemode2pgprot(set_type));
	if (!ret && new_type == _PAGE_CACHE_MODE_WC)
		ret = change_page_attr_set_clr(NULL, numpages,
					       cachemode2pgprot(
						_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, CPA_PAGES_ARRAY, pages);
	if (ret)
		goto err_out;
	return 0; /* Success */
err_out:
	/* Roll back every memtype reservation taken before the failure. */
	free_idx = i;
	for (i = 0; i < free_idx; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		memtype_free(start, end);
	}
	return -EINVAL;
}

int set_pages_array_uc(struct page **pages, int numpages)
{
	return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int numpages)
{
	return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
}
EXPORT_SYMBOL(set_pages_array_wc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_array_wb(struct page **pages, int numpages)
{
	int retval;
	unsigned long start;
	unsigned long end;
	int i;

	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	retval = cpa_clear_pages_array(pages, numpages,
				       __pgprot(_PAGE_CACHE_MASK));
	if (retval)
		return retval;

	for (i = 0; i < numpages; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		memtype_free(start, end);
	}

	return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}
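/*
 * Illustrative sketch (not compiled; names are hypothetical): the array
 * variants above are meant for batches of non-contiguous pages, e.g. a
 * GPU driver mapping scattered pages as write-combining and restoring
 * write-back on teardown.
 */
#if 0	/* example only */
static int example_map_scattered_wc(struct page **pages, int numpages)
{
	int ret;

	/* Per-page memtype reservations plus a batched attribute change. */
	ret = set_pages_array_wc(pages, numpages);
	if (ret)
		return ret;

	/* ... hand the pages to the device ... */

	/* Restore WB and drop the per-page reservations. */
	return set_pages_array_wb(pages, numpages);
}
#endif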
static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.pgd = NULL,
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0),
				.flags = 0};

	/*
	 * No alias checking needed for setting present flag. Otherwise,
	 * we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.pgd = NULL,
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.flags = 0};

	/*
	 * No alias checking needed for setting not present flag. Otherwise,
	 * we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

int set_direct_map_invalid_noflush(struct page *page)
{
	return __set_pages_np(page, 1);
}

int set_direct_map_default_noflush(struct page *page)
{
	return __set_pages_p(page, 1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages for identity mappings are not used at boot time
	 * and hence no memory allocations during large page split.
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs, but that can
	 * deadlock, so flush only the current CPU's TLB instead.
	 * Preemption needs to be disabled around __flush_tlb_all() due to
	 * CR3 reload in __native_flush_tlb().
	 */
	preempt_disable();
	__flush_tlb_all();
	preempt_enable();

	arch_flush_lazy_mmu_mode();
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}

int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
				   unsigned numpages, unsigned long page_flags)
{
	int retval = -EINVAL;

	struct cpa_data cpa = {
		.vaddr = &address,
		.pfn = pfn,
		.pgd = pgd,
		.numpages = numpages,
		.mask_set = __pgprot(0),
		.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
		.flags = 0,
	};

	WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");

	if (!(__supported_pte_mask & _PAGE_NX))
		goto out;

	if (!(page_flags & _PAGE_ENC))
		cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);

	cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);

	retval = __change_page_attr_set_clr(&cpa, 0);
	__flush_tlb_all();

out:
	return retval;
}
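/*
 * Illustrative sketch (not compiled; the function name is hypothetical):
 * removing a page from the direct map and verifying the change. The
 * *_noflush helpers leave TLB flushing to the caller, so a flush is
 * issued explicitly here.
 */
#if 0	/* example only */
static int example_hide_page(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	int ret;

	/* Clear _PAGE_PRESENT and _PAGE_RW in the 1:1 mapping. */
	ret = set_direct_map_invalid_noflush(page);
	if (ret)
		return ret;

	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	WARN_ON(kernel_page_present(page));
	return 0;
}
#endif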
/*
 * __flush_tlb_all() flushes mappings only on the current CPU, and hence this
 * function shouldn't be used in an SMP environment. Presently, it's used only
 * during boot (way before smp_init()) by the EFI subsystem and hence is ok.
 */
int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
				     unsigned long numpages)
{
	int retval;

	/*
	 * The typical sequence for unmapping is to find a pte through
	 * lookup_address_in_pgd() (ideally, it should never return NULL because
	 * the address is already mapped) and change its protections. As pfn is
	 * the *target* of a mapping, it's not useful while unmapping.
	 */
	struct cpa_data cpa = {
		.vaddr = &address,
		.pfn = 0,
		.pgd = pgd,
		.numpages = numpages,
		.mask_set = __pgprot(0),
		.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
		.flags = 0,
	};

	WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");

	retval = __change_page_attr_set_clr(&cpa, 0);
	__flush_tlb_all();

	return retval;
}

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "cpa-test.c"
#endif