/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while(0)

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val)	do { (void)(val); } while(0)

#endif /* CONFIG_XEN_DEBUG_FS */


/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static bool xen_iomap_pte(pte_t pte)
{
	return pte_flags(pte) & _PAGE_IOMAP;
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = arbitrary_virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
{
	xen_set_domain_pte(ptep, pteval, DOMID_IO);
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
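	/* Queue the update as part of the current multicall batch. */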
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (xen_iomap_pte(pteval)) {
		xen_set_iomap_pte(ptep, pteval);
		goto out;
	}

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:	return;
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * It is paramount to do this test _after_ the
			 * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
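			 * (i.e. an invalid entry would otherwise be
			 *  mistaken for an identity frame.)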
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is a MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}

	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      UC     UC-
 * 7    PAT PCD PWT      UC       UC     UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);

	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.)
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}

	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

#ifdef CONFIG_XEN_DEBUG
pte_t xen_make_pte_debug(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
	phys_addr_t other_addr;
	bool io_page = false;
	pte_t _pte;

	if (pte & _PAGE_IOMAP)
		io_page = true;

	_pte = xen_make_pte(pte);

	if (!addr)
		return _pte;

	if (io_page &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
		WARN(addr != other_addr,
			"0x%lx is using VM_IO, but it is 0x%lx!\n",
			(unsigned long)addr, (unsigned long)other_addr);
	} else {
		pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
		other_addr = (_pte.pte & PTE_PFN_MASK);
		WARN((addr == other_addr) && (!io_page) && (!iomap_set),
			"0x%lx is missing VM_IO (and wasn't fixed)!\n",
			(unsigned long)addr);
	}

	return _pte;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
#endif

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
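/* Four-level pagetables: pud conversions, plus handling of the separate
   user pgd that sits alongside the kernel one. */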
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * end up making a zero-sized hole, so this is a no-op.
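	 * The hole runs from USER_LIMIT up to PAGE_OFFSET at the pgd
	 * level; hole_low and hole_high below are its pgd indices.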
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
949 * 950 * If we're using split pte locks, we can't hold the 951 * entire pagetable's worth of locks during the 952 * traverse, because we may wrap the preempt count (8 953 * bits). The solution is to mark RO and pin each PTE 954 * page while holding the lock. This means the number 955 * of locks we end up holding is never more than a 956 * batch size (~32 entries, at present). 957 * 958 * If we're not using split pte locks, we needn't pin 959 * the PTE pages independently, because we're 960 * protected by the overall pagetable lock. 961 */ 962 ptl = NULL; 963 if (level == PT_PTE) 964 ptl = xen_pte_lock(page, mm); 965 966 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, 967 pfn_pte(pfn, PAGE_KERNEL_RO), 968 level == PT_PGD ? UVMF_TLB_FLUSH : 0); 969 970 if (ptl) { 971 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn); 972 973 /* Queue a deferred unlock for when this batch 974 is completed. */ 975 xen_mc_callback(xen_pte_unlock, ptl); 976 } 977 } 978 979 return flush; 980 } 981 982 /* This is called just after a mm has been created, but it has not 983 been used yet. We need to make sure that its pagetable is all 984 read-only, and can be pinned. */ 985 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) 986 { 987 xen_mc_batch(); 988 989 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { 990 /* re-enable interrupts for flushing */ 991 xen_mc_issue(0); 992 993 kmap_flush_unused(); 994 995 xen_mc_batch(); 996 } 997 998 #ifdef CONFIG_X86_64 999 { 1000 pgd_t *user_pgd = xen_get_user_pgd(pgd); 1001 1002 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); 1003 1004 if (user_pgd) { 1005 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD); 1006 xen_do_pin(MMUEXT_PIN_L4_TABLE, 1007 PFN_DOWN(__pa(user_pgd))); 1008 } 1009 } 1010 #else /* CONFIG_X86_32 */ 1011 #ifdef CONFIG_X86_PAE 1012 /* Need to make sure unshared kernel PMD is pinnable */ 1013 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), 1014 PT_PMD); 1015 #endif 1016 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd))); 1017 #endif /* CONFIG_X86_64 */ 1018 xen_mc_issue(0); 1019 } 1020 1021 static void xen_pgd_pin(struct mm_struct *mm) 1022 { 1023 __xen_pgd_pin(mm, mm->pgd); 1024 } 1025 1026 /* 1027 * On save, we need to pin all pagetables to make sure they get their 1028 * mfns turned into pfns. Search the list for any unpinned pgds and pin 1029 * them (unpinned pgds are not currently in use, probably because the 1030 * process is under construction or destruction). 1031 * 1032 * Expected to be called in stop_machine() ("equivalent to taking 1033 * every spinlock in the system"), so the locking doesn't really 1034 * matter all that much. 1035 */ 1036 void xen_mm_pin_all(void) 1037 { 1038 struct page *page; 1039 1040 spin_lock(&pgd_lock); 1041 1042 list_for_each_entry(page, &pgd_list, lru) { 1043 if (!PagePinned(page)) { 1044 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page)); 1045 SetPageSavePinned(page); 1046 } 1047 } 1048 1049 spin_unlock(&pgd_lock); 1050 } 1051 1052 /* 1053 * The init_mm pagetable is really pinned as soon as its created, but 1054 * that's before we have page structures to store the bits. So do all 1055 * the book-keeping now. 
 */
static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while
		 * the pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it.
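   drop_other_mm_ref() below runs on each such cpu and switches any
   stale cr3 reference over to swapper_pg_dir.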
   */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = percpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

static void xen_post_allocator_init(void);

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	xen_setup_shared_info();
	xen_post_allocator_init();
}

static void xen_write_cr2(unsigned long cr2)
{
	percpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return percpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return percpu_read(xen_vcpu_info.arch.cr2);
}

static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long va)
{
	struct {
		struct mmuext_op op;
		DECLARE_BITMAP(mask, NR_CPUS);
	} *args;
	struct multicall_space mcs;

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUS. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return percpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	percpu_write(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn;

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	mcs = __xen_mc_entry(sizeof(*op));

	op = mcs.args;
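	/* Load a new kernel base pointer, or (on 64-bit) a new user one. */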
	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	if (kernel) {
		percpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();  /* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to IPIs */
	percpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}

static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

#ifdef CONFIG_X86_32
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));
#endif

	/*
	 * If the new pfn is within the range of the newly allocated
	 * kernel pagetable, and it isn't being mapped into an
	 * early_ioremap fixmap slot as a freshly allocated page, make sure
	 * it is RO.
	 */
	if (((!is_early_ioremap_ptep(ptep) &&
	      pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
	    (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
		pte = pte_wrprotect(pte);

	return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned.
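   Newly allocated pagetable pages are therefore made read-only (and
   pte pages pinned) straight away.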
   */
static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static __init void xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static __init void xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

/* Set the page permissions on identity-mapped pages */
static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
				      PAGE_SIZE);

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == LEVEL1_IDENT_ENTRIES)
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}

void __init xen_setup_machphys_mapping(void)
{
	struct xen_machphys_mapping mapping;
	unsigned long machine_to_phys_nr_ents;

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr_ents = mapping.max_mfn + 1;
	} else {
		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
	}
	machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working.  We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
					 unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;

	/* max_pfn_mapped is the last pfn mapped in the initial memory
	 * mappings. Considering that on Xen after the kernel mappings we
	 * have the mappings of some pages that don't exist in pfn space, we
	 * set max_pfn_mapped to the last real pfn mapped. */
	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	convert_pfn_mfn(init_level4_pgt);
	convert_pfn_mfn(level3_ident_pgt);
	convert_pfn_mfn(level3_kernel_pgt);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	/* Set up identity map */
	xen_map_identity_early(level2_ident_pgt, max_pfn);

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/* Switch over */
	pgd = init_level4_pgt;

	/*
	 * At this stage there can be no user pgd, and no page
	 * structure to attach it to, so make sure we just set kernel
	 * pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(pgd));
	xen_mc_issue(PARAVIRT_LAZY_CPU);

	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
				   __pa(xen_start_info->pt_base +
					xen_start_info->nr_pt_frames * PAGE_SIZE),
				   "XEN PAGETABLES");

	return pgd;
}
#else	/* !CONFIG_X86_64 */
static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);

static __init void xen_write_cr3_init(unsigned long cr3)
{
	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));

	BUG_ON(read_cr3() != __pa(initial_page_table));
	BUG_ON(cr3 != __pa(swapper_pg_dir));

	/*
	 * We are switching to swapper_pg_dir for the first time (from
	 * initial_page_table) and therefore need to mark that page
	 * read-only and then pin it.
	 *
	 * Xen disallows sharing of kernel PMDs for PAE
	 * guests. Therefore we must copy the kernel PMD from
	 * initial_page_table into a new kernel PMD to be used in
	 * swapper_pg_dir.
1843 */ 1844 swapper_kernel_pmd = 1845 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); 1846 memcpy(swapper_kernel_pmd, initial_kernel_pmd, 1847 sizeof(pmd_t) * PTRS_PER_PMD); 1848 swapper_pg_dir[KERNEL_PGD_BOUNDARY] = 1849 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); 1850 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); 1851 1852 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); 1853 xen_write_cr3(cr3); 1854 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); 1855 1856 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, 1857 PFN_DOWN(__pa(initial_page_table))); 1858 set_page_prot(initial_page_table, PAGE_KERNEL); 1859 set_page_prot(initial_kernel_pmd, PAGE_KERNEL); 1860 1861 pv_mmu_ops.write_cr3 = &xen_write_cr3; 1862 } 1863 1864 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, 1865 unsigned long max_pfn) 1866 { 1867 pmd_t *kernel_pmd; 1868 1869 initial_kernel_pmd = 1870 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); 1871 1872 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); 1873 1874 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); 1875 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); 1876 1877 xen_map_identity_early(initial_kernel_pmd, max_pfn); 1878 1879 memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD); 1880 initial_page_table[KERNEL_PGD_BOUNDARY] = 1881 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); 1882 1883 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); 1884 set_page_prot(initial_page_table, PAGE_KERNEL_RO); 1885 set_page_prot(empty_zero_page, PAGE_KERNEL_RO); 1886 1887 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); 1888 1889 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, 1890 PFN_DOWN(__pa(initial_page_table))); 1891 xen_write_cr3(__pa(initial_page_table)); 1892 1893 memblock_x86_reserve_range(__pa(xen_start_info->pt_base), 1894 __pa(xen_start_info->pt_base + 1895 xen_start_info->nr_pt_frames * PAGE_SIZE), 1896 "XEN PAGETABLES"); 1897 1898 return initial_page_table; 1899 } 1900 #endif /* CONFIG_X86_64 */ 1901 1902 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; 1903 1904 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) 1905 { 1906 pte_t pte; 1907 1908 phys >>= PAGE_SHIFT; 1909 1910 switch (idx) { 1911 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: 1912 #ifdef CONFIG_X86_F00F_BUG 1913 case FIX_F00F_IDT: 1914 #endif 1915 #ifdef CONFIG_X86_32 1916 case FIX_WP_TEST: 1917 case FIX_VDSO: 1918 # ifdef CONFIG_HIGHMEM 1919 case FIX_KMAP_BEGIN ... FIX_KMAP_END: 1920 # endif 1921 #else 1922 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE: 1923 #endif 1924 case FIX_TEXT_POKE0: 1925 case FIX_TEXT_POKE1: 1926 /* All local page mappings */ 1927 pte = pfn_pte(phys, prot); 1928 break; 1929 1930 #ifdef CONFIG_X86_LOCAL_APIC 1931 case FIX_APIC_BASE: /* maps dummy local APIC */ 1932 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); 1933 break; 1934 #endif 1935 1936 #ifdef CONFIG_X86_IO_APIC 1937 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END: 1938 /* 1939 * We just don't map the IO APIC - all access is via 1940 * hypercalls. Keep the address in the pte for reference. 
1941 */ 1942 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); 1943 break; 1944 #endif 1945 1946 case FIX_PARAVIRT_BOOTMAP: 1947 /* This is an MFN, but it isn't an IO mapping from the 1948 IO domain */ 1949 pte = mfn_pte(phys, prot); 1950 break; 1951 1952 default: 1953 /* By default, set_fixmap is used for hardware mappings */ 1954 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP)); 1955 break; 1956 } 1957 1958 __native_set_fixmap(idx, pte); 1959 1960 #ifdef CONFIG_X86_64 1961 /* Replicate changes to map the vsyscall page into the user 1962 pagetable vsyscall mapping. */ 1963 if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) { 1964 unsigned long vaddr = __fix_to_virt(idx); 1965 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); 1966 } 1967 #endif 1968 } 1969 1970 __init void xen_ident_map_ISA(void) 1971 { 1972 unsigned long pa; 1973 1974 /* 1975 * If we're dom0, then linear map the ISA machine addresses into 1976 * the kernel's address space. 1977 */ 1978 if (!xen_initial_domain()) 1979 return; 1980 1981 xen_raw_printk("Xen: setup ISA identity maps\n"); 1982 1983 for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) { 1984 pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO); 1985 1986 if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0)) 1987 BUG(); 1988 } 1989 1990 xen_flush_tlb(); 1991 } 1992 1993 static __init void xen_post_allocator_init(void) 1994 { 1995 #ifdef CONFIG_XEN_DEBUG 1996 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); 1997 #endif 1998 pv_mmu_ops.set_pte = xen_set_pte; 1999 pv_mmu_ops.set_pmd = xen_set_pmd; 2000 pv_mmu_ops.set_pud = xen_set_pud; 2001 #if PAGETABLE_LEVELS == 4 2002 pv_mmu_ops.set_pgd = xen_set_pgd; 2003 #endif 2004 2005 /* This will work as long as patching hasn't happened yet 2006 (which it hasn't) */ 2007 pv_mmu_ops.alloc_pte = xen_alloc_pte; 2008 pv_mmu_ops.alloc_pmd = xen_alloc_pmd; 2009 pv_mmu_ops.release_pte = xen_release_pte; 2010 pv_mmu_ops.release_pmd = xen_release_pmd; 2011 #if PAGETABLE_LEVELS == 4 2012 pv_mmu_ops.alloc_pud = xen_alloc_pud; 2013 pv_mmu_ops.release_pud = xen_release_pud; 2014 #endif 2015 2016 #ifdef CONFIG_X86_64 2017 SetPagePinned(virt_to_page(level3_user_vsyscall)); 2018 #endif 2019 xen_mark_init_mm_pinned(); 2020 } 2021 2022 static void xen_leave_lazy_mmu(void) 2023 { 2024 preempt_disable(); 2025 xen_mc_flush(); 2026 paravirt_leave_lazy_mmu(); 2027 preempt_enable(); 2028 } 2029 2030 static const struct pv_mmu_ops xen_mmu_ops __initdata = { 2031 .read_cr2 = xen_read_cr2, 2032 .write_cr2 = xen_write_cr2, 2033 2034 .read_cr3 = xen_read_cr3, 2035 #ifdef CONFIG_X86_32 2036 .write_cr3 = xen_write_cr3_init, 2037 #else 2038 .write_cr3 = xen_write_cr3, 2039 #endif 2040 2041 .flush_tlb_user = xen_flush_tlb, 2042 .flush_tlb_kernel = xen_flush_tlb, 2043 .flush_tlb_single = xen_flush_tlb_single, 2044 .flush_tlb_others = xen_flush_tlb_others, 2045 2046 .pte_update = paravirt_nop, 2047 .pte_update_defer = paravirt_nop, 2048 2049 .pgd_alloc = xen_pgd_alloc, 2050 .pgd_free = xen_pgd_free, 2051 2052 .alloc_pte = xen_alloc_pte_init, 2053 .release_pte = xen_release_pte_init, 2054 .alloc_pmd = xen_alloc_pmd_init, 2055 .release_pmd = xen_release_pmd_init, 2056 2057 .set_pte = xen_set_pte_init, 2058 .set_pte_at = xen_set_pte_at, 2059 .set_pmd = xen_set_pmd_hyper, 2060 2061 .ptep_modify_prot_start = __ptep_modify_prot_start, 2062 .ptep_modify_prot_commit = __ptep_modify_prot_commit, 2063 2064 .pte_val = PV_CALLEE_SAVE(xen_pte_val), 2065 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), 2066 2067 

static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .read_cr2 = xen_read_cr2,
        .write_cr2 = xen_write_cr2,

        .read_cr3 = xen_read_cr3,
#ifdef CONFIG_X86_32
        .write_cr3 = xen_write_cr3_init,
#else
        .write_cr3 = xen_write_cr3,
#endif

        .flush_tlb_user = xen_flush_tlb,
        .flush_tlb_kernel = xen_flush_tlb,
        .flush_tlb_single = xen_flush_tlb_single,
        .flush_tlb_others = xen_flush_tlb_others,

        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,

        .pgd_alloc = xen_pgd_alloc,
        .pgd_free = xen_pgd_free,

        .alloc_pte = xen_alloc_pte_init,
        .release_pte = xen_release_pte_init,
        .alloc_pmd = xen_alloc_pmd_init,
        .release_pmd = xen_release_pmd_init,

        .set_pte = xen_set_pte_init,
        .set_pte_at = xen_set_pte_at,
        .set_pmd = xen_set_pmd_hyper,

        .ptep_modify_prot_start = __ptep_modify_prot_start,
        .ptep_modify_prot_commit = __ptep_modify_prot_commit,

        .pte_val = PV_CALLEE_SAVE(xen_pte_val),
        .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

        .make_pte = PV_CALLEE_SAVE(xen_make_pte),
        .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
        .set_pte_atomic = xen_set_pte_atomic,
        .pte_clear = xen_pte_clear,
        .pmd_clear = xen_pmd_clear,
#endif /* CONFIG_X86_PAE */
        .set_pud = xen_set_pud_hyper,

        .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
        .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
        .pud_val = PV_CALLEE_SAVE(xen_pud_val),
        .make_pud = PV_CALLEE_SAVE(xen_make_pud),
        .set_pgd = xen_set_pgd_hyper,

        .alloc_pud = xen_alloc_pmd_init,
        .release_pud = xen_release_pmd_init,
#endif /* PAGETABLE_LEVELS == 4 */

        .activate_mm = xen_activate_mm,
        .dup_mmap = xen_dup_mmap,
        .exit_mmap = xen_exit_mmap,

        .lazy_mode = {
                .enter = paravirt_enter_lazy_mmu,
                .leave = xen_leave_lazy_mmu,
        },

        .set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
        x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
        x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
        pv_mmu_ops = xen_mmu_ops;

        memset(dummy_mapping, 0xff, PAGE_SIZE);
}

/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
                              unsigned long *in_frames,
                              unsigned long *out_frames)
{
        int i;
        struct multicall_space mcs;

        xen_mc_batch();
        for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
                mcs = __xen_mc_entry(0);

                if (in_frames)
                        in_frames[i] = virt_to_mfn(vaddr);

                MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
                __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

                if (out_frames)
                        out_frames[i] = virt_to_pfn(vaddr);
        }
        xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
                                     unsigned long *mfns,
                                     unsigned long first_mfn)
{
        unsigned i, limit;
        unsigned long mfn;

        xen_mc_batch();

        limit = 1u << order;
        for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
                struct multicall_space mcs;
                unsigned flags;

                mcs = __xen_mc_entry(0);
                if (mfns)
                        mfn = mfns[i];
                else
                        mfn = first_mfn + i;

                if (i < (limit - 1))
                        flags = 0;
                else {
                        if (order == 0)
                                flags = UVMF_INVLPG | UVMF_ALL;
                        else
                                flags = UVMF_TLB_FLUSH | UVMF_ALL;
                }

                MULTI_update_va_mapping(mcs.mc, vaddr,
                                        mfn_pte(mfn, PAGE_KERNEL), flags);

                set_phys_to_machine(virt_to_pfn(vaddr), mfn);
        }

        xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment. Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
                               unsigned long *pfns_in,
                               unsigned long extents_out,
                               unsigned int order_out,
                               unsigned long *mfns_out,
                               unsigned int address_bits)
{
        long rc;
        int success;

        struct xen_memory_exchange exchange = {
                .in = {
                        .nr_extents = extents_in,
                        .extent_order = order_in,
                        .extent_start = pfns_in,
                        .domid = DOMID_SELF
                },
                .out = {
                        .nr_extents = extents_out,
                        .extent_order = order_out,
                        .extent_start = mfns_out,
                        .address_bits = address_bits,
                        .domid = DOMID_SELF
                }
        };

        BUG_ON(extents_in << order_in != extents_out << order_out);

        rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == extents_in);

        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
        BUG_ON(success && (rc != 0));

        return success;
}
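
/*
 * Worked example (illustrative only; the real callers are the two
 * functions below): trading eight singleton pages for one order-3 extent
 * keeps the invariant checked by the BUG_ON above,
 * extents_in << order_in == extents_out << order_out:
 *
 *	unsigned long mfns_in[8];	hypothetical frames to hand back
 *	unsigned long mfn_out;
 *
 *	ok = xen_exchange_memory(8, 0, mfns_in, 1, 3, &mfn_out, 32);
 *
 * On success, mfn_out names the first frame of a machine-contiguous 32kB
 * extent below 4GB (address_bits == 32); on failure nr_exchanged was 0 and
 * nothing has changed hands.
 */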

int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
                                 unsigned int address_bits)
{
        unsigned long *in_frames = discontig_frames, out_frame;
        unsigned long flags;
        int success;

        /*
         * Currently an auto-translated guest will not perform I/O, nor will
         * it require PAE page directories below 4GB. Therefore any calls to
         * this function are redundant and can be ignored.
         */

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        if (unlikely(order > MAX_CONTIG_ORDER))
                return -ENOMEM;

        memset((void *) vstart, 0, PAGE_SIZE << order);

        spin_lock_irqsave(&xen_reservation_lock, flags);

        /* 1. Zap current PTEs, remembering MFNs. */
        xen_zap_pfn_range(vstart, order, in_frames, NULL);

        /* 2. Get a new contiguous memory extent. */
        out_frame = virt_to_pfn(vstart);
        success = xen_exchange_memory(1UL << order, 0, in_frames,
                                      1, order, &out_frame,
                                      address_bits);

        /* 3. Map the new extent in place of old pages. */
        if (success)
                xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
        else
                xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

        spin_unlock_irqrestore(&xen_reservation_lock, flags);

        return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
        unsigned long *out_frames = discontig_frames, in_frame;
        unsigned long flags;
        int success;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return;

        if (unlikely(order > MAX_CONTIG_ORDER))
                return;

        memset((void *) vstart, 0, PAGE_SIZE << order);

        spin_lock_irqsave(&xen_reservation_lock, flags);

        /* 1. Find start MFN of contiguous extent. */
        in_frame = virt_to_mfn(vstart);

        /* 2. Zap current PTEs. */
        xen_zap_pfn_range(vstart, order, NULL, out_frames);

        /* 3. Do the exchange for non-contiguous MFNs. */
        success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
                                      0, out_frames, 0);

        /* 4. Map new pages in place of old pages. */
        if (success)
                xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
        else
                xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

        spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
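
/*
 * Typical use, sketched (modelled loosely on what the Xen swiotlb setup
 * code does, not copied from it): a driver that needs a buffer which is
 * machine-contiguous and DMA-able below 4GB can convert an ordinary
 * allocation in place and undo the exchange when it is finished:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 4);	(order 4 = 64kB)
 *
 *	if (buf && xen_create_contiguous_region(buf, 4, 32) == 0) {
 *		... hand virt_to_machine(buf) to the device ...
 *		xen_destroy_contiguous_region(buf, 4);
 *	}
 *	free_pages(buf, 4);
 *
 * The pfns (and kernel virtual addresses) of the region never change; only
 * the backing mfns and the p2m entries are swapped underneath it.
 */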

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
        struct xen_hvm_pagetable_dying a;
        int rc;

        a.domid = DOMID_SELF;
        a.gpa = __pa(mm->pgd);
        rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
        WARN_ON_ONCE(rc < 0);
}

static int is_pagetable_dying_supported(void)
{
        struct xen_hvm_pagetable_dying a;
        int rc = 0;

        a.domid = DOMID_SELF;
        a.gpa = 0x00;
        rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
        if (rc < 0) {
                printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
                return 0;
        }
        return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
        if (is_pagetable_dying_supported())
                pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
}
#endif

#define REMAP_BATCH_SIZE 16

struct remap_data {
        unsigned long mfn;
        pgprot_t prot;
        struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
                                 unsigned long addr, void *data)
{
        struct remap_data *rmd = data;
        pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));

        rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
        rmd->mmu_update->val = pte_val_ma(pte);
        rmd->mmu_update++;

        return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               unsigned long mfn, int nr,
                               pgprot_t prot, unsigned domid)
{
        struct remap_data rmd;
        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
        int batch;
        unsigned long range;
        int err = 0;

        prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);

        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
                 (VM_PFNMAP | VM_RESERVED | VM_IO)));

        rmd.mfn = mfn;
        rmd.prot = prot;

        while (nr) {
                batch = min(REMAP_BATCH_SIZE, nr);
                range = (unsigned long)batch << PAGE_SHIFT;

                rmd.mmu_update = mmu_update;
                err = apply_to_page_range(vma->vm_mm, addr, range,
                                          remap_area_mfn_pte_fn, &rmd);
                if (err)
                        goto out;

                err = -EFAULT;
                if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
                        goto out;

                nr -= batch;
                addr += range;
        }

        err = 0;
out:

        flush_tlb_all();

        return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
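
/*
 * Illustrative caller (a sketch; in this tree the real user is the privcmd
 * driver's mmap path): a dom0 tool mmap()s a window of its own address
 * space and asks for another domain's machine frames to be mapped there,
 * REMAP_BATCH_SIZE PTEs per mmu_update hypercall:
 *
 *	err = xen_remap_domain_mfn_range(vma, vma->vm_start,
 *					 first_gmfn, nr_pages,
 *					 vma->vm_page_prot, domid);
 *
 * The vma must already be marked VM_IO | VM_RESERVED | VM_PFNMAP (see the
 * BUG_ON above); privcmd arranges that in its mmap handler.
 */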
debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug, 2436 &mmu_stats.pgd_update_pinned); 2437 debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug, 2438 &mmu_stats.pgd_update_pinned); 2439 2440 debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update); 2441 debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug, 2442 &mmu_stats.pud_update_pinned); 2443 debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug, 2444 &mmu_stats.pud_update_pinned); 2445 2446 debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update); 2447 debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug, 2448 &mmu_stats.pmd_update_pinned); 2449 debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug, 2450 &mmu_stats.pmd_update_pinned); 2451 2452 debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update); 2453 // debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug, 2454 // &mmu_stats.pte_update_pinned); 2455 debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug, 2456 &mmu_stats.pte_update_pinned); 2457 2458 debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update); 2459 debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug, 2460 &mmu_stats.mmu_update_extended); 2461 xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug, 2462 mmu_stats.mmu_update_histo, 20); 2463 2464 debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at); 2465 debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug, 2466 &mmu_stats.set_pte_at_batched); 2467 debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug, 2468 &mmu_stats.set_pte_at_current); 2469 debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug, 2470 &mmu_stats.set_pte_at_kernel); 2471 2472 debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit); 2473 debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, 2474 &mmu_stats.prot_commit_batched); 2475 2476 debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops); 2477 return 0; 2478 } 2479 fs_initcall(xen_mmu_debugfs); 2480 2481 #endif /* CONFIG_XEN_DEBUG_FS */ 2482