/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);
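/*
 * Illustrative sketch, not part of the original file: the pfn<->mfn
 * translation described in the header comment is what pfn_to_mfn() and
 * mfn_pte() implement.  "example_make_guest_pte" is a hypothetical helper,
 * kept under #if 0 like the other dormant snippets in this file.
 */
#if 0
static pte_t example_make_guest_pte(unsigned long pfn, pgprot_t prot)
{
	/* The pte that is installed carries the mfn, not the pfn */
	return mfn_pte(pfn_to_mfn(pfn), prot);
}
#endif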
#ifdef CONFIG_X86_32
/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#endif
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);
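/*
 * Illustrative sketch, not part of the original file: per the cr3 note
 * above, a cross-vcpu reader should use xen_current_cr3, which is only
 * updated once the hypercall has completed.  "example_remote_cr3" and its
 * "cpu" parameter are hypothetical.
 */
#if 0
static unsigned long example_remote_cr3(int cpu)
{
	/* Safe for another vcpu to read; may lag the last requested cr3 */
	return per_cpu(xen_current_cr3, cpu);
}
#endif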
static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write but with 32-bit guests this
		 * needs two traps (one for each of the two 32-bit
		 * words in the PTE) so do one hypercall directly
		 * instead.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}
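/*
 * Illustrative sketch, not part of the original file: xen_batched_set_pte()
 * only queues the update when the caller is inside a lazy-MMU section;
 * otherwise __xen_set_pte() issues one hypercall per pte.  A hypothetical
 * caller batching several updates would look roughly like this.
 */
#if 0
static void example_batched_updates(pte_t *ptep, pte_t pteval)
{
	arch_enter_lazy_mmu_mode();	/* updates below queue as multicalls */
	xen_set_pte(ptep, pteval);
	/* ... more pte updates ... */
	arch_leave_lazy_mmu_mode();	/* flushes the whole batch at once */
}
#endif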
pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */

static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * It is paramount to do this test _after_ the
			 * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
			 * IDENTITY_FRAME_BIT resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is a MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;
#if 0
	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}
#endif
	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

static pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
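/*
 * Illustrative sketch, not part of the original file: as the comment in
 * pte_pfn_to_mfn() warns, the pfn->mfn->pfn round trip is lossy for a pfn
 * with no backing mfn.  "example_roundtrip" is hypothetical.
 */
#if 0
static void example_roundtrip(pteval_t val)
{
	pteval_t mval = pte_pfn_to_mfn(val);	/* becomes 0 if no mfn */
	pteval_t back = pte_mfn_to_pfn(mval);	/* original pfn is gone */

	/* back == val only holds when the pfn had a valid mfn */
	(void)back;
}
#endif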
/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      UC     UC-
 * 7    PAT PCD PWT      UC       UC     UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

static pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
#if 0
	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.).
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}
#endif
	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

static pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

static pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

static pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
static pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
static pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */
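/*
 * Illustrative sketch, not part of the original file: on x86-64 the user
 * pgd page is stashed in page->private of the kernel pgd's page (see
 * xen_pgd_alloc() further down), so kernel and user entries for a
 * user-range address pair up as below.  "example_pgd_pair" is hypothetical.
 */
#if 0
static void example_pgd_pair(pgd_t *pgd, unsigned long addr)
{
	pgd_t *kernel_entry = pgd + pgd_index(addr);
	/* non-NULL only for entries below pgd_index(USER_LIMIT) */
	pgd_t *user_entry = xen_get_user_pgd(kernel_entry);

	(void)kernel_entry;
	(void)user_entry;
}
#endif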
/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}
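/*
 * Illustrative sketch, not part of the original file: a minimal callback
 * for __xen_pgd_walk()/xen_pgd_walk(), invoked once per pagetable page at
 * every level; returning 0 requests no flush.  "example_count_page" and
 * "example_npages" are hypothetical.
 */
#if 0
static atomic_t example_npages = ATOMIC_INIT(0);

static int example_count_page(struct mm_struct *mm, struct page *page,
			      enum pt_level level)
{
	atomic_inc(&example_npages);
	return 0;		/* no TLB flush needed */
}
#endif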
static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}
/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while
		 * the pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}
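/*
 * Illustrative sketch, not part of the original file: per the comments
 * above, xen_mm_pin_all()/xen_mm_unpin_all() bracket a save/restore cycle,
 * roughly as below.  "example_save_restore" is hypothetical.
 */
#if 0
static void example_save_restore(void)
{
	xen_mm_pin_all();	/* before save: every pgd pinned */
	/* ... domain is saved and later resumed here ... */
	xen_mm_unpin_all();	/* after resume: undo the extra pinning */
}
#endif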
#ifdef CONFIG_SMP
/* Another cpu may still have their %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode, and it hasn't flushed
	   its set of pending hypercalls yet.  In this case, we can
	   look at its actual current cr3 value, and force it to flush
	   if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif
/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static void xen_post_allocator_init(void);

static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
{
	/* reserve the range used */
	native_pagetable_reserve(start, end);

	/* set as RW the rest */
	printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
	       PFN_PHYS(pgt_buf_top));
	while (end < PFN_PHYS(pgt_buf_top)) {
		make_lowmem_page_readwrite(__va(end));
		end += PAGE_SIZE;
	}
}

#ifdef CONFIG_X86_64
static void __init xen_cleanhighmap(unsigned long vaddr,
				    unsigned long vaddr_end)
{
	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);

	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
	 * We include the PMD passed in on _both_ boundaries. */
	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
			pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > kernel_end)
			set_pmd(pmd, __pmd(0));
	}
	/* In case we did something silly, we should crash in this function
	 * instead of somewhere later and be confusing. */
	xen_mc_flush();
}
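/*
 * Illustrative sketch, not part of the original file: since the loop above
 * is inclusive on both boundaries, clearing exactly one 2MB PMD worth of
 * __ka space looks like this ("example_clean_one_pmd" and "vaddr" are
 * hypothetical; vaddr assumed PMD-aligned).
 */
#if 0
static void __init example_clean_one_pmd(unsigned long vaddr)
{
	/* one PMD, both ends included */
	xen_cleanhighmap(vaddr, vaddr + PMD_SIZE - 1);
}
#endif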
#endif
static void __init xen_pagetable_init(void)
{
#ifdef CONFIG_X86_64
	unsigned long size;
	unsigned long addr;
#endif
	paging_init();
	xen_setup_shared_info();
#ifdef CONFIG_X86_64
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned long new_mfn_list;

		size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));

		/* On 32-bit, we get zero so this never gets executed. */
		new_mfn_list = xen_revector_p2m_tree();
		if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
			/* using __ka address and sticking INVALID_P2M_ENTRY! */
			memset((void *)xen_start_info->mfn_list, 0xff, size);

			/* We should be in __ka space. */
			BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
			addr = xen_start_info->mfn_list;
			/* We roundup to the PMD, which means that if anybody at this stage is
			 * using the __ka address of xen_start_info or xen_start_info->shared_info
			 * they are going to crash.  Fortunately we have already revectored
			 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
			size = roundup(size, PMD_SIZE);
			xen_cleanhighmap(addr, addr + size);

			size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
			memblock_free(__pa(xen_start_info->mfn_list), size);
			/* And revector! Bye bye old array */
			xen_start_info->mfn_list = new_mfn_list;
		} else
			goto skip;
	}
	/* At this stage, cleanup_highmap has already cleaned __ka space
	 * from _brk_limit way up to the max_pfn_mapped (which is the end of
	 * the ramdisk).  We continue on, erasing PMD entries that point to page
	 * tables - do note that they are accessible at this stage via __va.
	 * For good measure we also round up to the PMD - which means that if
	 * anybody is using the __ka address of the initial boot-stack - and tries
	 * to use it - they are going to crash.  The xen_start_info has been
	 * taken care of already in xen_setup_kernel_pagetable. */
	addr = xen_start_info->pt_base;
	size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);

	xen_cleanhighmap(addr, addr + size);
	xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
#ifdef DEBUG
	/* This is superfluous and not necessary, but you know what,
	 * let's do it.  The MODULES_VADDR -> MODULES_END should be clear of
	 * anything at this stage. */
	xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
#endif
skip:
#endif
	xen_post_allocator_init();
}
static void xen_write_cr2(unsigned long cr2)
{
	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return this_cpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return this_cpu_read(xen_vcpu_info.arch.cr2);
}

static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb(0);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_single(addr);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct {
		struct mmuext_op op;
#ifdef CONFIG_SMP
		DECLARE_BITMAP(mask, num_processors);
#else
		DECLARE_BITMAP(mask, NR_CPUS);
#endif
	} *args;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUs. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = start;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return this_cpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	this_cpu_write(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op op;
	unsigned long mfn;

	trace_xen_mmu_write_cr3(kernel, cr3);

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = mfn;

	xen_extend_mmuext_op(&op);

	if (kernel) {
		this_cpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();  /* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	this_cpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}
#ifdef CONFIG_X86_32
static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}
#else /* CONFIG_X86_64 */
static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	/*
	 * If the new pfn is within the range of the newly allocated
	 * kernel pagetable, and it isn't being mapped into an
	 * early_ioremap fixmap slot as a freshly allocated page, make sure
	 * it is RO.
	 */
	if (((!is_early_ioremap_ptep(ptep) &&
			pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
			(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
		pte = pte_wrprotect(pte);

	return pte;
}
#endif /* CONFIG_X86_64 */

/*
 * Init-time set_pte while constructing initial pagetables, which
 * doesn't allow RO page table pages to be remapped RW.
 *
 * If there is no MFN for this PFN then this page is initially
 * ballooned out so clear the PTE (as in decrease_reservation() in
 * drivers/xen/balloon.c).
 *
 * Many of these PTE updates are done on unpinned and writable pages
 * and doing a hypercall for these is unnecessary and expensive.  At
 * this point it is not possible to tell if a page is pinned or not,
 * so always write the PTE directly and rely on Xen trapping and
 * emulating any updates as necessary.
 */
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	if (pte_mfn(pte) != INVALID_P2M_ENTRY)
		pte = mask_rw_pte(ptep, pte);
	else
		pte = __pte_ma(0);

	native_set_pte(ptep, pte);
}

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static void __init xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void __init xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct multicall_space mcs;
	struct mmuext_op *op;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = cmd;
	op->arg1.mfn = pfn_to_mfn(pfn);

	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}

static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	struct multicall_space mcs;
	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);

	mcs = __xen_mc_entry(0);
	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
				pfn_pte(pfn, prot), 0);
}
/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
				    unsigned level)
{
	bool pinned = PagePinned(virt_to_page(mm->pgd));

	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);

	if (pinned) {
		struct page *page = pfn_to_page(pfn);

		SetPagePinned(page);

		if (!PageHighMem(page)) {
			xen_mc_batch();

			__set_pfn_prot(pfn, PAGE_KERNEL_RO);

			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);
	bool pinned = PagePinned(page);

	trace_xen_mmu_release_ptpage(pfn, level, pinned);

	if (pinned) {
		if (!PageHighMem(page)) {
			xen_mc_batch();

			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

			__set_pfn_prot(pfn, PAGE_KERNEL);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}
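/*
 * Illustrative sketch, not part of the original file: for a pinned mm the
 * alloc path above is RO-then-pin and the release path is unpin-then-RW,
 * so a pte page's lifetime is roughly as below.  "example_ptpage_lifetime"
 * is hypothetical.
 */
#if 0
static void example_ptpage_lifetime(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);	/* RO + pinned if mm's pgd is pinned */
	/* ... the page serves as a pte page ... */
	xen_release_ptpage(pfn, PT_PTE);	/* unpinned and RW again */
}
#endif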
/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

/* Set the page permissions on an identity-mapped page */
static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}
#ifdef CONFIG_X86_32
static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
				      PAGE_SIZE);

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == LEVEL1_IDENT_ENTRIES)
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

#ifdef CONFIG_X86_32
			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;
#endif

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}
#endif
void __init xen_setup_machphys_mapping(void)
{
	struct xen_machphys_mapping mapping;

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr = mapping.max_mfn + 1;
	} else {
		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
	}
#ifdef CONFIG_X86_32
	WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
		< machine_to_phys_mapping);
#endif
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}
static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
				 unsigned long addr)
{
	if (*pt_base == PFN_DOWN(__pa(addr))) {
		set_page_prot((void *)addr, PAGE_KERNEL);
		clear_page((void *)addr);
		(*pt_base)++;
	}
	if (*pt_end == PFN_DOWN(__pa(addr))) {
		set_page_prot((void *)addr, PAGE_KERNEL);
		clear_page((void *)addr);
		(*pt_end)--;
	}
}
/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working.  We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;
	unsigned long addr[3];
	unsigned long pt_base, pt_end;
	unsigned i;

	/* max_pfn_mapped is the last pfn mapped in the initial memory
	 * mappings.  Considering that on Xen after the kernel mappings we
	 * have the mappings of some pages that don't exist in pfn space, we
	 * set max_pfn_mapped to the last real pfn mapped. */
	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));

	pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
	pt_end = pt_base + xen_start_info->nr_pt_frames;

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	/* L4[272] -> level3_ident_pgt
	 * L4[511] -> level3_kernel_pgt */
	convert_pfn_mfn(init_level4_pgt);

	/* L3_i[0] -> level2_ident_pgt */
	convert_pfn_mfn(level3_ident_pgt);
	/* L3_k[510] -> level2_kernel_pgt
	 * L3_k[511] -> level2_fixmap_pgt */
	convert_pfn_mfn(level3_kernel_pgt);

	/* We get [511][511] and have Xen's version of level2_kernel_pgt */
	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	addr[0] = (unsigned long)pgd;
	addr[1] = (unsigned long)l3;
	addr[2] = (unsigned long)l2;
	/* Graft it onto L4[272][0].  Note that we are creating an aliasing problem:
	 * Both L4[272][0] and L4[511][511] have entries that point to the same
	 * L2 (PMD) tables.  Meaning that if you modify it in __va space
	 * it will also be modified in the __ka space!  (But if you just
	 * modify the PMD table to point to other PTEs or none, then you
	 * are OK - which is what cleanup_highmap does) */
	copy_page(level2_ident_pgt, l2);
	/* Graft it onto L4[511][511] */
	copy_page(level2_kernel_pgt, l2);

	/* Get [511][510] and graft that in level2_fixmap_pgt */
	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	copy_page(level2_fixmap_pgt, l2);
	/* Note that we don't do anything with level1_fixmap_pgt which
	 * we don't need. */

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/*
	 * At this stage there can be no user pgd, and no page
	 * structure to attach it to, so make sure we just set kernel
	 * pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(init_level4_pgt));
	xen_mc_issue(PARAVIRT_LAZY_CPU);

	/* We can't easily rip out the L3 and L2, as the Xen pagetables are
	 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ...  for
	 * the initial domain.  For guests using the toolstack, they are in:
	 * [L4], [L3], [L2], [L1], [L1], ...  order.  So for dom0 we can only
	 * rip out the [L4] (pgd), but for guests we shave off three pages.
	 */
	for (i = 0; i < ARRAY_SIZE(addr); i++)
		check_pt_base(&pt_base, &pt_end, addr[i]);

	/* Our (by three pages) smaller Xen pagetable that we are using */
	memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
	/* Revector the xen_start_info */
	xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
}
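/*
 * Illustrative sketch, not part of the original file: the aliasing noted
 * above means level2_ident_pgt and level2_kernel_pgt are copies of the
 * same Xen-provided l2, so corresponding entries reference the same pte
 * tables.  "example_alias_check" is hypothetical.
 */
#if 0
static void __init example_alias_check(void)
{
	/* Both copies start out with identical entries */
	BUG_ON(pmd_val(level2_ident_pgt[0]) != pmd_val(level2_kernel_pgt[0]));
}
#endif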
#else	/* !CONFIG_X86_64 */
static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);

static void __init xen_write_cr3_init(unsigned long cr3)
{
	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));

	BUG_ON(read_cr3() != __pa(initial_page_table));
	BUG_ON(cr3 != __pa(swapper_pg_dir));

	/*
	 * We are switching to swapper_pg_dir for the first time (from
	 * initial_page_table) and therefore need to mark that page
	 * read-only and then pin it.
	 *
	 * Xen disallows sharing of kernel PMDs for PAE
	 * guests.  Therefore we must copy the kernel PMD from
	 * initial_page_table into a new kernel PMD to be used in
	 * swapper_pg_dir.
	 */
	swapper_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
	copy_page(swapper_kernel_pmd, initial_kernel_pmd);
	swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);

	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	xen_write_cr3(cr3);
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	set_page_prot(initial_page_table, PAGE_KERNEL);
	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);

	pv_mmu_ops.write_cr3 = &xen_write_cr3;
}

void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	initial_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);

	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
				  xen_start_info->nr_pt_frames * PAGE_SIZE +
				  512*1024);

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	copy_page(initial_kernel_pmd, kernel_pmd);

	xen_map_identity_early(initial_kernel_pmd, max_pfn);

	copy_page(initial_page_table, pgd);
	initial_page_table[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);

	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
	set_page_prot(initial_page_table, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	xen_write_cr3(__pa(initial_page_table));

	memblock_reserve(__pa(xen_start_info->pt_base),
			 xen_start_info->nr_pt_frames * PAGE_SIZE);
}
#endif	/* CONFIG_X86_64 */

static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;

static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
        pte_t pte;

        phys >>= PAGE_SHIFT;

        switch (idx) {
        case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
        case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
        case FIX_WP_TEST:
        case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
        case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
        case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
        case VVAR_PAGE:
#endif
        case FIX_TEXT_POKE0:
        case FIX_TEXT_POKE1:
                /* All local page mappings */
                pte = pfn_pte(phys, prot);
                break;

#ifdef CONFIG_X86_LOCAL_APIC
        case FIX_APIC_BASE:     /* maps dummy local APIC */
                pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
                break;
#endif

#ifdef CONFIG_X86_IO_APIC
        case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
                /*
                 * We just don't map the IO APIC - all access is via
                 * hypercalls.  Keep the address in the pte for reference.
                 */
                pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
                break;
#endif

        case FIX_PARAVIRT_BOOTMAP:
                /* This is an MFN, but it isn't an IO mapping from the
                   IO domain */
                pte = mfn_pte(phys, prot);
                break;

        default:
                /* By default, set_fixmap is used for hardware mappings */
                pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
                break;
        }

        __native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
        /* Replicate changes to map the vsyscall page into the user
           pagetable vsyscall mapping. */
        if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
            idx == VVAR_PAGE) {
                unsigned long vaddr = __fix_to_virt(idx);
                set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
        }
#endif
}

static void __init xen_post_allocator_init(void)
{
        pv_mmu_ops.set_pte = xen_set_pte;
        pv_mmu_ops.set_pmd = xen_set_pmd;
        pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
        pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

        /* This will work as long as patching hasn't happened yet
           (which it hasn't) */
        pv_mmu_ops.alloc_pte = xen_alloc_pte;
        pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
        pv_mmu_ops.release_pte = xen_release_pte;
        pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
        pv_mmu_ops.alloc_pud = xen_alloc_pud;
        pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
        SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
        xen_mark_init_mm_pinned();
}

static void xen_leave_lazy_mmu(void)
{
        preempt_disable();
        xen_mc_flush();
        paravirt_leave_lazy_mmu();
        preempt_enable();
}
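/*
 * xen_leave_lazy_mmu() pairs with paravirt_enter_lazy_mmu() via the
 * .lazy_mode hooks below.  A minimal sketch of what a caller sees,
 * assuming only the generic lazy-MMU API (example_prepopulate is a
 * made-up name for illustration):
 *
 *	static void example_prepopulate(struct mm_struct *mm,
 *					unsigned long addr,
 *					pte_t *ptep, pte_t pte)
 *	{
 *		arch_enter_lazy_mmu_mode();	// updates may now be queued
 *		set_pte_at(mm, addr, ptep, pte);// queued as a multicall
 *		arch_leave_lazy_mmu_mode();	// xen_mc_flush() runs here
 *	}
 */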

static const struct pv_mmu_ops xen_mmu_ops __initconst = {
        .read_cr2 = xen_read_cr2,
        .write_cr2 = xen_write_cr2,

        .read_cr3 = xen_read_cr3,
#ifdef CONFIG_X86_32
        .write_cr3 = xen_write_cr3_init,
#else
        .write_cr3 = xen_write_cr3,
#endif

        .flush_tlb_user = xen_flush_tlb,
        .flush_tlb_kernel = xen_flush_tlb,
        .flush_tlb_single = xen_flush_tlb_single,
        .flush_tlb_others = xen_flush_tlb_others,

        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,

        .pgd_alloc = xen_pgd_alloc,
        .pgd_free = xen_pgd_free,

        .alloc_pte = xen_alloc_pte_init,
        .release_pte = xen_release_pte_init,
        .alloc_pmd = xen_alloc_pmd_init,
        .release_pmd = xen_release_pmd_init,

        .set_pte = xen_set_pte_init,
        .set_pte_at = xen_set_pte_at,
        .set_pmd = xen_set_pmd_hyper,

        .ptep_modify_prot_start = __ptep_modify_prot_start,
        .ptep_modify_prot_commit = __ptep_modify_prot_commit,

        .pte_val = PV_CALLEE_SAVE(xen_pte_val),
        .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

        .make_pte = PV_CALLEE_SAVE(xen_make_pte),
        .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
        .set_pte_atomic = xen_set_pte_atomic,
        .pte_clear = xen_pte_clear,
        .pmd_clear = xen_pmd_clear,
#endif /* CONFIG_X86_PAE */
        .set_pud = xen_set_pud_hyper,

        .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
        .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
        .pud_val = PV_CALLEE_SAVE(xen_pud_val),
        .make_pud = PV_CALLEE_SAVE(xen_make_pud),
        .set_pgd = xen_set_pgd_hyper,

        .alloc_pud = xen_alloc_pmd_init,
        .release_pud = xen_release_pmd_init,
#endif /* PAGETABLE_LEVELS == 4 */

        .activate_mm = xen_activate_mm,
        .dup_mmap = xen_dup_mmap,
        .exit_mmap = xen_exit_mmap,

        .lazy_mode = {
                .enter = paravirt_enter_lazy_mmu,
                .leave = xen_leave_lazy_mmu,
        },

        .set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
        x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
        x86_init.paging.pagetable_init = xen_pagetable_init;
        pv_mmu_ops = xen_mmu_ops;

        memset(dummy_mapping, 0xff, PAGE_SIZE);
}

/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
                              unsigned long *in_frames,
                              unsigned long *out_frames)
{
        int i;
        struct multicall_space mcs;

        xen_mc_batch();
        for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
                mcs = __xen_mc_entry(0);

                if (in_frames)
                        in_frames[i] = virt_to_mfn(vaddr);

                MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
                __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

                if (out_frames)
                        out_frames[i] = virt_to_pfn(vaddr);
        }
        xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
                                     unsigned long *mfns,
                                     unsigned long first_mfn)
{
        unsigned i, limit;
        unsigned long mfn;

        xen_mc_batch();

        limit = 1u << order;
        for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
                struct multicall_space mcs;
                unsigned flags;

                mcs = __xen_mc_entry(0);
                if (mfns)
                        mfn = mfns[i];
                else
                        mfn = first_mfn + i;

                if (i < (limit - 1))
                        flags = 0;
                else {
                        if (order == 0)
                                flags = UVMF_INVLPG | UVMF_ALL;
                        else
                                flags = UVMF_TLB_FLUSH | UVMF_ALL;
                }

                MULTI_update_va_mapping(mcs.mc, vaddr,
                                mfn_pte(mfn, PAGE_KERNEL), flags);

                set_phys_to_machine(virt_to_pfn(vaddr), mfn);
        }

        xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns 1 if the hypervisor was able to satisfy the request, 0
 * otherwise.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
                               unsigned long *pfns_in,
                               unsigned long extents_out,
                               unsigned int order_out,
                               unsigned long *mfns_out,
                               unsigned int address_bits)
{
        long rc;
        int success;

        struct xen_memory_exchange exchange = {
                .in = {
                        .nr_extents = extents_in,
                        .extent_order = order_in,
                        .extent_start = pfns_in,
                        .domid = DOMID_SELF
                },
                .out = {
                        .nr_extents = extents_out,
                        .extent_order = order_out,
                        .extent_start = mfns_out,
                        .address_bits = address_bits,
                        .domid = DOMID_SELF
                }
        };

        BUG_ON(extents_in << order_in != extents_out << order_out);

        rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
        success = (exchange.nr_exchanged == extents_in);

        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
        BUG_ON(success && (rc != 0));

        return success;
}
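/*
 * Worked example: xen_create_contiguous_region() below trades 512 single
 * frames for one 2MB extent, i.e. extents_in = 1UL << 9, order_in = 0,
 * extents_out = 1, order_out = 9.  Both sides then cover
 * 512 << 0 == 1 << 9 frames, which is exactly the invariant the first
 * BUG_ON above enforces.
 */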

int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
                                 unsigned int address_bits)
{
        unsigned long *in_frames = discontig_frames, out_frame;
        unsigned long flags;
        int success;

        /*
         * Currently an auto-translated guest will not perform I/O, nor will
         * it require PAE page directories below 4GB. Therefore any calls to
         * this function are redundant and can be ignored.
         */

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        if (unlikely(order > MAX_CONTIG_ORDER))
                return -ENOMEM;

        memset((void *) vstart, 0, PAGE_SIZE << order);

        spin_lock_irqsave(&xen_reservation_lock, flags);

        /* 1. Zap current PTEs, remembering MFNs. */
        xen_zap_pfn_range(vstart, order, in_frames, NULL);

        /* 2. Get a new contiguous memory extent. */
        out_frame = virt_to_pfn(vstart);
        success = xen_exchange_memory(1UL << order, 0, in_frames,
                                      1, order, &out_frame,
                                      address_bits);

        /* 3. Map the new extent in place of old pages. */
        if (success)
                xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
        else
                xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

        spin_unlock_irqrestore(&xen_reservation_lock, flags);

        return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
        unsigned long *out_frames = discontig_frames, in_frame;
        unsigned long flags;
        int success;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return;

        if (unlikely(order > MAX_CONTIG_ORDER))
                return;

        memset((void *) vstart, 0, PAGE_SIZE << order);

        spin_lock_irqsave(&xen_reservation_lock, flags);

        /* 1. Find start MFN of contiguous extent. */
        in_frame = virt_to_mfn(vstart);

        /* 2. Zap current PTEs. */
        xen_zap_pfn_range(vstart, order, NULL, out_frames);

        /* 3. Do the exchange for non-contiguous MFNs. */
        success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
                                      0, out_frames, 0);

        /* 4. Map new pages in place of old pages. */
        if (success)
                xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
        else
                xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

        spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
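/*
 * Usage sketch for the pair above (illustrative only - the buffer and
 * the flow are made up): obtaining a machine-contiguous, 32-bit
 * addressable buffer for a DMA engine, then undoing the exchange when
 * finished:
 *
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, 4);	// 64kB
 *
 *	if (buf && xen_create_contiguous_region((unsigned long)buf,
 *						4, 32) == 0) {
 *		// ... program the device with virt_to_machine(buf).maddr
 *		xen_destroy_contiguous_region((unsigned long)buf, 4);
 *	}
 *	free_pages((unsigned long)buf, 4);
 */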

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
        struct xen_hvm_pagetable_dying a;
        int rc;

        a.domid = DOMID_SELF;
        a.gpa = __pa(mm->pgd);
        rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
        WARN_ON_ONCE(rc < 0);
}

static int is_pagetable_dying_supported(void)
{
        struct xen_hvm_pagetable_dying a;
        int rc = 0;

        a.domid = DOMID_SELF;
        a.gpa = 0x00;
        rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
        if (rc < 0) {
                printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
                return 0;
        }
        return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
        if (is_pagetable_dying_supported())
                pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
}
#endif

#define REMAP_BATCH_SIZE 16

struct remap_data {
        unsigned long mfn;
        pgprot_t prot;
        struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
                                 unsigned long addr, void *data)
{
        struct remap_data *rmd = data;
        pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));

        rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
        rmd->mmu_update->val = pte_val_ma(pte);
        rmd->mmu_update++;

        return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               unsigned long mfn, int nr,
                               pgprot_t prot, unsigned domid)
{
        struct remap_data rmd;
        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
        int batch;
        unsigned long range;
        int err = 0;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return -EINVAL;

        prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);

        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
                                (VM_PFNMAP | VM_RESERVED | VM_IO)));

        rmd.mfn = mfn;
        rmd.prot = prot;

        while (nr) {
                batch = min(REMAP_BATCH_SIZE, nr);
                range = (unsigned long)batch << PAGE_SHIFT;

                rmd.mmu_update = mmu_update;
                err = apply_to_page_range(vma->vm_mm, addr, range,
                                          remap_area_mfn_pte_fn, &rmd);
                if (err)
                        goto out;

                err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
                if (err < 0)
                        goto out;

                nr -= batch;
                addr += range;
        }

        err = 0;
out:

        flush_tlb_all();

        return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
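/*
 * Usage sketch (illustrative only - privcmd-style drivers are the real
 * callers): mapping nr foreign frames from domain domid, starting at
 * first_mfn, into a VMA whose mmap handler has already set
 * VM_IO | VM_PFNMAP | VM_RESERVED, as the BUG_ON above requires:
 *
 *	err = xen_remap_domain_mfn_range(vma, vma->vm_start, first_mfn,
 *					 nr, vma->vm_page_prot, domid);
 *	if (err)
 *		// fewer than nr frames were mapped; bail out
 */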