1 /* 2 * Xen mmu operations 3 * 4 * This file contains the various mmu fetch and update operations. 5 * The most important job they must perform is the mapping between the 6 * domain's pfn and the overall machine mfns. 7 * 8 * Xen allows guests to directly update the pagetable, in a controlled 9 * fashion. In other words, the guest modifies the same pagetable 10 * that the CPU actually uses, which eliminates the overhead of having 11 * a separate shadow pagetable. 12 * 13 * In order to allow this, it falls on the guest domain to map its 14 * notion of a "physical" pfn - which is just a domain-local linear 15 * address - into a real "machine address" which the CPU's MMU can 16 * use. 17 * 18 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be 19 * inserted directly into the pagetable. When creating a new 20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely, 21 * when reading the content back with __(pgd|pmd|pte)_val, it converts 22 * the mfn back into a pfn. 23 * 24 * The other constraint is that all pages which make up a pagetable 25 * must be mapped read-only in the guest. This prevents uncontrolled 26 * guest updates to the pagetable. Xen strictly enforces this, and 27 * will disallow any pagetable update which will end up mapping a 28 * pagetable page RW, and will disallow using any writable page as a 29 * pagetable. 30 * 31 * Naively, when loading %cr3 with the base of a new pagetable, Xen 32 * would need to validate the whole pagetable before going on. 33 * Naturally, this is quite slow. The solution is to "pin" a 34 * pagetable, which enforces all the constraints on the pagetable even 35 * when it is not actively in use. This menas that Xen can be assured 36 * that it is still valid when you do load it into %cr3, and doesn't 37 * need to revalidate it. 38 * 39 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 40 */ 41 #include <linux/sched/mm.h> 42 #include <linux/highmem.h> 43 #include <linux/debugfs.h> 44 #include <linux/bug.h> 45 #include <linux/vmalloc.h> 46 #include <linux/export.h> 47 #include <linux/init.h> 48 #include <linux/gfp.h> 49 #include <linux/memblock.h> 50 #include <linux/seq_file.h> 51 #include <linux/crash_dump.h> 52 #ifdef CONFIG_KEXEC_CORE 53 #include <linux/kexec.h> 54 #endif 55 56 #include <trace/events/xen.h> 57 58 #include <asm/pgtable.h> 59 #include <asm/tlbflush.h> 60 #include <asm/fixmap.h> 61 #include <asm/mmu_context.h> 62 #include <asm/setup.h> 63 #include <asm/paravirt.h> 64 #include <asm/e820/api.h> 65 #include <asm/linkage.h> 66 #include <asm/page.h> 67 #include <asm/init.h> 68 #include <asm/pat.h> 69 #include <asm/smp.h> 70 71 #include <asm/xen/hypercall.h> 72 #include <asm/xen/hypervisor.h> 73 74 #include <xen/xen.h> 75 #include <xen/page.h> 76 #include <xen/interface/xen.h> 77 #include <xen/interface/hvm/hvm_op.h> 78 #include <xen/interface/version.h> 79 #include <xen/interface/memory.h> 80 #include <xen/hvc-console.h> 81 82 #include "multicalls.h" 83 #include "mmu.h" 84 #include "debugfs.h" 85 86 #ifdef CONFIG_X86_32 87 /* 88 * Identity map, in addition to plain kernel map. This needs to be 89 * large enough to allocate page table pages to allocate the rest. 90 * Each page can map 2MB. 
91 */ 92 #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4) 93 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES); 94 #endif 95 #ifdef CONFIG_X86_64 96 /* l3 pud for userspace vsyscall mapping */ 97 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss; 98 #endif /* CONFIG_X86_64 */ 99 100 /* 101 * Note about cr3 (pagetable base) values: 102 * 103 * xen_cr3 contains the current logical cr3 value; it contains the 104 * last set cr3. This may not be the current effective cr3, because 105 * its update may be being lazily deferred. However, a vcpu looking 106 * at its own cr3 can use this value knowing that it everything will 107 * be self-consistent. 108 * 109 * xen_current_cr3 contains the actual vcpu cr3; it is set once the 110 * hypercall to set the vcpu cr3 is complete (so it may be a little 111 * out of date, but it will never be set early). If one vcpu is 112 * looking at another vcpu's cr3 value, it should use this variable. 113 */ 114 DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */ 115 DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */ 116 117 static phys_addr_t xen_pt_base, xen_pt_size __initdata; 118 119 /* 120 * Just beyond the highest usermode address. STACK_TOP_MAX has a 121 * redzone above it, so round it up to a PGD boundary. 122 */ 123 #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK) 124 125 void make_lowmem_page_readonly(void *vaddr) 126 { 127 pte_t *pte, ptev; 128 unsigned long address = (unsigned long)vaddr; 129 unsigned int level; 130 131 pte = lookup_address(address, &level); 132 if (pte == NULL) 133 return; /* vaddr missing */ 134 135 ptev = pte_wrprotect(*pte); 136 137 if (HYPERVISOR_update_va_mapping(address, ptev, 0)) 138 BUG(); 139 } 140 141 void make_lowmem_page_readwrite(void *vaddr) 142 { 143 pte_t *pte, ptev; 144 unsigned long address = (unsigned long)vaddr; 145 unsigned int level; 146 147 pte = lookup_address(address, &level); 148 if (pte == NULL) 149 return; /* vaddr missing */ 150 151 ptev = pte_mkwrite(*pte); 152 153 if (HYPERVISOR_update_va_mapping(address, ptev, 0)) 154 BUG(); 155 } 156 157 158 static bool xen_page_pinned(void *ptr) 159 { 160 struct page *page = virt_to_page(ptr); 161 162 return PagePinned(page); 163 } 164 165 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) 166 { 167 struct multicall_space mcs; 168 struct mmu_update *u; 169 170 trace_xen_mmu_set_domain_pte(ptep, pteval, domid); 171 172 mcs = xen_mc_entry(sizeof(*u)); 173 u = mcs.args; 174 175 /* ptep might be kmapped when using 32-bit HIGHPTE */ 176 u->ptr = virt_to_machine(ptep).maddr; 177 u->val = pte_val_ma(pteval); 178 179 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); 180 181 xen_mc_issue(PARAVIRT_LAZY_MMU); 182 } 183 EXPORT_SYMBOL_GPL(xen_set_domain_pte); 184 185 static void xen_extend_mmu_update(const struct mmu_update *update) 186 { 187 struct multicall_space mcs; 188 struct mmu_update *u; 189 190 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u)); 191 192 if (mcs.mc != NULL) { 193 mcs.mc->args[1]++; 194 } else { 195 mcs = __xen_mc_entry(sizeof(*u)); 196 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); 197 } 198 199 u = mcs.args; 200 *u = *update; 201 } 202 203 static void xen_extend_mmuext_op(const struct mmuext_op *op) 204 { 205 struct multicall_space mcs; 206 struct mmuext_op *u; 207 208 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u)); 209 210 if (mcs.mc != NULL) { 211 mcs.mc->args[1]++; 212 } else { 213 mcs = __xen_mc_entry(sizeof(*u)); 
214 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); 215 } 216 217 u = mcs.args; 218 *u = *op; 219 } 220 221 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) 222 { 223 struct mmu_update u; 224 225 preempt_disable(); 226 227 xen_mc_batch(); 228 229 /* ptr may be ioremapped for 64-bit pagetable setup */ 230 u.ptr = arbitrary_virt_to_machine(ptr).maddr; 231 u.val = pmd_val_ma(val); 232 xen_extend_mmu_update(&u); 233 234 xen_mc_issue(PARAVIRT_LAZY_MMU); 235 236 preempt_enable(); 237 } 238 239 static void xen_set_pmd(pmd_t *ptr, pmd_t val) 240 { 241 trace_xen_mmu_set_pmd(ptr, val); 242 243 /* If page is not pinned, we can just update the entry 244 directly */ 245 if (!xen_page_pinned(ptr)) { 246 *ptr = val; 247 return; 248 } 249 250 xen_set_pmd_hyper(ptr, val); 251 } 252 253 /* 254 * Associate a virtual page frame with a given physical page frame 255 * and protection flags for that frame. 256 */ 257 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags) 258 { 259 set_pte_vaddr(vaddr, mfn_pte(mfn, flags)); 260 } 261 262 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) 263 { 264 struct mmu_update u; 265 266 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) 267 return false; 268 269 xen_mc_batch(); 270 271 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; 272 u.val = pte_val_ma(pteval); 273 xen_extend_mmu_update(&u); 274 275 xen_mc_issue(PARAVIRT_LAZY_MMU); 276 277 return true; 278 } 279 280 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval) 281 { 282 if (!xen_batched_set_pte(ptep, pteval)) { 283 /* 284 * Could call native_set_pte() here and trap and 285 * emulate the PTE write but with 32-bit guests this 286 * needs two traps (one for each of the two 32-bit 287 * words in the PTE) so do one hypercall directly 288 * instead. 289 */ 290 struct mmu_update u; 291 292 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; 293 u.val = pte_val_ma(pteval); 294 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF); 295 } 296 } 297 298 static void xen_set_pte(pte_t *ptep, pte_t pteval) 299 { 300 trace_xen_mmu_set_pte(ptep, pteval); 301 __xen_set_pte(ptep, pteval); 302 } 303 304 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, 305 pte_t *ptep, pte_t pteval) 306 { 307 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval); 308 __xen_set_pte(ptep, pteval); 309 } 310 311 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, 312 unsigned long addr, pte_t *ptep) 313 { 314 /* Just return the pte as-is. We preserve the bits on commit */ 315 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep); 316 return *ptep; 317 } 318 319 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, 320 pte_t *ptep, pte_t pte) 321 { 322 struct mmu_update u; 323 324 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte); 325 xen_mc_batch(); 326 327 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; 328 u.val = pte_val_ma(pte); 329 xen_extend_mmu_update(&u); 330 331 xen_mc_issue(PARAVIRT_LAZY_MMU); 332 } 333 334 /* Assume pteval_t is equivalent to all the other *val_t types. 
*/ 335 static pteval_t pte_mfn_to_pfn(pteval_t val) 336 { 337 if (val & _PAGE_PRESENT) { 338 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; 339 unsigned long pfn = mfn_to_pfn(mfn); 340 341 pteval_t flags = val & PTE_FLAGS_MASK; 342 if (unlikely(pfn == ~0)) 343 val = flags & ~_PAGE_PRESENT; 344 else 345 val = ((pteval_t)pfn << PAGE_SHIFT) | flags; 346 } 347 348 return val; 349 } 350 351 static pteval_t pte_pfn_to_mfn(pteval_t val) 352 { 353 if (val & _PAGE_PRESENT) { 354 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; 355 pteval_t flags = val & PTE_FLAGS_MASK; 356 unsigned long mfn; 357 358 mfn = __pfn_to_mfn(pfn); 359 360 /* 361 * If there's no mfn for the pfn, then just create an 362 * empty non-present pte. Unfortunately this loses 363 * information about the original pfn, so 364 * pte_mfn_to_pfn is asymmetric. 365 */ 366 if (unlikely(mfn == INVALID_P2M_ENTRY)) { 367 mfn = 0; 368 flags = 0; 369 } else 370 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT); 371 val = ((pteval_t)mfn << PAGE_SHIFT) | flags; 372 } 373 374 return val; 375 } 376 377 __visible pteval_t xen_pte_val(pte_t pte) 378 { 379 pteval_t pteval = pte.pte; 380 381 return pte_mfn_to_pfn(pteval); 382 } 383 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); 384 385 __visible pgdval_t xen_pgd_val(pgd_t pgd) 386 { 387 return pte_mfn_to_pfn(pgd.pgd); 388 } 389 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val); 390 391 __visible pte_t xen_make_pte(pteval_t pte) 392 { 393 pte = pte_pfn_to_mfn(pte); 394 395 return native_make_pte(pte); 396 } 397 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); 398 399 __visible pgd_t xen_make_pgd(pgdval_t pgd) 400 { 401 pgd = pte_pfn_to_mfn(pgd); 402 return native_make_pgd(pgd); 403 } 404 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); 405 406 __visible pmdval_t xen_pmd_val(pmd_t pmd) 407 { 408 return pte_mfn_to_pfn(pmd.pmd); 409 } 410 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); 411 412 static void xen_set_pud_hyper(pud_t *ptr, pud_t val) 413 { 414 struct mmu_update u; 415 416 preempt_disable(); 417 418 xen_mc_batch(); 419 420 /* ptr may be ioremapped for 64-bit pagetable setup */ 421 u.ptr = arbitrary_virt_to_machine(ptr).maddr; 422 u.val = pud_val_ma(val); 423 xen_extend_mmu_update(&u); 424 425 xen_mc_issue(PARAVIRT_LAZY_MMU); 426 427 preempt_enable(); 428 } 429 430 static void xen_set_pud(pud_t *ptr, pud_t val) 431 { 432 trace_xen_mmu_set_pud(ptr, val); 433 434 /* If page is not pinned, we can just update the entry 435 directly */ 436 if (!xen_page_pinned(ptr)) { 437 *ptr = val; 438 return; 439 } 440 441 xen_set_pud_hyper(ptr, val); 442 } 443 444 #ifdef CONFIG_X86_PAE 445 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) 446 { 447 trace_xen_mmu_set_pte_atomic(ptep, pte); 448 set_64bit((u64 *)ptep, native_pte_val(pte)); 449 } 450 451 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 452 { 453 trace_xen_mmu_pte_clear(mm, addr, ptep); 454 if (!xen_batched_set_pte(ptep, native_make_pte(0))) 455 native_pte_clear(mm, addr, ptep); 456 } 457 458 static void xen_pmd_clear(pmd_t *pmdp) 459 { 460 trace_xen_mmu_pmd_clear(pmdp); 461 set_pmd(pmdp, __pmd(0)); 462 } 463 #endif /* CONFIG_X86_PAE */ 464 465 __visible pmd_t xen_make_pmd(pmdval_t pmd) 466 { 467 pmd = pte_pfn_to_mfn(pmd); 468 return native_make_pmd(pmd); 469 } 470 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); 471 472 #if CONFIG_PGTABLE_LEVELS == 4 473 __visible pudval_t xen_pud_val(pud_t pud) 474 { 475 return pte_mfn_to_pfn(pud.pud); 476 } 477 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); 478 479 __visible pud_t xen_make_pud(pudval_t pud) 480 { 481 
pud = pte_pfn_to_mfn(pud); 482 483 return native_make_pud(pud); 484 } 485 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); 486 487 static pgd_t *xen_get_user_pgd(pgd_t *pgd) 488 { 489 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); 490 unsigned offset = pgd - pgd_page; 491 pgd_t *user_ptr = NULL; 492 493 if (offset < pgd_index(USER_LIMIT)) { 494 struct page *page = virt_to_page(pgd_page); 495 user_ptr = (pgd_t *)page->private; 496 if (user_ptr) 497 user_ptr += offset; 498 } 499 500 return user_ptr; 501 } 502 503 static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val) 504 { 505 struct mmu_update u; 506 507 u.ptr = virt_to_machine(ptr).maddr; 508 u.val = p4d_val_ma(val); 509 xen_extend_mmu_update(&u); 510 } 511 512 /* 513 * Raw hypercall-based set_p4d, intended for in early boot before 514 * there's a page structure. This implies: 515 * 1. The only existing pagetable is the kernel's 516 * 2. It is always pinned 517 * 3. It has no user pagetable attached to it 518 */ 519 static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val) 520 { 521 preempt_disable(); 522 523 xen_mc_batch(); 524 525 __xen_set_p4d_hyper(ptr, val); 526 527 xen_mc_issue(PARAVIRT_LAZY_MMU); 528 529 preempt_enable(); 530 } 531 532 static void xen_set_p4d(p4d_t *ptr, p4d_t val) 533 { 534 pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr); 535 pgd_t pgd_val; 536 537 trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val); 538 539 /* If page is not pinned, we can just update the entry 540 directly */ 541 if (!xen_page_pinned(ptr)) { 542 *ptr = val; 543 if (user_ptr) { 544 WARN_ON(xen_page_pinned(user_ptr)); 545 pgd_val.pgd = p4d_val_ma(val); 546 *user_ptr = pgd_val; 547 } 548 return; 549 } 550 551 /* If it's pinned, then we can at least batch the kernel and 552 user updates together. */ 553 xen_mc_batch(); 554 555 __xen_set_p4d_hyper(ptr, val); 556 if (user_ptr) 557 __xen_set_p4d_hyper((p4d_t *)user_ptr, val); 558 559 xen_mc_issue(PARAVIRT_LAZY_MMU); 560 } 561 #endif /* CONFIG_PGTABLE_LEVELS == 4 */ 562 563 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd, 564 int (*func)(struct mm_struct *mm, struct page *, enum pt_level), 565 bool last, unsigned long limit) 566 { 567 int i, nr, flush = 0; 568 569 nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD; 570 for (i = 0; i < nr; i++) { 571 if (!pmd_none(pmd[i])) 572 flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE); 573 } 574 return flush; 575 } 576 577 static int xen_pud_walk(struct mm_struct *mm, pud_t *pud, 578 int (*func)(struct mm_struct *mm, struct page *, enum pt_level), 579 bool last, unsigned long limit) 580 { 581 int i, nr, flush = 0; 582 583 nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD; 584 for (i = 0; i < nr; i++) { 585 pmd_t *pmd; 586 587 if (pud_none(pud[i])) 588 continue; 589 590 pmd = pmd_offset(&pud[i], 0); 591 if (PTRS_PER_PMD > 1) 592 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD); 593 flush |= xen_pmd_walk(mm, pmd, func, 594 last && i == nr - 1, limit); 595 } 596 return flush; 597 } 598 599 static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d, 600 int (*func)(struct mm_struct *mm, struct page *, enum pt_level), 601 bool last, unsigned long limit) 602 { 603 int i, nr, flush = 0; 604 605 nr = last ? 
p4d_index(limit) + 1 : PTRS_PER_P4D; 606 for (i = 0; i < nr; i++) { 607 pud_t *pud; 608 609 if (p4d_none(p4d[i])) 610 continue; 611 612 pud = pud_offset(&p4d[i], 0); 613 if (PTRS_PER_PUD > 1) 614 flush |= (*func)(mm, virt_to_page(pud), PT_PUD); 615 flush |= xen_pud_walk(mm, pud, func, 616 last && i == nr - 1, limit); 617 } 618 return flush; 619 } 620 621 /* 622 * (Yet another) pagetable walker. This one is intended for pinning a 623 * pagetable. This means that it walks a pagetable and calls the 624 * callback function on each page it finds making up the page table, 625 * at every level. It walks the entire pagetable, but it only bothers 626 * pinning pte pages which are below limit. In the normal case this 627 * will be STACK_TOP_MAX, but at boot we need to pin up to 628 * FIXADDR_TOP. 629 * 630 * For 32-bit the important bit is that we don't pin beyond there, 631 * because then we start getting into Xen's ptes. 632 * 633 * For 64-bit, we must skip the Xen hole in the middle of the address 634 * space, just after the big x86-64 virtual hole. 635 */ 636 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, 637 int (*func)(struct mm_struct *mm, struct page *, 638 enum pt_level), 639 unsigned long limit) 640 { 641 int i, nr, flush = 0; 642 unsigned hole_low, hole_high; 643 644 /* The limit is the last byte to be touched */ 645 limit--; 646 BUG_ON(limit >= FIXADDR_TOP); 647 648 /* 649 * 64-bit has a great big hole in the middle of the address 650 * space, which contains the Xen mappings. On 32-bit these 651 * will end up making a zero-sized hole and so is a no-op. 652 */ 653 hole_low = pgd_index(USER_LIMIT); 654 hole_high = pgd_index(PAGE_OFFSET); 655 656 nr = pgd_index(limit) + 1; 657 for (i = 0; i < nr; i++) { 658 p4d_t *p4d; 659 660 if (i >= hole_low && i < hole_high) 661 continue; 662 663 if (pgd_none(pgd[i])) 664 continue; 665 666 p4d = p4d_offset(&pgd[i], 0); 667 if (PTRS_PER_P4D > 1) 668 flush |= (*func)(mm, virt_to_page(p4d), PT_P4D); 669 flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit); 670 } 671 672 /* Do the top level last, so that the callbacks can use it as 673 a cue to do final things like tlb flushes. */ 674 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD); 675 676 return flush; 677 } 678 679 static int xen_pgd_walk(struct mm_struct *mm, 680 int (*func)(struct mm_struct *mm, struct page *, 681 enum pt_level), 682 unsigned long limit) 683 { 684 return __xen_pgd_walk(mm, mm->pgd, func, limit); 685 } 686 687 /* If we're using split pte locks, then take the page's lock and 688 return a pointer to it. Otherwise return NULL. 
*/ 689 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) 690 { 691 spinlock_t *ptl = NULL; 692 693 #if USE_SPLIT_PTE_PTLOCKS 694 ptl = ptlock_ptr(page); 695 spin_lock_nest_lock(ptl, &mm->page_table_lock); 696 #endif 697 698 return ptl; 699 } 700 701 static void xen_pte_unlock(void *v) 702 { 703 spinlock_t *ptl = v; 704 spin_unlock(ptl); 705 } 706 707 static void xen_do_pin(unsigned level, unsigned long pfn) 708 { 709 struct mmuext_op op; 710 711 op.cmd = level; 712 op.arg1.mfn = pfn_to_mfn(pfn); 713 714 xen_extend_mmuext_op(&op); 715 } 716 717 static int xen_pin_page(struct mm_struct *mm, struct page *page, 718 enum pt_level level) 719 { 720 unsigned pgfl = TestSetPagePinned(page); 721 int flush; 722 723 if (pgfl) 724 flush = 0; /* already pinned */ 725 else if (PageHighMem(page)) 726 /* kmaps need flushing if we found an unpinned 727 highpage */ 728 flush = 1; 729 else { 730 void *pt = lowmem_page_address(page); 731 unsigned long pfn = page_to_pfn(page); 732 struct multicall_space mcs = __xen_mc_entry(0); 733 spinlock_t *ptl; 734 735 flush = 0; 736 737 /* 738 * We need to hold the pagetable lock between the time 739 * we make the pagetable RO and when we actually pin 740 * it. If we don't, then other users may come in and 741 * attempt to update the pagetable by writing it, 742 * which will fail because the memory is RO but not 743 * pinned, so Xen won't do the trap'n'emulate. 744 * 745 * If we're using split pte locks, we can't hold the 746 * entire pagetable's worth of locks during the 747 * traverse, because we may wrap the preempt count (8 748 * bits). The solution is to mark RO and pin each PTE 749 * page while holding the lock. This means the number 750 * of locks we end up holding is never more than a 751 * batch size (~32 entries, at present). 752 * 753 * If we're not using split pte locks, we needn't pin 754 * the PTE pages independently, because we're 755 * protected by the overall pagetable lock. 756 */ 757 ptl = NULL; 758 if (level == PT_PTE) 759 ptl = xen_pte_lock(page, mm); 760 761 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, 762 pfn_pte(pfn, PAGE_KERNEL_RO), 763 level == PT_PGD ? UVMF_TLB_FLUSH : 0); 764 765 if (ptl) { 766 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn); 767 768 /* Queue a deferred unlock for when this batch 769 is completed. */ 770 xen_mc_callback(xen_pte_unlock, ptl); 771 } 772 } 773 774 return flush; 775 } 776 777 /* This is called just after a mm has been created, but it has not 778 been used yet. We need to make sure that its pagetable is all 779 read-only, and can be pinned. 
*/ 780 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) 781 { 782 trace_xen_mmu_pgd_pin(mm, pgd); 783 784 xen_mc_batch(); 785 786 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { 787 /* re-enable interrupts for flushing */ 788 xen_mc_issue(0); 789 790 kmap_flush_unused(); 791 792 xen_mc_batch(); 793 } 794 795 #ifdef CONFIG_X86_64 796 { 797 pgd_t *user_pgd = xen_get_user_pgd(pgd); 798 799 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); 800 801 if (user_pgd) { 802 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD); 803 xen_do_pin(MMUEXT_PIN_L4_TABLE, 804 PFN_DOWN(__pa(user_pgd))); 805 } 806 } 807 #else /* CONFIG_X86_32 */ 808 #ifdef CONFIG_X86_PAE 809 /* Need to make sure unshared kernel PMD is pinnable */ 810 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), 811 PT_PMD); 812 #endif 813 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd))); 814 #endif /* CONFIG_X86_64 */ 815 xen_mc_issue(0); 816 } 817 818 static void xen_pgd_pin(struct mm_struct *mm) 819 { 820 __xen_pgd_pin(mm, mm->pgd); 821 } 822 823 /* 824 * On save, we need to pin all pagetables to make sure they get their 825 * mfns turned into pfns. Search the list for any unpinned pgds and pin 826 * them (unpinned pgds are not currently in use, probably because the 827 * process is under construction or destruction). 828 * 829 * Expected to be called in stop_machine() ("equivalent to taking 830 * every spinlock in the system"), so the locking doesn't really 831 * matter all that much. 832 */ 833 void xen_mm_pin_all(void) 834 { 835 struct page *page; 836 837 spin_lock(&pgd_lock); 838 839 list_for_each_entry(page, &pgd_list, lru) { 840 if (!PagePinned(page)) { 841 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page)); 842 SetPageSavePinned(page); 843 } 844 } 845 846 spin_unlock(&pgd_lock); 847 } 848 849 /* 850 * The init_mm pagetable is really pinned as soon as its created, but 851 * that's before we have page structures to store the bits. So do all 852 * the book-keeping now. 853 */ 854 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page, 855 enum pt_level level) 856 { 857 SetPagePinned(page); 858 return 0; 859 } 860 861 static void __init xen_mark_init_mm_pinned(void) 862 { 863 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); 864 } 865 866 static int xen_unpin_page(struct mm_struct *mm, struct page *page, 867 enum pt_level level) 868 { 869 unsigned pgfl = TestClearPagePinned(page); 870 871 if (pgfl && !PageHighMem(page)) { 872 void *pt = lowmem_page_address(page); 873 unsigned long pfn = page_to_pfn(page); 874 spinlock_t *ptl = NULL; 875 struct multicall_space mcs; 876 877 /* 878 * Do the converse to pin_page. If we're using split 879 * pte locks, we must be holding the lock for while 880 * the pte page is unpinned but still RO to prevent 881 * concurrent updates from seeing it in this 882 * partially-pinned state. 883 */ 884 if (level == PT_PTE) { 885 ptl = xen_pte_lock(page, mm); 886 887 if (ptl) 888 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn); 889 } 890 891 mcs = __xen_mc_entry(0); 892 893 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, 894 pfn_pte(pfn, PAGE_KERNEL), 895 level == PT_PGD ? 
UVMF_TLB_FLUSH : 0); 896 897 if (ptl) { 898 /* unlock when batch completed */ 899 xen_mc_callback(xen_pte_unlock, ptl); 900 } 901 } 902 903 return 0; /* never need to flush on unpin */ 904 } 905 906 /* Release a pagetables pages back as normal RW */ 907 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) 908 { 909 trace_xen_mmu_pgd_unpin(mm, pgd); 910 911 xen_mc_batch(); 912 913 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); 914 915 #ifdef CONFIG_X86_64 916 { 917 pgd_t *user_pgd = xen_get_user_pgd(pgd); 918 919 if (user_pgd) { 920 xen_do_pin(MMUEXT_UNPIN_TABLE, 921 PFN_DOWN(__pa(user_pgd))); 922 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD); 923 } 924 } 925 #endif 926 927 #ifdef CONFIG_X86_PAE 928 /* Need to make sure unshared kernel PMD is unpinned */ 929 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), 930 PT_PMD); 931 #endif 932 933 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); 934 935 xen_mc_issue(0); 936 } 937 938 static void xen_pgd_unpin(struct mm_struct *mm) 939 { 940 __xen_pgd_unpin(mm, mm->pgd); 941 } 942 943 /* 944 * On resume, undo any pinning done at save, so that the rest of the 945 * kernel doesn't see any unexpected pinned pagetables. 946 */ 947 void xen_mm_unpin_all(void) 948 { 949 struct page *page; 950 951 spin_lock(&pgd_lock); 952 953 list_for_each_entry(page, &pgd_list, lru) { 954 if (PageSavePinned(page)) { 955 BUG_ON(!PagePinned(page)); 956 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page)); 957 ClearPageSavePinned(page); 958 } 959 } 960 961 spin_unlock(&pgd_lock); 962 } 963 964 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) 965 { 966 spin_lock(&next->page_table_lock); 967 xen_pgd_pin(next); 968 spin_unlock(&next->page_table_lock); 969 } 970 971 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) 972 { 973 spin_lock(&mm->page_table_lock); 974 xen_pgd_pin(mm); 975 spin_unlock(&mm->page_table_lock); 976 } 977 978 static void drop_mm_ref_this_cpu(void *info) 979 { 980 struct mm_struct *mm = info; 981 982 if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) 983 leave_mm(smp_processor_id()); 984 985 /* 986 * If this cpu still has a stale cr3 reference, then make sure 987 * it has been flushed. 988 */ 989 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd)) 990 xen_mc_flush(); 991 } 992 993 #ifdef CONFIG_SMP 994 /* 995 * Another cpu may still have their %cr3 pointing at the pagetable, so 996 * we need to repoint it somewhere else before we can unpin it. 997 */ 998 static void xen_drop_mm_ref(struct mm_struct *mm) 999 { 1000 cpumask_var_t mask; 1001 unsigned cpu; 1002 1003 drop_mm_ref_this_cpu(mm); 1004 1005 /* Get the "official" set of cpus referring to our pagetable. */ 1006 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) { 1007 for_each_online_cpu(cpu) { 1008 if (!cpumask_test_cpu(cpu, mm_cpumask(mm)) 1009 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) 1010 continue; 1011 smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1); 1012 } 1013 return; 1014 } 1015 cpumask_copy(mask, mm_cpumask(mm)); 1016 1017 /* 1018 * It's possible that a vcpu may have a stale reference to our 1019 * cr3, because its in lazy mode, and it hasn't yet flushed 1020 * its set of pending hypercalls yet. In this case, we can 1021 * look at its actual current cr3 value, and force it to flush 1022 * if needed. 
1023 */ 1024 for_each_online_cpu(cpu) { 1025 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) 1026 cpumask_set_cpu(cpu, mask); 1027 } 1028 1029 smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1); 1030 free_cpumask_var(mask); 1031 } 1032 #else 1033 static void xen_drop_mm_ref(struct mm_struct *mm) 1034 { 1035 drop_mm_ref_this_cpu(mm); 1036 } 1037 #endif 1038 1039 /* 1040 * While a process runs, Xen pins its pagetables, which means that the 1041 * hypervisor forces it to be read-only, and it controls all updates 1042 * to it. This means that all pagetable updates have to go via the 1043 * hypervisor, which is moderately expensive. 1044 * 1045 * Since we're pulling the pagetable down, we switch to use init_mm, 1046 * unpin old process pagetable and mark it all read-write, which 1047 * allows further operations on it to be simple memory accesses. 1048 * 1049 * The only subtle point is that another CPU may be still using the 1050 * pagetable because of lazy tlb flushing. This means we need need to 1051 * switch all CPUs off this pagetable before we can unpin it. 1052 */ 1053 static void xen_exit_mmap(struct mm_struct *mm) 1054 { 1055 get_cpu(); /* make sure we don't move around */ 1056 xen_drop_mm_ref(mm); 1057 put_cpu(); 1058 1059 spin_lock(&mm->page_table_lock); 1060 1061 /* pgd may not be pinned in the error exit path of execve */ 1062 if (xen_page_pinned(mm->pgd)) 1063 xen_pgd_unpin(mm); 1064 1065 spin_unlock(&mm->page_table_lock); 1066 } 1067 1068 static void xen_post_allocator_init(void); 1069 1070 static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn) 1071 { 1072 struct mmuext_op op; 1073 1074 op.cmd = cmd; 1075 op.arg1.mfn = pfn_to_mfn(pfn); 1076 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) 1077 BUG(); 1078 } 1079 1080 #ifdef CONFIG_X86_64 1081 static void __init xen_cleanhighmap(unsigned long vaddr, 1082 unsigned long vaddr_end) 1083 { 1084 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; 1085 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr); 1086 1087 /* NOTE: The loop is more greedy than the cleanup_highmap variant. 1088 * We include the PMD passed in on _both_ boundaries. */ 1089 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD)); 1090 pmd++, vaddr += PMD_SIZE) { 1091 if (pmd_none(*pmd)) 1092 continue; 1093 if (vaddr < (unsigned long) _text || vaddr > kernel_end) 1094 set_pmd(pmd, __pmd(0)); 1095 } 1096 /* In case we did something silly, we should crash in this function 1097 * instead of somewhere later and be confusing. */ 1098 xen_mc_flush(); 1099 } 1100 1101 /* 1102 * Make a page range writeable and free it. 
1103 */ 1104 static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size) 1105 { 1106 void *vaddr = __va(paddr); 1107 void *vaddr_end = vaddr + size; 1108 1109 for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) 1110 make_lowmem_page_readwrite(vaddr); 1111 1112 memblock_free(paddr, size); 1113 } 1114 1115 static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin) 1116 { 1117 unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK; 1118 1119 if (unpin) 1120 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa)); 1121 ClearPagePinned(virt_to_page(__va(pa))); 1122 xen_free_ro_pages(pa, PAGE_SIZE); 1123 } 1124 1125 static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin) 1126 { 1127 unsigned long pa; 1128 pte_t *pte_tbl; 1129 int i; 1130 1131 if (pmd_large(*pmd)) { 1132 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK; 1133 xen_free_ro_pages(pa, PMD_SIZE); 1134 return; 1135 } 1136 1137 pte_tbl = pte_offset_kernel(pmd, 0); 1138 for (i = 0; i < PTRS_PER_PTE; i++) { 1139 if (pte_none(pte_tbl[i])) 1140 continue; 1141 pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT; 1142 xen_free_ro_pages(pa, PAGE_SIZE); 1143 } 1144 set_pmd(pmd, __pmd(0)); 1145 xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin); 1146 } 1147 1148 static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin) 1149 { 1150 unsigned long pa; 1151 pmd_t *pmd_tbl; 1152 int i; 1153 1154 if (pud_large(*pud)) { 1155 pa = pud_val(*pud) & PHYSICAL_PAGE_MASK; 1156 xen_free_ro_pages(pa, PUD_SIZE); 1157 return; 1158 } 1159 1160 pmd_tbl = pmd_offset(pud, 0); 1161 for (i = 0; i < PTRS_PER_PMD; i++) { 1162 if (pmd_none(pmd_tbl[i])) 1163 continue; 1164 xen_cleanmfnmap_pmd(pmd_tbl + i, unpin); 1165 } 1166 set_pud(pud, __pud(0)); 1167 xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin); 1168 } 1169 1170 static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin) 1171 { 1172 unsigned long pa; 1173 pud_t *pud_tbl; 1174 int i; 1175 1176 if (p4d_large(*p4d)) { 1177 pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK; 1178 xen_free_ro_pages(pa, P4D_SIZE); 1179 return; 1180 } 1181 1182 pud_tbl = pud_offset(p4d, 0); 1183 for (i = 0; i < PTRS_PER_PUD; i++) { 1184 if (pud_none(pud_tbl[i])) 1185 continue; 1186 xen_cleanmfnmap_pud(pud_tbl + i, unpin); 1187 } 1188 set_p4d(p4d, __p4d(0)); 1189 xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin); 1190 } 1191 1192 /* 1193 * Since it is well isolated we can (and since it is perhaps large we should) 1194 * also free the page tables mapping the initial P->M table. 1195 */ 1196 static void __init xen_cleanmfnmap(unsigned long vaddr) 1197 { 1198 pgd_t *pgd; 1199 p4d_t *p4d; 1200 unsigned int i; 1201 bool unpin; 1202 1203 unpin = (vaddr == 2 * PGDIR_SIZE); 1204 vaddr &= PMD_MASK; 1205 pgd = pgd_offset_k(vaddr); 1206 p4d = p4d_offset(pgd, 0); 1207 for (i = 0; i < PTRS_PER_P4D; i++) { 1208 if (p4d_none(p4d[i])) 1209 continue; 1210 xen_cleanmfnmap_p4d(p4d + i, unpin); 1211 } 1212 if (IS_ENABLED(CONFIG_X86_5LEVEL)) { 1213 set_pgd(pgd, __pgd(0)); 1214 xen_cleanmfnmap_free_pgtbl(p4d, unpin); 1215 } 1216 } 1217 1218 static void __init xen_pagetable_p2m_free(void) 1219 { 1220 unsigned long size; 1221 unsigned long addr; 1222 1223 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); 1224 1225 /* No memory or already called. */ 1226 if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list) 1227 return; 1228 1229 /* using __ka address and sticking INVALID_P2M_ENTRY! */ 1230 memset((void *)xen_start_info->mfn_list, 0xff, size); 1231 1232 addr = xen_start_info->mfn_list; 1233 /* 1234 * We could be in __ka space. 
1235 * We roundup to the PMD, which means that if anybody at this stage is 1236 * using the __ka address of xen_start_info or 1237 * xen_start_info->shared_info they are in going to crash. Fortunatly 1238 * we have already revectored in xen_setup_kernel_pagetable and in 1239 * xen_setup_shared_info. 1240 */ 1241 size = roundup(size, PMD_SIZE); 1242 1243 if (addr >= __START_KERNEL_map) { 1244 xen_cleanhighmap(addr, addr + size); 1245 size = PAGE_ALIGN(xen_start_info->nr_pages * 1246 sizeof(unsigned long)); 1247 memblock_free(__pa(addr), size); 1248 } else { 1249 xen_cleanmfnmap(addr); 1250 } 1251 } 1252 1253 static void __init xen_pagetable_cleanhighmap(void) 1254 { 1255 unsigned long size; 1256 unsigned long addr; 1257 1258 /* At this stage, cleanup_highmap has already cleaned __ka space 1259 * from _brk_limit way up to the max_pfn_mapped (which is the end of 1260 * the ramdisk). We continue on, erasing PMD entries that point to page 1261 * tables - do note that they are accessible at this stage via __va. 1262 * For good measure we also round up to the PMD - which means that if 1263 * anybody is using __ka address to the initial boot-stack - and try 1264 * to use it - they are going to crash. The xen_start_info has been 1265 * taken care of already in xen_setup_kernel_pagetable. */ 1266 addr = xen_start_info->pt_base; 1267 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE); 1268 1269 xen_cleanhighmap(addr, addr + size); 1270 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base)); 1271 #ifdef DEBUG 1272 /* This is superfluous and is not necessary, but you know what 1273 * lets do it. The MODULES_VADDR -> MODULES_END should be clear of 1274 * anything at this stage. */ 1275 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1); 1276 #endif 1277 } 1278 #endif 1279 1280 static void __init xen_pagetable_p2m_setup(void) 1281 { 1282 xen_vmalloc_p2m_tree(); 1283 1284 #ifdef CONFIG_X86_64 1285 xen_pagetable_p2m_free(); 1286 1287 xen_pagetable_cleanhighmap(); 1288 #endif 1289 /* And revector! 
Bye bye old array */ 1290 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr; 1291 } 1292 1293 static void __init xen_pagetable_init(void) 1294 { 1295 paging_init(); 1296 xen_post_allocator_init(); 1297 1298 xen_pagetable_p2m_setup(); 1299 1300 /* Allocate and initialize top and mid mfn levels for p2m structure */ 1301 xen_build_mfn_list_list(); 1302 1303 /* Remap memory freed due to conflicts with E820 map */ 1304 xen_remap_memory(); 1305 1306 xen_setup_shared_info(); 1307 } 1308 static void xen_write_cr2(unsigned long cr2) 1309 { 1310 this_cpu_read(xen_vcpu)->arch.cr2 = cr2; 1311 } 1312 1313 static unsigned long xen_read_cr2(void) 1314 { 1315 return this_cpu_read(xen_vcpu)->arch.cr2; 1316 } 1317 1318 unsigned long xen_read_cr2_direct(void) 1319 { 1320 return this_cpu_read(xen_vcpu_info.arch.cr2); 1321 } 1322 1323 static void xen_flush_tlb(void) 1324 { 1325 struct mmuext_op *op; 1326 struct multicall_space mcs; 1327 1328 trace_xen_mmu_flush_tlb(0); 1329 1330 preempt_disable(); 1331 1332 mcs = xen_mc_entry(sizeof(*op)); 1333 1334 op = mcs.args; 1335 op->cmd = MMUEXT_TLB_FLUSH_LOCAL; 1336 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 1337 1338 xen_mc_issue(PARAVIRT_LAZY_MMU); 1339 1340 preempt_enable(); 1341 } 1342 1343 static void xen_flush_tlb_single(unsigned long addr) 1344 { 1345 struct mmuext_op *op; 1346 struct multicall_space mcs; 1347 1348 trace_xen_mmu_flush_tlb_single(addr); 1349 1350 preempt_disable(); 1351 1352 mcs = xen_mc_entry(sizeof(*op)); 1353 op = mcs.args; 1354 op->cmd = MMUEXT_INVLPG_LOCAL; 1355 op->arg1.linear_addr = addr & PAGE_MASK; 1356 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 1357 1358 xen_mc_issue(PARAVIRT_LAZY_MMU); 1359 1360 preempt_enable(); 1361 } 1362 1363 static void xen_flush_tlb_others(const struct cpumask *cpus, 1364 const struct flush_tlb_info *info) 1365 { 1366 struct { 1367 struct mmuext_op op; 1368 #ifdef CONFIG_SMP 1369 DECLARE_BITMAP(mask, num_processors); 1370 #else 1371 DECLARE_BITMAP(mask, NR_CPUS); 1372 #endif 1373 } *args; 1374 struct multicall_space mcs; 1375 1376 trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end); 1377 1378 if (cpumask_empty(cpus)) 1379 return; /* nothing to do */ 1380 1381 mcs = xen_mc_entry(sizeof(*args)); 1382 args = mcs.args; 1383 args->op.arg2.vcpumask = to_cpumask(args->mask); 1384 1385 /* Remove us, and any offline CPUS. */ 1386 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask); 1387 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); 1388 1389 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; 1390 if (info->end != TLB_FLUSH_ALL && 1391 (info->end - info->start) <= PAGE_SIZE) { 1392 args->op.cmd = MMUEXT_INVLPG_MULTI; 1393 args->op.arg1.linear_addr = info->start; 1394 } 1395 1396 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); 1397 1398 xen_mc_issue(PARAVIRT_LAZY_MMU); 1399 } 1400 1401 static unsigned long xen_read_cr3(void) 1402 { 1403 return this_cpu_read(xen_cr3); 1404 } 1405 1406 static void set_current_cr3(void *v) 1407 { 1408 this_cpu_write(xen_current_cr3, (unsigned long)v); 1409 } 1410 1411 static void __xen_write_cr3(bool kernel, unsigned long cr3) 1412 { 1413 struct mmuext_op op; 1414 unsigned long mfn; 1415 1416 trace_xen_mmu_write_cr3(kernel, cr3); 1417 1418 if (cr3) 1419 mfn = pfn_to_mfn(PFN_DOWN(cr3)); 1420 else 1421 mfn = 0; 1422 1423 WARN_ON(mfn == 0 && kernel); 1424 1425 op.cmd = kernel ? 
MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; 1426 op.arg1.mfn = mfn; 1427 1428 xen_extend_mmuext_op(&op); 1429 1430 if (kernel) { 1431 this_cpu_write(xen_cr3, cr3); 1432 1433 /* Update xen_current_cr3 once the batch has actually 1434 been submitted. */ 1435 xen_mc_callback(set_current_cr3, (void *)cr3); 1436 } 1437 } 1438 static void xen_write_cr3(unsigned long cr3) 1439 { 1440 BUG_ON(preemptible()); 1441 1442 xen_mc_batch(); /* disables interrupts */ 1443 1444 /* Update while interrupts are disabled, so its atomic with 1445 respect to ipis */ 1446 this_cpu_write(xen_cr3, cr3); 1447 1448 __xen_write_cr3(true, cr3); 1449 1450 #ifdef CONFIG_X86_64 1451 { 1452 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3)); 1453 if (user_pgd) 1454 __xen_write_cr3(false, __pa(user_pgd)); 1455 else 1456 __xen_write_cr3(false, 0); 1457 } 1458 #endif 1459 1460 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ 1461 } 1462 1463 #ifdef CONFIG_X86_64 1464 /* 1465 * At the start of the day - when Xen launches a guest, it has already 1466 * built pagetables for the guest. We diligently look over them 1467 * in xen_setup_kernel_pagetable and graft as appropriate them in the 1468 * init_top_pgt and its friends. Then when we are happy we load 1469 * the new init_top_pgt - and continue on. 1470 * 1471 * The generic code starts (start_kernel) and 'init_mem_mapping' sets 1472 * up the rest of the pagetables. When it has completed it loads the cr3. 1473 * N.B. that baremetal would start at 'start_kernel' (and the early 1474 * #PF handler would create bootstrap pagetables) - so we are running 1475 * with the same assumptions as what to do when write_cr3 is executed 1476 * at this point. 1477 * 1478 * Since there are no user-page tables at all, we have two variants 1479 * of xen_write_cr3 - the early bootup (this one), and the late one 1480 * (xen_write_cr3). The reason we have to do that is that in 64-bit 1481 * the Linux kernel and user-space are both in ring 3 while the 1482 * hypervisor is in ring 0. 1483 */ 1484 static void __init xen_write_cr3_init(unsigned long cr3) 1485 { 1486 BUG_ON(preemptible()); 1487 1488 xen_mc_batch(); /* disables interrupts */ 1489 1490 /* Update while interrupts are disabled, so its atomic with 1491 respect to ipis */ 1492 this_cpu_write(xen_cr3, cr3); 1493 1494 __xen_write_cr3(true, cr3); 1495 1496 xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ 1497 } 1498 #endif 1499 1500 static int xen_pgd_alloc(struct mm_struct *mm) 1501 { 1502 pgd_t *pgd = mm->pgd; 1503 int ret = 0; 1504 1505 BUG_ON(PagePinned(virt_to_page(pgd))); 1506 1507 #ifdef CONFIG_X86_64 1508 { 1509 struct page *page = virt_to_page(pgd); 1510 pgd_t *user_pgd; 1511 1512 BUG_ON(page->private != 0); 1513 1514 ret = -ENOMEM; 1515 1516 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); 1517 page->private = (unsigned long)user_pgd; 1518 1519 if (user_pgd != NULL) { 1520 #ifdef CONFIG_X86_VSYSCALL_EMULATION 1521 user_pgd[pgd_index(VSYSCALL_ADDR)] = 1522 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); 1523 #endif 1524 ret = 0; 1525 } 1526 1527 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); 1528 } 1529 #endif 1530 return ret; 1531 } 1532 1533 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) 1534 { 1535 #ifdef CONFIG_X86_64 1536 pgd_t *user_pgd = xen_get_user_pgd(pgd); 1537 1538 if (user_pgd) 1539 free_page((unsigned long)user_pgd); 1540 #endif 1541 } 1542 1543 /* 1544 * Init-time set_pte while constructing initial pagetables, which 1545 * doesn't allow RO page table pages to be remapped RW. 
1546 * 1547 * If there is no MFN for this PFN then this page is initially 1548 * ballooned out so clear the PTE (as in decrease_reservation() in 1549 * drivers/xen/balloon.c). 1550 * 1551 * Many of these PTE updates are done on unpinned and writable pages 1552 * and doing a hypercall for these is unnecessary and expensive. At 1553 * this point it is not possible to tell if a page is pinned or not, 1554 * so always write the PTE directly and rely on Xen trapping and 1555 * emulating any updates as necessary. 1556 */ 1557 __visible pte_t xen_make_pte_init(pteval_t pte) 1558 { 1559 #ifdef CONFIG_X86_64 1560 unsigned long pfn; 1561 1562 /* 1563 * Pages belonging to the initial p2m list mapped outside the default 1564 * address range must be mapped read-only. This region contains the 1565 * page tables for mapping the p2m list, too, and page tables MUST be 1566 * mapped read-only. 1567 */ 1568 pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT; 1569 if (xen_start_info->mfn_list < __START_KERNEL_map && 1570 pfn >= xen_start_info->first_p2m_pfn && 1571 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames) 1572 pte &= ~_PAGE_RW; 1573 #endif 1574 pte = pte_pfn_to_mfn(pte); 1575 return native_make_pte(pte); 1576 } 1577 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init); 1578 1579 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) 1580 { 1581 #ifdef CONFIG_X86_32 1582 /* If there's an existing pte, then don't allow _PAGE_RW to be set */ 1583 if (pte_mfn(pte) != INVALID_P2M_ENTRY 1584 && pte_val_ma(*ptep) & _PAGE_PRESENT) 1585 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & 1586 pte_val_ma(pte)); 1587 #endif 1588 native_set_pte(ptep, pte); 1589 } 1590 1591 /* Early in boot, while setting up the initial pagetable, assume 1592 everything is pinned. */ 1593 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) 1594 { 1595 #ifdef CONFIG_FLATMEM 1596 BUG_ON(mem_map); /* should only be used early */ 1597 #endif 1598 make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); 1599 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); 1600 } 1601 1602 /* Used for pmd and pud */ 1603 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) 1604 { 1605 #ifdef CONFIG_FLATMEM 1606 BUG_ON(mem_map); /* should only be used early */ 1607 #endif 1608 make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); 1609 } 1610 1611 /* Early release_pte assumes that all pts are pinned, since there's 1612 only init_mm and anything attached to that is pinned. 
*/ 1613 static void __init xen_release_pte_init(unsigned long pfn) 1614 { 1615 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); 1616 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 1617 } 1618 1619 static void __init xen_release_pmd_init(unsigned long pfn) 1620 { 1621 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); 1622 } 1623 1624 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn) 1625 { 1626 struct multicall_space mcs; 1627 struct mmuext_op *op; 1628 1629 mcs = __xen_mc_entry(sizeof(*op)); 1630 op = mcs.args; 1631 op->cmd = cmd; 1632 op->arg1.mfn = pfn_to_mfn(pfn); 1633 1634 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); 1635 } 1636 1637 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot) 1638 { 1639 struct multicall_space mcs; 1640 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT); 1641 1642 mcs = __xen_mc_entry(0); 1643 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr, 1644 pfn_pte(pfn, prot), 0); 1645 } 1646 1647 /* This needs to make sure the new pte page is pinned iff its being 1648 attached to a pinned pagetable. */ 1649 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, 1650 unsigned level) 1651 { 1652 bool pinned = PagePinned(virt_to_page(mm->pgd)); 1653 1654 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned); 1655 1656 if (pinned) { 1657 struct page *page = pfn_to_page(pfn); 1658 1659 SetPagePinned(page); 1660 1661 if (!PageHighMem(page)) { 1662 xen_mc_batch(); 1663 1664 __set_pfn_prot(pfn, PAGE_KERNEL_RO); 1665 1666 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) 1667 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); 1668 1669 xen_mc_issue(PARAVIRT_LAZY_MMU); 1670 } else { 1671 /* make sure there are no stray mappings of 1672 this page */ 1673 kmap_flush_unused(); 1674 } 1675 } 1676 } 1677 1678 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) 1679 { 1680 xen_alloc_ptpage(mm, pfn, PT_PTE); 1681 } 1682 1683 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) 1684 { 1685 xen_alloc_ptpage(mm, pfn, PT_PMD); 1686 } 1687 1688 /* This should never happen until we're OK to use struct page */ 1689 static inline void xen_release_ptpage(unsigned long pfn, unsigned level) 1690 { 1691 struct page *page = pfn_to_page(pfn); 1692 bool pinned = PagePinned(page); 1693 1694 trace_xen_mmu_release_ptpage(pfn, level, pinned); 1695 1696 if (pinned) { 1697 if (!PageHighMem(page)) { 1698 xen_mc_batch(); 1699 1700 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) 1701 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); 1702 1703 __set_pfn_prot(pfn, PAGE_KERNEL); 1704 1705 xen_mc_issue(PARAVIRT_LAZY_MMU); 1706 } 1707 ClearPagePinned(page); 1708 } 1709 } 1710 1711 static void xen_release_pte(unsigned long pfn) 1712 { 1713 xen_release_ptpage(pfn, PT_PTE); 1714 } 1715 1716 static void xen_release_pmd(unsigned long pfn) 1717 { 1718 xen_release_ptpage(pfn, PT_PMD); 1719 } 1720 1721 #if CONFIG_PGTABLE_LEVELS >= 4 1722 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) 1723 { 1724 xen_alloc_ptpage(mm, pfn, PT_PUD); 1725 } 1726 1727 static void xen_release_pud(unsigned long pfn) 1728 { 1729 xen_release_ptpage(pfn, PT_PUD); 1730 } 1731 #endif 1732 1733 void __init xen_reserve_top(void) 1734 { 1735 #ifdef CONFIG_X86_32 1736 unsigned long top = HYPERVISOR_VIRT_START; 1737 struct xen_platform_parameters pp; 1738 1739 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) 1740 top = pp.virt_start; 1741 1742 reserve_top_address(-top); 1743 #endif /* CONFIG_X86_32 */ 1744 } 1745 1746 /* 1747 * 
Like __va(), but returns address in the kernel mapping (which is 1748 * all we have until the physical memory mapping has been set up. 1749 */ 1750 static void * __init __ka(phys_addr_t paddr) 1751 { 1752 #ifdef CONFIG_X86_64 1753 return (void *)(paddr + __START_KERNEL_map); 1754 #else 1755 return __va(paddr); 1756 #endif 1757 } 1758 1759 /* Convert a machine address to physical address */ 1760 static unsigned long __init m2p(phys_addr_t maddr) 1761 { 1762 phys_addr_t paddr; 1763 1764 maddr &= PTE_PFN_MASK; 1765 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT; 1766 1767 return paddr; 1768 } 1769 1770 /* Convert a machine address to kernel virtual */ 1771 static void * __init m2v(phys_addr_t maddr) 1772 { 1773 return __ka(m2p(maddr)); 1774 } 1775 1776 /* Set the page permissions on an identity-mapped pages */ 1777 static void __init set_page_prot_flags(void *addr, pgprot_t prot, 1778 unsigned long flags) 1779 { 1780 unsigned long pfn = __pa(addr) >> PAGE_SHIFT; 1781 pte_t pte = pfn_pte(pfn, prot); 1782 1783 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) 1784 BUG(); 1785 } 1786 static void __init set_page_prot(void *addr, pgprot_t prot) 1787 { 1788 return set_page_prot_flags(addr, prot, UVMF_NONE); 1789 } 1790 #ifdef CONFIG_X86_32 1791 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) 1792 { 1793 unsigned pmdidx, pteidx; 1794 unsigned ident_pte; 1795 unsigned long pfn; 1796 1797 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES, 1798 PAGE_SIZE); 1799 1800 ident_pte = 0; 1801 pfn = 0; 1802 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { 1803 pte_t *pte_page; 1804 1805 /* Reuse or allocate a page of ptes */ 1806 if (pmd_present(pmd[pmdidx])) 1807 pte_page = m2v(pmd[pmdidx].pmd); 1808 else { 1809 /* Check for free pte pages */ 1810 if (ident_pte == LEVEL1_IDENT_ENTRIES) 1811 break; 1812 1813 pte_page = &level1_ident_pgt[ident_pte]; 1814 ident_pte += PTRS_PER_PTE; 1815 1816 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE); 1817 } 1818 1819 /* Install mappings */ 1820 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { 1821 pte_t pte; 1822 1823 if (pfn > max_pfn_mapped) 1824 max_pfn_mapped = pfn; 1825 1826 if (!pte_none(pte_page[pteidx])) 1827 continue; 1828 1829 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC); 1830 pte_page[pteidx] = pte; 1831 } 1832 } 1833 1834 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE) 1835 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO); 1836 1837 set_page_prot(pmd, PAGE_KERNEL_RO); 1838 } 1839 #endif 1840 void __init xen_setup_machphys_mapping(void) 1841 { 1842 struct xen_machphys_mapping mapping; 1843 1844 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { 1845 machine_to_phys_mapping = (unsigned long *)mapping.v_start; 1846 machine_to_phys_nr = mapping.max_mfn + 1; 1847 } else { 1848 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; 1849 } 1850 #ifdef CONFIG_X86_32 1851 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) 1852 < machine_to_phys_mapping); 1853 #endif 1854 } 1855 1856 #ifdef CONFIG_X86_64 1857 static void __init convert_pfn_mfn(void *v) 1858 { 1859 pte_t *pte = v; 1860 int i; 1861 1862 /* All levels are converted the same way, so just treat them 1863 as ptes. 
*/ 1864 for (i = 0; i < PTRS_PER_PTE; i++) 1865 pte[i] = xen_make_pte(pte[i].pte); 1866 } 1867 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, 1868 unsigned long addr) 1869 { 1870 if (*pt_base == PFN_DOWN(__pa(addr))) { 1871 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); 1872 clear_page((void *)addr); 1873 (*pt_base)++; 1874 } 1875 if (*pt_end == PFN_DOWN(__pa(addr))) { 1876 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); 1877 clear_page((void *)addr); 1878 (*pt_end)--; 1879 } 1880 } 1881 /* 1882 * Set up the initial kernel pagetable. 1883 * 1884 * We can construct this by grafting the Xen provided pagetable into 1885 * head_64.S's preconstructed pagetables. We copy the Xen L2's into 1886 * level2_ident_pgt, and level2_kernel_pgt. This means that only the 1887 * kernel has a physical mapping to start with - but that's enough to 1888 * get __va working. We need to fill in the rest of the physical 1889 * mapping once some sort of allocator has been set up. 1890 */ 1891 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) 1892 { 1893 pud_t *l3; 1894 pmd_t *l2; 1895 unsigned long addr[3]; 1896 unsigned long pt_base, pt_end; 1897 unsigned i; 1898 1899 /* max_pfn_mapped is the last pfn mapped in the initial memory 1900 * mappings. Considering that on Xen after the kernel mappings we 1901 * have the mappings of some pages that don't exist in pfn space, we 1902 * set max_pfn_mapped to the last real pfn mapped. */ 1903 if (xen_start_info->mfn_list < __START_KERNEL_map) 1904 max_pfn_mapped = xen_start_info->first_p2m_pfn; 1905 else 1906 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); 1907 1908 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base)); 1909 pt_end = pt_base + xen_start_info->nr_pt_frames; 1910 1911 /* Zap identity mapping */ 1912 init_top_pgt[0] = __pgd(0); 1913 1914 /* Pre-constructed entries are in pfn, so convert to mfn */ 1915 /* L4[272] -> level3_ident_pgt */ 1916 /* L4[511] -> level3_kernel_pgt */ 1917 convert_pfn_mfn(init_top_pgt); 1918 1919 /* L3_i[0] -> level2_ident_pgt */ 1920 convert_pfn_mfn(level3_ident_pgt); 1921 /* L3_k[510] -> level2_kernel_pgt */ 1922 /* L3_k[511] -> level2_fixmap_pgt */ 1923 convert_pfn_mfn(level3_kernel_pgt); 1924 1925 /* L3_k[511][506] -> level1_fixmap_pgt */ 1926 convert_pfn_mfn(level2_fixmap_pgt); 1927 1928 /* We get [511][511] and have Xen's version of level2_kernel_pgt */ 1929 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); 1930 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); 1931 1932 addr[0] = (unsigned long)pgd; 1933 addr[1] = (unsigned long)l3; 1934 addr[2] = (unsigned long)l2; 1935 /* Graft it onto L4[272][0]. Note that we creating an aliasing problem: 1936 * Both L4[272][0] and L4[511][510] have entries that point to the same 1937 * L2 (PMD) tables. Meaning that if you modify it in __va space 1938 * it will be also modified in the __ka space! (But if you just 1939 * modify the PMD table to point to other PTE's or none, then you 1940 * are OK - which is what cleanup_highmap does) */ 1941 copy_page(level2_ident_pgt, l2); 1942 /* Graft it onto L4[511][510] */ 1943 copy_page(level2_kernel_pgt, l2); 1944 1945 /* Copy the initial P->M table mappings if necessary. 
*/ 1946 i = pgd_index(xen_start_info->mfn_list); 1947 if (i && i < pgd_index(__START_KERNEL_map)) 1948 init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i]; 1949 1950 /* Make pagetable pieces RO */ 1951 set_page_prot(init_top_pgt, PAGE_KERNEL_RO); 1952 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); 1953 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); 1954 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); 1955 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); 1956 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); 1957 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); 1958 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); 1959 1960 /* Pin down new L4 */ 1961 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, 1962 PFN_DOWN(__pa_symbol(init_top_pgt))); 1963 1964 /* Unpin Xen-provided one */ 1965 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); 1966 1967 /* 1968 * At this stage there can be no user pgd, and no page structure to 1969 * attach it to, so make sure we just set kernel pgd. 1970 */ 1971 xen_mc_batch(); 1972 __xen_write_cr3(true, __pa(init_top_pgt)); 1973 xen_mc_issue(PARAVIRT_LAZY_CPU); 1974 1975 /* We can't that easily rip out L3 and L2, as the Xen pagetables are 1976 * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for 1977 * the initial domain. For guests using the toolstack, they are in: 1978 * [L4], [L3], [L2], [L1], [L1], order .. So for dom0 we can only 1979 * rip out the [L4] (pgd), but for guests we shave off three pages. 1980 */ 1981 for (i = 0; i < ARRAY_SIZE(addr); i++) 1982 check_pt_base(&pt_base, &pt_end, addr[i]); 1983 1984 /* Our (by three pages) smaller Xen pagetable that we are using */ 1985 xen_pt_base = PFN_PHYS(pt_base); 1986 xen_pt_size = (pt_end - pt_base) * PAGE_SIZE; 1987 memblock_reserve(xen_pt_base, xen_pt_size); 1988 1989 /* Revector the xen_start_info */ 1990 xen_start_info = (struct start_info *)__va(__pa(xen_start_info)); 1991 } 1992 1993 /* 1994 * Read a value from a physical address. 1995 */ 1996 static unsigned long __init xen_read_phys_ulong(phys_addr_t addr) 1997 { 1998 unsigned long *vaddr; 1999 unsigned long val; 2000 2001 vaddr = early_memremap_ro(addr, sizeof(val)); 2002 val = *vaddr; 2003 early_memunmap(vaddr, sizeof(val)); 2004 return val; 2005 } 2006 2007 /* 2008 * Translate a virtual address to a physical one without relying on mapped 2009 * page tables. Don't rely on big pages being aligned in (guest) physical 2010 * space! 
 */
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
{
	phys_addr_t pa;
	pgd_t pgd;
	pud_t pud;
	pmd_t pmd;
	pte_t pte;

	pa = read_cr3_pa();
	pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
						       sizeof(pgd)));
	if (!pgd_present(pgd))
		return 0;

	pa = pgd_val(pgd) & PTE_PFN_MASK;
	pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
						       sizeof(pud)));
	if (!pud_present(pud))
		return 0;
	pa = pud_val(pud) & PTE_PFN_MASK;
	if (pud_large(pud))
		return pa + (vaddr & ~PUD_MASK);

	pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
						       sizeof(pmd)));
	if (!pmd_present(pmd))
		return 0;
	pa = pmd_val(pmd) & PTE_PFN_MASK;
	if (pmd_large(pmd))
		return pa + (vaddr & ~PMD_MASK);

	pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
						       sizeof(pte)));
	if (!pte_present(pte))
		return 0;
	pa = pte_pfn(pte) << PAGE_SHIFT;

	return pa | (vaddr & ~PAGE_MASK);
}

/*
 * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
 * this area.
 */
void __init xen_relocate_p2m(void)
{
	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
	int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
	pte_t *pt;
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgd_t *pgd;
	unsigned long *new_p2m;
	int save_pud;

	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
	n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
	n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
	n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
	n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
	if (PTRS_PER_P4D > 1)
		n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
	else
		n_p4d = 0;
	n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;

	new_area = xen_find_free_area(PFN_PHYS(n_frames));
	if (!new_area) {
		xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
		BUG();
	}

	/*
	 * Set up the page tables for addressing the new p2m list.
	 * We have asked the hypervisor to map the p2m list at the user address
	 * PUD_SIZE. It may have done so, or it may have used a kernel space
	 * address depending on the Xen version.
	 * To avoid any possible virtual address collision, just use
	 * 2 * PUD_SIZE for the new area.
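	 *
	 * Layout of the new area, from low to high: the (optional) p4d
	 * frames, then the pud, pmd and pte frames, followed by the p2m
	 * data itself.  Worked example with 4-level paging and a 4 GiB
	 * domain (nr_pages = 1M): size = 8 MiB, so n_pte = 2048 (one pte
	 * per p2m data frame), n_pt = 4, n_pmd = 1, n_pud = 1, n_p4d = 0,
	 * giving n_frames = 2054 pages in total.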
2093 */ 2094 p4d_phys = new_area; 2095 pud_phys = p4d_phys + PFN_PHYS(n_p4d); 2096 pmd_phys = pud_phys + PFN_PHYS(n_pud); 2097 pt_phys = pmd_phys + PFN_PHYS(n_pmd); 2098 p2m_pfn = PFN_DOWN(pt_phys) + n_pt; 2099 2100 pgd = __va(read_cr3_pa()); 2101 new_p2m = (unsigned long *)(2 * PGDIR_SIZE); 2102 idx_p4d = 0; 2103 save_pud = n_pud; 2104 do { 2105 if (n_p4d > 0) { 2106 p4d = early_memremap(p4d_phys, PAGE_SIZE); 2107 clear_page(p4d); 2108 n_pud = min(save_pud, PTRS_PER_P4D); 2109 } 2110 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { 2111 pud = early_memremap(pud_phys, PAGE_SIZE); 2112 clear_page(pud); 2113 for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD); 2114 idx_pmd++) { 2115 pmd = early_memremap(pmd_phys, PAGE_SIZE); 2116 clear_page(pmd); 2117 for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD); 2118 idx_pt++) { 2119 pt = early_memremap(pt_phys, PAGE_SIZE); 2120 clear_page(pt); 2121 for (idx_pte = 0; 2122 idx_pte < min(n_pte, PTRS_PER_PTE); 2123 idx_pte++) { 2124 set_pte(pt + idx_pte, 2125 pfn_pte(p2m_pfn, PAGE_KERNEL)); 2126 p2m_pfn++; 2127 } 2128 n_pte -= PTRS_PER_PTE; 2129 early_memunmap(pt, PAGE_SIZE); 2130 make_lowmem_page_readonly(__va(pt_phys)); 2131 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, 2132 PFN_DOWN(pt_phys)); 2133 set_pmd(pmd + idx_pt, 2134 __pmd(_PAGE_TABLE | pt_phys)); 2135 pt_phys += PAGE_SIZE; 2136 } 2137 n_pt -= PTRS_PER_PMD; 2138 early_memunmap(pmd, PAGE_SIZE); 2139 make_lowmem_page_readonly(__va(pmd_phys)); 2140 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, 2141 PFN_DOWN(pmd_phys)); 2142 set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); 2143 pmd_phys += PAGE_SIZE; 2144 } 2145 n_pmd -= PTRS_PER_PUD; 2146 early_memunmap(pud, PAGE_SIZE); 2147 make_lowmem_page_readonly(__va(pud_phys)); 2148 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys)); 2149 if (n_p4d > 0) 2150 set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys)); 2151 else 2152 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); 2153 pud_phys += PAGE_SIZE; 2154 } 2155 if (n_p4d > 0) { 2156 save_pud -= PTRS_PER_P4D; 2157 early_memunmap(p4d, PAGE_SIZE); 2158 make_lowmem_page_readonly(__va(p4d_phys)); 2159 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys)); 2160 set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys)); 2161 p4d_phys += PAGE_SIZE; 2162 } 2163 } while (++idx_p4d < n_p4d); 2164 2165 /* Now copy the old p2m info to the new area. */ 2166 memcpy(new_p2m, xen_p2m_addr, size); 2167 xen_p2m_addr = new_p2m; 2168 2169 /* Release the old p2m list and set new list info. 
	 */
	p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
	BUG_ON(!p2m_pfn);
	p2m_pfn_end = p2m_pfn + PFN_DOWN(size);

	if (xen_start_info->mfn_list < __START_KERNEL_map) {
		pfn = xen_start_info->first_p2m_pfn;
		pfn_end = xen_start_info->first_p2m_pfn +
			  xen_start_info->nr_p2m_frames;
		set_pgd(pgd + 1, __pgd(0));
	} else {
		pfn = p2m_pfn;
		pfn_end = p2m_pfn_end;
	}

	memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
	while (pfn < pfn_end) {
		if (pfn == p2m_pfn) {
			pfn = p2m_pfn_end;
			continue;
		}
		make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		pfn++;
	}

	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
	xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
	xen_start_info->nr_p2m_frames = n_frames;
}

#else	/* !CONFIG_X86_64 */
static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);

static void __init xen_write_cr3_init(unsigned long cr3)
{
	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));

	BUG_ON(read_cr3_pa() != __pa(initial_page_table));
	BUG_ON(cr3 != __pa(swapper_pg_dir));

	/*
	 * We are switching to swapper_pg_dir for the first time (from
	 * initial_page_table) and therefore need to mark that page
	 * read-only and then pin it.
	 *
	 * Xen disallows sharing of kernel PMDs for PAE
	 * guests. Therefore we must copy the kernel PMD from
	 * initial_page_table into a new kernel PMD to be used in
	 * swapper_pg_dir.
	 */
	swapper_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
	copy_page(swapper_kernel_pmd, initial_kernel_pmd);
	swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);

	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	xen_write_cr3(cr3);
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	set_page_prot(initial_page_table, PAGE_KERNEL);
	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);

	pv_mmu_ops.write_cr3 = &xen_write_cr3;
}

/*
 * For 32 bit domains xen_start_info->pt_base is the pgd address which might
 * not be the first page table in the page table pool.
 * Iterate through the initial page tables to find the real page table base.
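 *
 * Every present, non-large PMD entry holds the MFN of a lower level page
 * table; m2p() turns that back into a (pseudo-)physical address so the
 * lowest frame of the pool can be found.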
2243 */ 2244 static phys_addr_t xen_find_pt_base(pmd_t *pmd) 2245 { 2246 phys_addr_t pt_base, paddr; 2247 unsigned pmdidx; 2248 2249 pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd)); 2250 2251 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) 2252 if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) { 2253 paddr = m2p(pmd[pmdidx].pmd); 2254 pt_base = min(pt_base, paddr); 2255 } 2256 2257 return pt_base; 2258 } 2259 2260 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) 2261 { 2262 pmd_t *kernel_pmd; 2263 2264 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); 2265 2266 xen_pt_base = xen_find_pt_base(kernel_pmd); 2267 xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE; 2268 2269 initial_kernel_pmd = 2270 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); 2271 2272 max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024); 2273 2274 copy_page(initial_kernel_pmd, kernel_pmd); 2275 2276 xen_map_identity_early(initial_kernel_pmd, max_pfn); 2277 2278 copy_page(initial_page_table, pgd); 2279 initial_page_table[KERNEL_PGD_BOUNDARY] = 2280 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); 2281 2282 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); 2283 set_page_prot(initial_page_table, PAGE_KERNEL_RO); 2284 set_page_prot(empty_zero_page, PAGE_KERNEL_RO); 2285 2286 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); 2287 2288 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, 2289 PFN_DOWN(__pa(initial_page_table))); 2290 xen_write_cr3(__pa(initial_page_table)); 2291 2292 memblock_reserve(xen_pt_base, xen_pt_size); 2293 } 2294 #endif /* CONFIG_X86_64 */ 2295 2296 void __init xen_reserve_special_pages(void) 2297 { 2298 phys_addr_t paddr; 2299 2300 memblock_reserve(__pa(xen_start_info), PAGE_SIZE); 2301 if (xen_start_info->store_mfn) { 2302 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn)); 2303 memblock_reserve(paddr, PAGE_SIZE); 2304 } 2305 if (!xen_initial_domain()) { 2306 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn)); 2307 memblock_reserve(paddr, PAGE_SIZE); 2308 } 2309 } 2310 2311 void __init xen_pt_check_e820(void) 2312 { 2313 if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) { 2314 xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n"); 2315 BUG(); 2316 } 2317 } 2318 2319 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; 2320 2321 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) 2322 { 2323 pte_t pte; 2324 2325 phys >>= PAGE_SHIFT; 2326 2327 switch (idx) { 2328 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: 2329 case FIX_RO_IDT: 2330 #ifdef CONFIG_X86_32 2331 case FIX_WP_TEST: 2332 # ifdef CONFIG_HIGHMEM 2333 case FIX_KMAP_BEGIN ... FIX_KMAP_END: 2334 # endif 2335 #elif defined(CONFIG_X86_VSYSCALL_EMULATION) 2336 case VSYSCALL_PAGE: 2337 #endif 2338 case FIX_TEXT_POKE0: 2339 case FIX_TEXT_POKE1: 2340 case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END: 2341 /* All local page mappings */ 2342 pte = pfn_pte(phys, prot); 2343 break; 2344 2345 #ifdef CONFIG_X86_LOCAL_APIC 2346 case FIX_APIC_BASE: /* maps dummy local APIC */ 2347 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); 2348 break; 2349 #endif 2350 2351 #ifdef CONFIG_X86_IO_APIC 2352 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END: 2353 /* 2354 * We just don't map the IO APIC - all access is via 2355 * hypercalls. Keep the address in the pte for reference. 
2356 */ 2357 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); 2358 break; 2359 #endif 2360 2361 case FIX_PARAVIRT_BOOTMAP: 2362 /* This is an MFN, but it isn't an IO mapping from the 2363 IO domain */ 2364 pte = mfn_pte(phys, prot); 2365 break; 2366 2367 default: 2368 /* By default, set_fixmap is used for hardware mappings */ 2369 pte = mfn_pte(phys, prot); 2370 break; 2371 } 2372 2373 __native_set_fixmap(idx, pte); 2374 2375 #ifdef CONFIG_X86_VSYSCALL_EMULATION 2376 /* Replicate changes to map the vsyscall page into the user 2377 pagetable vsyscall mapping. */ 2378 if (idx == VSYSCALL_PAGE) { 2379 unsigned long vaddr = __fix_to_virt(idx); 2380 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); 2381 } 2382 #endif 2383 } 2384 2385 static void __init xen_post_allocator_init(void) 2386 { 2387 pv_mmu_ops.set_pte = xen_set_pte; 2388 pv_mmu_ops.set_pmd = xen_set_pmd; 2389 pv_mmu_ops.set_pud = xen_set_pud; 2390 #if CONFIG_PGTABLE_LEVELS >= 4 2391 pv_mmu_ops.set_p4d = xen_set_p4d; 2392 #endif 2393 2394 /* This will work as long as patching hasn't happened yet 2395 (which it hasn't) */ 2396 pv_mmu_ops.alloc_pte = xen_alloc_pte; 2397 pv_mmu_ops.alloc_pmd = xen_alloc_pmd; 2398 pv_mmu_ops.release_pte = xen_release_pte; 2399 pv_mmu_ops.release_pmd = xen_release_pmd; 2400 #if CONFIG_PGTABLE_LEVELS >= 4 2401 pv_mmu_ops.alloc_pud = xen_alloc_pud; 2402 pv_mmu_ops.release_pud = xen_release_pud; 2403 #endif 2404 pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte); 2405 2406 #ifdef CONFIG_X86_64 2407 pv_mmu_ops.write_cr3 = &xen_write_cr3; 2408 SetPagePinned(virt_to_page(level3_user_vsyscall)); 2409 #endif 2410 xen_mark_init_mm_pinned(); 2411 } 2412 2413 static void xen_leave_lazy_mmu(void) 2414 { 2415 preempt_disable(); 2416 xen_mc_flush(); 2417 paravirt_leave_lazy_mmu(); 2418 preempt_enable(); 2419 } 2420 2421 static const struct pv_mmu_ops xen_mmu_ops __initconst = { 2422 .read_cr2 = xen_read_cr2, 2423 .write_cr2 = xen_write_cr2, 2424 2425 .read_cr3 = xen_read_cr3, 2426 .write_cr3 = xen_write_cr3_init, 2427 2428 .flush_tlb_user = xen_flush_tlb, 2429 .flush_tlb_kernel = xen_flush_tlb, 2430 .flush_tlb_single = xen_flush_tlb_single, 2431 .flush_tlb_others = xen_flush_tlb_others, 2432 2433 .pte_update = paravirt_nop, 2434 2435 .pgd_alloc = xen_pgd_alloc, 2436 .pgd_free = xen_pgd_free, 2437 2438 .alloc_pte = xen_alloc_pte_init, 2439 .release_pte = xen_release_pte_init, 2440 .alloc_pmd = xen_alloc_pmd_init, 2441 .release_pmd = xen_release_pmd_init, 2442 2443 .set_pte = xen_set_pte_init, 2444 .set_pte_at = xen_set_pte_at, 2445 .set_pmd = xen_set_pmd_hyper, 2446 2447 .ptep_modify_prot_start = __ptep_modify_prot_start, 2448 .ptep_modify_prot_commit = __ptep_modify_prot_commit, 2449 2450 .pte_val = PV_CALLEE_SAVE(xen_pte_val), 2451 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), 2452 2453 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init), 2454 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), 2455 2456 #ifdef CONFIG_X86_PAE 2457 .set_pte_atomic = xen_set_pte_atomic, 2458 .pte_clear = xen_pte_clear, 2459 .pmd_clear = xen_pmd_clear, 2460 #endif /* CONFIG_X86_PAE */ 2461 .set_pud = xen_set_pud_hyper, 2462 2463 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), 2464 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), 2465 2466 #if CONFIG_PGTABLE_LEVELS >= 4 2467 .pud_val = PV_CALLEE_SAVE(xen_pud_val), 2468 .make_pud = PV_CALLEE_SAVE(xen_make_pud), 2469 .set_p4d = xen_set_p4d_hyper, 2470 2471 .alloc_pud = xen_alloc_pmd_init, 2472 .release_pud = xen_release_pmd_init, 2473 #endif /* CONFIG_PGTABLE_LEVELS == 4 */ 2474 2475 .activate_mm = xen_activate_mm, 
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
		.flush = paravirt_flush_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_init = xen_pagetable_init;

	pv_mmu_ops = xen_mmu_ops;

	memset(dummy_mapping, 0xff, PAGE_SIZE);
}

/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER	9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
			      unsigned long *in_frames,
			      unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn(vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn(vaddr);
	}
	xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

		MULTI_update_va_mapping(mcs.mc, vaddr,
				mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
	}

	xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
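 *
 * The exchange is size preserving: the total number of pages passed in
 * must equal the total handed back, i.e.
 *
 *	extents_in << order_in == extents_out << order_out
 *
 * as the BUG_ON below asserts.  xen_create_contiguous_region() passes many
 * order-0 extents in and asks for a single order-N extent back, while
 * xen_destroy_contiguous_region() does the reverse.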
2575 */ 2576 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in, 2577 unsigned long *pfns_in, 2578 unsigned long extents_out, 2579 unsigned int order_out, 2580 unsigned long *mfns_out, 2581 unsigned int address_bits) 2582 { 2583 long rc; 2584 int success; 2585 2586 struct xen_memory_exchange exchange = { 2587 .in = { 2588 .nr_extents = extents_in, 2589 .extent_order = order_in, 2590 .extent_start = pfns_in, 2591 .domid = DOMID_SELF 2592 }, 2593 .out = { 2594 .nr_extents = extents_out, 2595 .extent_order = order_out, 2596 .extent_start = mfns_out, 2597 .address_bits = address_bits, 2598 .domid = DOMID_SELF 2599 } 2600 }; 2601 2602 BUG_ON(extents_in << order_in != extents_out << order_out); 2603 2604 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange); 2605 success = (exchange.nr_exchanged == extents_in); 2606 2607 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0))); 2608 BUG_ON(success && (rc != 0)); 2609 2610 return success; 2611 } 2612 2613 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, 2614 unsigned int address_bits, 2615 dma_addr_t *dma_handle) 2616 { 2617 unsigned long *in_frames = discontig_frames, out_frame; 2618 unsigned long flags; 2619 int success; 2620 unsigned long vstart = (unsigned long)phys_to_virt(pstart); 2621 2622 /* 2623 * Currently an auto-translated guest will not perform I/O, nor will 2624 * it require PAE page directories below 4GB. Therefore any calls to 2625 * this function are redundant and can be ignored. 2626 */ 2627 2628 if (unlikely(order > MAX_CONTIG_ORDER)) 2629 return -ENOMEM; 2630 2631 memset((void *) vstart, 0, PAGE_SIZE << order); 2632 2633 spin_lock_irqsave(&xen_reservation_lock, flags); 2634 2635 /* 1. Zap current PTEs, remembering MFNs. */ 2636 xen_zap_pfn_range(vstart, order, in_frames, NULL); 2637 2638 /* 2. Get a new contiguous memory extent. */ 2639 out_frame = virt_to_pfn(vstart); 2640 success = xen_exchange_memory(1UL << order, 0, in_frames, 2641 1, order, &out_frame, 2642 address_bits); 2643 2644 /* 3. Map the new extent in place of old pages. */ 2645 if (success) 2646 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame); 2647 else 2648 xen_remap_exchanged_ptes(vstart, order, in_frames, 0); 2649 2650 spin_unlock_irqrestore(&xen_reservation_lock, flags); 2651 2652 *dma_handle = virt_to_machine(vstart).maddr; 2653 return success ? 0 : -ENOMEM; 2654 } 2655 EXPORT_SYMBOL_GPL(xen_create_contiguous_region); 2656 2657 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) 2658 { 2659 unsigned long *out_frames = discontig_frames, in_frame; 2660 unsigned long flags; 2661 int success; 2662 unsigned long vstart; 2663 2664 if (unlikely(order > MAX_CONTIG_ORDER)) 2665 return; 2666 2667 vstart = (unsigned long)phys_to_virt(pstart); 2668 memset((void *) vstart, 0, PAGE_SIZE << order); 2669 2670 spin_lock_irqsave(&xen_reservation_lock, flags); 2671 2672 /* 1. Find start MFN of contiguous extent. */ 2673 in_frame = virt_to_mfn(vstart); 2674 2675 /* 2. Zap current PTEs. */ 2676 xen_zap_pfn_range(vstart, order, NULL, out_frames); 2677 2678 /* 3. Do the exchange for non-contiguous MFNs. */ 2679 success = xen_exchange_memory(1, order, &in_frame, 1UL << order, 2680 0, out_frames, 0); 2681 2682 /* 4. Map new pages in place of old pages. 
	 */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

#ifdef CONFIG_KEXEC_CORE
phys_addr_t paddr_vmcoreinfo_note(void)
{
	if (xen_pv_domain())
		return virt_to_machine(vmcoreinfo_note).maddr;
	else
		return __pa(vmcoreinfo_note);
}
#endif /* CONFIG_KEXEC_CORE */
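/*
 * Rough usage sketch (illustrative only, not part of the original file):
 * a caller needing a machine-contiguous, 32-bit addressable DMA buffer
 * might do something like
 *
 *	dma_addr_t dma;
 *	unsigned int order = get_order(size);
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, order);
 *
 *	rc = xen_create_contiguous_region(virt_to_phys(buf), order, 32, &dma);
 *	...
 *	xen_destroy_contiguous_region(virt_to_phys(buf), order);
 *	free_pages((unsigned long)buf, order);
 */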