// SPDX-License-Identifier: GPL-2.0

/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
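
/*
 * For illustration, the pfn <-> mfn round trip described above, using
 * helpers defined later in this file (the pfn value is hypothetical):
 *
 *	pte_t pte = xen_make_pte((0x1234UL << PAGE_SHIFT) | _PAGE_PRESENT);
 *						// pfn -> mfn going in
 *	pteval_t val = xen_pte_val(pte);	// mfn -> pfn coming out
 *
 * The guest thus always reasons in terms of pfns, while the pagetable
 * the CPU actually walks holds mfns.
 */
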
#include <linux/sched/mm.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/pgtable.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif

#include <trace/events/xen.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820/api.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/memtype.h>
#include <asm/smp.h>
#include <asm/tlb.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Prototypes for functions called via PV_CALLEE_SAVE_REGS_THUNK() in order
 * to avoid warnings with "-Wmissing-prototypes".
 */
pteval_t xen_pte_val(pte_t pte);
pgdval_t xen_pgd_val(pgd_t pgd);
pmdval_t xen_pmd_val(pmd_t pmd);
pudval_t xen_pud_val(pud_t pud);
p4dval_t xen_p4d_val(p4d_t p4d);
pte_t xen_make_pte(pteval_t pte);
pgd_t xen_make_pgd(pgdval_t pgd);
pmd_t xen_make_pmd(pmdval_t pmd);
pud_t xen_make_pud(pudval_t pud);
p4d_t xen_make_p4d(p4dval_t p4d);
pte_t xen_make_pte_init(pteval_t pte);

#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
static DEFINE_SPINLOCK(xen_reservation_lock);

/* Protected by xen_reservation_lock. */
#define MIN_CONTIG_ORDER 9 /* 2MB */
static unsigned int discontig_frames_order = MIN_CONTIG_ORDER;
static unsigned long discontig_frames_early[1UL << MIN_CONTIG_ORDER] __initdata;
static unsigned long *discontig_frames __refdata = discontig_frames_early;
static bool discontig_frames_dyn;

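/*
 * Replace the discontig_frames array with one large enough to hold
 * 1UL << order frames. The new array is installed under
 * xen_reservation_lock; the replaced (or unneeded) allocation is freed
 * after dropping the lock.
 */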
static int alloc_discontig_frames(unsigned int order)
{
	unsigned long *new_array, *old_array;
	unsigned int old_order;
	unsigned long flags;

	BUG_ON(order < MIN_CONTIG_ORDER);
	BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE);

	new_array = (unsigned long *)__get_free_pages(GFP_KERNEL,
						      order - MIN_CONTIG_ORDER);
	if (!new_array)
		return -ENOMEM;

	spin_lock_irqsave(&xen_reservation_lock, flags);

	old_order = discontig_frames_order;

	if (order > discontig_frames_order || !discontig_frames_dyn) {
		if (!discontig_frames_dyn)
			old_array = NULL;
		else
			old_array = discontig_frames;

		discontig_frames = new_array;
		discontig_frames_order = order;
		discontig_frames_dyn = true;
	} else {
		/* Freeing new_array below: it was allocated with @order. */
		old_order = order;
		old_array = new_array;
	}

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	free_pages((unsigned long)old_array, old_order - MIN_CONTIG_ORDER);

	return 0;
}

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3. This may not be the current effective cr3, because
 * its update may be lazily deferred. However, a vcpu looking at its
 * own cr3 can use this value knowing that everything will be
 * self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early). If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */

static phys_addr_t xen_pt_base, xen_pt_size __initdata;

static DEFINE_STATIC_KEY_FALSE(xen_struct_pages_ready);

/*
 * Just beyond the highest usermode address. STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite_novma(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

/*
 * During early boot all page table pages are pinned, but we do not have struct
 * pages, so return true until struct pages are ready.
 */
static bool xen_page_pinned(void *ptr)
{
	if (static_branch_likely(&xen_struct_pages_ready)) {
		struct page *page = virt_to_page(ptr);

		return PagePinned(page);
	}
	return true;
}

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}
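
/*
 * The helpers above are used in a common batching pattern throughout
 * this file (sketch only; see e.g. xen_set_pmd_hyper() right below):
 *
 *	xen_mc_batch();			// open a multicall batch
 *	u.ptr = ...;
 *	u.val = ...;
 *	xen_extend_mmu_update(&u);	// queue, coalescing with the batch
 *	xen_mc_issue(XEN_LAZY_MMU);	// flush now unless in lazy MMU mode
 */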

static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void __init set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	if (HYPERVISOR_update_va_mapping(vaddr, mfn_pte(mfn, flags),
					 UVMF_INVLPG))
		BUG();
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (xen_get_lazy_mode() != XEN_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(XEN_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write, but a hypercall is much cheaper.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is. We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(XEN_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		mfn = __pfn_to_mfn(pfn);

		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte. Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else
			mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}
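
/*
 * Example of the asymmetry noted above (hypothetical pfn): if the pfn
 * has no backing mfn, the result is an empty non-present pte, and
 * converting that back yields 0 rather than the original value:
 *
 *	pte_pfn_to_mfn((pfn << PAGE_SHIFT) | _PAGE_PRESENT) == 0
 *	pte_mfn_to_pfn(0) == 0
 */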

__visible pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

__visible pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

__visible pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

__visible pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

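/*
 * Xen PV keeps a separate pgd for user mode; its address is stashed in
 * page->private of the kernel pgd's page. Given a pointer into the
 * kernel pgd, return the corresponding entry in the user pgd, or NULL
 * if there is none.
 */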
static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = p4d_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_p4d, intended for use in early boot before
 * there's a page structure. This implies:
 * 1. The only existing pagetable is the kernel's
 * 2. It is always pinned
 * 3. It has no user pagetable attached to it
 */
static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_p4d_hyper(ptr, val);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}

static void xen_set_p4d(p4d_t *ptr, p4d_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
	pgd_t pgd_val;

	trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			pgd_val.pgd = p4d_val_ma(val);
			*user_ptr = pgd_val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_p4d_hyper(ptr, val);
	if (user_ptr)
		__xen_set_p4d_hyper((p4d_t *)user_ptr, val);

	xen_mc_issue(XEN_LAZY_MMU);
}

#if CONFIG_PGTABLE_LEVELS >= 5
__visible p4dval_t xen_p4d_val(p4d_t p4d)
{
	return pte_mfn_to_pfn(p4d.p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val);

__visible p4d_t xen_make_p4d(p4dval_t p4d)
{
	p4d = pte_pfn_to_mfn(p4d);

	return native_make_p4d(p4d);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 bool last, unsigned long limit)
{
	int i, nr;

	nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
	for (i = 0; i < nr; i++) {
		if (!pmd_none(pmd[i]))
			(*func)(mm, pmd_page(pmd[i]), PT_PTE);
	}
}

static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 bool last, unsigned long limit)
{
	int i, nr;

	nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
	for (i = 0; i < nr; i++) {
		pmd_t *pmd;

		if (pud_none(pud[i]))
			continue;

		pmd = pmd_offset(&pud[i], 0);
		if (PTRS_PER_PMD > 1)
			(*func)(mm, virt_to_page(pmd), PT_PMD);
		xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
	}
}

static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 bool last, unsigned long limit)
{
	pud_t *pud;

	if (p4d_none(*p4d))
		return;

	pud = pud_offset(p4d, 0);
	if (PTRS_PER_PUD > 1)
		(*func)(mm, virt_to_page(pud), PT_PUD);
	xen_pud_walk(mm, pud, func, last, limit);
}

/*
 * (Yet another) pagetable walker. This one is intended for pinning a
 * pagetable. This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level. It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit. In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * We must skip the Xen hole in the middle of the address space, just after
 * the big x86-64 virtual hole.
 */
static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			   void (*func)(struct mm_struct *mm, struct page *,
					enum pt_level),
			   unsigned long limit)
{
	int i, nr;
	unsigned hole_low = 0, hole_high = 0;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.
	 */
	hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
	hole_high = pgd_index(GUARD_HOLE_END_ADDR);

	nr = pgd_index(limit) + 1;
	for (i = 0; i < nr; i++) {
		p4d_t *p4d;

		if (i >= hole_low && i < hole_high)
			continue;

		if (pgd_none(pgd[i]))
			continue;

		p4d = p4d_offset(&pgd[i], 0);
		xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
	}

	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	(*func)(mm, virt_to_page(pgd), PT_PGD);
}

static void xen_pgd_walk(struct mm_struct *mm,
			 void (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			 unsigned long limit)
{
	__xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTE_PTLOCKS
	ptl = ptlock_ptr(page_ptdesc(page));
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

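/*
 * Queue a single pin/unpin operation (an MMUEXT_[UN]PIN_L*_TABLE cmd
 * in @level) for the page at @pfn on the current multicall batch.
 */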
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static void xen_pin_page(struct mm_struct *mm, struct page *page,
			 enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);

	if (!pgfl) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it. If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits). The solution is to mark RO and pin each PTE
		 * page while holding the lock. This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);

	xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

	if (user_pgd) {
		xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
		xen_do_pin(MMUEXT_PIN_L4_TABLE,
			   PFN_DOWN(__pa(user_pgd)));
	}

	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&init_mm.page_table_lock);
	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
	spin_unlock(&init_mm.page_table_lock);
}

static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				   enum pt_level level)
{
	SetPagePinned(page);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits. So do all
 * the book-keeping now once struct pages for allocated pages are
 * initialized. This happens only after memblock_free_all() is called.
 */
static void __init xen_after_bootmem(void)
{
	static_branch_enable(&xen_struct_pages_ready);
#ifdef CONFIG_X86_VSYSCALL_EMULATION
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);

	if (alloc_discontig_frames(MIN_CONTIG_ORDER))
		BUG();
}

static void xen_unpin_page(struct mm_struct *mm, struct page *page,
			   enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page. If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	if (user_pgd) {
		xen_do_pin(MMUEXT_UNPIN_TABLE,
			   PFN_DOWN(__pa(user_pgd)));
		xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
	}

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&init_mm.page_table_lock);
	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
	spin_unlock(&init_mm.page_table_lock);
}

static void xen_enter_mmap(struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}

static void drop_mm_ref_this_cpu(void *info)
{
	struct mm_struct *mm = info;

	if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
		leave_mm(smp_processor_id());

	/*
	 * If this cpu still has a stale cr3 reference, then make sure
	 * it has been flushed.
	 */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		xen_mc_flush();
}

#ifdef CONFIG_SMP
/*
 * Another cpu may still have its %cr3 pointing at the pagetable, so
 * we need to repoint it somewhere else before we can unpin it.
 */
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	drop_mm_ref_this_cpu(mm);

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
		}
		return;
	}

	/*
	 * It's possible that a vcpu may have a stale reference to our
	 * cr3, because it's in lazy mode and hasn't yet flushed its
	 * set of pending hypercalls. In this case, we can look at
	 * its actual current cr3 value, and force it to flush if
	 * needed.
	 */
	cpumask_clear(mask);
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	drop_mm_ref_this_cpu(mm);
}
#endif
10227e0563deSVitaly Kuznetsov
10237e0563deSVitaly Kuznetsov /*
10247e0563deSVitaly Kuznetsov * While a process runs, Xen pins its pagetables, which means that the
10257e0563deSVitaly Kuznetsov * hypervisor forces it to be read-only, and it controls all updates
10267e0563deSVitaly Kuznetsov * to it. This means that all pagetable updates have to go via the
10277e0563deSVitaly Kuznetsov * hypervisor, which is moderately expensive.
10287e0563deSVitaly Kuznetsov *
10297e0563deSVitaly Kuznetsov * Since we're pulling the pagetable down, we switch to use init_mm,
10307e0563deSVitaly Kuznetsov * unpin old process pagetable and mark it all read-write, which
10317e0563deSVitaly Kuznetsov * allows further operations on it to be simple memory accesses.
10327e0563deSVitaly Kuznetsov *
10337e0563deSVitaly Kuznetsov * The only subtle point is that another CPU may be still using the
10347e0563deSVitaly Kuznetsov * pagetable because of lazy tlb flushing. This means we need need to
10357e0563deSVitaly Kuznetsov * switch all CPUs off this pagetable before we can unpin it.
10367e0563deSVitaly Kuznetsov */
xen_exit_mmap(struct mm_struct * mm)10377e0563deSVitaly Kuznetsov static void xen_exit_mmap(struct mm_struct *mm)
10387e0563deSVitaly Kuznetsov {
10397e0563deSVitaly Kuznetsov get_cpu(); /* make sure we don't move around */
10407e0563deSVitaly Kuznetsov xen_drop_mm_ref(mm);
10417e0563deSVitaly Kuznetsov put_cpu();
10427e0563deSVitaly Kuznetsov
10437e0563deSVitaly Kuznetsov spin_lock(&mm->page_table_lock);
10447e0563deSVitaly Kuznetsov
10457e0563deSVitaly Kuznetsov /* pgd may not be pinned in the error exit path of execve */
10467e0563deSVitaly Kuznetsov if (xen_page_pinned(mm->pgd))
10477e0563deSVitaly Kuznetsov xen_pgd_unpin(mm);
10487e0563deSVitaly Kuznetsov
10497e0563deSVitaly Kuznetsov spin_unlock(&mm->page_table_lock);
10507e0563deSVitaly Kuznetsov }
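
/*
 * For illustration: the unpin in xen_exit_mmap() is what makes the
 * difference between the two cases xen_set_pte_init() below handles
 * explicitly. While a pagetable is pinned its pages are read-only and
 * an update must go via the hypervisor, e.g. __xen_set_pte(ptep, pte);
 * once unpinned, a plain native_set_pte(ptep, pte) store suffices.
 */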

static void xen_post_allocator_init(void);

static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}
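
/*
 * Typical usage, as in xen_setup_kernel_pagetable() below: pinning the
 * new toplevel pagetable and unpinning the Xen-provided one, e.g.
 *
 *	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
 *			  PFN_DOWN(__pa_symbol(init_top_pgt)));
 *	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
 */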

static void __init xen_cleanhighmap(unsigned long vaddr,
				    unsigned long vaddr_end)
{
	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);

	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
	 * We include the PMD passed in on _both_ boundaries. */
	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
			pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > kernel_end)
			set_pmd(pmd, __pmd(0));
	}
	/* In case we did something silly, we should crash in this function
	 * instead of somewhere later, which would be confusing. */
	xen_mc_flush();
}

/*
 * Make a page range writeable and free it.
 */
static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
{
	void *vaddr = __va(paddr);
	void *vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
		make_lowmem_page_readwrite(vaddr);

	memblock_phys_free(paddr, size);
}

static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
{
	unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;

	if (unpin)
		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
	ClearPagePinned(virt_to_page(__va(pa)));
	xen_free_ro_pages(pa, PAGE_SIZE);
}

static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
{
	unsigned long pa;
	pte_t *pte_tbl;
	int i;

	if (pmd_large(*pmd)) {
		pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
		xen_free_ro_pages(pa, PMD_SIZE);
		return;
	}

	pte_tbl = pte_offset_kernel(pmd, 0);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (pte_none(pte_tbl[i]))
			continue;
		pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
		xen_free_ro_pages(pa, PAGE_SIZE);
	}
	set_pmd(pmd, __pmd(0));
	xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
}

static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
{
	unsigned long pa;
	pmd_t *pmd_tbl;
	int i;

	if (pud_leaf(*pud)) {
		pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
		xen_free_ro_pages(pa, PUD_SIZE);
		return;
	}

	pmd_tbl = pmd_offset(pud, 0);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (pmd_none(pmd_tbl[i]))
			continue;
		xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
	}
	set_pud(pud, __pud(0));
	xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
}

static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
{
	unsigned long pa;
	pud_t *pud_tbl;
	int i;

	if (p4d_large(*p4d)) {
		pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
		xen_free_ro_pages(pa, P4D_SIZE);
		return;
	}

	pud_tbl = pud_offset(p4d, 0);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (pud_none(pud_tbl[i]))
			continue;
		xen_cleanmfnmap_pud(pud_tbl + i, unpin);
	}
	set_p4d(p4d, __p4d(0));
	xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
}
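
/*
 * The three helpers above form a recursive descent over the pagetable
 * levels: each one frees the memory a leaf entry maps, recurses into
 * any next-level table, clears its own entry and finally hands the
 * now-empty table page itself to xen_cleanmfnmap_free_pgtbl() for
 * unpinning and freeing.
 */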

/*
 * Since it is well isolated we can (and since it is perhaps large we
 * should) also free the page tables mapping the initial P->M table.
 */
static void __init xen_cleanmfnmap(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	bool unpin;

	unpin = (vaddr == 2 * PGDIR_SIZE);
	vaddr &= PMD_MASK;
	pgd = pgd_offset_k(vaddr);
	p4d = p4d_offset(pgd, 0);
	if (!p4d_none(*p4d))
		xen_cleanmfnmap_p4d(p4d, unpin);
}

static void __init xen_pagetable_p2m_free(void)
{
	unsigned long size;
	unsigned long addr;

	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));

	/* No memory or already called. */
	if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
		return;

	/* Using the __ka address and sticking INVALID_P2M_ENTRY into it! */
	memset((void *)xen_start_info->mfn_list, 0xff, size);

	addr = xen_start_info->mfn_list;
	/*
	 * We could be in __ka space.
	 * We round up to the PMD, which means that if anybody at this stage
	 * is using the __ka address of xen_start_info or
	 * xen_start_info->shared_info they are going to crash. Fortunately
	 * we have already revectored in xen_setup_kernel_pagetable.
	 */
	size = roundup(size, PMD_SIZE);

	if (addr >= __START_KERNEL_map) {
		xen_cleanhighmap(addr, addr + size);
		size = PAGE_ALIGN(xen_start_info->nr_pages *
				  sizeof(unsigned long));
		memblock_free((void *)addr, size);
	} else {
		xen_cleanmfnmap(addr);
	}
}

static void __init xen_pagetable_cleanhighmap(void)
{
	unsigned long size;
	unsigned long addr;

	/* At this stage, cleanup_highmap has already cleaned __ka space
	 * from _brk_limit way up to the max_pfn_mapped (which is the end of
	 * the ramdisk). We continue on, erasing PMD entries that point to page
	 * tables - do note that they are accessible at this stage via __va.
	 * As Xen is aligning the memory end to a 4MB boundary, for good
	 * measure we also round up to PMD_SIZE * 2 - which means that if
	 * anybody is using a __ka address to the initial boot-stack - and
	 * tries to use it - they are going to crash. The xen_start_info has
	 * been taken care of already in xen_setup_kernel_pagetable. */
	addr = xen_start_info->pt_base;
	size = xen_start_info->nr_pt_frames * PAGE_SIZE;

	xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
	xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
}

static void __init xen_pagetable_p2m_setup(void)
{
	xen_vmalloc_p2m_tree();

	xen_pagetable_p2m_free();

	xen_pagetable_cleanhighmap();

	/* And revector! Bye bye old array */
	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
}

static void __init xen_pagetable_init(void)
{
	/*
	 * The majority of further PTE writes are to pagetables already
	 * announced as such to Xen. Hence it is more efficient to use
	 * hypercalls for these updates.
	 */
	pv_ops.mmu.set_pte = __xen_set_pte;

	paging_init();
	xen_post_allocator_init();

	xen_pagetable_p2m_setup();

	/* Allocate and initialize top and mid mfn levels for p2m structure */
	xen_build_mfn_list_list();

	/* Remap memory freed due to conflicts with E820 map */
	xen_remap_memory();
	xen_setup_mfn_list_list();
}

static noinstr void xen_write_cr2(unsigned long cr2)
{
	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static noinline void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}
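
/*
 * xen_flush_tlb() above shows the multicall pattern used throughout
 * this file: reserve space in the per-cpu batch with xen_mc_entry(),
 * fill in the operation, queue it with MULTI_mmuext_op(), and let
 * xen_mc_issue() either flush immediately or leave it queued when
 * inside a lazy-MMU section. A minimal sketch of a batched local
 * flush:
 *
 *	struct multicall_space mcs = xen_mc_entry(sizeof(struct mmuext_op));
 *	struct mmuext_op *op = mcs.args;
 *
 *	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
 *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(XEN_LAZY_MMU);	// may batch instead of trapping now
 */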

static void xen_flush_tlb_one_user(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_one_user(addr);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(XEN_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_multi(const struct cpumask *cpus,
				const struct flush_tlb_info *info)
{
	struct {
		struct mmuext_op op;
		DECLARE_BITMAP(mask, NR_CPUS);
	} *args;
	struct multicall_space mcs;
	const size_t mc_entry_size = sizeof(args->op) +
		sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());

	trace_xen_mmu_flush_tlb_multi(cpus, info->mm, info->start, info->end);

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(mc_entry_size);
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove any offline CPUs */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);

	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	if (info->end != TLB_FLUSH_ALL &&
	    (info->end - info->start) <= PAGE_SIZE) {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = info->start;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(XEN_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return this_cpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	this_cpu_write(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op op;
	unsigned long mfn;

	trace_xen_mmu_write_cr3(kernel, cr3);

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = mfn;

	xen_extend_mmuext_op(&op);

	if (kernel) {
		this_cpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));

	BUG_ON(preemptible());

	xen_mc_batch();			/* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	this_cpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

	if (user_pgd)
		__xen_write_cr3(false, __pa(user_pgd));
	else
		__xen_write_cr3(false, 0);

	xen_mc_issue(XEN_LAZY_CPU);	/* interrupts restored */
}

/*
 * At the start of the day - when Xen launches a guest, it has already
 * built pagetables for the guest. We diligently look over them
 * in xen_setup_kernel_pagetable and graft them, as appropriate, into
 * init_top_pgt and its friends. Then when we are happy we load
 * the new init_top_pgt - and continue on.
 *
 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
 * up the rest of the pagetables. When it has completed, it loads cr3.
 * N.B. that baremetal would start at 'start_kernel' (and the early
 * #PF handler would create bootstrap pagetables) - so we are running
 * with the same assumptions as what to do when write_cr3 is executed
 * at this point.
 *
 * Since there are no user-page tables at all, we have two variants
 * of xen_write_cr3 - the early bootup (this one), and the late one
 * (xen_write_cr3). The reason we have to do that is that in 64-bit
 * the Linux kernel and user-space are both in ring 3 while the
 * hypervisor is in ring 0.
 */
static void __init xen_write_cr3_init(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();			/* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	this_cpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

	xen_mc_issue(XEN_LAZY_CPU);	/* interrupts restored */
}

static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	struct page *page = virt_to_page(pgd);
	pgd_t *user_pgd;
	int ret = -ENOMEM;

	BUG_ON(PagePinned(virt_to_page(pgd)));
	BUG_ON(page->private != 0);

	user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	page->private = (unsigned long)user_pgd;

	if (user_pgd != NULL) {
#ifdef CONFIG_X86_VSYSCALL_EMULATION
		user_pgd[pgd_index(VSYSCALL_ADDR)] =
			__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
#endif
		ret = 0;
	}

	BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
}

/*
 * Init-time set_pte while constructing initial pagetables, which
 * doesn't allow RO page table pages to be remapped RW.
 *
 * If there is no MFN for this PFN then this page is initially
 * ballooned out so clear the PTE (as in decrease_reservation() in
 * drivers/xen/balloon.c).
 *
 * Many of these PTE updates are done on unpinned and writable pages
 * and doing a hypercall for these is unnecessary and expensive. At
 * this point it is rarely possible to tell if a page is pinned, so
 * mostly write the PTE directly and rely on Xen trapping and
 * emulating any updates as necessary.
 */
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	if (unlikely(is_early_ioremap_ptep(ptep)))
		__xen_set_pte(ptep, pte);
	else
		native_set_pte(ptep, pte);
}

__visible pte_t xen_make_pte_init(pteval_t pte)
{
	unsigned long pfn;

	/*
	 * Pages belonging to the initial p2m list mapped outside the default
	 * address range must be mapped read-only. This region contains the
	 * page tables for mapping the p2m list, too, and page tables MUST be
	 * mapped read-only.
	 */
	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
	if (xen_start_info->mfn_list < __START_KERNEL_map &&
	    pfn >= xen_start_info->first_p2m_pfn &&
	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
		pte &= ~_PAGE_RW;

	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
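
/*
 * For illustration, the pfn->mfn rewrite done by pte_pfn_to_mfn()
 * above is conceptually (ignoring the handling of special entries such
 * as missing or identity-mapped frames):
 *
 *	mfn = pfn_to_mfn((pte & PTE_PFN_MASK) >> PAGE_SHIFT);
 *	pte = (pte & ~PTE_PFN_MASK) | (mfn << PAGE_SHIFT);
 *
 * i.e. only the frame-number bits change; permission bits such as the
 * _PAGE_RW bit cleared above are preserved.
 */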

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static void __init xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void __init xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct multicall_space mcs;
	struct mmuext_op *op;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = cmd;
	op->arg1.mfn = pfn_to_mfn(pfn);

	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}

static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	struct multicall_space mcs;
	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);

	mcs = __xen_mc_entry(0);
	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
				pfn_pte(pfn, prot), 0);
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
				    unsigned level)
{
	bool pinned = xen_page_pinned(mm->pgd);

	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);

	if (pinned) {
		struct page *page = pfn_to_page(pfn);

		pinned = false;
		if (static_branch_likely(&xen_struct_pages_ready)) {
			pinned = PagePinned(page);
			SetPagePinned(page);
		}

		xen_mc_batch();

		__set_pfn_prot(pfn, PAGE_KERNEL_RO);

		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
			__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

		xen_mc_issue(XEN_LAZY_MMU);
	}
}
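
/*
 * In other words: a page-table page attached to a pinned pagetable is
 * first converted to read-only via a batched update_va_mapping, and a
 * PTE-level page is additionally pinned so Xen validates it as an L1
 * table. Both operations ride the same multicall batch issued by
 * xen_mc_issue() above.
 */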

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);
	bool pinned = PagePinned(page);

	trace_xen_mmu_release_ptpage(pfn, level, pinned);

	if (pinned) {
		xen_mc_batch();

		if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
			__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

		__set_pfn_prot(pfn, PAGE_KERNEL);

		xen_mc_issue(XEN_LAZY_MMU);

		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}

/*
 * Like __va(), but returns the address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void * __init __ka(phys_addr_t paddr)
{
	return (void *)(paddr + __START_KERNEL_map);
}

/* Convert a machine address to physical address */
static unsigned long __init m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= XEN_PTE_MFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void * __init m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}
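
/*
 * Usage sketch: m2v() is what lets us chase Xen-built pagetable
 * entries, which hold machine frame numbers, from the kernel. E.g.
 * xen_setup_kernel_pagetable() below walks from the supplied pgd to
 * Xen's L2 with:
 *
 *	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
 *	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
 */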

/* Set the page permissions on identity-mapped pages */
static void __init set_page_prot_flags(void *addr, pgprot_t prot,
				       unsigned long flags)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
		BUG();
}

static void __init set_page_prot(void *addr, pgprot_t prot)
{
	return set_page_prot_flags(addr, prot, UVMF_NONE);
}

void __init xen_setup_machphys_mapping(void)
{
	struct xen_machphys_mapping mapping;

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr = mapping.max_mfn + 1;
	} else {
		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
	}
}

static void __init convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
				 unsigned long addr)
{
	if (*pt_base == PFN_DOWN(__pa(addr))) {
		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
		clear_page((void *)addr);
		(*pt_base)++;
	}
	if (*pt_end == PFN_DOWN(__pa(addr))) {
		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
		clear_page((void *)addr);
		(*pt_end)--;
	}
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables. We copy the Xen L2's into
 * level2_ident_pgt, and level2_kernel_pgt. This means that only the
 * kernel has a physical mapping to start with - but that's enough to
 * get __va working. We need to fill in the rest of the physical
 * mapping once some sort of allocator has been set up.
 */
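/*
 * A sketch of the resulting layout (indices taken from the comments in
 * the function body below):
 *
 *	init_top_pgt[273] -> level3_ident_pgt[0]    -> level2_ident_pgt
 *	init_top_pgt[511] -> level3_kernel_pgt[510] -> level2_kernel_pgt
 *	                     level3_kernel_pgt[511] -> level2_fixmap_pgt
 *
 * where level2_ident_pgt and level2_kernel_pgt are both copies of
 * Xen's L2, hence the aliasing noted in the body.
 */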
xen_setup_kernel_pagetable(pgd_t * pgd,unsigned long max_pfn)17557e0563deSVitaly Kuznetsov void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
17567e0563deSVitaly Kuznetsov {
17577e0563deSVitaly Kuznetsov pud_t *l3;
17587e0563deSVitaly Kuznetsov pmd_t *l2;
17597e0563deSVitaly Kuznetsov unsigned long addr[3];
17607e0563deSVitaly Kuznetsov unsigned long pt_base, pt_end;
17617e0563deSVitaly Kuznetsov unsigned i;
17627e0563deSVitaly Kuznetsov
17637e0563deSVitaly Kuznetsov /* max_pfn_mapped is the last pfn mapped in the initial memory
17647e0563deSVitaly Kuznetsov * mappings. Considering that on Xen after the kernel mappings we
17657e0563deSVitaly Kuznetsov * have the mappings of some pages that don't exist in pfn space, we
17667e0563deSVitaly Kuznetsov * set max_pfn_mapped to the last real pfn mapped. */
17677e0563deSVitaly Kuznetsov if (xen_start_info->mfn_list < __START_KERNEL_map)
17687e0563deSVitaly Kuznetsov max_pfn_mapped = xen_start_info->first_p2m_pfn;
17697e0563deSVitaly Kuznetsov else
17707e0563deSVitaly Kuznetsov max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
17717e0563deSVitaly Kuznetsov
17727e0563deSVitaly Kuznetsov pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
17737e0563deSVitaly Kuznetsov pt_end = pt_base + xen_start_info->nr_pt_frames;
17747e0563deSVitaly Kuznetsov
17757e0563deSVitaly Kuznetsov /* Zap identity mapping */
177665ade2f8SKirill A. Shutemov init_top_pgt[0] = __pgd(0);
17777e0563deSVitaly Kuznetsov
17787e0563deSVitaly Kuznetsov /* Pre-constructed entries are in pfn, so convert to mfn */
1779d52888aaSKirill A. Shutemov /* L4[273] -> level3_ident_pgt */
1780989513a7SJuergen Gross /* L4[511] -> level3_kernel_pgt */
178165ade2f8SKirill A. Shutemov convert_pfn_mfn(init_top_pgt);
17827e0563deSVitaly Kuznetsov
17837e0563deSVitaly Kuznetsov /* L3_i[0] -> level2_ident_pgt */
17847e0563deSVitaly Kuznetsov convert_pfn_mfn(level3_ident_pgt);
1785989513a7SJuergen Gross /* L3_k[510] -> level2_kernel_pgt */
1786989513a7SJuergen Gross /* L3_k[511] -> level2_fixmap_pgt */
17877e0563deSVitaly Kuznetsov convert_pfn_mfn(level3_kernel_pgt);
17887e0563deSVitaly Kuznetsov
178905ab1d8aSFeng Tang /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
17907e0563deSVitaly Kuznetsov convert_pfn_mfn(level2_fixmap_pgt);
1791989513a7SJuergen Gross
17927e0563deSVitaly Kuznetsov /* We get [511][511] and have Xen's version of level2_kernel_pgt */
17937e0563deSVitaly Kuznetsov l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
17947e0563deSVitaly Kuznetsov l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
17957e0563deSVitaly Kuznetsov
17967e0563deSVitaly Kuznetsov addr[0] = (unsigned long)pgd;
17977e0563deSVitaly Kuznetsov addr[1] = (unsigned long)l3;
17987e0563deSVitaly Kuznetsov addr[2] = (unsigned long)l2;
1799d52888aaSKirill A. Shutemov /* Graft it onto L4[273][0]. Note that we creating an aliasing problem:
1800d52888aaSKirill A. Shutemov * Both L4[273][0] and L4[511][510] have entries that point to the same
18017e0563deSVitaly Kuznetsov * L2 (PMD) tables. Meaning that if you modify it in __va space
18027e0563deSVitaly Kuznetsov * it will be also modified in the __ka space! (But if you just
18037e0563deSVitaly Kuznetsov * modify the PMD table to point to other PTE's or none, then you
18047e0563deSVitaly Kuznetsov * are OK - which is what cleanup_highmap does) */
18057e0563deSVitaly Kuznetsov copy_page(level2_ident_pgt, l2);
18067e0563deSVitaly Kuznetsov /* Graft it onto L4[511][510] */
18077e0563deSVitaly Kuznetsov copy_page(level2_kernel_pgt, l2);
18087e0563deSVitaly Kuznetsov
18092cc42bacSJan Beulich /*
18102cc42bacSJan Beulich * Zap execute permission from the ident map. Due to the sharing of
18112cc42bacSJan Beulich * L1 entries we need to do this in the L2.
18122cc42bacSJan Beulich */
18132cc42bacSJan Beulich if (__supported_pte_mask & _PAGE_NX) {
18142cc42bacSJan Beulich for (i = 0; i < PTRS_PER_PMD; ++i) {
18152cc42bacSJan Beulich if (pmd_none(level2_ident_pgt[i]))
18162cc42bacSJan Beulich continue;
18172cc42bacSJan Beulich level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
18182cc42bacSJan Beulich }
18192cc42bacSJan Beulich }
18202cc42bacSJan Beulich
18217e0563deSVitaly Kuznetsov /* Copy the initial P->M table mappings if necessary. */
18227e0563deSVitaly Kuznetsov i = pgd_index(xen_start_info->mfn_list);
18237e0563deSVitaly Kuznetsov if (i && i < pgd_index(__START_KERNEL_map))
182465ade2f8SKirill A. Shutemov init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
18257e0563deSVitaly Kuznetsov
18267e0563deSVitaly Kuznetsov /* Make pagetable pieces RO */
182765ade2f8SKirill A. Shutemov set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
18287e0563deSVitaly Kuznetsov set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
18297e0563deSVitaly Kuznetsov set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
18307e0563deSVitaly Kuznetsov set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
18317e0563deSVitaly Kuznetsov set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
18327e0563deSVitaly Kuznetsov set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
183305ab1d8aSFeng Tang
183405ab1d8aSFeng Tang for (i = 0; i < FIXMAP_PMD_NUM; i++) {
183505ab1d8aSFeng Tang set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
183605ab1d8aSFeng Tang PAGE_KERNEL_RO);
183705ab1d8aSFeng Tang }
18387e0563deSVitaly Kuznetsov
18397e0563deSVitaly Kuznetsov /* Pin down new L4 */
18407e0563deSVitaly Kuznetsov pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
184165ade2f8SKirill A. Shutemov PFN_DOWN(__pa_symbol(init_top_pgt)));
18427e0563deSVitaly Kuznetsov
18437e0563deSVitaly Kuznetsov /* Unpin Xen-provided one */
18447e0563deSVitaly Kuznetsov pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
18457e0563deSVitaly Kuznetsov
1846d2a3ef44SJan Beulich #ifdef CONFIG_X86_VSYSCALL_EMULATION
1847d2a3ef44SJan Beulich /* Pin user vsyscall L3 */
1848d2a3ef44SJan Beulich set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1849d2a3ef44SJan Beulich pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1850d2a3ef44SJan Beulich PFN_DOWN(__pa_symbol(level3_user_vsyscall)));
1851d2a3ef44SJan Beulich #endif
1852d2a3ef44SJan Beulich
18537e0563deSVitaly Kuznetsov /*
1854989513a7SJuergen Gross * At this stage there can be no user pgd, and no page structure to
1855989513a7SJuergen Gross * attach it to, so make sure we just set kernel pgd.
18567e0563deSVitaly Kuznetsov */
18577e0563deSVitaly Kuznetsov xen_mc_batch();
185865ade2f8SKirill A. Shutemov __xen_write_cr3(true, __pa(init_top_pgt));
1859a4a7644cSJuergen Gross xen_mc_issue(XEN_LAZY_CPU);
18607e0563deSVitaly Kuznetsov
18617e0563deSVitaly Kuznetsov /* We can't that easily rip out L3 and L2, as the Xen pagetables are
18627e0563deSVitaly Kuznetsov * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
18637e0563deSVitaly Kuznetsov * the initial domain. For guests using the toolstack, they are in:
18647e0563deSVitaly Kuznetsov * [L4], [L3], [L2], [L1], [L1], order .. So for dom0 we can only
18657e0563deSVitaly Kuznetsov * rip out the [L4] (pgd), but for guests we shave off three pages.
18667e0563deSVitaly Kuznetsov */
18677e0563deSVitaly Kuznetsov for (i = 0; i < ARRAY_SIZE(addr); i++)
18687e0563deSVitaly Kuznetsov check_pt_base(&pt_base, &pt_end, addr[i]);
18697e0563deSVitaly Kuznetsov
18707e0563deSVitaly Kuznetsov /* Our (by three pages) smaller Xen pagetable that we are using */
18717e0563deSVitaly Kuznetsov xen_pt_base = PFN_PHYS(pt_base);
18727e0563deSVitaly Kuznetsov xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
18737e0563deSVitaly Kuznetsov memblock_reserve(xen_pt_base, xen_pt_size);
18747e0563deSVitaly Kuznetsov
18757e0563deSVitaly Kuznetsov /* Revector the xen_start_info */
18767e0563deSVitaly Kuznetsov xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
18777e0563deSVitaly Kuznetsov }
18787e0563deSVitaly Kuznetsov
18797e0563deSVitaly Kuznetsov /*
18807e0563deSVitaly Kuznetsov * Read a value from a physical address.
18817e0563deSVitaly Kuznetsov */
xen_read_phys_ulong(phys_addr_t addr)18827e0563deSVitaly Kuznetsov static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
18837e0563deSVitaly Kuznetsov {
18847e0563deSVitaly Kuznetsov unsigned long *vaddr;
18857e0563deSVitaly Kuznetsov unsigned long val;
18867e0563deSVitaly Kuznetsov
18877e0563deSVitaly Kuznetsov vaddr = early_memremap_ro(addr, sizeof(val));
18887e0563deSVitaly Kuznetsov val = *vaddr;
18897e0563deSVitaly Kuznetsov early_memunmap(vaddr, sizeof(val));
18907e0563deSVitaly Kuznetsov return val;
18917e0563deSVitaly Kuznetsov }
18927e0563deSVitaly Kuznetsov
18937e0563deSVitaly Kuznetsov /*
18947e0563deSVitaly Kuznetsov * Translate a virtual address to a physical one without relying on mapped
189569861e0aSJuergen Gross * page tables. Don't rely on big pages being aligned in (guest) physical
189669861e0aSJuergen Gross * space!
18977e0563deSVitaly Kuznetsov */
xen_early_virt_to_phys(unsigned long vaddr)18987e0563deSVitaly Kuznetsov static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
18997e0563deSVitaly Kuznetsov {
19007e0563deSVitaly Kuznetsov phys_addr_t pa;
19017e0563deSVitaly Kuznetsov pgd_t pgd;
19027e0563deSVitaly Kuznetsov pud_t pud;
19037e0563deSVitaly Kuznetsov pmd_t pmd;
19047e0563deSVitaly Kuznetsov pte_t pte;
19057e0563deSVitaly Kuznetsov
19066c690ee1SAndy Lutomirski pa = read_cr3_pa();
19077e0563deSVitaly Kuznetsov pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
19087e0563deSVitaly Kuznetsov sizeof(pgd)));
19097e0563deSVitaly Kuznetsov if (!pgd_present(pgd))
19107e0563deSVitaly Kuznetsov return 0;
19117e0563deSVitaly Kuznetsov
19127e0563deSVitaly Kuznetsov pa = pgd_val(pgd) & PTE_PFN_MASK;
19137e0563deSVitaly Kuznetsov pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
19147e0563deSVitaly Kuznetsov sizeof(pud)));
19157e0563deSVitaly Kuznetsov if (!pud_present(pud))
19167e0563deSVitaly Kuznetsov return 0;
191769861e0aSJuergen Gross pa = pud_val(pud) & PTE_PFN_MASK;
1918907835e6SPeter Xu if (pud_leaf(pud))
19197e0563deSVitaly Kuznetsov return pa + (vaddr & ~PUD_MASK);
19207e0563deSVitaly Kuznetsov
19217e0563deSVitaly Kuznetsov pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
19227e0563deSVitaly Kuznetsov sizeof(pmd)));
19237e0563deSVitaly Kuznetsov if (!pmd_present(pmd))
19247e0563deSVitaly Kuznetsov return 0;
192569861e0aSJuergen Gross pa = pmd_val(pmd) & PTE_PFN_MASK;
19267e0563deSVitaly Kuznetsov if (pmd_large(pmd))
19277e0563deSVitaly Kuznetsov return pa + (vaddr & ~PMD_MASK);
19287e0563deSVitaly Kuznetsov
19297e0563deSVitaly Kuznetsov pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
19307e0563deSVitaly Kuznetsov sizeof(pte)));
19317e0563deSVitaly Kuznetsov if (!pte_present(pte))
19327e0563deSVitaly Kuznetsov return 0;
19337e0563deSVitaly Kuznetsov pa = pte_pfn(pte) << PAGE_SHIFT;
19347e0563deSVitaly Kuznetsov
19357e0563deSVitaly Kuznetsov return pa | (vaddr & ~PAGE_MASK);
19367e0563deSVitaly Kuznetsov }
19377e0563deSVitaly Kuznetsov
19387e0563deSVitaly Kuznetsov /*
19397e0563deSVitaly Kuznetsov * Find a new area for the hypervisor supplied p2m list and relocate the p2m to
19407e0563deSVitaly Kuznetsov * this area.
19417e0563deSVitaly Kuznetsov */
xen_relocate_p2m(void)19427e0563deSVitaly Kuznetsov void __init xen_relocate_p2m(void)
19437e0563deSVitaly Kuznetsov {
1944773dd2fcSKirill A. Shutemov phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
19457e0563deSVitaly Kuznetsov unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
1946773dd2fcSKirill A. Shutemov int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
19477e0563deSVitaly Kuznetsov pte_t *pt;
19487e0563deSVitaly Kuznetsov pmd_t *pmd;
19497e0563deSVitaly Kuznetsov pud_t *pud;
19507e0563deSVitaly Kuznetsov pgd_t *pgd;
19517e0563deSVitaly Kuznetsov unsigned long *new_p2m;
19527e0563deSVitaly Kuznetsov
19537e0563deSVitaly Kuznetsov size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
19547e0563deSVitaly Kuznetsov n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
19557e0563deSVitaly Kuznetsov n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
19567e0563deSVitaly Kuznetsov n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
19577e0563deSVitaly Kuznetsov n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
1958773dd2fcSKirill A. Shutemov n_frames = n_pte + n_pt + n_pmd + n_pud;
19597e0563deSVitaly Kuznetsov
19607e0563deSVitaly Kuznetsov new_area = xen_find_free_area(PFN_PHYS(n_frames));
19617e0563deSVitaly Kuznetsov if (!new_area) {
19627e0563deSVitaly Kuznetsov xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
19637e0563deSVitaly Kuznetsov BUG();
19647e0563deSVitaly Kuznetsov }
19657e0563deSVitaly Kuznetsov
19667e0563deSVitaly Kuznetsov /*
19677e0563deSVitaly Kuznetsov * Setup the page tables for addressing the new p2m list.
19687e0563deSVitaly Kuznetsov * We have asked the hypervisor to map the p2m list at the user address
19697e0563deSVitaly Kuznetsov * PUD_SIZE. It may have done so, or it may have used a kernel space
19707e0563deSVitaly Kuznetsov * address depending on the Xen version.
19717e0563deSVitaly Kuznetsov * To avoid any possible virtual address collision, just use
19727e0563deSVitaly Kuznetsov * 2 * PUD_SIZE for the new area.
19737e0563deSVitaly Kuznetsov */
1974773dd2fcSKirill A. Shutemov pud_phys = new_area;
19757e0563deSVitaly Kuznetsov pmd_phys = pud_phys + PFN_PHYS(n_pud);
19767e0563deSVitaly Kuznetsov pt_phys = pmd_phys + PFN_PHYS(n_pmd);
19777e0563deSVitaly Kuznetsov p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
19787e0563deSVitaly Kuznetsov
19796c690ee1SAndy Lutomirski pgd = __va(read_cr3_pa());
19807e0563deSVitaly Kuznetsov new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
19817e0563deSVitaly Kuznetsov for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
19827e0563deSVitaly Kuznetsov pud = early_memremap(pud_phys, PAGE_SIZE);
19837e0563deSVitaly Kuznetsov clear_page(pud);
19847e0563deSVitaly Kuznetsov for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
19857e0563deSVitaly Kuznetsov idx_pmd++) {
19867e0563deSVitaly Kuznetsov pmd = early_memremap(pmd_phys, PAGE_SIZE);
19877e0563deSVitaly Kuznetsov clear_page(pmd);
19887e0563deSVitaly Kuznetsov for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
19897e0563deSVitaly Kuznetsov idx_pt++) {
19907e0563deSVitaly Kuznetsov pt = early_memremap(pt_phys, PAGE_SIZE);
19917e0563deSVitaly Kuznetsov clear_page(pt);
19927e0563deSVitaly Kuznetsov for (idx_pte = 0;
19937e0563deSVitaly Kuznetsov idx_pte < min(n_pte, PTRS_PER_PTE);
19947e0563deSVitaly Kuznetsov idx_pte++) {
199501bd2ac2SJuergen Gross pt[idx_pte] = pfn_pte(p2m_pfn,
199601bd2ac2SJuergen Gross PAGE_KERNEL);
19977e0563deSVitaly Kuznetsov p2m_pfn++;
19987e0563deSVitaly Kuznetsov }
19997e0563deSVitaly Kuznetsov n_pte -= PTRS_PER_PTE;
20007e0563deSVitaly Kuznetsov early_memunmap(pt, PAGE_SIZE);
20017e0563deSVitaly Kuznetsov make_lowmem_page_readonly(__va(pt_phys));
20027e0563deSVitaly Kuznetsov pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
20037e0563deSVitaly Kuznetsov PFN_DOWN(pt_phys));
200401bd2ac2SJuergen Gross pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
20057e0563deSVitaly Kuznetsov pt_phys += PAGE_SIZE;
20067e0563deSVitaly Kuznetsov }
20077e0563deSVitaly Kuznetsov n_pt -= PTRS_PER_PMD;
20087e0563deSVitaly Kuznetsov early_memunmap(pmd, PAGE_SIZE);
20097e0563deSVitaly Kuznetsov make_lowmem_page_readonly(__va(pmd_phys));
20107e0563deSVitaly Kuznetsov pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
20117e0563deSVitaly Kuznetsov PFN_DOWN(pmd_phys));
201201bd2ac2SJuergen Gross pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
20137e0563deSVitaly Kuznetsov pmd_phys += PAGE_SIZE;
20147e0563deSVitaly Kuznetsov }
20157e0563deSVitaly Kuznetsov n_pmd -= PTRS_PER_PUD;
20167e0563deSVitaly Kuznetsov early_memunmap(pud, PAGE_SIZE);
20177e0563deSVitaly Kuznetsov make_lowmem_page_readonly(__va(pud_phys));
20187e0563deSVitaly Kuznetsov pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
20197e0563deSVitaly Kuznetsov set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
20207e0563deSVitaly Kuznetsov pud_phys += PAGE_SIZE;
20217e0563deSVitaly Kuznetsov }
20227e0563deSVitaly Kuznetsov
20237e0563deSVitaly Kuznetsov /* Now copy the old p2m info to the new area. */
20247e0563deSVitaly Kuznetsov memcpy(new_p2m, xen_p2m_addr, size);
20257e0563deSVitaly Kuznetsov xen_p2m_addr = new_p2m;
20267e0563deSVitaly Kuznetsov
20277e0563deSVitaly Kuznetsov /* Release the old p2m list and set new list info. */
20287e0563deSVitaly Kuznetsov p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
20297e0563deSVitaly Kuznetsov BUG_ON(!p2m_pfn);
20307e0563deSVitaly Kuznetsov p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
20317e0563deSVitaly Kuznetsov
20327e0563deSVitaly Kuznetsov if (xen_start_info->mfn_list < __START_KERNEL_map) {
20337e0563deSVitaly Kuznetsov pfn = xen_start_info->first_p2m_pfn;
20347e0563deSVitaly Kuznetsov pfn_end = xen_start_info->first_p2m_pfn +
20357e0563deSVitaly Kuznetsov xen_start_info->nr_p2m_frames;
20367e0563deSVitaly Kuznetsov set_pgd(pgd + 1, __pgd(0));
20377e0563deSVitaly Kuznetsov } else {
20387e0563deSVitaly Kuznetsov pfn = p2m_pfn;
20397e0563deSVitaly Kuznetsov pfn_end = p2m_pfn_end;
20407e0563deSVitaly Kuznetsov }
20417e0563deSVitaly Kuznetsov
20423ecc6834SMike Rapoport memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
20437e0563deSVitaly Kuznetsov while (pfn < pfn_end) {
20447e0563deSVitaly Kuznetsov if (pfn == p2m_pfn) {
20457e0563deSVitaly Kuznetsov pfn = p2m_pfn_end;
20467e0563deSVitaly Kuznetsov continue;
20477e0563deSVitaly Kuznetsov }
20487e0563deSVitaly Kuznetsov make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
20497e0563deSVitaly Kuznetsov pfn++;
20507e0563deSVitaly Kuznetsov }
20517e0563deSVitaly Kuznetsov
20527e0563deSVitaly Kuznetsov xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
20537e0563deSVitaly Kuznetsov xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
20547e0563deSVitaly Kuznetsov xen_start_info->nr_p2m_frames = n_frames;
20557e0563deSVitaly Kuznetsov }
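/*
 * Worked example of the sizing math in the p2m relocation above
 * (illustrative numbers, assuming the usual x86-64 constants: 4 KiB
 * pages, PMD_SIZE = 2 MiB, PUD_SIZE = 1 GiB, P4D_SIZE = 512 GiB):
 * a guest with nr_pages = 1M (4 GiB of RAM) needs size = 8 MiB of p2m
 * data, so n_pte = 2048 data frames, n_pt = 4 L1 tables, n_pmd = 1 L2
 * table and n_pud = 1 L3 table, giving n_frames = 2054 frames for the
 * new area.
 */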
20567e0563deSVitaly Kuznetsov
20577e0563deSVitaly Kuznetsov void __init xen_reserve_special_pages(void)
20587e0563deSVitaly Kuznetsov {
20597e0563deSVitaly Kuznetsov phys_addr_t paddr;
20607e0563deSVitaly Kuznetsov
20617e0563deSVitaly Kuznetsov memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
20627e0563deSVitaly Kuznetsov if (xen_start_info->store_mfn) {
20637e0563deSVitaly Kuznetsov paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
20647e0563deSVitaly Kuznetsov memblock_reserve(paddr, PAGE_SIZE);
20657e0563deSVitaly Kuznetsov }
20667e0563deSVitaly Kuznetsov if (!xen_initial_domain()) {
20677e0563deSVitaly Kuznetsov paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
20687e0563deSVitaly Kuznetsov memblock_reserve(paddr, PAGE_SIZE);
20697e0563deSVitaly Kuznetsov }
20707e0563deSVitaly Kuznetsov }
20717e0563deSVitaly Kuznetsov
20727e0563deSVitaly Kuznetsov void __init xen_pt_check_e820(void)
20737e0563deSVitaly Kuznetsov {
2074242d0c3cSJuergen Gross xen_chk_is_e820_usable(xen_pt_base, xen_pt_size, "page table");
20757e0563deSVitaly Kuznetsov }
20767e0563deSVitaly Kuznetsov
20777e0563deSVitaly Kuznetsov static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
20787e0563deSVitaly Kuznetsov
20797e0563deSVitaly Kuznetsov static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
20807e0563deSVitaly Kuznetsov {
20817e0563deSVitaly Kuznetsov pte_t pte;
20824c360db6SJan Beulich unsigned long vaddr;
20837e0563deSVitaly Kuznetsov
20847e0563deSVitaly Kuznetsov phys >>= PAGE_SHIFT;
20857e0563deSVitaly Kuznetsov
20867e0563deSVitaly Kuznetsov switch (idx) {
20877e0563deSVitaly Kuznetsov case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2088a13f2ef1SJuergen Gross #ifdef CONFIG_X86_VSYSCALL_EMULATION
20897e0563deSVitaly Kuznetsov case VSYSCALL_PAGE:
20907e0563deSVitaly Kuznetsov #endif
20917e0563deSVitaly Kuznetsov /* All local page mappings */
20927e0563deSVitaly Kuznetsov pte = pfn_pte(phys, prot);
20937e0563deSVitaly Kuznetsov break;
20947e0563deSVitaly Kuznetsov
20957e0563deSVitaly Kuznetsov #ifdef CONFIG_X86_LOCAL_APIC
20967e0563deSVitaly Kuznetsov case FIX_APIC_BASE: /* maps dummy local APIC */
20977e0563deSVitaly Kuznetsov pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
20987e0563deSVitaly Kuznetsov break;
20997e0563deSVitaly Kuznetsov #endif
21007e0563deSVitaly Kuznetsov
21017e0563deSVitaly Kuznetsov #ifdef CONFIG_X86_IO_APIC
21027e0563deSVitaly Kuznetsov case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
21037e0563deSVitaly Kuznetsov /*
21047e0563deSVitaly Kuznetsov * We just don't map the IO APIC - all access is via
21057e0563deSVitaly Kuznetsov * hypercalls. Keep the address in the pte for reference.
21067e0563deSVitaly Kuznetsov */
21077e0563deSVitaly Kuznetsov pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
21087e0563deSVitaly Kuznetsov break;
21097e0563deSVitaly Kuznetsov #endif
21107e0563deSVitaly Kuznetsov
21117e0563deSVitaly Kuznetsov case FIX_PARAVIRT_BOOTMAP:
21127e0563deSVitaly Kuznetsov /* This is an MFN, but it isn't an IO mapping from the
21137e0563deSVitaly Kuznetsov IO domain */
21147e0563deSVitaly Kuznetsov pte = mfn_pte(phys, prot);
21157e0563deSVitaly Kuznetsov break;
21167e0563deSVitaly Kuznetsov
21177e0563deSVitaly Kuznetsov default:
21187e0563deSVitaly Kuznetsov /* By default, set_fixmap is used for hardware mappings */
21197e0563deSVitaly Kuznetsov pte = mfn_pte(phys, prot);
21207e0563deSVitaly Kuznetsov break;
21217e0563deSVitaly Kuznetsov }
21227e0563deSVitaly Kuznetsov
21234c360db6SJan Beulich vaddr = __fix_to_virt(idx);
21244c360db6SJan Beulich if (HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG))
21254c360db6SJan Beulich BUG();
21267e0563deSVitaly Kuznetsov
21277e0563deSVitaly Kuznetsov #ifdef CONFIG_X86_VSYSCALL_EMULATION
21287e0563deSVitaly Kuznetsov /* Replicate changes to map the vsyscall page into the user
21297e0563deSVitaly Kuznetsov pagetable vsyscall mapping. */
21304c360db6SJan Beulich if (idx == VSYSCALL_PAGE)
21317e0563deSVitaly Kuznetsov set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
21327e0563deSVitaly Kuznetsov #endif
21337e0563deSVitaly Kuznetsov }
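/*
 * Note on the dispatch above (descriptive): the cases using pfn_pte()
 * refer to frames in the guest's pseudo-physical space, which are
 * translated through the p2m when the PTE is written, while the cases
 * using mfn_pte() already carry machine frame numbers (hardware or
 * hypervisor-provided pages) and are installed untranslated.
 */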
21347e0563deSVitaly Kuznetsov
2135a4a7644cSJuergen Gross static void xen_enter_lazy_mmu(void)
2136a4a7644cSJuergen Gross {
2137a4a7644cSJuergen Gross enter_lazy(XEN_LAZY_MMU);
2138a4a7644cSJuergen Gross }
2139a4a7644cSJuergen Gross
2140a4a7644cSJuergen Gross static void xen_flush_lazy_mmu(void)
2141a4a7644cSJuergen Gross {
2142a4a7644cSJuergen Gross preempt_disable();
2143a4a7644cSJuergen Gross
2144a4a7644cSJuergen Gross if (xen_get_lazy_mode() == XEN_LAZY_MMU) {
2145a4a7644cSJuergen Gross arch_leave_lazy_mmu_mode();
2146a4a7644cSJuergen Gross arch_enter_lazy_mmu_mode();
2147a4a7644cSJuergen Gross }
2148a4a7644cSJuergen Gross
2149a4a7644cSJuergen Gross preempt_enable();
2150a4a7644cSJuergen Gross }
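/*
 * Illustrative sketch (hedged; not part of this file): the pattern the
 * lazy_mode hooks above support. Generic MM code brackets a run of PTE
 * updates with arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode();
 * under Xen PV the enter hook merely flags XEN_LAZY_MMU so intervening
 * updates queue as multicalls, and the leave hook (xen_leave_lazy_mmu()
 * below) issues the whole batch at once.
 */
static inline void example_batched_pte_updates(pte_t *ptep,
					       const pte_t *vals, int n)
{
	int i;

	arch_enter_lazy_mmu_mode();		/* -> xen_enter_lazy_mmu() */
	for (i = 0; i < n; i++)
		set_pte(ptep + i, vals[i]);	/* queued, not issued yet */
	arch_leave_lazy_mmu_mode();		/* -> xen_leave_lazy_mmu() */
}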
2151a4a7644cSJuergen Gross
21527e0563deSVitaly Kuznetsov static void __init xen_post_allocator_init(void)
21537e0563deSVitaly Kuznetsov {
21545c83511bSJuergen Gross pv_ops.mmu.set_pte = xen_set_pte;
21555c83511bSJuergen Gross pv_ops.mmu.set_pmd = xen_set_pmd;
21565c83511bSJuergen Gross pv_ops.mmu.set_pud = xen_set_pud;
21575c83511bSJuergen Gross pv_ops.mmu.set_p4d = xen_set_p4d;
21587e0563deSVitaly Kuznetsov
21597e0563deSVitaly Kuznetsov /* This will work as long as patching hasn't happened yet
21607e0563deSVitaly Kuznetsov (which it hasn't) */
21615c83511bSJuergen Gross pv_ops.mmu.alloc_pte = xen_alloc_pte;
21625c83511bSJuergen Gross pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
21635c83511bSJuergen Gross pv_ops.mmu.release_pte = xen_release_pte;
21645c83511bSJuergen Gross pv_ops.mmu.release_pmd = xen_release_pmd;
21655c83511bSJuergen Gross pv_ops.mmu.alloc_pud = xen_alloc_pud;
21665c83511bSJuergen Gross pv_ops.mmu.release_pud = xen_release_pud;
21675c83511bSJuergen Gross pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
21687e0563deSVitaly Kuznetsov
21695c83511bSJuergen Gross pv_ops.mmu.write_cr3 = &xen_write_cr3;
21707e0563deSVitaly Kuznetsov }
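/*
 * Descriptive note on the switch above: the boot-time _init variants
 * installed via xen_mmu_ops below rely on assumptions that only hold
 * while pagetable pages come from the early allocations; once the
 * normal allocators are running, the ops are flipped to their runtime
 * versions here.
 */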
21717e0563deSVitaly Kuznetsov
21727e0563deSVitaly Kuznetsov static void xen_leave_lazy_mmu(void)
21737e0563deSVitaly Kuznetsov {
21747e0563deSVitaly Kuznetsov preempt_disable();
21757e0563deSVitaly Kuznetsov xen_mc_flush();
2176a4a7644cSJuergen Gross leave_lazy(XEN_LAZY_MMU);
21777e0563deSVitaly Kuznetsov preempt_enable();
21787e0563deSVitaly Kuznetsov }
21797e0563deSVitaly Kuznetsov
21801462eb38SPeter Zijlstra static const typeof(pv_ops) xen_mmu_ops __initconst = {
21811462eb38SPeter Zijlstra .mmu = {
218255aedddbSPeter Zijlstra .read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2),
21837e0563deSVitaly Kuznetsov .write_cr2 = xen_write_cr2,
21847e0563deSVitaly Kuznetsov
21857e0563deSVitaly Kuznetsov .read_cr3 = xen_read_cr3,
21867e0563deSVitaly Kuznetsov .write_cr3 = xen_write_cr3_init,
21877e0563deSVitaly Kuznetsov
21887e0563deSVitaly Kuznetsov .flush_tlb_user = xen_flush_tlb,
21897e0563deSVitaly Kuznetsov .flush_tlb_kernel = xen_flush_tlb,
21901299ef1dSAndy Lutomirski .flush_tlb_one_user = xen_flush_tlb_one_user,
21914ce94eabSNadav Amit .flush_tlb_multi = xen_flush_tlb_multi,
219248a8b97cSPeter Zijlstra .tlb_remove_table = tlb_remove_table,
21937e0563deSVitaly Kuznetsov
21947e0563deSVitaly Kuznetsov .pgd_alloc = xen_pgd_alloc,
21957e0563deSVitaly Kuznetsov .pgd_free = xen_pgd_free,
21967e0563deSVitaly Kuznetsov
21977e0563deSVitaly Kuznetsov .alloc_pte = xen_alloc_pte_init,
21987e0563deSVitaly Kuznetsov .release_pte = xen_release_pte_init,
21997e0563deSVitaly Kuznetsov .alloc_pmd = xen_alloc_pmd_init,
22007e0563deSVitaly Kuznetsov .release_pmd = xen_release_pmd_init,
22017e0563deSVitaly Kuznetsov
22027e0563deSVitaly Kuznetsov .set_pte = xen_set_pte_init,
22037e0563deSVitaly Kuznetsov .set_pmd = xen_set_pmd_hyper,
22047e0563deSVitaly Kuznetsov
22052526cff7SJuergen Gross .ptep_modify_prot_start = xen_ptep_modify_prot_start,
22062526cff7SJuergen Gross .ptep_modify_prot_commit = xen_ptep_modify_prot_commit,
22077e0563deSVitaly Kuznetsov
22087e0563deSVitaly Kuznetsov .pte_val = PV_CALLEE_SAVE(xen_pte_val),
22097e0563deSVitaly Kuznetsov .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
22107e0563deSVitaly Kuznetsov
22117e0563deSVitaly Kuznetsov .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
22127e0563deSVitaly Kuznetsov .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
22137e0563deSVitaly Kuznetsov
22147e0563deSVitaly Kuznetsov .set_pud = xen_set_pud_hyper,
22157e0563deSVitaly Kuznetsov
22167e0563deSVitaly Kuznetsov .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
22177e0563deSVitaly Kuznetsov .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
22187e0563deSVitaly Kuznetsov
22197e0563deSVitaly Kuznetsov .pud_val = PV_CALLEE_SAVE(xen_pud_val),
22207e0563deSVitaly Kuznetsov .make_pud = PV_CALLEE_SAVE(xen_make_pud),
22217e0563deSVitaly Kuznetsov .set_p4d = xen_set_p4d_hyper,
22227e0563deSVitaly Kuznetsov
22237e0563deSVitaly Kuznetsov .alloc_pud = xen_alloc_pmd_init,
22247e0563deSVitaly Kuznetsov .release_pud = xen_release_pmd_init,
2225b9952ec7SKirill A. Shutemov
2226b9952ec7SKirill A. Shutemov #if CONFIG_PGTABLE_LEVELS >= 5
2227b9952ec7SKirill A. Shutemov .p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
2228b9952ec7SKirill A. Shutemov .make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
2229b9952ec7SKirill A. Shutemov #endif
22307e0563deSVitaly Kuznetsov
2231c9ae1b10SJuergen Gross .enter_mmap = xen_enter_mmap,
22327e0563deSVitaly Kuznetsov .exit_mmap = xen_exit_mmap,
22337e0563deSVitaly Kuznetsov
22347e0563deSVitaly Kuznetsov .lazy_mode = {
2235a4a7644cSJuergen Gross .enter = xen_enter_lazy_mmu,
22367e0563deSVitaly Kuznetsov .leave = xen_leave_lazy_mmu,
2237a4a7644cSJuergen Gross .flush = xen_flush_lazy_mmu,
22387e0563deSVitaly Kuznetsov },
22397e0563deSVitaly Kuznetsov
22407e0563deSVitaly Kuznetsov .set_fixmap = xen_set_fixmap,
22411462eb38SPeter Zijlstra },
22427e0563deSVitaly Kuznetsov };
22437e0563deSVitaly Kuznetsov
22447e0563deSVitaly Kuznetsov void __init xen_init_mmu_ops(void)
22457e0563deSVitaly Kuznetsov {
22467e0563deSVitaly Kuznetsov x86_init.paging.pagetable_init = xen_pagetable_init;
22476f84f8d1SPavel Tatashin x86_init.hyper.init_after_bootmem = xen_after_bootmem;
22487e0563deSVitaly Kuznetsov
22491462eb38SPeter Zijlstra pv_ops.mmu = xen_mmu_ops.mmu;
22507e0563deSVitaly Kuznetsov
22517e0563deSVitaly Kuznetsov memset(dummy_mapping, 0xff, PAGE_SIZE);
22527e0563deSVitaly Kuznetsov }
22537e0563deSVitaly Kuznetsov
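/* A not-present machine PTE, used below to blow away existing mappings. */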
22547e0563deSVitaly Kuznetsov #define VOID_PTE (mfn_pte(0, __pgprot(0)))
22557e0563deSVitaly Kuznetsov static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
22567e0563deSVitaly Kuznetsov unsigned long *in_frames,
22577e0563deSVitaly Kuznetsov unsigned long *out_frames)
22587e0563deSVitaly Kuznetsov {
22597e0563deSVitaly Kuznetsov int i;
22607e0563deSVitaly Kuznetsov struct multicall_space mcs;
22617e0563deSVitaly Kuznetsov
22627e0563deSVitaly Kuznetsov xen_mc_batch();
22637e0563deSVitaly Kuznetsov for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
22647e0563deSVitaly Kuznetsov mcs = __xen_mc_entry(0);
22657e0563deSVitaly Kuznetsov
22667e0563deSVitaly Kuznetsov if (in_frames)
2267067e4f17SLinus Walleij in_frames[i] = virt_to_mfn((void *)vaddr);
22687e0563deSVitaly Kuznetsov
22697e0563deSVitaly Kuznetsov MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2270067e4f17SLinus Walleij __set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY);
22717e0563deSVitaly Kuznetsov
22727e0563deSVitaly Kuznetsov if (out_frames)
2273067e4f17SLinus Walleij out_frames[i] = virt_to_pfn((void *)vaddr);
22747e0563deSVitaly Kuznetsov }
22757e0563deSVitaly Kuznetsov xen_mc_issue(0);
22767e0563deSVitaly Kuznetsov }
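/*
 * Descriptive note: the xen_mc_batch()/__xen_mc_entry()/xen_mc_issue()
 * pattern above queues one MULTI_update_va_mapping per page and submits
 * the queue as multicall hypercalls, so zapping an order-N range costs
 * a few hypervisor entries rather than one per page.
 */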
22777e0563deSVitaly Kuznetsov
22787e0563deSVitaly Kuznetsov /*
22797e0563deSVitaly Kuznetsov * Update the pfn-to-mfn mappings for a virtual address range, either to
22807e0563deSVitaly Kuznetsov * point to an array of mfns, or contiguously from a single starting
22817e0563deSVitaly Kuznetsov * mfn.
22827e0563deSVitaly Kuznetsov */
22837e0563deSVitaly Kuznetsov static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
22847e0563deSVitaly Kuznetsov unsigned long *mfns,
22857e0563deSVitaly Kuznetsov unsigned long first_mfn)
22867e0563deSVitaly Kuznetsov {
22877e0563deSVitaly Kuznetsov unsigned i, limit;
22887e0563deSVitaly Kuznetsov unsigned long mfn;
22897e0563deSVitaly Kuznetsov
22907e0563deSVitaly Kuznetsov xen_mc_batch();
22917e0563deSVitaly Kuznetsov
22927e0563deSVitaly Kuznetsov limit = 1u << order;
22937e0563deSVitaly Kuznetsov for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
22947e0563deSVitaly Kuznetsov struct multicall_space mcs;
22957e0563deSVitaly Kuznetsov unsigned flags;
22967e0563deSVitaly Kuznetsov
22977e0563deSVitaly Kuznetsov mcs = __xen_mc_entry(0);
22987e0563deSVitaly Kuznetsov if (mfns)
22997e0563deSVitaly Kuznetsov mfn = mfns[i];
23007e0563deSVitaly Kuznetsov else
23017e0563deSVitaly Kuznetsov mfn = first_mfn + i;
23027e0563deSVitaly Kuznetsov
23037e0563deSVitaly Kuznetsov if (i < (limit - 1))
23047e0563deSVitaly Kuznetsov flags = 0;
23057e0563deSVitaly Kuznetsov else {
23067e0563deSVitaly Kuznetsov if (order == 0)
23077e0563deSVitaly Kuznetsov flags = UVMF_INVLPG | UVMF_ALL;
23087e0563deSVitaly Kuznetsov else
23097e0563deSVitaly Kuznetsov flags = UVMF_TLB_FLUSH | UVMF_ALL;
23107e0563deSVitaly Kuznetsov }
23117e0563deSVitaly Kuznetsov
23127e0563deSVitaly Kuznetsov MULTI_update_va_mapping(mcs.mc, vaddr,
23137e0563deSVitaly Kuznetsov mfn_pte(mfn, PAGE_KERNEL), flags);
23147e0563deSVitaly Kuznetsov
2315067e4f17SLinus Walleij set_phys_to_machine(virt_to_pfn((void *)vaddr), mfn);
23167e0563deSVitaly Kuznetsov }
23177e0563deSVitaly Kuznetsov
23187e0563deSVitaly Kuznetsov xen_mc_issue(0);
23197e0563deSVitaly Kuznetsov }
23207e0563deSVitaly Kuznetsov
23217e0563deSVitaly Kuznetsov /*
23227e0563deSVitaly Kuznetsov * Perform the hypercall to exchange a region of our pfns to point to
23237e0563deSVitaly Kuznetsov * memory with the required contiguous alignment. Takes the pfns as
23247e0563deSVitaly Kuznetsov * input, and populates mfns as output.
23257e0563deSVitaly Kuznetsov *
23267e0563deSVitaly Kuznetsov * Returns a success code indicating whether the hypervisor was able to
23277e0563deSVitaly Kuznetsov * satisfy the request or not.
23287e0563deSVitaly Kuznetsov */
23297e0563deSVitaly Kuznetsov static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
23307e0563deSVitaly Kuznetsov unsigned long *pfns_in,
23317e0563deSVitaly Kuznetsov unsigned long extents_out,
23327e0563deSVitaly Kuznetsov unsigned int order_out,
23337e0563deSVitaly Kuznetsov unsigned long *mfns_out,
23347e0563deSVitaly Kuznetsov unsigned int address_bits)
23357e0563deSVitaly Kuznetsov {
23367e0563deSVitaly Kuznetsov long rc;
23377e0563deSVitaly Kuznetsov int success;
23387e0563deSVitaly Kuznetsov
23397e0563deSVitaly Kuznetsov struct xen_memory_exchange exchange = {
23407e0563deSVitaly Kuznetsov .in = {
23417e0563deSVitaly Kuznetsov .nr_extents = extents_in,
23427e0563deSVitaly Kuznetsov .extent_order = order_in,
23437e0563deSVitaly Kuznetsov .extent_start = pfns_in,
23447e0563deSVitaly Kuznetsov .domid = DOMID_SELF
23457e0563deSVitaly Kuznetsov },
23467e0563deSVitaly Kuznetsov .out = {
23477e0563deSVitaly Kuznetsov .nr_extents = extents_out,
23487e0563deSVitaly Kuznetsov .extent_order = order_out,
23497e0563deSVitaly Kuznetsov .extent_start = mfns_out,
23507e0563deSVitaly Kuznetsov .address_bits = address_bits,
23517e0563deSVitaly Kuznetsov .domid = DOMID_SELF
23527e0563deSVitaly Kuznetsov }
23537e0563deSVitaly Kuznetsov };
23547e0563deSVitaly Kuznetsov
23557e0563deSVitaly Kuznetsov BUG_ON(extents_in << order_in != extents_out << order_out);
23567e0563deSVitaly Kuznetsov
23577e0563deSVitaly Kuznetsov rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
23587e0563deSVitaly Kuznetsov success = (exchange.nr_exchanged == extents_in);
23597e0563deSVitaly Kuznetsov
23607e0563deSVitaly Kuznetsov BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
23617e0563deSVitaly Kuznetsov BUG_ON(success && (rc != 0));
23627e0563deSVitaly Kuznetsov
23637e0563deSVitaly Kuznetsov return success;
23647e0563deSVitaly Kuznetsov }
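/*
 * The BUG_ON()s above encode the all-or-nothing contract this caller
 * relies on: a partial exchange involving multi-page extents could not
 * be unwound safely, so any outcome between "nothing exchanged with
 * rc != 0" and "everything exchanged with rc == 0" is treated as fatal.
 */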
23657e0563deSVitaly Kuznetsov
23667e0563deSVitaly Kuznetsov int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
23677e0563deSVitaly Kuznetsov unsigned int address_bits,
23687e0563deSVitaly Kuznetsov dma_addr_t *dma_handle)
23697e0563deSVitaly Kuznetsov {
23705a32765aSJuergen Gross unsigned long *in_frames, out_frame;
23717e0563deSVitaly Kuznetsov unsigned long flags;
23727e0563deSVitaly Kuznetsov int success;
23737e0563deSVitaly Kuznetsov unsigned long vstart = (unsigned long)phys_to_virt(pstart);
23747e0563deSVitaly Kuznetsov
23755a32765aSJuergen Gross if (unlikely(order > discontig_frames_order)) {
23765a32765aSJuergen Gross if (!discontig_frames_dyn)
23777e0563deSVitaly Kuznetsov return -ENOMEM;
23787e0563deSVitaly Kuznetsov
23795a32765aSJuergen Gross if (alloc_discontig_frames(order))
23805a32765aSJuergen Gross return -ENOMEM;
23815a32765aSJuergen Gross }
23825a32765aSJuergen Gross
23837e0563deSVitaly Kuznetsov memset((void *) vstart, 0, PAGE_SIZE << order);
23847e0563deSVitaly Kuznetsov
23857e0563deSVitaly Kuznetsov spin_lock_irqsave(&xen_reservation_lock, flags);
23867e0563deSVitaly Kuznetsov
23875a32765aSJuergen Gross in_frames = discontig_frames;
23885a32765aSJuergen Gross
23897e0563deSVitaly Kuznetsov /* 1. Zap current PTEs, remembering MFNs. */
23907e0563deSVitaly Kuznetsov xen_zap_pfn_range(vstart, order, in_frames, NULL);
23917e0563deSVitaly Kuznetsov
23927e0563deSVitaly Kuznetsov /* 2. Get a new contiguous memory extent. */
2393067e4f17SLinus Walleij out_frame = virt_to_pfn((void *)vstart);
23947e0563deSVitaly Kuznetsov success = xen_exchange_memory(1UL << order, 0, in_frames,
23957e0563deSVitaly Kuznetsov 1, order, &out_frame,
23967e0563deSVitaly Kuznetsov address_bits);
23977e0563deSVitaly Kuznetsov
23987e0563deSVitaly Kuznetsov /* 3. Map the new extent in place of old pages. */
23997e0563deSVitaly Kuznetsov if (success)
24007e0563deSVitaly Kuznetsov xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
24017e0563deSVitaly Kuznetsov else
24027e0563deSVitaly Kuznetsov xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
24037e0563deSVitaly Kuznetsov
24047e0563deSVitaly Kuznetsov spin_unlock_irqrestore(&xen_reservation_lock, flags);
24057e0563deSVitaly Kuznetsov
24067e0563deSVitaly Kuznetsov *dma_handle = virt_to_machine(vstart).maddr;
24077e0563deSVitaly Kuznetsov return success ? 0 : -ENOMEM;
24087e0563deSVitaly Kuznetsov }
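/*
 * Illustrative caller (hedged; the in-tree user is the xen-swiotlb
 * code): turning an order-sized allocation into machine-contiguous,
 * DMA-addressable memory. The function name here is hypothetical.
 */
static inline int example_make_machine_contiguous(void *buf, size_t size,
						  u64 dma_mask)
{
	unsigned int order = get_order(size);
	dma_addr_t dma_handle;

	/* buf is assumed to come from __get_free_pages(GFP_KERNEL, order). */
	return xen_create_contiguous_region(virt_to_phys(buf), order,
					    fls64(dma_mask), &dma_handle);
}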
24097e0563deSVitaly Kuznetsov
24107e0563deSVitaly Kuznetsov void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
24117e0563deSVitaly Kuznetsov {
24125a32765aSJuergen Gross unsigned long *out_frames, in_frame;
24137e0563deSVitaly Kuznetsov unsigned long flags;
24147e0563deSVitaly Kuznetsov int success;
24157e0563deSVitaly Kuznetsov unsigned long vstart;
24167e0563deSVitaly Kuznetsov
24175a32765aSJuergen Gross if (unlikely(order > discontig_frames_order))
24187e0563deSVitaly Kuznetsov return;
24197e0563deSVitaly Kuznetsov
24207e0563deSVitaly Kuznetsov vstart = (unsigned long)phys_to_virt(pstart);
24217e0563deSVitaly Kuznetsov memset((void *) vstart, 0, PAGE_SIZE << order);
24227e0563deSVitaly Kuznetsov
24237e0563deSVitaly Kuznetsov spin_lock_irqsave(&xen_reservation_lock, flags);
24247e0563deSVitaly Kuznetsov
24255a32765aSJuergen Gross out_frames = discontig_frames;
24265a32765aSJuergen Gross
24277e0563deSVitaly Kuznetsov /* 1. Find start MFN of contiguous extent. */
2428067e4f17SLinus Walleij in_frame = virt_to_mfn((void *)vstart);
24297e0563deSVitaly Kuznetsov
24307e0563deSVitaly Kuznetsov /* 2. Zap current PTEs. */
24317e0563deSVitaly Kuznetsov xen_zap_pfn_range(vstart, order, NULL, out_frames);
24327e0563deSVitaly Kuznetsov
24337e0563deSVitaly Kuznetsov /* 3. Do the exchange for non-contiguous MFNs. */
24347e0563deSVitaly Kuznetsov success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
24357e0563deSVitaly Kuznetsov 0, out_frames, 0);
24367e0563deSVitaly Kuznetsov
24377e0563deSVitaly Kuznetsov /* 4. Map new pages in place of old pages. */
24387e0563deSVitaly Kuznetsov if (success)
24397e0563deSVitaly Kuznetsov xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
24407e0563deSVitaly Kuznetsov else
24417e0563deSVitaly Kuznetsov xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
24427e0563deSVitaly Kuznetsov
24437e0563deSVitaly Kuznetsov spin_unlock_irqrestore(&xen_reservation_lock, flags);
24447e0563deSVitaly Kuznetsov }
244529985b09SJuergen Gross
2446f030aadeSJuergen Gross static noinline void xen_flush_tlb_all(void)
2447f030aadeSJuergen Gross {
2448f030aadeSJuergen Gross struct mmuext_op *op;
2449f030aadeSJuergen Gross struct multicall_space mcs;
2450f030aadeSJuergen Gross
2451f030aadeSJuergen Gross preempt_disable();
2452f030aadeSJuergen Gross
2453f030aadeSJuergen Gross mcs = xen_mc_entry(sizeof(*op));
2454f030aadeSJuergen Gross
2455f030aadeSJuergen Gross op = mcs.args;
2456f030aadeSJuergen Gross op->cmd = MMUEXT_TLB_FLUSH_ALL;
2457f030aadeSJuergen Gross MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
2458f030aadeSJuergen Gross
2459a4a7644cSJuergen Gross xen_mc_issue(XEN_LAZY_MMU);
2460f030aadeSJuergen Gross
2461f030aadeSJuergen Gross preempt_enable();
2462f030aadeSJuergen Gross }
2463f030aadeSJuergen Gross
2464f030aadeSJuergen Gross #define REMAP_BATCH_SIZE 16
2465f030aadeSJuergen Gross
2466f030aadeSJuergen Gross struct remap_data {
2467f030aadeSJuergen Gross xen_pfn_t *pfn;
2468f030aadeSJuergen Gross bool contiguous;
2469f030aadeSJuergen Gross bool no_translate;
2470f030aadeSJuergen Gross pgprot_t prot;
2471f030aadeSJuergen Gross struct mmu_update *mmu_update;
2472f030aadeSJuergen Gross };
2473f030aadeSJuergen Gross
24748b1e0f81SAnshuman Khandual static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
2475f030aadeSJuergen Gross {
2476f030aadeSJuergen Gross struct remap_data *rmd = data;
2477f030aadeSJuergen Gross pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
2478f030aadeSJuergen Gross
2479f030aadeSJuergen Gross /*
2480f030aadeSJuergen Gross * If we have a contiguous range, just update the pfn itself,
2481f030aadeSJuergen Gross * else update pointer to be "next pfn".
2482f030aadeSJuergen Gross */
2483f030aadeSJuergen Gross if (rmd->contiguous)
2484f030aadeSJuergen Gross (*rmd->pfn)++;
2485f030aadeSJuergen Gross else
2486f030aadeSJuergen Gross rmd->pfn++;
2487f030aadeSJuergen Gross
2488f030aadeSJuergen Gross rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2489f030aadeSJuergen Gross rmd->mmu_update->ptr |= rmd->no_translate ?
2490f030aadeSJuergen Gross MMU_PT_UPDATE_NO_TRANSLATE :
2491f030aadeSJuergen Gross MMU_NORMAL_PT_UPDATE;
2492f030aadeSJuergen Gross rmd->mmu_update->val = pte_val_ma(pte);
2493f030aadeSJuergen Gross rmd->mmu_update++;
2494f030aadeSJuergen Gross
2495f030aadeSJuergen Gross return 0;
2496f030aadeSJuergen Gross }
2497f030aadeSJuergen Gross
2498f030aadeSJuergen Gross int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
2499f030aadeSJuergen Gross xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
250097315723SJan Beulich unsigned int domid, bool no_translate)
2501f030aadeSJuergen Gross {
2502f030aadeSJuergen Gross int err = 0;
2503f030aadeSJuergen Gross struct remap_data rmd;
2504f030aadeSJuergen Gross struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2505f030aadeSJuergen Gross unsigned long range;
2506f030aadeSJuergen Gross int mapped = 0;
2507f030aadeSJuergen Gross
2508f030aadeSJuergen Gross BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
2509f030aadeSJuergen Gross
2510f030aadeSJuergen Gross rmd.pfn = pfn;
2511f030aadeSJuergen Gross rmd.prot = prot;
2512f030aadeSJuergen Gross /*
2513f030aadeSJuergen Gross * We use the err_ptr to indicate whether we are doing a contiguous
2514163b0991SIngo Molnar * mapping or a discontiguous mapping.
2515f030aadeSJuergen Gross */
2516f030aadeSJuergen Gross rmd.contiguous = !err_ptr;
2517f030aadeSJuergen Gross rmd.no_translate = no_translate;
2518f030aadeSJuergen Gross
2519f030aadeSJuergen Gross while (nr) {
2520f030aadeSJuergen Gross int index = 0;
2521f030aadeSJuergen Gross int done = 0;
2522f030aadeSJuergen Gross int batch = min(REMAP_BATCH_SIZE, nr);
2523f030aadeSJuergen Gross int batch_left = batch;
2524f030aadeSJuergen Gross
2525f030aadeSJuergen Gross range = (unsigned long)batch << PAGE_SHIFT;
2526f030aadeSJuergen Gross
2527f030aadeSJuergen Gross rmd.mmu_update = mmu_update;
2528f030aadeSJuergen Gross err = apply_to_page_range(vma->vm_mm, addr, range,
2529f030aadeSJuergen Gross remap_area_pfn_pte_fn, &rmd);
2530f030aadeSJuergen Gross if (err)
2531f030aadeSJuergen Gross goto out;
2532f030aadeSJuergen Gross
2533f030aadeSJuergen Gross /*
2534f030aadeSJuergen Gross * We record the error for each page that gives an error, but
2535f030aadeSJuergen Gross * continue mapping until the whole set is done
2536f030aadeSJuergen Gross */
2537f030aadeSJuergen Gross do {
2538f030aadeSJuergen Gross int i;
2539f030aadeSJuergen Gross
2540f030aadeSJuergen Gross err = HYPERVISOR_mmu_update(&mmu_update[index],
2541f030aadeSJuergen Gross batch_left, &done, domid);
2542f030aadeSJuergen Gross
2543f030aadeSJuergen Gross /*
2544f030aadeSJuergen Gross * @err_ptr may be the same buffer as @pfn, so
2545f030aadeSJuergen Gross * only clear it after each chunk of @pfn is
2546f030aadeSJuergen Gross * used.
2547f030aadeSJuergen Gross */
2548f030aadeSJuergen Gross if (err_ptr) {
2549f030aadeSJuergen Gross for (i = index; i < index + done; i++)
2550f030aadeSJuergen Gross err_ptr[i] = 0;
2551f030aadeSJuergen Gross }
2552f030aadeSJuergen Gross if (err < 0) {
2553f030aadeSJuergen Gross if (!err_ptr)
2554f030aadeSJuergen Gross goto out;
2555f030aadeSJuergen Gross err_ptr[i] = err;
2556f030aadeSJuergen Gross done++; /* Skip failed frame. */
2557f030aadeSJuergen Gross } else
2558f030aadeSJuergen Gross mapped += done;
2559f030aadeSJuergen Gross batch_left -= done;
2560f030aadeSJuergen Gross index += done;
2561f030aadeSJuergen Gross } while (batch_left);
2562f030aadeSJuergen Gross
2563f030aadeSJuergen Gross nr -= batch;
2564f030aadeSJuergen Gross addr += range;
2565f030aadeSJuergen Gross if (err_ptr)
2566f030aadeSJuergen Gross err_ptr += batch;
2567f030aadeSJuergen Gross cond_resched();
2568f030aadeSJuergen Gross }
2569f030aadeSJuergen Gross out:
2570f030aadeSJuergen Gross
2571f030aadeSJuergen Gross xen_flush_tlb_all();
2572f030aadeSJuergen Gross
2573f030aadeSJuergen Gross return err < 0 ? err : mapped;
2574f030aadeSJuergen Gross }
2575f030aadeSJuergen Gross EXPORT_SYMBOL_GPL(xen_remap_pfn);
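/*
 * Descriptive note: xen_remap_pfn() is consumed through the wrappers in
 * include/xen/xen-ops.h (xen_remap_domain_gfn_array() and friends),
 * which the privcmd driver uses to map foreign-domain memory into user
 * space.
 */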
2576f030aadeSJuergen Gross
2577b34e8055SGreg Kroah-Hartman #ifdef CONFIG_KEXEC_CORE
257829985b09SJuergen Gross phys_addr_t paddr_vmcoreinfo_note(void)
257929985b09SJuergen Gross {
258029985b09SJuergen Gross if (xen_pv_domain())
2581203e9e41SXunlei Pang return virt_to_machine(vmcoreinfo_note).maddr;
258229985b09SJuergen Gross else
2583203e9e41SXunlei Pang return __pa(vmcoreinfo_note);
258429985b09SJuergen Gross }
258529985b09SJuergen Gross #endif /* CONFIG_KEXEC_CORE */