xref: /openbmc/linux/arch/x86/xen/mmu_pv.c (revision d061864b)
1 /*
2  * Xen mmu operations
3  *
4  * This file contains the various mmu fetch and update operations.
5  * The most important job they must perform is the mapping between the
6  * domain's pfn and the overall machine mfns.
7  *
8  * Xen allows guests to directly update the pagetable, in a controlled
9  * fashion.  In other words, the guest modifies the same pagetable
10  * that the CPU actually uses, which eliminates the overhead of having
11  * a separate shadow pagetable.
12  *
13  * In order to allow this, it falls on the guest domain to map its
14  * notion of a "physical" pfn - which is just a domain-local linear
15  * address - into a real "machine address" which the CPU's MMU can
16  * use.
17  *
18  * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19  * inserted directly into the pagetable.  When creating a new
20  * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
21  * when reading the content back with __(pgd|pmd|pte)_val, it converts
22  * the mfn back into a pfn.
23  *
24  * The other constraint is that all pages which make up a pagetable
25  * must be mapped read-only in the guest.  This prevents uncontrolled
26  * guest updates to the pagetable.  Xen strictly enforces this, and
27  * will disallow any pagetable update which will end up mapping a
28  * pagetable page RW, and will disallow using any writable page as a
29  * pagetable.
30  *
31  * Naively, when loading %cr3 with the base of a new pagetable, Xen
32  * would need to validate the whole pagetable before going on.
33  * Naturally, this is quite slow.  The solution is to "pin" a
34  * pagetable, which enforces all the constraints on the pagetable even
35  * when it is not actively in use.  This means that Xen can be assured
36  * that it is still valid when you do load it into %cr3, and doesn't
37  * need to revalidate it.
38  *
39  * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
40  */
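/*
 * Added summary sketch (drawn from the code below, not part of the original
 * header): updates to *pinned* pagetable pages must go through Xen, usually
 * as batched mmu_update hypercalls via the multicall machinery, while
 * unpinned pages can be written directly, e.g. as xen_set_pmd() does:
 *
 *	if (!xen_page_pinned(ptr))
 *		*ptr = val;			// direct write, page is RW
 *	else
 *		xen_set_pmd_hyper(ptr, val);	// hypercall, page is RO
 */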
41 #include <linux/sched/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/vmalloc.h>
46 #include <linux/export.h>
47 #include <linux/init.h>
48 #include <linux/gfp.h>
49 #include <linux/memblock.h>
50 #include <linux/seq_file.h>
51 #include <linux/crash_dump.h>
52 #ifdef CONFIG_KEXEC_CORE
53 #include <linux/kexec.h>
54 #endif
55 
56 #include <trace/events/xen.h>
57 
58 #include <asm/pgtable.h>
59 #include <asm/tlbflush.h>
60 #include <asm/fixmap.h>
61 #include <asm/mmu_context.h>
62 #include <asm/setup.h>
63 #include <asm/paravirt.h>
64 #include <asm/e820/api.h>
65 #include <asm/linkage.h>
66 #include <asm/page.h>
67 #include <asm/init.h>
68 #include <asm/pat.h>
69 #include <asm/smp.h>
70 #include <asm/tlb.h>
71 
72 #include <asm/xen/hypercall.h>
73 #include <asm/xen/hypervisor.h>
74 
75 #include <xen/xen.h>
76 #include <xen/page.h>
77 #include <xen/interface/xen.h>
78 #include <xen/interface/hvm/hvm_op.h>
79 #include <xen/interface/version.h>
80 #include <xen/interface/memory.h>
81 #include <xen/hvc-console.h>
82 
83 #include "multicalls.h"
84 #include "mmu.h"
85 #include "debugfs.h"
86 
87 #ifdef CONFIG_X86_32
88 /*
89  * Identity map, in addition to plain kernel map.  This needs to be
90  * large enough to allocate page table pages to map the rest.
91  * Each page can map 2MB.
92  */
93 #define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
94 static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
95 #endif
96 #ifdef CONFIG_X86_64
97 /* l3 pud for userspace vsyscall mapping */
98 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
99 #endif /* CONFIG_X86_64 */
100 
101 /*
102  * Note about cr3 (pagetable base) values:
103  *
104  * xen_cr3 contains the current logical cr3 value; it contains the
105  * last set cr3.  This may not be the current effective cr3, because
106  * its update may be lazily deferred.  However, a vcpu looking
107  * at its own cr3 can use this value knowing that everything will
108  * be self-consistent.
109  *
110  * xen_current_cr3 contains the actual vcpu cr3; it is set once the
111  * hypercall to set the vcpu cr3 is complete (so it may be a little
112  * out of date, but it will never be set early).  If one vcpu is
113  * looking at another vcpu's cr3 value, it should use this variable.
114  */
115 DEFINE_PER_CPU(unsigned long, xen_cr3);	 /* cr3 stored as physaddr */
116 DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
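/*
 * Added note: xen_drop_mm_ref() later in this file relies on this
 * distinction - it decides whether a remote cpu might still reference a
 * pagetable by comparing that cpu's xen_current_cr3 against __pa(mm->pgd),
 * since xen_cr3 alone may refer to an update still sitting in an unflushed
 * multicall batch.
 */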
117 
118 static phys_addr_t xen_pt_base, xen_pt_size __initdata;
119 
120 static DEFINE_STATIC_KEY_FALSE(xen_struct_pages_ready);
121 
122 /*
123  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
124  * redzone above it, so round it up to a PGD boundary.
125  */
126 #define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
127 
128 void make_lowmem_page_readonly(void *vaddr)
129 {
130 	pte_t *pte, ptev;
131 	unsigned long address = (unsigned long)vaddr;
132 	unsigned int level;
133 
134 	pte = lookup_address(address, &level);
135 	if (pte == NULL)
136 		return;		/* vaddr missing */
137 
138 	ptev = pte_wrprotect(*pte);
139 
140 	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
141 		BUG();
142 }
143 
144 void make_lowmem_page_readwrite(void *vaddr)
145 {
146 	pte_t *pte, ptev;
147 	unsigned long address = (unsigned long)vaddr;
148 	unsigned int level;
149 
150 	pte = lookup_address(address, &level);
151 	if (pte == NULL)
152 		return;		/* vaddr missing */
153 
154 	ptev = pte_mkwrite(*pte);
155 
156 	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
157 		BUG();
158 }
159 
160 
161 /*
162  * During early boot all page table pages are pinned, but we do not have struct
163  * pages, so return true until struct pages are ready.
164  */
165 static bool xen_page_pinned(void *ptr)
166 {
167 	if (static_branch_likely(&xen_struct_pages_ready)) {
168 		struct page *page = virt_to_page(ptr);
169 
170 		return PagePinned(page);
171 	}
172 	return true;
173 }
174 
175 static void xen_extend_mmu_update(const struct mmu_update *update)
176 {
177 	struct multicall_space mcs;
178 	struct mmu_update *u;
179 
180 	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
181 
182 	if (mcs.mc != NULL) {
183 		mcs.mc->args[1]++;
184 	} else {
185 		mcs = __xen_mc_entry(sizeof(*u));
186 		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
187 	}
188 
189 	u = mcs.args;
190 	*u = *update;
191 }
192 
193 static void xen_extend_mmuext_op(const struct mmuext_op *op)
194 {
195 	struct multicall_space mcs;
196 	struct mmuext_op *u;
197 
198 	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
199 
200 	if (mcs.mc != NULL) {
201 		mcs.mc->args[1]++;
202 	} else {
203 		mcs = __xen_mc_entry(sizeof(*u));
204 		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
205 	}
206 
207 	u = mcs.args;
208 	*u = *op;
209 }
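/*
 * Hedged usage sketch for the two extend helpers above, mirroring
 * xen_set_pmd_hyper()/xen_set_pud_hyper() below:
 *
 *	xen_mc_batch();
 *	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
 *	u.val = pmd_val_ma(val);
 *	xen_extend_mmu_update(&u);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 *
 * xen_mc_issue() flushes the batch immediately unless we are inside a lazy
 * MMU section, in which case the hypercall is deferred until the section
 * ends or the multicall buffer fills up.
 */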
210 
211 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
212 {
213 	struct mmu_update u;
214 
215 	preempt_disable();
216 
217 	xen_mc_batch();
218 
219 	/* ptr may be ioremapped for 64-bit pagetable setup */
220 	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
221 	u.val = pmd_val_ma(val);
222 	xen_extend_mmu_update(&u);
223 
224 	xen_mc_issue(PARAVIRT_LAZY_MMU);
225 
226 	preempt_enable();
227 }
228 
229 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
230 {
231 	trace_xen_mmu_set_pmd(ptr, val);
232 
233 	/* If page is not pinned, we can just update the entry
234 	   directly */
235 	if (!xen_page_pinned(ptr)) {
236 		*ptr = val;
237 		return;
238 	}
239 
240 	xen_set_pmd_hyper(ptr, val);
241 }
242 
243 /*
244  * Associate a virtual page frame with a given physical page frame
245  * and protection flags for that frame.
246  */
247 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
248 {
249 	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
250 }
251 
252 static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
253 {
254 	struct mmu_update u;
255 
256 	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
257 		return false;
258 
259 	xen_mc_batch();
260 
261 	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
262 	u.val = pte_val_ma(pteval);
263 	xen_extend_mmu_update(&u);
264 
265 	xen_mc_issue(PARAVIRT_LAZY_MMU);
266 
267 	return true;
268 }
269 
270 static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
271 {
272 	if (!xen_batched_set_pte(ptep, pteval)) {
273 		/*
274 		 * Could call native_set_pte() here and trap and
275 		 * emulate the PTE write but with 32-bit guests this
276 		 * needs two traps (one for each of the two 32-bit
277 		 * words in the PTE) so do one hypercall directly
278 		 * instead.
279 		 */
280 		struct mmu_update u;
281 
282 		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
283 		u.val = pte_val_ma(pteval);
284 		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
285 	}
286 }
287 
288 static void xen_set_pte(pte_t *ptep, pte_t pteval)
289 {
290 	trace_xen_mmu_set_pte(ptep, pteval);
291 	__xen_set_pte(ptep, pteval);
292 }
293 
294 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
295 		    pte_t *ptep, pte_t pteval)
296 {
297 	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
298 	__xen_set_pte(ptep, pteval);
299 }
300 
301 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
302 				 unsigned long addr, pte_t *ptep)
303 {
304 	/* Just return the pte as-is.  We preserve the bits on commit */
305 	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
306 	return *ptep;
307 }
308 
309 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
310 				 pte_t *ptep, pte_t pte)
311 {
312 	struct mmu_update u;
313 
314 	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
315 	xen_mc_batch();
316 
317 	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
318 	u.val = pte_val_ma(pte);
319 	xen_extend_mmu_update(&u);
320 
321 	xen_mc_issue(PARAVIRT_LAZY_MMU);
322 }
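/*
 * Added note: MMU_PT_UPDATE_PRESERVE_AD asks Xen to OR the pte's current
 * Accessed/Dirty bits into the new value, so hardware A/D updates made
 * between the modify_prot start and commit calls are not lost.
 */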
323 
324 /* Assume pteval_t is equivalent to all the other *val_t types. */
325 static pteval_t pte_mfn_to_pfn(pteval_t val)
326 {
327 	if (val & _PAGE_PRESENT) {
328 		unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
329 		unsigned long pfn = mfn_to_pfn(mfn);
330 
331 		pteval_t flags = val & PTE_FLAGS_MASK;
332 		if (unlikely(pfn == ~0))
333 			val = flags & ~_PAGE_PRESENT;
334 		else
335 			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
336 	}
337 
338 	return val;
339 }
340 
341 static pteval_t pte_pfn_to_mfn(pteval_t val)
342 {
343 	if (val & _PAGE_PRESENT) {
344 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
345 		pteval_t flags = val & PTE_FLAGS_MASK;
346 		unsigned long mfn;
347 
348 		mfn = __pfn_to_mfn(pfn);
349 
350 		/*
351 		 * If there's no mfn for the pfn, then just create an
352 		 * empty non-present pte.  Unfortunately this loses
353 		 * information about the original pfn, so
354 		 * pte_mfn_to_pfn is asymmetric.
355 		 */
356 		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
357 			mfn = 0;
358 			flags = 0;
359 		} else
360 			mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
361 		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
362 	}
363 
364 	return val;
365 }
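/*
 * Worked example with hypothetical numbers: if pfn 0x100 is backed by
 * machine frame 0x8050, pte_pfn_to_mfn() turns a present pte value of
 * (0x100 << PAGE_SHIFT) | flags into (0x8050 << PAGE_SHIFT) | flags.
 * If the p2m entry is INVALID_P2M_ENTRY (e.g. the page is ballooned out),
 * the result is an empty non-present pte, so pte_mfn_to_pfn() cannot
 * recover the original pfn.
 */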
366 
367 __visible pteval_t xen_pte_val(pte_t pte)
368 {
369 	pteval_t pteval = pte.pte;
370 
371 	return pte_mfn_to_pfn(pteval);
372 }
373 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
374 
375 __visible pgdval_t xen_pgd_val(pgd_t pgd)
376 {
377 	return pte_mfn_to_pfn(pgd.pgd);
378 }
379 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
380 
381 __visible pte_t xen_make_pte(pteval_t pte)
382 {
383 	pte = pte_pfn_to_mfn(pte);
384 
385 	return native_make_pte(pte);
386 }
387 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
388 
389 __visible pgd_t xen_make_pgd(pgdval_t pgd)
390 {
391 	pgd = pte_pfn_to_mfn(pgd);
392 	return native_make_pgd(pgd);
393 }
394 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
395 
396 __visible pmdval_t xen_pmd_val(pmd_t pmd)
397 {
398 	return pte_mfn_to_pfn(pmd.pmd);
399 }
400 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
401 
402 static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
403 {
404 	struct mmu_update u;
405 
406 	preempt_disable();
407 
408 	xen_mc_batch();
409 
410 	/* ptr may be ioremapped for 64-bit pagetable setup */
411 	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
412 	u.val = pud_val_ma(val);
413 	xen_extend_mmu_update(&u);
414 
415 	xen_mc_issue(PARAVIRT_LAZY_MMU);
416 
417 	preempt_enable();
418 }
419 
420 static void xen_set_pud(pud_t *ptr, pud_t val)
421 {
422 	trace_xen_mmu_set_pud(ptr, val);
423 
424 	/* If page is not pinned, we can just update the entry
425 	   directly */
426 	if (!xen_page_pinned(ptr)) {
427 		*ptr = val;
428 		return;
429 	}
430 
431 	xen_set_pud_hyper(ptr, val);
432 }
433 
434 #ifdef CONFIG_X86_PAE
435 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
436 {
437 	trace_xen_mmu_set_pte_atomic(ptep, pte);
438 	__xen_set_pte(ptep, pte);
439 }
440 
441 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
442 {
443 	trace_xen_mmu_pte_clear(mm, addr, ptep);
444 	__xen_set_pte(ptep, native_make_pte(0));
445 }
446 
447 static void xen_pmd_clear(pmd_t *pmdp)
448 {
449 	trace_xen_mmu_pmd_clear(pmdp);
450 	set_pmd(pmdp, __pmd(0));
451 }
452 #endif	/* CONFIG_X86_PAE */
453 
454 __visible pmd_t xen_make_pmd(pmdval_t pmd)
455 {
456 	pmd = pte_pfn_to_mfn(pmd);
457 	return native_make_pmd(pmd);
458 }
459 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
460 
461 #ifdef CONFIG_X86_64
462 __visible pudval_t xen_pud_val(pud_t pud)
463 {
464 	return pte_mfn_to_pfn(pud.pud);
465 }
466 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
467 
468 __visible pud_t xen_make_pud(pudval_t pud)
469 {
470 	pud = pte_pfn_to_mfn(pud);
471 
472 	return native_make_pud(pud);
473 }
474 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
475 
476 static pgd_t *xen_get_user_pgd(pgd_t *pgd)
477 {
478 	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
479 	unsigned offset = pgd - pgd_page;
480 	pgd_t *user_ptr = NULL;
481 
482 	if (offset < pgd_index(USER_LIMIT)) {
483 		struct page *page = virt_to_page(pgd_page);
484 		user_ptr = (pgd_t *)page->private;
485 		if (user_ptr)
486 			user_ptr += offset;
487 	}
488 
489 	return user_ptr;
490 }
491 
492 static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
493 {
494 	struct mmu_update u;
495 
496 	u.ptr = virt_to_machine(ptr).maddr;
497 	u.val = p4d_val_ma(val);
498 	xen_extend_mmu_update(&u);
499 }
500 
501 /*
502  * Raw hypercall-based set_p4d, intended for use in early boot before
503  * there's a page structure.  This implies:
504  *  1. The only existing pagetable is the kernel's
505  *  2. It is always pinned
506  *  3. It has no user pagetable attached to it
507  */
508 static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
509 {
510 	preempt_disable();
511 
512 	xen_mc_batch();
513 
514 	__xen_set_p4d_hyper(ptr, val);
515 
516 	xen_mc_issue(PARAVIRT_LAZY_MMU);
517 
518 	preempt_enable();
519 }
520 
521 static void xen_set_p4d(p4d_t *ptr, p4d_t val)
522 {
523 	pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
524 	pgd_t pgd_val;
525 
526 	trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);
527 
528 	/* If page is not pinned, we can just update the entry
529 	   directly */
530 	if (!xen_page_pinned(ptr)) {
531 		*ptr = val;
532 		if (user_ptr) {
533 			WARN_ON(xen_page_pinned(user_ptr));
534 			pgd_val.pgd = p4d_val_ma(val);
535 			*user_ptr = pgd_val;
536 		}
537 		return;
538 	}
539 
540 	/* If it's pinned, then we can at least batch the kernel and
541 	   user updates together. */
542 	xen_mc_batch();
543 
544 	__xen_set_p4d_hyper(ptr, val);
545 	if (user_ptr)
546 		__xen_set_p4d_hyper((p4d_t *)user_ptr, val);
547 
548 	xen_mc_issue(PARAVIRT_LAZY_MMU);
549 }
550 
551 #if CONFIG_PGTABLE_LEVELS >= 5
552 __visible p4dval_t xen_p4d_val(p4d_t p4d)
553 {
554 	return pte_mfn_to_pfn(p4d.p4d);
555 }
556 PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val);
557 
558 __visible p4d_t xen_make_p4d(p4dval_t p4d)
559 {
560 	p4d = pte_pfn_to_mfn(p4d);
561 
562 	return native_make_p4d(p4d);
563 }
564 PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
565 #endif  /* CONFIG_PGTABLE_LEVELS >= 5 */
566 #endif	/* CONFIG_X86_64 */
567 
568 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
569 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
570 		bool last, unsigned long limit)
571 {
572 	int i, nr, flush = 0;
573 
574 	nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
575 	for (i = 0; i < nr; i++) {
576 		if (!pmd_none(pmd[i]))
577 			flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
578 	}
579 	return flush;
580 }
581 
582 static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
583 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
584 		bool last, unsigned long limit)
585 {
586 	int i, nr, flush = 0;
587 
588 	nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
589 	for (i = 0; i < nr; i++) {
590 		pmd_t *pmd;
591 
592 		if (pud_none(pud[i]))
593 			continue;
594 
595 		pmd = pmd_offset(&pud[i], 0);
596 		if (PTRS_PER_PMD > 1)
597 			flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
598 		flush |= xen_pmd_walk(mm, pmd, func,
599 				last && i == nr - 1, limit);
600 	}
601 	return flush;
602 }
603 
604 static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
605 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
606 		bool last, unsigned long limit)
607 {
608 	int flush = 0;
609 	pud_t *pud;
610 
611 
612 	if (p4d_none(*p4d))
613 		return flush;
614 
615 	pud = pud_offset(p4d, 0);
616 	if (PTRS_PER_PUD > 1)
617 		flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
618 	flush |= xen_pud_walk(mm, pud, func, last, limit);
619 	return flush;
620 }
621 
622 /*
623  * (Yet another) pagetable walker.  This one is intended for pinning a
624  * pagetable.  This means that it walks a pagetable and calls the
625  * callback function on each page it finds making up the page table,
626  * at every level.  It walks the entire pagetable, but it only bothers
627  * pinning pte pages which are below limit.  In the normal case this
628  * will be STACK_TOP_MAX, but at boot we need to pin up to
629  * FIXADDR_TOP.
630  *
631  * For 32-bit the important bit is that we don't pin beyond there,
632  * because then we start getting into Xen's ptes.
633  *
634  * For 64-bit, we must skip the Xen hole in the middle of the address
635  * space, just after the big x86-64 virtual hole.
636  */
637 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
638 			  int (*func)(struct mm_struct *mm, struct page *,
639 				      enum pt_level),
640 			  unsigned long limit)
641 {
642 	int i, nr, flush = 0;
643 	unsigned hole_low, hole_high;
644 
645 	/* The limit is the last byte to be touched */
646 	limit--;
647 	BUG_ON(limit >= FIXADDR_TOP);
648 
649 	/*
650 	 * 64-bit has a great big hole in the middle of the address
651 	 * space, which contains the Xen mappings.  On 32-bit these
652  * will end up making a zero-sized hole, so this is a no-op.
653 	 */
654 	hole_low = pgd_index(USER_LIMIT);
655 	hole_high = pgd_index(PAGE_OFFSET);
656 
657 	nr = pgd_index(limit) + 1;
658 	for (i = 0; i < nr; i++) {
659 		p4d_t *p4d;
660 
661 		if (i >= hole_low && i < hole_high)
662 			continue;
663 
664 		if (pgd_none(pgd[i]))
665 			continue;
666 
667 		p4d = p4d_offset(&pgd[i], 0);
668 		flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
669 	}
670 
671 	/* Do the top level last, so that the callbacks can use it as
672 	   a cue to do final things like tlb flushes. */
673 	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
674 
675 	return flush;
676 }
677 
678 static int xen_pgd_walk(struct mm_struct *mm,
679 			int (*func)(struct mm_struct *mm, struct page *,
680 				    enum pt_level),
681 			unsigned long limit)
682 {
683 	return __xen_pgd_walk(mm, mm->pgd, func, limit);
684 }
685 
686 /* If we're using split pte locks, then take the page's lock and
687    return a pointer to it.  Otherwise return NULL. */
688 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
689 {
690 	spinlock_t *ptl = NULL;
691 
692 #if USE_SPLIT_PTE_PTLOCKS
693 	ptl = ptlock_ptr(page);
694 	spin_lock_nest_lock(ptl, &mm->page_table_lock);
695 #endif
696 
697 	return ptl;
698 }
699 
700 static void xen_pte_unlock(void *v)
701 {
702 	spinlock_t *ptl = v;
703 	spin_unlock(ptl);
704 }
705 
706 static void xen_do_pin(unsigned level, unsigned long pfn)
707 {
708 	struct mmuext_op op;
709 
710 	op.cmd = level;
711 	op.arg1.mfn = pfn_to_mfn(pfn);
712 
713 	xen_extend_mmuext_op(&op);
714 }
715 
716 static int xen_pin_page(struct mm_struct *mm, struct page *page,
717 			enum pt_level level)
718 {
719 	unsigned pgfl = TestSetPagePinned(page);
720 	int flush;
721 
722 	if (pgfl)
723 		flush = 0;		/* already pinned */
724 	else if (PageHighMem(page))
725 		/* kmaps need flushing if we found an unpinned
726 		   highpage */
727 		flush = 1;
728 	else {
729 		void *pt = lowmem_page_address(page);
730 		unsigned long pfn = page_to_pfn(page);
731 		struct multicall_space mcs = __xen_mc_entry(0);
732 		spinlock_t *ptl;
733 
734 		flush = 0;
735 
736 		/*
737 		 * We need to hold the pagetable lock between the time
738 		 * we make the pagetable RO and when we actually pin
739 		 * it.  If we don't, then other users may come in and
740 		 * attempt to update the pagetable by writing it,
741 		 * which will fail because the memory is RO but not
742 		 * pinned, so Xen won't do the trap'n'emulate.
743 		 *
744 		 * If we're using split pte locks, we can't hold the
745 		 * entire pagetable's worth of locks during the
746 		 * traverse, because we may wrap the preempt count (8
747 		 * bits).  The solution is to mark RO and pin each PTE
748 		 * page while holding the lock.  This means the number
749 		 * of locks we end up holding is never more than a
750 		 * batch size (~32 entries, at present).
751 		 *
752 		 * If we're not using split pte locks, we needn't pin
753 		 * the PTE pages independently, because we're
754 		 * protected by the overall pagetable lock.
755 		 */
756 		ptl = NULL;
757 		if (level == PT_PTE)
758 			ptl = xen_pte_lock(page, mm);
759 
760 		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
761 					pfn_pte(pfn, PAGE_KERNEL_RO),
762 					level == PT_PGD ? UVMF_TLB_FLUSH : 0);
763 
764 		if (ptl) {
765 			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
766 
767 			/* Queue a deferred unlock for when this batch
768 			   is completed. */
769 			xen_mc_callback(xen_pte_unlock, ptl);
770 		}
771 	}
772 
773 	return flush;
774 }
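/*
 * Added sketch of the per-page sequence queued by xen_pin_page() for a
 * low-mem PTE page (both operations travel in the same multicall batch):
 *
 *	update_va_mapping(va, pfn_pte(pfn, PAGE_KERNEL_RO), 0)   <- make RO
 *	mmuext_op(MMUEXT_PIN_L1_TABLE, mfn)                      <- pin it
 *
 * with the page's pte lock held across both steps when split pte locks are
 * in use, and the unlock deferred until the batch has completed.
 */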
775 
776 /* This is called just after a mm has been created, but it has not
777    been used yet.  We need to make sure that its pagetable is all
778    read-only, and can be pinned. */
779 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
780 {
781 	trace_xen_mmu_pgd_pin(mm, pgd);
782 
783 	xen_mc_batch();
784 
785 	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
786 		/* re-enable interrupts for flushing */
787 		xen_mc_issue(0);
788 
789 		kmap_flush_unused();
790 
791 		xen_mc_batch();
792 	}
793 
794 #ifdef CONFIG_X86_64
795 	{
796 		pgd_t *user_pgd = xen_get_user_pgd(pgd);
797 
798 		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
799 
800 		if (user_pgd) {
801 			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
802 			xen_do_pin(MMUEXT_PIN_L4_TABLE,
803 				   PFN_DOWN(__pa(user_pgd)));
804 		}
805 	}
806 #else /* CONFIG_X86_32 */
807 #ifdef CONFIG_X86_PAE
808 	/* Need to make sure unshared kernel PMD is pinnable */
809 	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
810 		     PT_PMD);
811 #endif
812 	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
813 #endif /* CONFIG_X86_64 */
814 	xen_mc_issue(0);
815 }
816 
817 static void xen_pgd_pin(struct mm_struct *mm)
818 {
819 	__xen_pgd_pin(mm, mm->pgd);
820 }
821 
822 /*
823  * On save, we need to pin all pagetables to make sure they get their
824  * mfns turned into pfns.  Search the list for any unpinned pgds and pin
825  * them (unpinned pgds are not currently in use, probably because the
826  * process is under construction or destruction).
827  *
828  * Expected to be called in stop_machine() ("equivalent to taking
829  * every spinlock in the system"), so the locking doesn't really
830  * matter all that much.
831  */
832 void xen_mm_pin_all(void)
833 {
834 	struct page *page;
835 
836 	spin_lock(&pgd_lock);
837 
838 	list_for_each_entry(page, &pgd_list, lru) {
839 		if (!PagePinned(page)) {
840 			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
841 			SetPageSavePinned(page);
842 		}
843 	}
844 
845 	spin_unlock(&pgd_lock);
846 }
847 
848 static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
849 				  enum pt_level level)
850 {
851 	SetPagePinned(page);
852 	return 0;
853 }
854 
855 /*
856  * The init_mm pagetable is really pinned as soon as it's created, but
857  * that's before we have page structures to store the bits.  So do all
858  * the book-keeping now once struct pages for allocated pages are
859  * initialized. This happens only after free_all_bootmem() is called.
860  */
861 static void __init xen_after_bootmem(void)
862 {
863 	static_branch_enable(&xen_struct_pages_ready);
864 #ifdef CONFIG_X86_64
865 	SetPagePinned(virt_to_page(level3_user_vsyscall));
866 #endif
867 	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
868 }
869 
870 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
871 			  enum pt_level level)
872 {
873 	unsigned pgfl = TestClearPagePinned(page);
874 
875 	if (pgfl && !PageHighMem(page)) {
876 		void *pt = lowmem_page_address(page);
877 		unsigned long pfn = page_to_pfn(page);
878 		spinlock_t *ptl = NULL;
879 		struct multicall_space mcs;
880 
881 		/*
882 		 * Do the converse to pin_page.  If we're using split
883 		 * pte locks, we must be holding the lock while
884 		 * the pte page is unpinned but still RO to prevent
885 		 * concurrent updates from seeing it in this
886 		 * partially-pinned state.
887 		 */
888 		if (level == PT_PTE) {
889 			ptl = xen_pte_lock(page, mm);
890 
891 			if (ptl)
892 				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
893 		}
894 
895 		mcs = __xen_mc_entry(0);
896 
897 		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
898 					pfn_pte(pfn, PAGE_KERNEL),
899 					level == PT_PGD ? UVMF_TLB_FLUSH : 0);
900 
901 		if (ptl) {
902 			/* unlock when batch completed */
903 			xen_mc_callback(xen_pte_unlock, ptl);
904 		}
905 	}
906 
907 	return 0;		/* never need to flush on unpin */
908 }
909 
910 /* Release a pagetable's pages back as normal RW */
911 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
912 {
913 	trace_xen_mmu_pgd_unpin(mm, pgd);
914 
915 	xen_mc_batch();
916 
917 	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
918 
919 #ifdef CONFIG_X86_64
920 	{
921 		pgd_t *user_pgd = xen_get_user_pgd(pgd);
922 
923 		if (user_pgd) {
924 			xen_do_pin(MMUEXT_UNPIN_TABLE,
925 				   PFN_DOWN(__pa(user_pgd)));
926 			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
927 		}
928 	}
929 #endif
930 
931 #ifdef CONFIG_X86_PAE
932 	/* Need to make sure unshared kernel PMD is unpinned */
933 	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
934 		       PT_PMD);
935 #endif
936 
937 	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
938 
939 	xen_mc_issue(0);
940 }
941 
942 static void xen_pgd_unpin(struct mm_struct *mm)
943 {
944 	__xen_pgd_unpin(mm, mm->pgd);
945 }
946 
947 /*
948  * On resume, undo any pinning done at save, so that the rest of the
949  * kernel doesn't see any unexpected pinned pagetables.
950  */
951 void xen_mm_unpin_all(void)
952 {
953 	struct page *page;
954 
955 	spin_lock(&pgd_lock);
956 
957 	list_for_each_entry(page, &pgd_list, lru) {
958 		if (PageSavePinned(page)) {
959 			BUG_ON(!PagePinned(page));
960 			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
961 			ClearPageSavePinned(page);
962 		}
963 	}
964 
965 	spin_unlock(&pgd_lock);
966 }
967 
968 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
969 {
970 	spin_lock(&next->page_table_lock);
971 	xen_pgd_pin(next);
972 	spin_unlock(&next->page_table_lock);
973 }
974 
975 static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
976 {
977 	spin_lock(&mm->page_table_lock);
978 	xen_pgd_pin(mm);
979 	spin_unlock(&mm->page_table_lock);
980 }
981 
982 static void drop_mm_ref_this_cpu(void *info)
983 {
984 	struct mm_struct *mm = info;
985 
986 	if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
987 		leave_mm(smp_processor_id());
988 
989 	/*
990 	 * If this cpu still has a stale cr3 reference, then make sure
991 	 * it has been flushed.
992 	 */
993 	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
994 		xen_mc_flush();
995 }
996 
997 #ifdef CONFIG_SMP
998 /*
999  * Another cpu may still have its %cr3 pointing at the pagetable, so
1000  * we need to repoint it somewhere else before we can unpin it.
1001  */
1002 static void xen_drop_mm_ref(struct mm_struct *mm)
1003 {
1004 	cpumask_var_t mask;
1005 	unsigned cpu;
1006 
1007 	drop_mm_ref_this_cpu(mm);
1008 
1009 	/* Get the "official" set of cpus referring to our pagetable. */
1010 	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1011 		for_each_online_cpu(cpu) {
1012 			if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1013 				continue;
1014 			smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
1015 		}
1016 		return;
1017 	}
1018 
1019 	/*
1020 	 * It's possible that a vcpu may have a stale reference to our
1021 	 * cr3, because it's in lazy mode and hasn't yet flushed
1022 	 * its set of pending hypercalls.  In this case, we can
1023 	 * look at its actual current cr3 value, and force it to flush
1024 	 * if needed.
1025 	 */
1026 	cpumask_clear(mask);
1027 	for_each_online_cpu(cpu) {
1028 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1029 			cpumask_set_cpu(cpu, mask);
1030 	}
1031 
1032 	smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
1033 	free_cpumask_var(mask);
1034 }
1035 #else
1036 static void xen_drop_mm_ref(struct mm_struct *mm)
1037 {
1038 	drop_mm_ref_this_cpu(mm);
1039 }
1040 #endif
1041 
1042 /*
1043  * While a process runs, Xen pins its pagetables, which means that the
1044  * hypervisor forces it to be read-only, and it controls all updates
1045  * to it.  This means that all pagetable updates have to go via the
1046  * hypervisor, which is moderately expensive.
1047  *
1048  * Since we're pulling the pagetable down, we switch to use init_mm,
1049  * unpin old process pagetable and mark it all read-write, which
1050  * allows further operations on it to be simple memory accesses.
1051  *
1052  * The only subtle point is that another CPU may be still using the
1053  * pagetable because of lazy tlb flushing.  This means we need to
1054  * switch all CPUs off this pagetable before we can unpin it.
1055  */
1056 static void xen_exit_mmap(struct mm_struct *mm)
1057 {
1058 	get_cpu();		/* make sure we don't move around */
1059 	xen_drop_mm_ref(mm);
1060 	put_cpu();
1061 
1062 	spin_lock(&mm->page_table_lock);
1063 
1064 	/* pgd may not be pinned in the error exit path of execve */
1065 	if (xen_page_pinned(mm->pgd))
1066 		xen_pgd_unpin(mm);
1067 
1068 	spin_unlock(&mm->page_table_lock);
1069 }
1070 
1071 static void xen_post_allocator_init(void);
1072 
1073 static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1074 {
1075 	struct mmuext_op op;
1076 
1077 	op.cmd = cmd;
1078 	op.arg1.mfn = pfn_to_mfn(pfn);
1079 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1080 		BUG();
1081 }
1082 
1083 #ifdef CONFIG_X86_64
1084 static void __init xen_cleanhighmap(unsigned long vaddr,
1085 				    unsigned long vaddr_end)
1086 {
1087 	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1088 	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1089 
1090 	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
1091 	 * We include the PMD passed in on _both_ boundaries. */
1092 	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1093 			pmd++, vaddr += PMD_SIZE) {
1094 		if (pmd_none(*pmd))
1095 			continue;
1096 		if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1097 			set_pmd(pmd, __pmd(0));
1098 	}
1099 	/* In case we did something silly, we should crash in this function
1100 	 * instead of somewhere later where it would be confusing. */
1101 	xen_mc_flush();
1102 }
1103 
1104 /*
1105  * Make a page range writeable and free it.
1106  */
1107 static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1108 {
1109 	void *vaddr = __va(paddr);
1110 	void *vaddr_end = vaddr + size;
1111 
1112 	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1113 		make_lowmem_page_readwrite(vaddr);
1114 
1115 	memblock_free(paddr, size);
1116 }
1117 
1118 static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
1119 {
1120 	unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1121 
1122 	if (unpin)
1123 		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
1124 	ClearPagePinned(virt_to_page(__va(pa)));
1125 	xen_free_ro_pages(pa, PAGE_SIZE);
1126 }
1127 
1128 static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
1129 {
1130 	unsigned long pa;
1131 	pte_t *pte_tbl;
1132 	int i;
1133 
1134 	if (pmd_large(*pmd)) {
1135 		pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1136 		xen_free_ro_pages(pa, PMD_SIZE);
1137 		return;
1138 	}
1139 
1140 	pte_tbl = pte_offset_kernel(pmd, 0);
1141 	for (i = 0; i < PTRS_PER_PTE; i++) {
1142 		if (pte_none(pte_tbl[i]))
1143 			continue;
1144 		pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
1145 		xen_free_ro_pages(pa, PAGE_SIZE);
1146 	}
1147 	set_pmd(pmd, __pmd(0));
1148 	xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
1149 }
1150 
1151 static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
1152 {
1153 	unsigned long pa;
1154 	pmd_t *pmd_tbl;
1155 	int i;
1156 
1157 	if (pud_large(*pud)) {
1158 		pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1159 		xen_free_ro_pages(pa, PUD_SIZE);
1160 		return;
1161 	}
1162 
1163 	pmd_tbl = pmd_offset(pud, 0);
1164 	for (i = 0; i < PTRS_PER_PMD; i++) {
1165 		if (pmd_none(pmd_tbl[i]))
1166 			continue;
1167 		xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
1168 	}
1169 	set_pud(pud, __pud(0));
1170 	xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
1171 }
1172 
1173 static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
1174 {
1175 	unsigned long pa;
1176 	pud_t *pud_tbl;
1177 	int i;
1178 
1179 	if (p4d_large(*p4d)) {
1180 		pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
1181 		xen_free_ro_pages(pa, P4D_SIZE);
1182 		return;
1183 	}
1184 
1185 	pud_tbl = pud_offset(p4d, 0);
1186 	for (i = 0; i < PTRS_PER_PUD; i++) {
1187 		if (pud_none(pud_tbl[i]))
1188 			continue;
1189 		xen_cleanmfnmap_pud(pud_tbl + i, unpin);
1190 	}
1191 	set_p4d(p4d, __p4d(0));
1192 	xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
1193 }
1194 
1195 /*
1196  * Since it is well isolated we can (and since it is perhaps large we should)
1197  * also free the page tables mapping the initial P->M table.
1198  */
1199 static void __init xen_cleanmfnmap(unsigned long vaddr)
1200 {
1201 	pgd_t *pgd;
1202 	p4d_t *p4d;
1203 	bool unpin;
1204 
1205 	unpin = (vaddr == 2 * PGDIR_SIZE);
1206 	vaddr &= PMD_MASK;
1207 	pgd = pgd_offset_k(vaddr);
1208 	p4d = p4d_offset(pgd, 0);
1209 	if (!p4d_none(*p4d))
1210 		xen_cleanmfnmap_p4d(p4d, unpin);
1211 }
1212 
1213 static void __init xen_pagetable_p2m_free(void)
1214 {
1215 	unsigned long size;
1216 	unsigned long addr;
1217 
1218 	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1219 
1220 	/* No memory or already called. */
1221 	if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
1222 		return;
1223 
1224 	/* using __ka address and sticking INVALID_P2M_ENTRY! */
1225 	memset((void *)xen_start_info->mfn_list, 0xff, size);
1226 
1227 	addr = xen_start_info->mfn_list;
1228 	/*
1229 	 * We could be in __ka space.
1230 	 * We round up to the PMD, which means that if anybody at this stage is
1231 	 * using the __ka address of xen_start_info or
1232 	 * xen_start_info->shared_info they are going to crash. Fortunately
1233 	 * we have already revectored in xen_setup_kernel_pagetable.
1234 	 */
1235 	size = roundup(size, PMD_SIZE);
1236 
1237 	if (addr >= __START_KERNEL_map) {
1238 		xen_cleanhighmap(addr, addr + size);
1239 		size = PAGE_ALIGN(xen_start_info->nr_pages *
1240 				  sizeof(unsigned long));
1241 		memblock_free(__pa(addr), size);
1242 	} else {
1243 		xen_cleanmfnmap(addr);
1244 	}
1245 }
1246 
1247 static void __init xen_pagetable_cleanhighmap(void)
1248 {
1249 	unsigned long size;
1250 	unsigned long addr;
1251 
1252 	/* At this stage, cleanup_highmap has already cleaned __ka space
1253 	 * from _brk_limit way up to the max_pfn_mapped (which is the end of
1254 	 * the ramdisk). We continue on, erasing PMD entries that point to page
1255 	 * tables - do note that they are accessible at this stage via __va.
1256 	 * As Xen is aligning the memory end to a 4MB boundary, for good
1257 	 * measure we also round up to PMD_SIZE * 2 - which means that if
1258 	 * anybody is using a __ka address for the initial boot-stack - and tries
1259 	 * to use it - they are going to crash. The xen_start_info has been
1260 	 * taken care of already in xen_setup_kernel_pagetable. */
1261 	addr = xen_start_info->pt_base;
1262 	size = xen_start_info->nr_pt_frames * PAGE_SIZE;
1263 
1264 	xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
1265 	xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1266 }
1267 #endif
1268 
1269 static void __init xen_pagetable_p2m_setup(void)
1270 {
1271 	xen_vmalloc_p2m_tree();
1272 
1273 #ifdef CONFIG_X86_64
1274 	xen_pagetable_p2m_free();
1275 
1276 	xen_pagetable_cleanhighmap();
1277 #endif
1278 	/* And revector! Bye bye old array */
1279 	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1280 }
1281 
1282 static void __init xen_pagetable_init(void)
1283 {
1284 	paging_init();
1285 	xen_post_allocator_init();
1286 
1287 	xen_pagetable_p2m_setup();
1288 
1289 	/* Allocate and initialize top and mid mfn levels for p2m structure */
1290 	xen_build_mfn_list_list();
1291 
1292 	/* Remap memory freed due to conflicts with E820 map */
1293 	xen_remap_memory();
1294 	xen_setup_mfn_list_list();
1295 }
1296 static void xen_write_cr2(unsigned long cr2)
1297 {
1298 	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1299 }
1300 
1301 static unsigned long xen_read_cr2(void)
1302 {
1303 	return this_cpu_read(xen_vcpu)->arch.cr2;
1304 }
1305 
1306 unsigned long xen_read_cr2_direct(void)
1307 {
1308 	return this_cpu_read(xen_vcpu_info.arch.cr2);
1309 }
1310 
1311 static noinline void xen_flush_tlb(void)
1312 {
1313 	struct mmuext_op *op;
1314 	struct multicall_space mcs;
1315 
1316 	preempt_disable();
1317 
1318 	mcs = xen_mc_entry(sizeof(*op));
1319 
1320 	op = mcs.args;
1321 	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1322 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1323 
1324 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1325 
1326 	preempt_enable();
1327 }
1328 
1329 static void xen_flush_tlb_one_user(unsigned long addr)
1330 {
1331 	struct mmuext_op *op;
1332 	struct multicall_space mcs;
1333 
1334 	trace_xen_mmu_flush_tlb_one_user(addr);
1335 
1336 	preempt_disable();
1337 
1338 	mcs = xen_mc_entry(sizeof(*op));
1339 	op = mcs.args;
1340 	op->cmd = MMUEXT_INVLPG_LOCAL;
1341 	op->arg1.linear_addr = addr & PAGE_MASK;
1342 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1343 
1344 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1345 
1346 	preempt_enable();
1347 }
1348 
1349 static void xen_flush_tlb_others(const struct cpumask *cpus,
1350 				 const struct flush_tlb_info *info)
1351 {
1352 	struct {
1353 		struct mmuext_op op;
1354 		DECLARE_BITMAP(mask, NR_CPUS);
1355 	} *args;
1356 	struct multicall_space mcs;
1357 	const size_t mc_entry_size = sizeof(args->op) +
1358 		sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
1359 
1360 	trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
1361 
1362 	if (cpumask_empty(cpus))
1363 		return;		/* nothing to do */
1364 
1365 	mcs = xen_mc_entry(mc_entry_size);
1366 	args = mcs.args;
1367 	args->op.arg2.vcpumask = to_cpumask(args->mask);
1368 
1369 	/* Remove us, and any offline CPUs. */
1370 	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1371 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1372 
1373 	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1374 	if (info->end != TLB_FLUSH_ALL &&
1375 	    (info->end - info->start) <= PAGE_SIZE) {
1376 		args->op.cmd = MMUEXT_INVLPG_MULTI;
1377 		args->op.arg1.linear_addr = info->start;
1378 	}
1379 
1380 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1381 
1382 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1383 }
1384 
1385 static unsigned long xen_read_cr3(void)
1386 {
1387 	return this_cpu_read(xen_cr3);
1388 }
1389 
1390 static void set_current_cr3(void *v)
1391 {
1392 	this_cpu_write(xen_current_cr3, (unsigned long)v);
1393 }
1394 
1395 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1396 {
1397 	struct mmuext_op op;
1398 	unsigned long mfn;
1399 
1400 	trace_xen_mmu_write_cr3(kernel, cr3);
1401 
1402 	if (cr3)
1403 		mfn = pfn_to_mfn(PFN_DOWN(cr3));
1404 	else
1405 		mfn = 0;
1406 
1407 	WARN_ON(mfn == 0 && kernel);
1408 
1409 	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1410 	op.arg1.mfn = mfn;
1411 
1412 	xen_extend_mmuext_op(&op);
1413 
1414 	if (kernel) {
1415 		this_cpu_write(xen_cr3, cr3);
1416 
1417 		/* Update xen_current_cr3 once the batch has actually
1418 		   been submitted. */
1419 		xen_mc_callback(set_current_cr3, (void *)cr3);
1420 	}
1421 }
1422 static void xen_write_cr3(unsigned long cr3)
1423 {
1424 	BUG_ON(preemptible());
1425 
1426 	xen_mc_batch();  /* disables interrupts */
1427 
1428 	/* Update while interrupts are disabled, so it's atomic with
1429 	   respect to ipis */
1430 	this_cpu_write(xen_cr3, cr3);
1431 
1432 	__xen_write_cr3(true, cr3);
1433 
1434 #ifdef CONFIG_X86_64
1435 	{
1436 		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1437 		if (user_pgd)
1438 			__xen_write_cr3(false, __pa(user_pgd));
1439 		else
1440 			__xen_write_cr3(false, 0);
1441 	}
1442 #endif
1443 
1444 	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
1445 }
1446 
1447 #ifdef CONFIG_X86_64
1448 /*
1449  * At the start of the day - when Xen launches a guest, it has already
1450  * built pagetables for the guest. We diligently look over them
1451  * in xen_setup_kernel_pagetable and graft them as appropriate into the
1452  * init_top_pgt and its friends. Then when we are happy we load
1453  * the new init_top_pgt - and continue on.
1454  *
1455  * The generic code starts (start_kernel) and 'init_mem_mapping' sets
1456  * up the rest of the pagetables. When it has completed it loads the cr3.
1457  * N.B. that baremetal would start at 'start_kernel' (and the early
1458  * #PF handler would create bootstrap pagetables) - so we are running
1459  * with the same assumptions as what to do when write_cr3 is executed
1460  * at this point.
1461  *
1462  * Since there are no user-page tables at all, we have two variants
1463  * of xen_write_cr3 - the early bootup (this one), and the late one
1464  * (xen_write_cr3). The reason we have to do that is that in 64-bit
1465  * the Linux kernel and user-space are both in ring 3 while the
1466  * hypervisor is in ring 0.
1467  */
1468 static void __init xen_write_cr3_init(unsigned long cr3)
1469 {
1470 	BUG_ON(preemptible());
1471 
1472 	xen_mc_batch();  /* disables interrupts */
1473 
1474 	/* Update while interrupts are disabled, so it's atomic with
1475 	   respect to ipis */
1476 	this_cpu_write(xen_cr3, cr3);
1477 
1478 	__xen_write_cr3(true, cr3);
1479 
1480 	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
1481 }
1482 #endif
1483 
1484 static int xen_pgd_alloc(struct mm_struct *mm)
1485 {
1486 	pgd_t *pgd = mm->pgd;
1487 	int ret = 0;
1488 
1489 	BUG_ON(PagePinned(virt_to_page(pgd)));
1490 
1491 #ifdef CONFIG_X86_64
1492 	{
1493 		struct page *page = virt_to_page(pgd);
1494 		pgd_t *user_pgd;
1495 
1496 		BUG_ON(page->private != 0);
1497 
1498 		ret = -ENOMEM;
1499 
1500 		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1501 		page->private = (unsigned long)user_pgd;
1502 
1503 		if (user_pgd != NULL) {
1504 #ifdef CONFIG_X86_VSYSCALL_EMULATION
1505 			user_pgd[pgd_index(VSYSCALL_ADDR)] =
1506 				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1507 #endif
1508 			ret = 0;
1509 		}
1510 
1511 		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1512 	}
1513 #endif
1514 	return ret;
1515 }
1516 
1517 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1518 {
1519 #ifdef CONFIG_X86_64
1520 	pgd_t *user_pgd = xen_get_user_pgd(pgd);
1521 
1522 	if (user_pgd)
1523 		free_page((unsigned long)user_pgd);
1524 #endif
1525 }
1526 
1527 /*
1528  * Init-time set_pte while constructing initial pagetables, which
1529  * doesn't allow RO page table pages to be remapped RW.
1530  *
1531  * If there is no MFN for this PFN then this page is initially
1532  * ballooned out so clear the PTE (as in decrease_reservation() in
1533  * drivers/xen/balloon.c).
1534  *
1535  * Many of these PTE updates are done on unpinned and writable pages
1536  * and doing a hypercall for these is unnecessary and expensive.  At
1537  * this point it is not possible to tell if a page is pinned or not,
1538  * so always write the PTE directly and rely on Xen trapping and
1539  * emulating any updates as necessary.
1540  */
1541 __visible pte_t xen_make_pte_init(pteval_t pte)
1542 {
1543 #ifdef CONFIG_X86_64
1544 	unsigned long pfn;
1545 
1546 	/*
1547 	 * Pages belonging to the initial p2m list mapped outside the default
1548 	 * address range must be mapped read-only. This region contains the
1549 	 * page tables for mapping the p2m list, too, and page tables MUST be
1550 	 * mapped read-only.
1551 	 */
1552 	pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1553 	if (xen_start_info->mfn_list < __START_KERNEL_map &&
1554 	    pfn >= xen_start_info->first_p2m_pfn &&
1555 	    pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1556 		pte &= ~_PAGE_RW;
1557 #endif
1558 	pte = pte_pfn_to_mfn(pte);
1559 	return native_make_pte(pte);
1560 }
1561 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
1562 
1563 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1564 {
1565 #ifdef CONFIG_X86_32
1566 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
1567 	if (pte_mfn(pte) != INVALID_P2M_ENTRY
1568 	    && pte_val_ma(*ptep) & _PAGE_PRESENT)
1569 		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1570 			       pte_val_ma(pte));
1571 #endif
1572 	__xen_set_pte(ptep, pte);
1573 }
1574 
1575 /* Early in boot, while setting up the initial pagetable, assume
1576    everything is pinned. */
1577 static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1578 {
1579 #ifdef CONFIG_FLATMEM
1580 	BUG_ON(mem_map);	/* should only be used early */
1581 #endif
1582 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1583 	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1584 }
1585 
1586 /* Used for pmd and pud */
1587 static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1588 {
1589 #ifdef CONFIG_FLATMEM
1590 	BUG_ON(mem_map);	/* should only be used early */
1591 #endif
1592 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1593 }
1594 
1595 /* Early release_pte assumes that all pts are pinned, since there's
1596    only init_mm and anything attached to that is pinned. */
1597 static void __init xen_release_pte_init(unsigned long pfn)
1598 {
1599 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1600 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1601 }
1602 
1603 static void __init xen_release_pmd_init(unsigned long pfn)
1604 {
1605 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1606 }
1607 
1608 static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1609 {
1610 	struct multicall_space mcs;
1611 	struct mmuext_op *op;
1612 
1613 	mcs = __xen_mc_entry(sizeof(*op));
1614 	op = mcs.args;
1615 	op->cmd = cmd;
1616 	op->arg1.mfn = pfn_to_mfn(pfn);
1617 
1618 	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1619 }
1620 
1621 static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1622 {
1623 	struct multicall_space mcs;
1624 	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1625 
1626 	mcs = __xen_mc_entry(0);
1627 	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1628 				pfn_pte(pfn, prot), 0);
1629 }
1630 
1631 /* This needs to make sure the new pte page is pinned iff it's being
1632    attached to a pinned pagetable. */
1633 static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1634 				    unsigned level)
1635 {
1636 	bool pinned = xen_page_pinned(mm->pgd);
1637 
1638 	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1639 
1640 	if (pinned) {
1641 		struct page *page = pfn_to_page(pfn);
1642 
1643 		if (static_branch_likely(&xen_struct_pages_ready))
1644 			SetPagePinned(page);
1645 
1646 		if (!PageHighMem(page)) {
1647 			xen_mc_batch();
1648 
1649 			__set_pfn_prot(pfn, PAGE_KERNEL_RO);
1650 
1651 			if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1652 				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1653 
1654 			xen_mc_issue(PARAVIRT_LAZY_MMU);
1655 		} else {
1656 			/* make sure there are no stray mappings of
1657 			   this page */
1658 			kmap_flush_unused();
1659 		}
1660 	}
1661 }
1662 
1663 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1664 {
1665 	xen_alloc_ptpage(mm, pfn, PT_PTE);
1666 }
1667 
1668 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1669 {
1670 	xen_alloc_ptpage(mm, pfn, PT_PMD);
1671 }
1672 
1673 /* This should never happen until we're OK to use struct page */
1674 static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1675 {
1676 	struct page *page = pfn_to_page(pfn);
1677 	bool pinned = PagePinned(page);
1678 
1679 	trace_xen_mmu_release_ptpage(pfn, level, pinned);
1680 
1681 	if (pinned) {
1682 		if (!PageHighMem(page)) {
1683 			xen_mc_batch();
1684 
1685 			if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1686 				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1687 
1688 			__set_pfn_prot(pfn, PAGE_KERNEL);
1689 
1690 			xen_mc_issue(PARAVIRT_LAZY_MMU);
1691 		}
1692 		ClearPagePinned(page);
1693 	}
1694 }
1695 
1696 static void xen_release_pte(unsigned long pfn)
1697 {
1698 	xen_release_ptpage(pfn, PT_PTE);
1699 }
1700 
1701 static void xen_release_pmd(unsigned long pfn)
1702 {
1703 	xen_release_ptpage(pfn, PT_PMD);
1704 }
1705 
1706 #ifdef CONFIG_X86_64
1707 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1708 {
1709 	xen_alloc_ptpage(mm, pfn, PT_PUD);
1710 }
1711 
1712 static void xen_release_pud(unsigned long pfn)
1713 {
1714 	xen_release_ptpage(pfn, PT_PUD);
1715 }
1716 #endif
1717 
1718 void __init xen_reserve_top(void)
1719 {
1720 #ifdef CONFIG_X86_32
1721 	unsigned long top = HYPERVISOR_VIRT_START;
1722 	struct xen_platform_parameters pp;
1723 
1724 	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1725 		top = pp.virt_start;
1726 
1727 	reserve_top_address(-top);
1728 #endif	/* CONFIG_X86_32 */
1729 }
1730 
1731 /*
1732  * Like __va(), but returns the address in the kernel mapping (which is
1733  * all we have until the physical memory mapping has been set up).
1734  */
1735 static void * __init __ka(phys_addr_t paddr)
1736 {
1737 #ifdef CONFIG_X86_64
1738 	return (void *)(paddr + __START_KERNEL_map);
1739 #else
1740 	return __va(paddr);
1741 #endif
1742 }
1743 
1744 /* Convert a machine address to physical address */
1745 static unsigned long __init m2p(phys_addr_t maddr)
1746 {
1747 	phys_addr_t paddr;
1748 
1749 	maddr &= XEN_PTE_MFN_MASK;
1750 	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1751 
1752 	return paddr;
1753 }
1754 
1755 /* Convert a machine address to kernel virtual */
1756 static void * __init m2v(phys_addr_t maddr)
1757 {
1758 	return __ka(m2p(maddr));
1759 }
1760 
1761 /* Set the page permissions on identity-mapped pages */
1762 static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1763 				       unsigned long flags)
1764 {
1765 	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1766 	pte_t pte = pfn_pte(pfn, prot);
1767 
1768 	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1769 		BUG();
1770 }
1771 static void __init set_page_prot(void *addr, pgprot_t prot)
1772 {
1773 	return set_page_prot_flags(addr, prot, UVMF_NONE);
1774 }
1775 #ifdef CONFIG_X86_32
1776 static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1777 {
1778 	unsigned pmdidx, pteidx;
1779 	unsigned ident_pte;
1780 	unsigned long pfn;
1781 
1782 	level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1783 				      PAGE_SIZE);
1784 
1785 	ident_pte = 0;
1786 	pfn = 0;
1787 	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1788 		pte_t *pte_page;
1789 
1790 		/* Reuse or allocate a page of ptes */
1791 		if (pmd_present(pmd[pmdidx]))
1792 			pte_page = m2v(pmd[pmdidx].pmd);
1793 		else {
1794 			/* Check for free pte pages */
1795 			if (ident_pte == LEVEL1_IDENT_ENTRIES)
1796 				break;
1797 
1798 			pte_page = &level1_ident_pgt[ident_pte];
1799 			ident_pte += PTRS_PER_PTE;
1800 
1801 			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1802 		}
1803 
1804 		/* Install mappings */
1805 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1806 			pte_t pte;
1807 
1808 			if (pfn > max_pfn_mapped)
1809 				max_pfn_mapped = pfn;
1810 
1811 			if (!pte_none(pte_page[pteidx]))
1812 				continue;
1813 
1814 			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1815 			pte_page[pteidx] = pte;
1816 		}
1817 	}
1818 
1819 	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1820 		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1821 
1822 	set_page_prot(pmd, PAGE_KERNEL_RO);
1823 }
1824 #endif
1825 void __init xen_setup_machphys_mapping(void)
1826 {
1827 	struct xen_machphys_mapping mapping;
1828 
1829 	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1830 		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1831 		machine_to_phys_nr = mapping.max_mfn + 1;
1832 	} else {
1833 		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1834 	}
1835 #ifdef CONFIG_X86_32
1836 	WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1837 		< machine_to_phys_mapping);
1838 #endif
1839 }
1840 
1841 #ifdef CONFIG_X86_64
1842 static void __init convert_pfn_mfn(void *v)
1843 {
1844 	pte_t *pte = v;
1845 	int i;
1846 
1847 	/* All levels are converted the same way, so just treat them
1848 	   as ptes. */
1849 	for (i = 0; i < PTRS_PER_PTE; i++)
1850 		pte[i] = xen_make_pte(pte[i].pte);
1851 }
1852 static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1853 				 unsigned long addr)
1854 {
1855 	if (*pt_base == PFN_DOWN(__pa(addr))) {
1856 		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1857 		clear_page((void *)addr);
1858 		(*pt_base)++;
1859 	}
1860 	if (*pt_end == PFN_DOWN(__pa(addr))) {
1861 		set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1862 		clear_page((void *)addr);
1863 		(*pt_end)--;
1864 	}
1865 }
1866 /*
1867  * Set up the initial kernel pagetable.
1868  *
1869  * We can construct this by grafting the Xen provided pagetable into
1870  * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
1871  * level2_ident_pgt, and level2_kernel_pgt.  This means that only the
1872  * kernel has a physical mapping to start with - but that's enough to
1873  * get __va working.  We need to fill in the rest of the physical
1874  * mapping once some sort of allocator has been set up.
1875  */
1876 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1877 {
1878 	pud_t *l3;
1879 	pmd_t *l2;
1880 	unsigned long addr[3];
1881 	unsigned long pt_base, pt_end;
1882 	unsigned i;
1883 
1884 	/* max_pfn_mapped is the last pfn mapped in the initial memory
1885 	 * mappings. Considering that on Xen after the kernel mappings we
1886 	 * have the mappings of some pages that don't exist in pfn space, we
1887 	 * set max_pfn_mapped to the last real pfn mapped. */
1888 	if (xen_start_info->mfn_list < __START_KERNEL_map)
1889 		max_pfn_mapped = xen_start_info->first_p2m_pfn;
1890 	else
1891 		max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1892 
1893 	pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1894 	pt_end = pt_base + xen_start_info->nr_pt_frames;
1895 
1896 	/* Zap identity mapping */
1897 	init_top_pgt[0] = __pgd(0);
1898 
1899 	/* Pre-constructed entries are in pfn, so convert to mfn */
1900 	/* L4[272] -> level3_ident_pgt  */
1901 	/* L4[511] -> level3_kernel_pgt */
1902 	convert_pfn_mfn(init_top_pgt);
1903 
1904 	/* L3_i[0] -> level2_ident_pgt */
1905 	convert_pfn_mfn(level3_ident_pgt);
1906 	/* L3_k[510] -> level2_kernel_pgt */
1907 	/* L3_k[511] -> level2_fixmap_pgt */
1908 	convert_pfn_mfn(level3_kernel_pgt);
1909 
1910 	/* L3_k[511][506] -> level1_fixmap_pgt */
1911 	convert_pfn_mfn(level2_fixmap_pgt);
1912 
1913 	/* We get [511][510] and have Xen's version of level2_kernel_pgt */
1914 	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1915 	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1916 
1917 	addr[0] = (unsigned long)pgd;
1918 	addr[1] = (unsigned long)l3;
1919 	addr[2] = (unsigned long)l2;
1920 	/* Graft it onto L4[272][0]. Note that we are creating an aliasing
1921 	 * problem: both L4[272][0] and L4[511][510] have entries that point
1922 	 * to the same L2 (PMD) tables, so a modification made through the
1923 	 * __va space will also show up in the __ka space! (But if you just
1924 	 * modify the PMD table to point to other PTEs or to none, you are
1925 	 * OK - which is what cleanup_highmap does.) */
1926 	copy_page(level2_ident_pgt, l2);
1927 	/* Graft it onto L4[511][510] */
1928 	copy_page(level2_kernel_pgt, l2);
1929 
1930 	/*
1931 	 * Zap execute permission from the ident map. Due to the sharing of
1932 	 * L1 entries we need to do this in the L2.
1933 	 */
1934 	if (__supported_pte_mask & _PAGE_NX) {
1935 		for (i = 0; i < PTRS_PER_PMD; ++i) {
1936 			if (pmd_none(level2_ident_pgt[i]))
1937 				continue;
1938 			level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
1939 		}
1940 	}
1941 
1942 	/* Copy the initial P->M table mappings if necessary. */
1943 	i = pgd_index(xen_start_info->mfn_list);
1944 	if (i && i < pgd_index(__START_KERNEL_map))
1945 		init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1946 
1947 	/* Make pagetable pieces RO */
1948 	set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
1949 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1950 	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1951 	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1952 	set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1953 	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1954 	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1955 	set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
1956 
1957 	/* Pin down new L4 */
1958 	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1959 			  PFN_DOWN(__pa_symbol(init_top_pgt)));
1960 
1961 	/* Unpin Xen-provided one */
1962 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1963 
1964 	/*
1965 	 * At this stage there can be no user pgd, and no page structure to
1966 	 * attach it to, so just set the kernel pgd.
1967 	 */
1968 	xen_mc_batch();
1969 	__xen_write_cr3(true, __pa(init_top_pgt));
1970 	xen_mc_issue(PARAVIRT_LAZY_CPU);
1971 
1972 	/* We can't easily rip out the L3 and L2 pages, as the Xen pagetables
1973 	 * are laid out as [L4], [L1], [L2], [L3], [L1], [L1] ... for the
1974 	 * initial domain, while for guests started by the toolstack the order
1975 	 * is [L4], [L3], [L2], [L1], [L1], ... So for dom0 we can only rip
1976 	 * out the [L4] (pgd), but for guests we shave off three pages.
1977 	 */
1978 	for (i = 0; i < ARRAY_SIZE(addr); i++)
1979 		check_pt_base(&pt_base, &pt_end, addr[i]);
1980 
1981 	/* The now smaller (by up to three pages) Xen pagetable we keep using */
1982 	xen_pt_base = PFN_PHYS(pt_base);
1983 	xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
1984 	memblock_reserve(xen_pt_base, xen_pt_size);
1985 
1986 	/* Revector the xen_start_info */
1987 	xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1988 }
1989 
1990 /*
1991  * Read a value from a physical address.
1992  */
1993 static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
1994 {
1995 	unsigned long *vaddr;
1996 	unsigned long val;
1997 
1998 	vaddr = early_memremap_ro(addr, sizeof(val));
1999 	val = *vaddr;
2000 	early_memunmap(vaddr, sizeof(val));
2001 	return val;
2002 }
2003 
2004 /*
2005  * Translate a virtual address to a physical one without relying on mapped
2006  * page tables. Don't rely on big pages being aligned in (guest) physical
2007  * space!
2008  */
2009 static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
2010 {
2011 	phys_addr_t pa;
2012 	pgd_t pgd;
2013 	pud_t pud;
2014 	pmd_t pmd;
2015 	pte_t pte;
2016 
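	/*
	 * Walk the current page tables by physical address, reading each
	 * level via xen_read_phys_ulong() so no pre-existing virtual
	 * mapping of the tables is needed.
	 */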
2017 	pa = read_cr3_pa();
2018 	pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
2019 						       sizeof(pgd)));
2020 	if (!pgd_present(pgd))
2021 		return 0;
2022 
2023 	pa = pgd_val(pgd) & PTE_PFN_MASK;
2024 	pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
2025 						       sizeof(pud)));
2026 	if (!pud_present(pud))
2027 		return 0;
2028 	pa = pud_val(pud) & PTE_PFN_MASK;
2029 	if (pud_large(pud))
2030 		return pa + (vaddr & ~PUD_MASK);
2031 
2032 	pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
2033 						       sizeof(pmd)));
2034 	if (!pmd_present(pmd))
2035 		return 0;
2036 	pa = pmd_val(pmd) & PTE_PFN_MASK;
2037 	if (pmd_large(pmd))
2038 		return pa + (vaddr & ~PMD_MASK);
2039 
2040 	pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
2041 						       sizeof(pte)));
2042 	if (!pte_present(pte))
2043 		return 0;
2044 	pa = pte_pfn(pte) << PAGE_SHIFT;
2045 
2046 	return pa | (vaddr & ~PAGE_MASK);
2047 }
2048 
2049 /*
2050  * Find a new area for the hypervisor-supplied p2m list and relocate the p2m to
2051  * this area.
2052  */
2053 void __init xen_relocate_p2m(void)
2054 {
2055 	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
2056 	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
2057 	int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
2058 	pte_t *pt;
2059 	pmd_t *pmd;
2060 	pud_t *pud;
2061 	pgd_t *pgd;
2062 	unsigned long *new_p2m;
2063 
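	/*
	 * n_pte is the number of data pages needed for the p2m list itself;
	 * n_pt, n_pmd and n_pud are the PTE, PMD and PUD pages required to
	 * map those data pages.  The new area must hold all of them.
	 */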
2064 	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
2065 	n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
2066 	n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
2067 	n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
2068 	n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
2069 	n_frames = n_pte + n_pt + n_pmd + n_pud;
2070 
2071 	new_area = xen_find_free_area(PFN_PHYS(n_frames));
2072 	if (!new_area) {
2073 		xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
2074 		BUG();
2075 	}
2076 
2077 	/*
2078 	 * Set up the page tables for addressing the new p2m list.
2079 	 * We have asked the hypervisor to map the p2m list at the user address
2080 	 * PUD_SIZE. It may have done so, or it may have used a kernel space
2081 	 * address depending on the Xen version.
2082 	 * To avoid any possible virtual address collision, just use
2083 	 * 2 * PGDIR_SIZE for the new area.
2084 	 */
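	/*
	 * Lay out the new area as: PUD pages, then PMD pages, then PTE
	 * pages, followed by the p2m data itself starting at p2m_pfn.
	 */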
2085 	pud_phys = new_area;
2086 	pmd_phys = pud_phys + PFN_PHYS(n_pud);
2087 	pt_phys = pmd_phys + PFN_PHYS(n_pmd);
2088 	p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
2089 
2090 	pgd = __va(read_cr3_pa());
2091 	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
2092 	for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
2093 		pud = early_memremap(pud_phys, PAGE_SIZE);
2094 		clear_page(pud);
2095 		for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
2096 				idx_pmd++) {
2097 			pmd = early_memremap(pmd_phys, PAGE_SIZE);
2098 			clear_page(pmd);
2099 			for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
2100 					idx_pt++) {
2101 				pt = early_memremap(pt_phys, PAGE_SIZE);
2102 				clear_page(pt);
2103 				for (idx_pte = 0;
2104 						idx_pte < min(n_pte, PTRS_PER_PTE);
2105 						idx_pte++) {
2106 					set_pte(pt + idx_pte,
2107 							pfn_pte(p2m_pfn, PAGE_KERNEL));
2108 					p2m_pfn++;
2109 				}
2110 				n_pte -= PTRS_PER_PTE;
2111 				early_memunmap(pt, PAGE_SIZE);
2112 				make_lowmem_page_readonly(__va(pt_phys));
2113 				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
2114 						PFN_DOWN(pt_phys));
2115 				set_pmd(pmd + idx_pt,
2116 						__pmd(_PAGE_TABLE | pt_phys));
2117 				pt_phys += PAGE_SIZE;
2118 			}
2119 			n_pt -= PTRS_PER_PMD;
2120 			early_memunmap(pmd, PAGE_SIZE);
2121 			make_lowmem_page_readonly(__va(pmd_phys));
2122 			pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
2123 					PFN_DOWN(pmd_phys));
2124 			set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
2125 			pmd_phys += PAGE_SIZE;
2126 		}
2127 		n_pmd -= PTRS_PER_PUD;
2128 		early_memunmap(pud, PAGE_SIZE);
2129 		make_lowmem_page_readonly(__va(pud_phys));
2130 		pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
2131 		set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
2132 		pud_phys += PAGE_SIZE;
2133 	}
2134 
2135 	/* Now copy the old p2m info to the new area. */
2136 	memcpy(new_p2m, xen_p2m_addr, size);
2137 	xen_p2m_addr = new_p2m;
2138 
2139 	/* Release the old p2m list and set new list info. */
2140 	p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
2141 	BUG_ON(!p2m_pfn);
2142 	p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
2143 
2144 	if (xen_start_info->mfn_list < __START_KERNEL_map) {
2145 		pfn = xen_start_info->first_p2m_pfn;
2146 		pfn_end = xen_start_info->first_p2m_pfn +
2147 			  xen_start_info->nr_p2m_frames;
2148 		set_pgd(pgd + 1, __pgd(0));
2149 	} else {
2150 		pfn = p2m_pfn;
2151 		pfn_end = p2m_pfn_end;
2152 	}
2153 
2154 	memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
2155 	while (pfn < pfn_end) {
2156 		if (pfn == p2m_pfn) {
2157 			pfn = p2m_pfn_end;
2158 			continue;
2159 		}
2160 		make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
2161 		pfn++;
2162 	}
2163 
2164 	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
2165 	xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
2166 	xen_start_info->nr_p2m_frames = n_frames;
2167 }
2168 
2169 #else	/* !CONFIG_X86_64 */
2170 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2171 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2172 RESERVE_BRK(fixup_kernel_pmd, PAGE_SIZE);
2173 RESERVE_BRK(fixup_kernel_pte, PAGE_SIZE);
2174 
2175 static void __init xen_write_cr3_init(unsigned long cr3)
2176 {
2177 	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2178 
2179 	BUG_ON(read_cr3_pa() != __pa(initial_page_table));
2180 	BUG_ON(cr3 != __pa(swapper_pg_dir));
2181 
2182 	/*
2183 	 * We are switching to swapper_pg_dir for the first time (from
2184 	 * initial_page_table) and therefore need to mark that page
2185 	 * read-only and then pin it.
2186 	 *
2187 	 * Xen disallows sharing of kernel PMDs for PAE
2188 	 * guests. Therefore we must copy the kernel PMD from
2189 	 * initial_page_table into a new kernel PMD to be used in
2190 	 * swapper_pg_dir.
2191 	 */
2192 	swapper_kernel_pmd =
2193 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2194 	copy_page(swapper_kernel_pmd, initial_kernel_pmd);
2195 	swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2196 		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2197 	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2198 
2199 	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2200 	xen_write_cr3(cr3);
2201 	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2202 
2203 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2204 			  PFN_DOWN(__pa(initial_page_table)));
2205 	set_page_prot(initial_page_table, PAGE_KERNEL);
2206 	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2207 
2208 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
2209 }
2210 
2211 /*
2212  * For 32 bit domains xen_start_info->pt_base is the pgd address, which might
2213  * not be the first page table in the page table pool.
2214  * Iterate through the initial page tables to find the real page table base.
2215  */
2216 static phys_addr_t __init xen_find_pt_base(pmd_t *pmd)
2217 {
2218 	phys_addr_t pt_base, paddr;
2219 	unsigned pmdidx;
2220 
2221 	pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2222 
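	/*
	 * Page tables referenced by present, non-huge PMD entries may sit
	 * below the pgd; take the lowest physical address seen as the real
	 * base of the pool.
	 */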
2223 	for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2224 		if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2225 			paddr = m2p(pmd[pmdidx].pmd);
2226 			pt_base = min(pt_base, paddr);
2227 		}
2228 
2229 	return pt_base;
2230 }
2231 
2232 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
2233 {
2234 	pmd_t *kernel_pmd;
2235 
2236 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2237 
2238 	xen_pt_base = xen_find_pt_base(kernel_pmd);
2239 	xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
2240 
2241 	initial_kernel_pmd =
2242 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2243 
2244 	max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
2245 
2246 	copy_page(initial_kernel_pmd, kernel_pmd);
2247 
2248 	xen_map_identity_early(initial_kernel_pmd, max_pfn);
2249 
2250 	copy_page(initial_page_table, pgd);
2251 	initial_page_table[KERNEL_PGD_BOUNDARY] =
2252 		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2253 
2254 	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2255 	set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2256 	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2257 
2258 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2259 
2260 	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2261 			  PFN_DOWN(__pa(initial_page_table)));
2262 	xen_write_cr3(__pa(initial_page_table));
2263 
2264 	memblock_reserve(xen_pt_base, xen_pt_size);
2265 }
2266 #endif	/* CONFIG_X86_64 */
2267 
2268 void __init xen_reserve_special_pages(void)
2269 {
2270 	phys_addr_t paddr;
2271 
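	/*
	 * Keep the pages Xen handed us at boot - start_info, the xenstore
	 * page and, for domU, the console page - out of the memblock
	 * allocator so they are not reused as normal memory.
	 */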
2272 	memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2273 	if (xen_start_info->store_mfn) {
2274 		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2275 		memblock_reserve(paddr, PAGE_SIZE);
2276 	}
2277 	if (!xen_initial_domain()) {
2278 		paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
2279 		memblock_reserve(paddr, PAGE_SIZE);
2280 	}
2281 }
2282 
2283 void __init xen_pt_check_e820(void)
2284 {
2285 	if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
2286 		xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
2287 		BUG();
2288 	}
2289 }
2290 
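/*
 * Backing page for fixmap slots (local APIC, IO APIC) that are not mapped
 * to real hardware under Xen; filled with 0xff in xen_init_mmu_ops().
 */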
2291 static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2292 
2293 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2294 {
2295 	pte_t pte;
2296 
2297 	phys >>= PAGE_SHIFT;
2298 
2299 	switch (idx) {
2300 	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2301 #ifdef CONFIG_X86_32
2302 	case FIX_WP_TEST:
2303 # ifdef CONFIG_HIGHMEM
2304 	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2305 # endif
2306 #elif defined(CONFIG_X86_VSYSCALL_EMULATION)
2307 	case VSYSCALL_PAGE:
2308 #endif
2309 	case FIX_TEXT_POKE0:
2310 	case FIX_TEXT_POKE1:
2311 		/* All local page mappings */
2312 		pte = pfn_pte(phys, prot);
2313 		break;
2314 
2315 #ifdef CONFIG_X86_LOCAL_APIC
2316 	case FIX_APIC_BASE:	/* maps dummy local APIC */
2317 		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2318 		break;
2319 #endif
2320 
2321 #ifdef CONFIG_X86_IO_APIC
2322 	case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2323 		/*
2324 		 * We just don't map the IO APIC - all access is via
2325 		 * hypercalls.  Keep the address in the pte for reference.
2326 		 */
2327 		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2328 		break;
2329 #endif
2330 
2331 	case FIX_PARAVIRT_BOOTMAP:
2332 		/* This is an MFN, but it isn't an IO mapping from the
2333 		   IO domain */
2334 		pte = mfn_pte(phys, prot);
2335 		break;
2336 
2337 	default:
2338 		/* By default, set_fixmap is used for hardware mappings */
2339 		pte = mfn_pte(phys, prot);
2340 		break;
2341 	}
2342 
2343 	__native_set_fixmap(idx, pte);
2344 
2345 #ifdef CONFIG_X86_VSYSCALL_EMULATION
2346 	/* Replicate changes to map the vsyscall page into the user
2347 	   pagetable vsyscall mapping. */
2348 	if (idx == VSYSCALL_PAGE) {
2349 		unsigned long vaddr = __fix_to_virt(idx);
2350 		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2351 	}
2352 #endif
2353 }
2354 
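/*
 * Replace the boot-time mmu ops (the *_init and *_hyper variants) with the
 * final implementations once the kernel's normal allocator is available.
 */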
2355 static void __init xen_post_allocator_init(void)
2356 {
2357 	pv_mmu_ops.set_pte = xen_set_pte;
2358 	pv_mmu_ops.set_pmd = xen_set_pmd;
2359 	pv_mmu_ops.set_pud = xen_set_pud;
2360 #ifdef CONFIG_X86_64
2361 	pv_mmu_ops.set_p4d = xen_set_p4d;
2362 #endif
2363 
2364 	/* This will work as long as patching hasn't happened yet
2365 	   (which it hasn't) */
2366 	pv_mmu_ops.alloc_pte = xen_alloc_pte;
2367 	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2368 	pv_mmu_ops.release_pte = xen_release_pte;
2369 	pv_mmu_ops.release_pmd = xen_release_pmd;
2370 #ifdef CONFIG_X86_64
2371 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
2372 	pv_mmu_ops.release_pud = xen_release_pud;
2373 #endif
2374 	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2375 
2376 #ifdef CONFIG_X86_64
2377 	pv_mmu_ops.write_cr3 = &xen_write_cr3;
2378 #endif
2379 }
2380 
2381 static void xen_leave_lazy_mmu(void)
2382 {
2383 	preempt_disable();
2384 	xen_mc_flush();
2385 	paravirt_leave_lazy_mmu();
2386 	preempt_enable();
2387 }
2388 
2389 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2390 	.read_cr2 = xen_read_cr2,
2391 	.write_cr2 = xen_write_cr2,
2392 
2393 	.read_cr3 = xen_read_cr3,
2394 	.write_cr3 = xen_write_cr3_init,
2395 
2396 	.flush_tlb_user = xen_flush_tlb,
2397 	.flush_tlb_kernel = xen_flush_tlb,
2398 	.flush_tlb_one_user = xen_flush_tlb_one_user,
2399 	.flush_tlb_others = xen_flush_tlb_others,
2400 	.tlb_remove_table = tlb_remove_table,
2401 
2402 	.pgd_alloc = xen_pgd_alloc,
2403 	.pgd_free = xen_pgd_free,
2404 
2405 	.alloc_pte = xen_alloc_pte_init,
2406 	.release_pte = xen_release_pte_init,
2407 	.alloc_pmd = xen_alloc_pmd_init,
2408 	.release_pmd = xen_release_pmd_init,
2409 
2410 	.set_pte = xen_set_pte_init,
2411 	.set_pte_at = xen_set_pte_at,
2412 	.set_pmd = xen_set_pmd_hyper,
2413 
2414 	.ptep_modify_prot_start = __ptep_modify_prot_start,
2415 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
2416 
2417 	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
2418 	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2419 
2420 	.make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2421 	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2422 
2423 #ifdef CONFIG_X86_PAE
2424 	.set_pte_atomic = xen_set_pte_atomic,
2425 	.pte_clear = xen_pte_clear,
2426 	.pmd_clear = xen_pmd_clear,
2427 #endif	/* CONFIG_X86_PAE */
2428 	.set_pud = xen_set_pud_hyper,
2429 
2430 	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2431 	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2432 
2433 #ifdef CONFIG_X86_64
2434 	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
2435 	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
2436 	.set_p4d = xen_set_p4d_hyper,
2437 
2438 	.alloc_pud = xen_alloc_pmd_init,
2439 	.release_pud = xen_release_pmd_init,
2440 
2441 #if CONFIG_PGTABLE_LEVELS >= 5
2442 	.p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
2443 	.make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
2444 #endif
2445 #endif	/* CONFIG_X86_64 */
2446 
2447 	.activate_mm = xen_activate_mm,
2448 	.dup_mmap = xen_dup_mmap,
2449 	.exit_mmap = xen_exit_mmap,
2450 
2451 	.lazy_mode = {
2452 		.enter = paravirt_enter_lazy_mmu,
2453 		.leave = xen_leave_lazy_mmu,
2454 		.flush = paravirt_flush_lazy_mmu,
2455 	},
2456 
2457 	.set_fixmap = xen_set_fixmap,
2458 };
2459 
2460 void __init xen_init_mmu_ops(void)
2461 {
2462 	x86_init.paging.pagetable_init = xen_pagetable_init;
2463 	x86_init.hyper.init_after_bootmem = xen_after_bootmem;
2464 
2465 	pv_mmu_ops = xen_mmu_ops;
2466 
2467 	memset(dummy_mapping, 0xff, PAGE_SIZE);
2468 }
2469 
2470 /* Protected by xen_reservation_lock. */
2471 #define MAX_CONTIG_ORDER 9 /* 2MB */
2472 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2473 
2474 #define VOID_PTE (mfn_pte(0, __pgprot(0)))
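/*
 * Clear the PTEs covering 1 << order pages at vaddr in a single multicall
 * batch and mark the corresponding pfns as INVALID_P2M_ENTRY.  The old
 * mfns are recorded in in_frames and the pfns in out_frames, when the
 * respective array is non-NULL.
 */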
2475 static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2476 				unsigned long *in_frames,
2477 				unsigned long *out_frames)
2478 {
2479 	int i;
2480 	struct multicall_space mcs;
2481 
2482 	xen_mc_batch();
2483 	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2484 		mcs = __xen_mc_entry(0);
2485 
2486 		if (in_frames)
2487 			in_frames[i] = virt_to_mfn(vaddr);
2488 
2489 		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2490 		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2491 
2492 		if (out_frames)
2493 			out_frames[i] = virt_to_pfn(vaddr);
2494 	}
2495 	xen_mc_issue(0);
2496 }
2497 
2498 /*
2499  * Update the pfn-to-mfn mappings for a virtual address range, either to
2500  * point to an array of mfns, or contiguously from a single starting
2501  * mfn.
2502  */
2503 static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2504 				     unsigned long *mfns,
2505 				     unsigned long first_mfn)
2506 {
2507 	unsigned i, limit;
2508 	unsigned long mfn;
2509 
2510 	xen_mc_batch();
2511 
2512 	limit = 1u << order;
2513 	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2514 		struct multicall_space mcs;
2515 		unsigned flags;
2516 
2517 		mcs = __xen_mc_entry(0);
2518 		if (mfns)
2519 			mfn = mfns[i];
2520 		else
2521 			mfn = first_mfn + i;
2522 
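		/*
		 * Only flush on the final update: a single-page flush for
		 * order 0, a full TLB flush otherwise, on all vcpus.
		 */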
2523 		if (i < (limit - 1))
2524 			flags = 0;
2525 		else {
2526 			if (order == 0)
2527 				flags = UVMF_INVLPG | UVMF_ALL;
2528 			else
2529 				flags = UVMF_TLB_FLUSH | UVMF_ALL;
2530 		}
2531 
2532 		MULTI_update_va_mapping(mcs.mc, vaddr,
2533 				mfn_pte(mfn, PAGE_KERNEL), flags);
2534 
2535 		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2536 	}
2537 
2538 	xen_mc_issue(0);
2539 }
2540 
2541 /*
2542  * Perform the hypercall to exchange a region of our pfns to point to
2543  * memory with the required contiguous alignment.  Takes the pfns as
2544  * input, and populates mfns as output.
2545  *
2546  * Returns a success code indicating whether the hypervisor was able to
2547  * satisfy the request or not.
2548  */
2549 static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2550 			       unsigned long *pfns_in,
2551 			       unsigned long extents_out,
2552 			       unsigned int order_out,
2553 			       unsigned long *mfns_out,
2554 			       unsigned int address_bits)
2555 {
2556 	long rc;
2557 	int success;
2558 
2559 	struct xen_memory_exchange exchange = {
2560 		.in = {
2561 			.nr_extents   = extents_in,
2562 			.extent_order = order_in,
2563 			.extent_start = pfns_in,
2564 			.domid        = DOMID_SELF
2565 		},
2566 		.out = {
2567 			.nr_extents   = extents_out,
2568 			.extent_order = order_out,
2569 			.extent_start = mfns_out,
2570 			.address_bits = address_bits,
2571 			.domid        = DOMID_SELF
2572 		}
2573 	};
2574 
2575 	BUG_ON(extents_in << order_in != extents_out << order_out);
2576 
2577 	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2578 	success = (exchange.nr_exchanged == extents_in);
2579 
2580 	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2581 	BUG_ON(success && (rc != 0));
2582 
2583 	return success;
2584 }
2585 
2586 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
2587 				 unsigned int address_bits,
2588 				 dma_addr_t *dma_handle)
2589 {
2590 	unsigned long *in_frames = discontig_frames, out_frame;
2591 	unsigned long  flags;
2592 	int            success;
2593 	unsigned long vstart = (unsigned long)phys_to_virt(pstart);
2594 
2595 	/*
2596 	 * Currently an auto-translated guest will not perform I/O, nor will
2597 	 * it require PAE page directories below 4GB. Therefore any calls to
2598 	 * this function are redundant and can be ignored.
2599 	 */
2600 
2601 	if (unlikely(order > MAX_CONTIG_ORDER))
2602 		return -ENOMEM;
2603 
2604 	memset((void *) vstart, 0, PAGE_SIZE << order);
2605 
2606 	spin_lock_irqsave(&xen_reservation_lock, flags);
2607 
2608 	/* 1. Zap current PTEs, remembering MFNs. */
2609 	xen_zap_pfn_range(vstart, order, in_frames, NULL);
2610 
2611 	/* 2. Get a new contiguous memory extent. */
2612 	out_frame = virt_to_pfn(vstart);
2613 	success = xen_exchange_memory(1UL << order, 0, in_frames,
2614 				      1, order, &out_frame,
2615 				      address_bits);
2616 
2617 	/* 3. Map the new extent in place of old pages. */
2618 	if (success)
2619 		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2620 	else
2621 		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2622 
2623 	spin_unlock_irqrestore(&xen_reservation_lock, flags);
2624 
2625 	*dma_handle = virt_to_machine(vstart).maddr;
2626 	return success ? 0 : -ENOMEM;
2627 }
2628 EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2629 
2630 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
2631 {
2632 	unsigned long *out_frames = discontig_frames, in_frame;
2633 	unsigned long  flags;
2634 	int success;
2635 	unsigned long vstart;
2636 
2637 	if (unlikely(order > MAX_CONTIG_ORDER))
2638 		return;
2639 
2640 	vstart = (unsigned long)phys_to_virt(pstart);
2641 	memset((void *) vstart, 0, PAGE_SIZE << order);
2642 
2643 	spin_lock_irqsave(&xen_reservation_lock, flags);
2644 
2645 	/* 1. Find start MFN of contiguous extent. */
2646 	in_frame = virt_to_mfn(vstart);
2647 
2648 	/* 2. Zap current PTEs. */
2649 	xen_zap_pfn_range(vstart, order, NULL, out_frames);
2650 
2651 	/* 3. Do the exchange for non-contiguous MFNs. */
2652 	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2653 					0, out_frames, 0);
2654 
2655 	/* 4. Map new pages in place of old pages. */
2656 	if (success)
2657 		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2658 	else
2659 		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2660 
2661 	spin_unlock_irqrestore(&xen_reservation_lock, flags);
2662 }
2663 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2664 
2665 #ifdef CONFIG_KEXEC_CORE
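/*
 * PV domains report the machine address of the vmcoreinfo note; everything
 * else uses the pseudo-physical address.
 */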
2666 phys_addr_t paddr_vmcoreinfo_note(void)
2667 {
2668 	if (xen_pv_domain())
2669 		return virt_to_machine(vmcoreinfo_note).maddr;
2670 	else
2671 		return __pa(vmcoreinfo_note);
2672 }
2673 #endif /* CONFIG_KEXEC_CORE */
2674