xref: /openbmc/linux/arch/x86/xen/mmu.c (revision 78c99ba1)
1 /*
2  * Xen mmu operations
3  *
4  * This file contains the various mmu fetch and update operations.
5  * The most important job they must perform is the mapping between the
6  * domain's pfns and the overall machine mfns.
7  *
8  * Xen allows guests to directly update the pagetable, in a controlled
9  * fashion.  In other words, the guest modifies the same pagetable
10  * that the CPU actually uses, which eliminates the overhead of having
11  * a separate shadow pagetable.
12  *
13  * In order to allow this, it falls on the guest domain to map its
14  * notion of a "physical" pfn - which is just a domain-local linear
15  * address - into a real "machine address" which the CPU's MMU can
16  * use.
17  *
18  * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
19  * inserted directly into the pagetable.  When creating a new
20  * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
21  * when reading the content back with __(pgd|pmd|pte)_val, it converts
22  * the mfn back into a pfn.
23  *
24  * The other constraint is that all pages which make up a pagetable
25  * must be mapped read-only in the guest.  This prevents uncontrolled
26  * guest updates to the pagetable.  Xen strictly enforces this, and
27  * will disallow any pagetable update which will end up mapping a
28  * pagetable page RW, and will disallow using any writable page as a
29  * pagetable.
30  *
31  * Naively, when loading %cr3 with the base of a new pagetable, Xen
32  * would need to validate the whole pagetable before going on.
33  * Naturally, this is quite slow.  The solution is to "pin" a
34  * pagetable, which enforces all the constraints on the pagetable even
35  * when it is not actively in use.  This menas that Xen can be assured
36  * that it is still valid when you do load it into %cr3, and doesn't
37  * need to revalidate it.
38  *
39  * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
40  */
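
/*
 * Illustrative sketch of the pfn<->mfn conversion described above, in
 * terms of the helpers defined further down in this file
 * (xen_make_pte() and xen_pte_val()).  Assumes a pfn that is backed by
 * a real machine frame; not used by any code here:
 *
 *	pteval_t v = (pfn << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL);
 *	pte_t pte  = xen_make_pte(v);	    stores pfn_to_mfn(pfn) in the pte
 *	v          = xen_pte_val(pte);	    converts the mfn back to the pfn
 */
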
41 #include <linux/sched.h>
42 #include <linux/highmem.h>
43 #include <linux/debugfs.h>
44 #include <linux/bug.h>
45 #include <linux/module.h>
46 
47 #include <asm/pgtable.h>
48 #include <asm/tlbflush.h>
49 #include <asm/fixmap.h>
50 #include <asm/mmu_context.h>
51 #include <asm/setup.h>
52 #include <asm/paravirt.h>
53 #include <asm/linkage.h>
54 
55 #include <asm/xen/hypercall.h>
56 #include <asm/xen/hypervisor.h>
57 
58 #include <xen/page.h>
59 #include <xen/interface/xen.h>
60 #include <xen/interface/version.h>
61 #include <xen/hvc-console.h>
62 
63 #include "multicalls.h"
64 #include "mmu.h"
65 #include "debugfs.h"
66 
67 #define MMU_UPDATE_HISTO	30
68 
69 #ifdef CONFIG_XEN_DEBUG_FS
70 
71 static struct {
72 	u32 pgd_update;
73 	u32 pgd_update_pinned;
74 	u32 pgd_update_batched;
75 
76 	u32 pud_update;
77 	u32 pud_update_pinned;
78 	u32 pud_update_batched;
79 
80 	u32 pmd_update;
81 	u32 pmd_update_pinned;
82 	u32 pmd_update_batched;
83 
84 	u32 pte_update;
85 	u32 pte_update_pinned;
86 	u32 pte_update_batched;
87 
88 	u32 mmu_update;
89 	u32 mmu_update_extended;
90 	u32 mmu_update_histo[MMU_UPDATE_HISTO];
91 
92 	u32 prot_commit;
93 	u32 prot_commit_batched;
94 
95 	u32 set_pte_at;
96 	u32 set_pte_at_batched;
97 	u32 set_pte_at_pinned;
98 	u32 set_pte_at_current;
99 	u32 set_pte_at_kernel;
100 } mmu_stats;
101 
102 static u8 zero_stats;
103 
104 static inline void check_zero(void)
105 {
106 	if (unlikely(zero_stats)) {
107 		memset(&mmu_stats, 0, sizeof(mmu_stats));
108 		zero_stats = 0;
109 	}
110 }
111 
112 #define ADD_STATS(elem, val)			\
113 	do { check_zero(); mmu_stats.elem += (val); } while(0)
114 
115 #else  /* !CONFIG_XEN_DEBUG_FS */
116 
117 #define ADD_STATS(elem, val)	do { (void)(val); } while(0)
118 
119 #endif /* CONFIG_XEN_DEBUG_FS */
120 
121 
122 /*
123  * Identity map, in addition to plain kernel map.  This needs to be
124  * large enough to allocate the page-table pages needed to map the rest.
125  * Each page can map 2MB.
126  */
127 static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
128 
129 #ifdef CONFIG_X86_64
130 /* l3 pud for userspace vsyscall mapping */
131 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
132 #endif /* CONFIG_X86_64 */
133 
134 /*
135  * Note about cr3 (pagetable base) values:
136  *
137  * xen_cr3 contains the current logical cr3 value; it contains the
138  * last set cr3.  This may not be the current effective cr3, because
139  * its update may be being lazily deferred.  However, a vcpu looking
140  * at its own cr3 can use this value knowing that it everything will
141  * be self-consistent.
142  *
143  * xen_current_cr3 contains the actual vcpu cr3; it is set once the
144  * hypercall to set the vcpu cr3 is complete (so it may be a little
145  * out of date, but it will never be set early).  If one vcpu is
146  * looking at another vcpu's cr3 value, it should use this variable.
147  */
148 DEFINE_PER_CPU(unsigned long, xen_cr3);	 /* cr3 stored as physaddr */
149 DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
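
/*
 * Illustrative rule of thumb for the two per-cpu variables above: a
 * vcpu inspecting itself may use xen_cr3, but any cross-vcpu check
 * should use xen_current_cr3, as the stale-reference test in
 * drop_other_mm_ref() below does:
 *
 *	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
 *		load_cr3(swapper_pg_dir);
 */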
150 
151 
152 /*
153  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
154  * redzone above it, so round it up to a PGD boundary.
155  */
156 #define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
157 
158 
159 #define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
160 #define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
161 
162 /* Placeholder for holes in the address space */
163 static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
164 		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
165 
166  /* Array of pointers to pages containing p2m entries */
167 static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
168 		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
169 
170 /* Arrays of p2m arrays expressed in mfns used for save/restore */
171 static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
172 
173 static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
174 	__page_aligned_bss;
175 
176 static inline unsigned p2m_top_index(unsigned long pfn)
177 {
178 	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
179 	return pfn / P2M_ENTRIES_PER_PAGE;
180 }
181 
182 static inline unsigned p2m_index(unsigned long pfn)
183 {
184 	return pfn % P2M_ENTRIES_PER_PAGE;
185 }
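
/*
 * Illustrative sketch of the two-level p2m layout above: a lookup
 * splits the pfn into a top-level index and a page-local index
 * (get_phys_to_machine() below is the real accessor, including the
 * bounds check):
 *
 *	unsigned long mfn = p2m_top[p2m_top_index(pfn)][p2m_index(pfn)];
 */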
186 
187 /* Build the parallel p2m_top_mfn structures */
188 static void __init xen_build_mfn_list_list(void)
189 {
190 	unsigned pfn, idx;
191 
192 	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
193 		unsigned topidx = p2m_top_index(pfn);
194 
195 		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
196 	}
197 
198 	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
199 		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
200 		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
201 	}
202 }
203 
204 void xen_setup_mfn_list_list(void)
205 {
206 	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
207 
208 	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
209 		virt_to_mfn(p2m_top_mfn_list);
210 	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
211 }
212 
213 /* Set up p2m_top to point to the domain-builder provided p2m pages */
214 void __init xen_build_dynamic_phys_to_machine(void)
215 {
216 	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
217 	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
218 	unsigned pfn;
219 
220 	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
221 		unsigned topidx = p2m_top_index(pfn);
222 
223 		p2m_top[topidx] = &mfn_list[pfn];
224 	}
225 
226 	xen_build_mfn_list_list();
227 }
228 
229 unsigned long get_phys_to_machine(unsigned long pfn)
230 {
231 	unsigned topidx, idx;
232 
233 	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
234 		return INVALID_P2M_ENTRY;
235 
236 	topidx = p2m_top_index(pfn);
237 	idx = p2m_index(pfn);
238 	return p2m_top[topidx][idx];
239 }
240 EXPORT_SYMBOL_GPL(get_phys_to_machine);
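
/*
 * Usage sketch (illustrative only): callers are expected to treat
 * INVALID_P2M_ENTRY as a hole in the p2m space, e.g.
 *
 *	unsigned long mfn = get_phys_to_machine(pfn);
 *	if (mfn == INVALID_P2M_ENTRY)
 *		return -ENXIO;		the pfn has no machine frame
 */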
241 
242 /* Install a new p2m_top page */
243 bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
244 {
245 	unsigned topidx = p2m_top_index(pfn);
246 	unsigned long **pfnp, *mfnp;
247 	unsigned i;
248 
249 	pfnp = &p2m_top[topidx];
250 	mfnp = &p2m_top_mfn[topidx];
251 
252 	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
253 		p[i] = INVALID_P2M_ENTRY;
254 
255 	if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
256 		*mfnp = virt_to_mfn(p);
257 		return true;
258 	}
259 
260 	return false;
261 }
262 
263 static void alloc_p2m(unsigned long pfn)
264 {
265 	unsigned long *p;
266 
267 	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
268 	BUG_ON(p == NULL);
269 
270 	if (!install_p2mtop_page(pfn, p))
271 		free_page((unsigned long)p);
272 }
273 
274 /* Try to install p2m mapping; fail if intermediate bits missing */
275 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
276 {
277 	unsigned topidx, idx;
278 
279 	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
280 		BUG_ON(mfn != INVALID_P2M_ENTRY);
281 		return true;
282 	}
283 
284 	topidx = p2m_top_index(pfn);
285 	if (p2m_top[topidx] == p2m_missing) {
286 		if (mfn == INVALID_P2M_ENTRY)
287 			return true;
288 		return false;
289 	}
290 
291 	idx = p2m_index(pfn);
292 	p2m_top[topidx][idx] = mfn;
293 
294 	return true;
295 }
296 
297 void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
298 {
299 	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
300 		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
301 		return;
302 	}
303 
304 	if (unlikely(!__set_phys_to_machine(pfn, mfn)))  {
305 		alloc_p2m(pfn);
306 
307 		if (!__set_phys_to_machine(pfn, mfn))
308 			BUG();
309 	}
310 }
311 
312 unsigned long arbitrary_virt_to_mfn(void *vaddr)
313 {
314 	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
315 
316 	return PFN_DOWN(maddr.maddr);
317 }
318 
319 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
320 {
321 	unsigned long address = (unsigned long)vaddr;
322 	unsigned int level;
323 	pte_t *pte;
324 	unsigned offset;
325 
326 	/*
327 	 * if the PFN is in the linear mapped vaddr range, we can just use
328 	 * the (quick) virt_to_machine() p2m lookup
329 	 */
330 	if (virt_addr_valid(vaddr))
331 		return virt_to_machine(vaddr);
332 
333 	/* otherwise we have to do a (slower) full page-table walk */
334 
335 	pte = lookup_address(address, &level);
336 	BUG_ON(pte == NULL);
337 	offset = address & ~PAGE_MASK;
338 	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
339 }
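
/*
 * Usage sketch (illustrative only): building the machine pointer for an
 * mmu_update request from a possibly-ioremapped page-table entry, as
 * xen_set_pmd_hyper() and xen_ptep_modify_prot_commit() below do:
 *
 *	struct mmu_update u;
 *	u.ptr = arbitrary_virt_to_machine(ptep).maddr;
 *	u.val = pte_val_ma(pteval);
 */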
340 
341 void make_lowmem_page_readonly(void *vaddr)
342 {
343 	pte_t *pte, ptev;
344 	unsigned long address = (unsigned long)vaddr;
345 	unsigned int level;
346 
347 	pte = lookup_address(address, &level);
348 	BUG_ON(pte == NULL);
349 
350 	ptev = pte_wrprotect(*pte);
351 
352 	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
353 		BUG();
354 }
355 
356 void make_lowmem_page_readwrite(void *vaddr)
357 {
358 	pte_t *pte, ptev;
359 	unsigned long address = (unsigned long)vaddr;
360 	unsigned int level;
361 
362 	pte = lookup_address(address, &level);
363 	BUG_ON(pte == NULL);
364 
365 	ptev = pte_mkwrite(*pte);
366 
367 	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
368 		BUG();
369 }
370 
371 
372 static bool xen_page_pinned(void *ptr)
373 {
374 	struct page *page = virt_to_page(ptr);
375 
376 	return PagePinned(page);
377 }
378 
379 static void xen_extend_mmu_update(const struct mmu_update *update)
380 {
381 	struct multicall_space mcs;
382 	struct mmu_update *u;
383 
384 	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
385 
386 	if (mcs.mc != NULL) {
387 		ADD_STATS(mmu_update_extended, 1);
388 		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
389 
390 		mcs.mc->args[1]++;
391 
392 		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
393 			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
394 		else
395 			ADD_STATS(mmu_update_histo[0], 1);
396 	} else {
397 		ADD_STATS(mmu_update, 1);
398 		mcs = __xen_mc_entry(sizeof(*u));
399 		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
400 		ADD_STATS(mmu_update_histo[1], 1);
401 	}
402 
403 	u = mcs.args;
404 	*u = *update;
405 }
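
/*
 * Batching pattern used by the callers below (sketch only): updates are
 * queued between xen_mc_batch() and xen_mc_issue(), so consecutive
 * updates made in lazy MMU mode collapse into a single hypercall:
 *
 *	xen_mc_batch();
 *	xen_extend_mmu_update(&u);		one or more updates
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);	flushes unless we're lazy
 */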
406 
407 void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
408 {
409 	struct mmu_update u;
410 
411 	preempt_disable();
412 
413 	xen_mc_batch();
414 
415 	/* ptr may be ioremapped for 64-bit pagetable setup */
416 	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
417 	u.val = pmd_val_ma(val);
418 	xen_extend_mmu_update(&u);
419 
420 	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
421 
422 	xen_mc_issue(PARAVIRT_LAZY_MMU);
423 
424 	preempt_enable();
425 }
426 
427 void xen_set_pmd(pmd_t *ptr, pmd_t val)
428 {
429 	ADD_STATS(pmd_update, 1);
430 
431 	/* If the page is not pinned, we can just update the entry
432 	   directly */
433 	if (!xen_page_pinned(ptr)) {
434 		*ptr = val;
435 		return;
436 	}
437 
438 	ADD_STATS(pmd_update_pinned, 1);
439 
440 	xen_set_pmd_hyper(ptr, val);
441 }
442 
443 /*
444  * Associate a virtual page frame with a given physical page frame
445  * and protection flags for that frame.
446  */
447 void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
448 {
449 	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
450 }
451 
452 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
453 		    pte_t *ptep, pte_t pteval)
454 {
455 	ADD_STATS(set_pte_at, 1);
456 //	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
457 	ADD_STATS(set_pte_at_current, mm == current->mm);
458 	ADD_STATS(set_pte_at_kernel, mm == &init_mm);
459 
460 	if (mm == current->mm || mm == &init_mm) {
461 		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
462 			struct multicall_space mcs;
463 			mcs = xen_mc_entry(0);
464 
465 			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
466 			ADD_STATS(set_pte_at_batched, 1);
467 			xen_mc_issue(PARAVIRT_LAZY_MMU);
468 			goto out;
469 		} else
470 			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
471 				goto out;
472 	}
473 	xen_set_pte(ptep, pteval);
474 
475 out:	return;
476 }
477 
478 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
479 				 unsigned long addr, pte_t *ptep)
480 {
481 	/* Just return the pte as-is.  We preserve the bits on commit */
482 	return *ptep;
483 }
484 
485 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
486 				 pte_t *ptep, pte_t pte)
487 {
488 	struct mmu_update u;
489 
490 	xen_mc_batch();
491 
492 	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
493 	u.val = pte_val_ma(pte);
494 	xen_extend_mmu_update(&u);
495 
496 	ADD_STATS(prot_commit, 1);
497 	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
498 
499 	xen_mc_issue(PARAVIRT_LAZY_MMU);
500 }
501 
502 /* Assume pteval_t is equivalent to all the other *val_t types. */
503 static pteval_t pte_mfn_to_pfn(pteval_t val)
504 {
505 	if (val & _PAGE_PRESENT) {
506 		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
507 		pteval_t flags = val & PTE_FLAGS_MASK;
508 		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
509 	}
510 
511 	return val;
512 }
513 
514 static pteval_t pte_pfn_to_mfn(pteval_t val)
515 {
516 	if (val & _PAGE_PRESENT) {
517 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
518 		pteval_t flags = val & PTE_FLAGS_MASK;
519 		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
520 	}
521 
522 	return val;
523 }
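
/*
 * Worked example (illustrative only): a present pteval splits into a
 * frame number and flag bits, and only the frame part is translated:
 *
 *	pteval_t val  = (pfn << PAGE_SHIFT) | _PAGE_PRESENT | _PAGE_RW;
 *	pteval_t mval = pte_pfn_to_mfn(val);
 *
 * leaves mval == (pfn_to_mfn(pfn) << PAGE_SHIFT) | _PAGE_PRESENT | _PAGE_RW.
 */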
524 
525 pteval_t xen_pte_val(pte_t pte)
526 {
527 	return pte_mfn_to_pfn(pte.pte);
528 }
529 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
530 
531 pgdval_t xen_pgd_val(pgd_t pgd)
532 {
533 	return pte_mfn_to_pfn(pgd.pgd);
534 }
535 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
536 
537 pte_t xen_make_pte(pteval_t pte)
538 {
539 	pte = pte_pfn_to_mfn(pte);
540 	return native_make_pte(pte);
541 }
542 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
543 
544 pgd_t xen_make_pgd(pgdval_t pgd)
545 {
546 	pgd = pte_pfn_to_mfn(pgd);
547 	return native_make_pgd(pgd);
548 }
549 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
550 
551 pmdval_t xen_pmd_val(pmd_t pmd)
552 {
553 	return pte_mfn_to_pfn(pmd.pmd);
554 }
555 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
556 
557 void xen_set_pud_hyper(pud_t *ptr, pud_t val)
558 {
559 	struct mmu_update u;
560 
561 	preempt_disable();
562 
563 	xen_mc_batch();
564 
565 	/* ptr may be ioremapped for 64-bit pagetable setup */
566 	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
567 	u.val = pud_val_ma(val);
568 	xen_extend_mmu_update(&u);
569 
570 	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
571 
572 	xen_mc_issue(PARAVIRT_LAZY_MMU);
573 
574 	preempt_enable();
575 }
576 
577 void xen_set_pud(pud_t *ptr, pud_t val)
578 {
579 	ADD_STATS(pud_update, 1);
580 
581 	/* If the page is not pinned, we can just update the entry
582 	   directly */
583 	if (!xen_page_pinned(ptr)) {
584 		*ptr = val;
585 		return;
586 	}
587 
588 	ADD_STATS(pud_update_pinned, 1);
589 
590 	xen_set_pud_hyper(ptr, val);
591 }
592 
593 void xen_set_pte(pte_t *ptep, pte_t pte)
594 {
595 	ADD_STATS(pte_update, 1);
596 //	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
597 	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
598 
599 #ifdef CONFIG_X86_PAE
600 	ptep->pte_high = pte.pte_high;
601 	smp_wmb();
602 	ptep->pte_low = pte.pte_low;
603 #else
604 	*ptep = pte;
605 #endif
606 }
607 
608 #ifdef CONFIG_X86_PAE
609 void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
610 {
611 	set_64bit((u64 *)ptep, native_pte_val(pte));
612 }
613 
614 void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
615 {
616 	ptep->pte_low = 0;
617 	smp_wmb();		/* make sure low gets written first */
618 	ptep->pte_high = 0;
619 }
620 
621 void xen_pmd_clear(pmd_t *pmdp)
622 {
623 	set_pmd(pmdp, __pmd(0));
624 }
625 #endif	/* CONFIG_X86_PAE */
626 
627 pmd_t xen_make_pmd(pmdval_t pmd)
628 {
629 	pmd = pte_pfn_to_mfn(pmd);
630 	return native_make_pmd(pmd);
631 }
632 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
633 
634 #if PAGETABLE_LEVELS == 4
635 pudval_t xen_pud_val(pud_t pud)
636 {
637 	return pte_mfn_to_pfn(pud.pud);
638 }
639 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
640 
641 pud_t xen_make_pud(pudval_t pud)
642 {
643 	pud = pte_pfn_to_mfn(pud);
644 
645 	return native_make_pud(pud);
646 }
647 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
648 
649 pgd_t *xen_get_user_pgd(pgd_t *pgd)
650 {
651 	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
652 	unsigned offset = pgd - pgd_page;
653 	pgd_t *user_ptr = NULL;
654 
655 	if (offset < pgd_index(USER_LIMIT)) {
656 		struct page *page = virt_to_page(pgd_page);
657 		user_ptr = (pgd_t *)page->private;
658 		if (user_ptr)
659 			user_ptr += offset;
660 	}
661 
662 	return user_ptr;
663 }
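
/*
 * Usage sketch (illustrative only): the user pgd, when present, shadows
 * the kernel one and must be kept in sync, as xen_set_pgd() below does
 * for the unpinned case:
 *
 *	pgd_t *user_ptr = xen_get_user_pgd(ptr);
 *	if (user_ptr)
 *		*user_ptr = val;
 */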
664 
665 static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
666 {
667 	struct mmu_update u;
668 
669 	u.ptr = virt_to_machine(ptr).maddr;
670 	u.val = pgd_val_ma(val);
671 	xen_extend_mmu_update(&u);
672 }
673 
674 /*
675  * Raw hypercall-based set_pgd, intended for use in early boot before
676  * there's a page structure.  This implies:
677  *  1. The only existing pagetable is the kernel's
678  *  2. It is always pinned
679  *  3. It has no user pagetable attached to it
680  */
681 void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
682 {
683 	preempt_disable();
684 
685 	xen_mc_batch();
686 
687 	__xen_set_pgd_hyper(ptr, val);
688 
689 	xen_mc_issue(PARAVIRT_LAZY_MMU);
690 
691 	preempt_enable();
692 }
693 
694 void xen_set_pgd(pgd_t *ptr, pgd_t val)
695 {
696 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
697 
698 	ADD_STATS(pgd_update, 1);
699 
700 	/* If the page is not pinned, we can just update the entry
701 	   directly */
702 	if (!xen_page_pinned(ptr)) {
703 		*ptr = val;
704 		if (user_ptr) {
705 			WARN_ON(xen_page_pinned(user_ptr));
706 			*user_ptr = val;
707 		}
708 		return;
709 	}
710 
711 	ADD_STATS(pgd_update_pinned, 1);
712 	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
713 
714 	/* If it's pinned, then we can at least batch the kernel and
715 	   user updates together. */
716 	xen_mc_batch();
717 
718 	__xen_set_pgd_hyper(ptr, val);
719 	if (user_ptr)
720 		__xen_set_pgd_hyper(user_ptr, val);
721 
722 	xen_mc_issue(PARAVIRT_LAZY_MMU);
723 }
724 #endif	/* PAGETABLE_LEVELS == 4 */
725 
726 /*
727  * (Yet another) pagetable walker.  This one is intended for pinning a
728  * pagetable.  This means that it walks a pagetable and calls the
729  * callback function on each page it finds making up the page table,
730  * at every level.  It walks the entire pagetable, but it only bothers
731  * pinning pte pages which are below limit.  In the normal case this
732  * will be STACK_TOP_MAX, but at boot we need to pin up to
733  * FIXADDR_TOP.
734  *
735  * For 32-bit the important bit is that we don't pin beyond there,
736  * because then we start getting into Xen's ptes.
737  *
738  * For 64-bit, we must skip the Xen hole in the middle of the address
739  * space, just after the big x86-64 virtual hole.
740  */
741 static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
742 			  int (*func)(struct mm_struct *mm, struct page *,
743 				      enum pt_level),
744 			  unsigned long limit)
745 {
746 	int flush = 0;
747 	unsigned hole_low, hole_high;
748 	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
749 	unsigned pgdidx, pudidx, pmdidx;
750 
751 	/* The limit is the last byte to be touched */
752 	limit--;
753 	BUG_ON(limit >= FIXADDR_TOP);
754 
755 	if (xen_feature(XENFEAT_auto_translated_physmap))
756 		return 0;
757 
758 	/*
759 	 * 64-bit has a great big hole in the middle of the address
760 	 * space, which contains the Xen mappings.  On 32-bit these
761 	 * will end up making a zero-sized hole and so is a no-op.
762 	 */
763 	hole_low = pgd_index(USER_LIMIT);
764 	hole_high = pgd_index(PAGE_OFFSET);
765 
766 	pgdidx_limit = pgd_index(limit);
767 #if PTRS_PER_PUD > 1
768 	pudidx_limit = pud_index(limit);
769 #else
770 	pudidx_limit = 0;
771 #endif
772 #if PTRS_PER_PMD > 1
773 	pmdidx_limit = pmd_index(limit);
774 #else
775 	pmdidx_limit = 0;
776 #endif
777 
778 	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
779 		pud_t *pud;
780 
781 		if (pgdidx >= hole_low && pgdidx < hole_high)
782 			continue;
783 
784 		if (!pgd_val(pgd[pgdidx]))
785 			continue;
786 
787 		pud = pud_offset(&pgd[pgdidx], 0);
788 
789 		if (PTRS_PER_PUD > 1) /* not folded */
790 			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
791 
792 		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
793 			pmd_t *pmd;
794 
795 			if (pgdidx == pgdidx_limit &&
796 			    pudidx > pudidx_limit)
797 				goto out;
798 
799 			if (pud_none(pud[pudidx]))
800 				continue;
801 
802 			pmd = pmd_offset(&pud[pudidx], 0);
803 
804 			if (PTRS_PER_PMD > 1) /* not folded */
805 				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
806 
807 			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
808 				struct page *pte;
809 
810 				if (pgdidx == pgdidx_limit &&
811 				    pudidx == pudidx_limit &&
812 				    pmdidx > pmdidx_limit)
813 					goto out;
814 
815 				if (pmd_none(pmd[pmdidx]))
816 					continue;
817 
818 				pte = pmd_page(pmd[pmdidx]);
819 				flush |= (*func)(mm, pte, PT_PTE);
820 			}
821 		}
822 	}
823 
824 out:
825 	/* Do the top level last, so that the callbacks can use it as
826 	   a cue to do final things like tlb flushes. */
827 	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
828 
829 	return flush;
830 }
831 
832 static int xen_pgd_walk(struct mm_struct *mm,
833 			int (*func)(struct mm_struct *mm, struct page *,
834 				    enum pt_level),
835 			unsigned long limit)
836 {
837 	return __xen_pgd_walk(mm, mm->pgd, func, limit);
838 }
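
/*
 * Usage sketch (illustrative only): the walker is driven by a per-page
 * callback, as the pin and unpin paths below do:
 *
 *	flush = __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
 */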
839 
840 /* If we're using split pte locks, then take the page's lock and
841    return a pointer to it.  Otherwise return NULL. */
842 static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
843 {
844 	spinlock_t *ptl = NULL;
845 
846 #if USE_SPLIT_PTLOCKS
847 	ptl = __pte_lockptr(page);
848 	spin_lock_nest_lock(ptl, &mm->page_table_lock);
849 #endif
850 
851 	return ptl;
852 }
853 
854 static void xen_pte_unlock(void *v)
855 {
856 	spinlock_t *ptl = v;
857 	spin_unlock(ptl);
858 }
859 
860 static void xen_do_pin(unsigned level, unsigned long pfn)
861 {
862 	struct mmuext_op *op;
863 	struct multicall_space mcs;
864 
865 	mcs = __xen_mc_entry(sizeof(*op));
866 	op = mcs.args;
867 	op->cmd = level;
868 	op->arg1.mfn = pfn_to_mfn(pfn);
869 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
870 }
871 
872 static int xen_pin_page(struct mm_struct *mm, struct page *page,
873 			enum pt_level level)
874 {
875 	unsigned pgfl = TestSetPagePinned(page);
876 	int flush;
877 
878 	if (pgfl)
879 		flush = 0;		/* already pinned */
880 	else if (PageHighMem(page))
881 		/* kmaps need flushing if we found an unpinned
882 		   highpage */
883 		flush = 1;
884 	else {
885 		void *pt = lowmem_page_address(page);
886 		unsigned long pfn = page_to_pfn(page);
887 		struct multicall_space mcs = __xen_mc_entry(0);
888 		spinlock_t *ptl;
889 
890 		flush = 0;
891 
892 		/*
893 		 * We need to hold the pagetable lock between the time
894 		 * we make the pagetable RO and when we actually pin
895 		 * it.  If we don't, then other users may come in and
896 		 * attempt to update the pagetable by writing it,
897 		 * which will fail because the memory is RO but not
898 		 * pinned, so Xen won't do the trap'n'emulate.
899 		 *
900 		 * If we're using split pte locks, we can't hold the
901 		 * entire pagetable's worth of locks during the
902 		 * traverse, because we may wrap the preempt count (8
903 		 * bits).  The solution is to mark RO and pin each PTE
904 		 * page while holding the lock.  This means the number
905 		 * of locks we end up holding is never more than a
906 		 * batch size (~32 entries, at present).
907 		 *
908 		 * If we're not using split pte locks, we needn't pin
909 		 * the PTE pages independently, because we're
910 		 * protected by the overall pagetable lock.
911 		 */
912 		ptl = NULL;
913 		if (level == PT_PTE)
914 			ptl = xen_pte_lock(page, mm);
915 
916 		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
917 					pfn_pte(pfn, PAGE_KERNEL_RO),
918 					level == PT_PGD ? UVMF_TLB_FLUSH : 0);
919 
920 		if (ptl) {
921 			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
922 
923 			/* Queue a deferred unlock for when this batch
924 			   is completed. */
925 			xen_mc_callback(xen_pte_unlock, ptl);
926 		}
927 	}
928 
929 	return flush;
930 }
931 
932 /* This is called just after a mm has been created, but it has not
933    been used yet.  We need to make sure that its pagetable is all
934    read-only, and can be pinned. */
935 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
936 {
937 	vm_unmap_aliases();
938 
939 	xen_mc_batch();
940 
941 	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
942 		/* re-enable interrupts for flushing */
943 		xen_mc_issue(0);
944 
945 		kmap_flush_unused();
946 
947 		xen_mc_batch();
948 	}
949 
950 #ifdef CONFIG_X86_64
951 	{
952 		pgd_t *user_pgd = xen_get_user_pgd(pgd);
953 
954 		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
955 
956 		if (user_pgd) {
957 			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
958 			xen_do_pin(MMUEXT_PIN_L4_TABLE,
959 				   PFN_DOWN(__pa(user_pgd)));
960 		}
961 	}
962 #else /* CONFIG_X86_32 */
963 #ifdef CONFIG_X86_PAE
964 	/* Need to make sure unshared kernel PMD is pinnable */
965 	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
966 		     PT_PMD);
967 #endif
968 	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
969 #endif /* CONFIG_X86_64 */
970 	xen_mc_issue(0);
971 }
972 
973 static void xen_pgd_pin(struct mm_struct *mm)
974 {
975 	__xen_pgd_pin(mm, mm->pgd);
976 }
977 
978 /*
979  * On save, we need to pin all pagetables to make sure they get their
980  * mfns turned into pfns.  Search the list for any unpinned pgds and pin
981  * them (unpinned pgds are not currently in use, probably because the
982  * process is under construction or destruction).
983  *
984  * Expected to be called in stop_machine() ("equivalent to taking
985  * every spinlock in the system"), so the locking doesn't really
986  * matter all that much.
987  */
988 void xen_mm_pin_all(void)
989 {
990 	unsigned long flags;
991 	struct page *page;
992 
993 	spin_lock_irqsave(&pgd_lock, flags);
994 
995 	list_for_each_entry(page, &pgd_list, lru) {
996 		if (!PagePinned(page)) {
997 			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
998 			SetPageSavePinned(page);
999 		}
1000 	}
1001 
1002 	spin_unlock_irqrestore(&pgd_lock, flags);
1003 }
1004 
1005 /*
1006  * The init_mm pagetable is really pinned as soon as it's created, but
1007  * that's before we have page structures to store the bits.  So do all
1008  * the book-keeping now.
1009  */
1010 static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
1011 				  enum pt_level level)
1012 {
1013 	SetPagePinned(page);
1014 	return 0;
1015 }
1016 
1017 static void __init xen_mark_init_mm_pinned(void)
1018 {
1019 	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
1020 }
1021 
1022 static int xen_unpin_page(struct mm_struct *mm, struct page *page,
1023 			  enum pt_level level)
1024 {
1025 	unsigned pgfl = TestClearPagePinned(page);
1026 
1027 	if (pgfl && !PageHighMem(page)) {
1028 		void *pt = lowmem_page_address(page);
1029 		unsigned long pfn = page_to_pfn(page);
1030 		spinlock_t *ptl = NULL;
1031 		struct multicall_space mcs;
1032 
1033 		/*
1034 		 * Do the converse to pin_page.  If we're using split
1035 		 * pte locks, we must hold the lock while
1036 		 * the pte page is unpinned but still RO to prevent
1037 		 * concurrent updates from seeing it in this
1038 		 * partially-pinned state.
1039 		 */
1040 		if (level == PT_PTE) {
1041 			ptl = xen_pte_lock(page, mm);
1042 
1043 			if (ptl)
1044 				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
1045 		}
1046 
1047 		mcs = __xen_mc_entry(0);
1048 
1049 		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
1050 					pfn_pte(pfn, PAGE_KERNEL),
1051 					level == PT_PGD ? UVMF_TLB_FLUSH : 0);
1052 
1053 		if (ptl) {
1054 			/* unlock when batch completed */
1055 			xen_mc_callback(xen_pte_unlock, ptl);
1056 		}
1057 	}
1058 
1059 	return 0;		/* never need to flush on unpin */
1060 }
1061 
1062 /* Release a pagetable's pages back as normal RW */
1063 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
1064 {
1065 	xen_mc_batch();
1066 
1067 	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1068 
1069 #ifdef CONFIG_X86_64
1070 	{
1071 		pgd_t *user_pgd = xen_get_user_pgd(pgd);
1072 
1073 		if (user_pgd) {
1074 			xen_do_pin(MMUEXT_UNPIN_TABLE,
1075 				   PFN_DOWN(__pa(user_pgd)));
1076 			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
1077 		}
1078 	}
1079 #endif
1080 
1081 #ifdef CONFIG_X86_PAE
1082 	/* Need to make sure unshared kernel PMD is unpinned */
1083 	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
1084 		       PT_PMD);
1085 #endif
1086 
1087 	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
1088 
1089 	xen_mc_issue(0);
1090 }
1091 
1092 static void xen_pgd_unpin(struct mm_struct *mm)
1093 {
1094 	__xen_pgd_unpin(mm, mm->pgd);
1095 }
1096 
1097 /*
1098  * On resume, undo any pinning done at save, so that the rest of the
1099  * kernel doesn't see any unexpected pinned pagetables.
1100  */
1101 void xen_mm_unpin_all(void)
1102 {
1103 	unsigned long flags;
1104 	struct page *page;
1105 
1106 	spin_lock_irqsave(&pgd_lock, flags);
1107 
1108 	list_for_each_entry(page, &pgd_list, lru) {
1109 		if (PageSavePinned(page)) {
1110 			BUG_ON(!PagePinned(page));
1111 			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
1112 			ClearPageSavePinned(page);
1113 		}
1114 	}
1115 
1116 	spin_unlock_irqrestore(&pgd_lock, flags);
1117 }
1118 
1119 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1120 {
1121 	spin_lock(&next->page_table_lock);
1122 	xen_pgd_pin(next);
1123 	spin_unlock(&next->page_table_lock);
1124 }
1125 
1126 void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1127 {
1128 	spin_lock(&mm->page_table_lock);
1129 	xen_pgd_pin(mm);
1130 	spin_unlock(&mm->page_table_lock);
1131 }
1132 
1133 
1134 #ifdef CONFIG_SMP
1135 /* Another cpu may still have its %cr3 pointing at the pagetable, so
1136    we need to repoint it somewhere else before we can unpin it. */
1137 static void drop_other_mm_ref(void *info)
1138 {
1139 	struct mm_struct *mm = info;
1140 	struct mm_struct *active_mm;
1141 
1142 	active_mm = percpu_read(cpu_tlbstate.active_mm);
1143 
1144 	if (active_mm == mm)
1145 		leave_mm(smp_processor_id());
1146 
1147 	/* If this cpu still has a stale cr3 reference, then make sure
1148 	   it has been flushed. */
1149 	if (percpu_read(xen_current_cr3) == __pa(mm->pgd))
1150 		load_cr3(swapper_pg_dir);
1151 }
1152 
1153 static void xen_drop_mm_ref(struct mm_struct *mm)
1154 {
1155 	cpumask_var_t mask;
1156 	unsigned cpu;
1157 
1158 	if (current->active_mm == mm) {
1159 		if (current->mm == mm)
1160 			load_cr3(swapper_pg_dir);
1161 		else
1162 			leave_mm(smp_processor_id());
1163 	}
1164 
1165 	/* Get the "official" set of cpus referring to our pagetable. */
1166 	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1167 		for_each_online_cpu(cpu) {
1168 			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
1169 			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1170 				continue;
1171 			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1172 		}
1173 		return;
1174 	}
1175 	cpumask_copy(mask, &mm->cpu_vm_mask);
1176 
1177 	/* It's possible that a vcpu may have a stale reference to our
1178 	   cr3, because it's in lazy mode and hasn't yet flushed
1179 	   its set of pending hypercalls.  In this case, we can
1180 	   look at its actual current cr3 value, and force it to flush
1181 	   if needed. */
1182 	for_each_online_cpu(cpu) {
1183 		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1184 			cpumask_set_cpu(cpu, mask);
1185 	}
1186 
1187 	if (!cpumask_empty(mask))
1188 		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1189 	free_cpumask_var(mask);
1190 }
1191 #else
1192 static void xen_drop_mm_ref(struct mm_struct *mm)
1193 {
1194 	if (current->active_mm == mm)
1195 		load_cr3(swapper_pg_dir);
1196 }
1197 #endif
1198 
1199 /*
1200  * While a process runs, Xen pins its pagetables, which means that the
1201  * hypervisor forces it to be read-only, and it controls all updates
1202  * to it.  This means that all pagetable updates have to go via the
1203  * hypervisor, which is moderately expensive.
1204  *
1205  * Since we're pulling the pagetable down, we switch to init_mm,
1206  * unpin the old process pagetable and mark it all read-write, which
1207  * allows further operations on it to be simple memory accesses.
1208  *
1209  * The only subtle point is that another CPU may still be using the
1210  * pagetable because of lazy tlb flushing.  This means we need to
1211  * switch all CPUs off this pagetable before we can unpin it.
1212  */
1213 void xen_exit_mmap(struct mm_struct *mm)
1214 {
1215 	get_cpu();		/* make sure we don't move around */
1216 	xen_drop_mm_ref(mm);
1217 	put_cpu();
1218 
1219 	spin_lock(&mm->page_table_lock);
1220 
1221 	/* pgd may not be pinned in the error exit path of execve */
1222 	if (xen_page_pinned(mm->pgd))
1223 		xen_pgd_unpin(mm);
1224 
1225 	spin_unlock(&mm->page_table_lock);
1226 }
1227 
1228 static __init void xen_pagetable_setup_start(pgd_t *base)
1229 {
1230 }
1231 
1232 static __init void xen_pagetable_setup_done(pgd_t *base)
1233 {
1234 	xen_setup_shared_info();
1235 }
1236 
1237 static void xen_write_cr2(unsigned long cr2)
1238 {
1239 	percpu_read(xen_vcpu)->arch.cr2 = cr2;
1240 }
1241 
1242 static unsigned long xen_read_cr2(void)
1243 {
1244 	return percpu_read(xen_vcpu)->arch.cr2;
1245 }
1246 
1247 unsigned long xen_read_cr2_direct(void)
1248 {
1249 	return percpu_read(xen_vcpu_info.arch.cr2);
1250 }
1251 
1252 static void xen_flush_tlb(void)
1253 {
1254 	struct mmuext_op *op;
1255 	struct multicall_space mcs;
1256 
1257 	preempt_disable();
1258 
1259 	mcs = xen_mc_entry(sizeof(*op));
1260 
1261 	op = mcs.args;
1262 	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1263 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1264 
1265 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1266 
1267 	preempt_enable();
1268 }
1269 
1270 static void xen_flush_tlb_single(unsigned long addr)
1271 {
1272 	struct mmuext_op *op;
1273 	struct multicall_space mcs;
1274 
1275 	preempt_disable();
1276 
1277 	mcs = xen_mc_entry(sizeof(*op));
1278 	op = mcs.args;
1279 	op->cmd = MMUEXT_INVLPG_LOCAL;
1280 	op->arg1.linear_addr = addr & PAGE_MASK;
1281 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1282 
1283 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1284 
1285 	preempt_enable();
1286 }
1287 
1288 static void xen_flush_tlb_others(const struct cpumask *cpus,
1289 				 struct mm_struct *mm, unsigned long va)
1290 {
1291 	struct {
1292 		struct mmuext_op op;
1293 		DECLARE_BITMAP(mask, NR_CPUS);
1294 	} *args;
1295 	struct multicall_space mcs;
1296 
1297 	if (cpumask_empty(cpus))
1298 		return;		/* nothing to do */
1299 
1300 	mcs = xen_mc_entry(sizeof(*args));
1301 	args = mcs.args;
1302 	args->op.arg2.vcpumask = to_cpumask(args->mask);
1303 
1304 	/* Remove us, and any offline CPUS. */
1305 	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1306 	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1307 
1308 	if (va == TLB_FLUSH_ALL) {
1309 		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1310 	} else {
1311 		args->op.cmd = MMUEXT_INVLPG_MULTI;
1312 		args->op.arg1.linear_addr = va;
1313 	}
1314 
1315 	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1316 
1317 	xen_mc_issue(PARAVIRT_LAZY_MMU);
1318 }
1319 
1320 static unsigned long xen_read_cr3(void)
1321 {
1322 	return percpu_read(xen_cr3);
1323 }
1324 
1325 static void set_current_cr3(void *v)
1326 {
1327 	percpu_write(xen_current_cr3, (unsigned long)v);
1328 }
1329 
1330 static void __xen_write_cr3(bool kernel, unsigned long cr3)
1331 {
1332 	struct mmuext_op *op;
1333 	struct multicall_space mcs;
1334 	unsigned long mfn;
1335 
1336 	if (cr3)
1337 		mfn = pfn_to_mfn(PFN_DOWN(cr3));
1338 	else
1339 		mfn = 0;
1340 
1341 	WARN_ON(mfn == 0 && kernel);
1342 
1343 	mcs = __xen_mc_entry(sizeof(*op));
1344 
1345 	op = mcs.args;
1346 	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1347 	op->arg1.mfn = mfn;
1348 
1349 	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1350 
1351 	if (kernel) {
1352 		percpu_write(xen_cr3, cr3);
1353 
1354 		/* Update xen_current_cr3 once the batch has actually
1355 		   been submitted. */
1356 		xen_mc_callback(set_current_cr3, (void *)cr3);
1357 	}
1358 }
1359 
1360 static void xen_write_cr3(unsigned long cr3)
1361 {
1362 	BUG_ON(preemptible());
1363 
1364 	xen_mc_batch();  /* disables interrupts */
1365 
1366 	/* Update while interrupts are disabled, so its atomic with
1367 	   respect to ipis */
1368 	percpu_write(xen_cr3, cr3);
1369 
1370 	__xen_write_cr3(true, cr3);
1371 
1372 #ifdef CONFIG_X86_64
1373 	{
1374 		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1375 		if (user_pgd)
1376 			__xen_write_cr3(false, __pa(user_pgd));
1377 		else
1378 			__xen_write_cr3(false, 0);
1379 	}
1380 #endif
1381 
1382 	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
1383 }
1384 
1385 static int xen_pgd_alloc(struct mm_struct *mm)
1386 {
1387 	pgd_t *pgd = mm->pgd;
1388 	int ret = 0;
1389 
1390 	BUG_ON(PagePinned(virt_to_page(pgd)));
1391 
1392 #ifdef CONFIG_X86_64
1393 	{
1394 		struct page *page = virt_to_page(pgd);
1395 		pgd_t *user_pgd;
1396 
1397 		BUG_ON(page->private != 0);
1398 
1399 		ret = -ENOMEM;
1400 
1401 		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1402 		page->private = (unsigned long)user_pgd;
1403 
1404 		if (user_pgd != NULL) {
1405 			user_pgd[pgd_index(VSYSCALL_START)] =
1406 				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1407 			ret = 0;
1408 		}
1409 
1410 		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1411 	}
1412 #endif
1413 
1414 	return ret;
1415 }
1416 
1417 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1418 {
1419 #ifdef CONFIG_X86_64
1420 	pgd_t *user_pgd = xen_get_user_pgd(pgd);
1421 
1422 	if (user_pgd)
1423 		free_page((unsigned long)user_pgd);
1424 #endif
1425 }
1426 
1427 #ifdef CONFIG_HIGHPTE
1428 static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
1429 {
1430 	pgprot_t prot = PAGE_KERNEL;
1431 
1432 	if (PagePinned(page))
1433 		prot = PAGE_KERNEL_RO;
1434 
1435 	if (0 && PageHighMem(page))
1436 		printk("mapping highpte %lx type %d prot %s\n",
1437 		       page_to_pfn(page), type,
1438 		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
1439 
1440 	return kmap_atomic_prot(page, type, prot);
1441 }
1442 #endif
1443 
1444 #ifdef CONFIG_X86_32
1445 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1446 {
1447 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
1448 	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1449 		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1450 			       pte_val_ma(pte));
1451 
1452 	return pte;
1453 }
1454 
1455 /* Init-time set_pte while constructing initial pagetables, which
1456    doesn't allow RO pagetable pages to be remapped RW */
1457 static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
1458 {
1459 	pte = mask_rw_pte(ptep, pte);
1460 
1461 	xen_set_pte(ptep, pte);
1462 }
1463 #endif
1464 
1465 static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1466 {
1467 	struct mmuext_op op;
1468 	op.cmd = cmd;
1469 	op.arg1.mfn = pfn_to_mfn(pfn);
1470 	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1471 		BUG();
1472 }
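
/*
 * Usage sketch (illustrative only): a freshly allocated pte page is
 * made read-only and then pinned as an L1 table, as
 * xen_alloc_pte_init() below does:
 *
 *	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
 *	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
 */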
1473 
1474 /* Early in boot, while setting up the initial pagetable, assume
1475    everything is pinned. */
1476 static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1477 {
1478 #ifdef CONFIG_FLATMEM
1479 	BUG_ON(mem_map);	/* should only be used early */
1480 #endif
1481 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1482 	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1483 }
1484 
1485 /* Used for pmd and pud */
1486 static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1487 {
1488 #ifdef CONFIG_FLATMEM
1489 	BUG_ON(mem_map);	/* should only be used early */
1490 #endif
1491 	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1492 }
1493 
1494 /* Early release_pte assumes that all pts are pinned, since there's
1495    only init_mm and anything attached to that is pinned. */
1496 static __init void xen_release_pte_init(unsigned long pfn)
1497 {
1498 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1499 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1500 }
1501 
1502 static __init void xen_release_pmd_init(unsigned long pfn)
1503 {
1504 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1505 }
1506 
1507 /* This needs to make sure the new pte page is pinned iff it's being
1508    attached to a pinned pagetable. */
1509 static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
1510 {
1511 	struct page *page = pfn_to_page(pfn);
1512 
1513 	if (PagePinned(virt_to_page(mm->pgd))) {
1514 		SetPagePinned(page);
1515 
1516 		vm_unmap_aliases();
1517 		if (!PageHighMem(page)) {
1518 			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
1519 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1520 				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1521 		} else {
1522 			/* make sure there are no stray mappings of
1523 			   this page */
1524 			kmap_flush_unused();
1525 		}
1526 	}
1527 }
1528 
1529 static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1530 {
1531 	xen_alloc_ptpage(mm, pfn, PT_PTE);
1532 }
1533 
1534 static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1535 {
1536 	xen_alloc_ptpage(mm, pfn, PT_PMD);
1537 }
1538 
1539 /* This should not be called until we're OK to use struct page */
1540 static void xen_release_ptpage(unsigned long pfn, unsigned level)
1541 {
1542 	struct page *page = pfn_to_page(pfn);
1543 
1544 	if (PagePinned(page)) {
1545 		if (!PageHighMem(page)) {
1546 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1547 				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1548 			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1549 		}
1550 		ClearPagePinned(page);
1551 	}
1552 }
1553 
1554 static void xen_release_pte(unsigned long pfn)
1555 {
1556 	xen_release_ptpage(pfn, PT_PTE);
1557 }
1558 
1559 static void xen_release_pmd(unsigned long pfn)
1560 {
1561 	xen_release_ptpage(pfn, PT_PMD);
1562 }
1563 
1564 #if PAGETABLE_LEVELS == 4
1565 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1566 {
1567 	xen_alloc_ptpage(mm, pfn, PT_PUD);
1568 }
1569 
1570 static void xen_release_pud(unsigned long pfn)
1571 {
1572 	xen_release_ptpage(pfn, PT_PUD);
1573 }
1574 #endif
1575 
1576 void __init xen_reserve_top(void)
1577 {
1578 #ifdef CONFIG_X86_32
1579 	unsigned long top = HYPERVISOR_VIRT_START;
1580 	struct xen_platform_parameters pp;
1581 
1582 	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1583 		top = pp.virt_start;
1584 
1585 	reserve_top_address(-top);
1586 #endif	/* CONFIG_X86_32 */
1587 }
1588 
1589 /*
1590  * Like __va(), but returns address in the kernel mapping (which is
1591  * all we have until the physical memory mapping has been set up).
1592  */
1593 static void *__ka(phys_addr_t paddr)
1594 {
1595 #ifdef CONFIG_X86_64
1596 	return (void *)(paddr + __START_KERNEL_map);
1597 #else
1598 	return __va(paddr);
1599 #endif
1600 }
1601 
1602 /* Convert a machine address to physical address */
1603 static unsigned long m2p(phys_addr_t maddr)
1604 {
1605 	phys_addr_t paddr;
1606 
1607 	maddr &= PTE_PFN_MASK;
1608 	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1609 
1610 	return paddr;
1611 }
1612 
1613 /* Convert a machine address to kernel virtual */
1614 static void *m2v(phys_addr_t maddr)
1615 {
1616 	return __ka(m2p(maddr));
1617 }
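
/*
 * Worked example (illustrative only): a pmd entry holds a machine
 * address, so reaching the pte page it points at goes machine ->
 * physical -> kernel virtual, as xen_map_identity_early() below does:
 *
 *	pte_t *pte_page = m2v(pmd[pmdidx].pmd);		i.e. __ka(m2p(...))
 */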
1618 
1619 static void set_page_prot(void *addr, pgprot_t prot)
1620 {
1621 	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1622 	pte_t pte = pfn_pte(pfn, prot);
1623 
1624 	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
1625 		BUG();
1626 }
1627 
1628 static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1629 {
1630 	unsigned pmdidx, pteidx;
1631 	unsigned ident_pte;
1632 	unsigned long pfn;
1633 
1634 	ident_pte = 0;
1635 	pfn = 0;
1636 	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1637 		pte_t *pte_page;
1638 
1639 		/* Reuse or allocate a page of ptes */
1640 		if (pmd_present(pmd[pmdidx]))
1641 			pte_page = m2v(pmd[pmdidx].pmd);
1642 		else {
1643 			/* Check for free pte pages */
1644 			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
1645 				break;
1646 
1647 			pte_page = &level1_ident_pgt[ident_pte];
1648 			ident_pte += PTRS_PER_PTE;
1649 
1650 			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1651 		}
1652 
1653 		/* Install mappings */
1654 		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1655 			pte_t pte;
1656 
1657 			if (pfn > max_pfn_mapped)
1658 				max_pfn_mapped = pfn;
1659 
1660 			if (!pte_none(pte_page[pteidx]))
1661 				continue;
1662 
1663 			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1664 			pte_page[pteidx] = pte;
1665 		}
1666 	}
1667 
1668 	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1669 		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1670 
1671 	set_page_prot(pmd, PAGE_KERNEL_RO);
1672 }
1673 
1674 #ifdef CONFIG_X86_64
1675 static void convert_pfn_mfn(void *v)
1676 {
1677 	pte_t *pte = v;
1678 	int i;
1679 
1680 	/* All levels are converted the same way, so just treat them
1681 	   as ptes. */
1682 	for (i = 0; i < PTRS_PER_PTE; i++)
1683 		pte[i] = xen_make_pte(pte[i].pte);
1684 }
1685 
1686 /*
1687  * Set up the initial kernel pagetable.
1688  *
1689  * We can construct this by grafting the Xen provided pagetable into
1690  * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
1691  * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
1692  * means that only the kernel has a physical mapping to start with -
1693  * but that's enough to get __va working.  We need to fill in the rest
1694  * of the physical mapping once some sort of allocator has been set
1695  * up.
1696  */
1697 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1698 					 unsigned long max_pfn)
1699 {
1700 	pud_t *l3;
1701 	pmd_t *l2;
1702 
1703 	/* Zap identity mapping */
1704 	init_level4_pgt[0] = __pgd(0);
1705 
1706 	/* Pre-constructed entries are in pfn, so convert to mfn */
1707 	convert_pfn_mfn(init_level4_pgt);
1708 	convert_pfn_mfn(level3_ident_pgt);
1709 	convert_pfn_mfn(level3_kernel_pgt);
1710 
1711 	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1712 	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1713 
1714 	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1715 	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1716 
1717 	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1718 	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1719 	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
1720 
1721 	/* Set up identity map */
1722 	xen_map_identity_early(level2_ident_pgt, max_pfn);
1723 
1724 	/* Make pagetable pieces RO */
1725 	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1726 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1727 	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1728 	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1729 	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1730 	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1731 
1732 	/* Pin down new L4 */
1733 	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1734 			  PFN_DOWN(__pa_symbol(init_level4_pgt)));
1735 
1736 	/* Unpin Xen-provided one */
1737 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1738 
1739 	/* Switch over */
1740 	pgd = init_level4_pgt;
1741 
1742 	/*
1743 	 * At this stage there can be no user pgd, and no page
1744 	 * structure to attach it to, so make sure we just set kernel
1745 	 * pgd.
1746 	 */
1747 	xen_mc_batch();
1748 	__xen_write_cr3(true, __pa(pgd));
1749 	xen_mc_issue(PARAVIRT_LAZY_CPU);
1750 
1751 	reserve_early(__pa(xen_start_info->pt_base),
1752 		      __pa(xen_start_info->pt_base +
1753 			   xen_start_info->nr_pt_frames * PAGE_SIZE),
1754 		      "XEN PAGETABLES");
1755 
1756 	return pgd;
1757 }
1758 #else	/* !CONFIG_X86_64 */
1759 static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
1760 
1761 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
1762 					 unsigned long max_pfn)
1763 {
1764 	pmd_t *kernel_pmd;
1765 
1766 	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1767 				  xen_start_info->nr_pt_frames * PAGE_SIZE +
1768 				  512*1024);
1769 
1770 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1771 	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
1772 
1773 	xen_map_identity_early(level2_kernel_pgt, max_pfn);
1774 
1775 	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
1776 	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
1777 			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
1778 
1779 	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1780 	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1781 	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
1782 
1783 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1784 
1785 	xen_write_cr3(__pa(swapper_pg_dir));
1786 
1787 	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
1788 
1789 	reserve_early(__pa(xen_start_info->pt_base),
1790 		      __pa(xen_start_info->pt_base +
1791 			   xen_start_info->nr_pt_frames * PAGE_SIZE),
1792 		      "XEN PAGETABLES");
1793 
1794 	return swapper_pg_dir;
1795 }
1796 #endif	/* CONFIG_X86_64 */
1797 
1798 static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1799 {
1800 	pte_t pte;
1801 
1802 	phys >>= PAGE_SHIFT;
1803 
1804 	switch (idx) {
1805 	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
1806 #ifdef CONFIG_X86_F00F_BUG
1807 	case FIX_F00F_IDT:
1808 #endif
1809 #ifdef CONFIG_X86_32
1810 	case FIX_WP_TEST:
1811 	case FIX_VDSO:
1812 # ifdef CONFIG_HIGHMEM
1813 	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
1814 # endif
1815 #else
1816 	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
1817 #endif
1818 #ifdef CONFIG_X86_LOCAL_APIC
1819 	case FIX_APIC_BASE:	/* maps dummy local APIC */
1820 #endif
1821 	case FIX_TEXT_POKE0:
1822 	case FIX_TEXT_POKE1:
1823 		/* All local page mappings */
1824 		pte = pfn_pte(phys, prot);
1825 		break;
1826 
1827 	default:
1828 		pte = mfn_pte(phys, prot);
1829 		break;
1830 	}
1831 
1832 	__native_set_fixmap(idx, pte);
1833 
1834 #ifdef CONFIG_X86_64
1835 	/* Replicate changes to map the vsyscall page into the user
1836 	   pagetable vsyscall mapping. */
1837 	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
1838 		unsigned long vaddr = __fix_to_virt(idx);
1839 		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
1840 	}
1841 #endif
1842 }
1843 
1844 __init void xen_post_allocator_init(void)
1845 {
1846 	pv_mmu_ops.set_pte = xen_set_pte;
1847 	pv_mmu_ops.set_pmd = xen_set_pmd;
1848 	pv_mmu_ops.set_pud = xen_set_pud;
1849 #if PAGETABLE_LEVELS == 4
1850 	pv_mmu_ops.set_pgd = xen_set_pgd;
1851 #endif
1852 
1853 	/* This will work as long as patching hasn't happened yet
1854 	   (which it hasn't) */
1855 	pv_mmu_ops.alloc_pte = xen_alloc_pte;
1856 	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
1857 	pv_mmu_ops.release_pte = xen_release_pte;
1858 	pv_mmu_ops.release_pmd = xen_release_pmd;
1859 #if PAGETABLE_LEVELS == 4
1860 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
1861 	pv_mmu_ops.release_pud = xen_release_pud;
1862 #endif
1863 
1864 #ifdef CONFIG_X86_64
1865 	SetPagePinned(virt_to_page(level3_user_vsyscall));
1866 #endif
1867 	xen_mark_init_mm_pinned();
1868 }
1869 
1870 static void xen_leave_lazy_mmu(void)
1871 {
1872 	preempt_disable();
1873 	xen_mc_flush();
1874 	paravirt_leave_lazy_mmu();
1875 	preempt_enable();
1876 }
1877 
1878 const struct pv_mmu_ops xen_mmu_ops __initdata = {
1879 	.pagetable_setup_start = xen_pagetable_setup_start,
1880 	.pagetable_setup_done = xen_pagetable_setup_done,
1881 
1882 	.read_cr2 = xen_read_cr2,
1883 	.write_cr2 = xen_write_cr2,
1884 
1885 	.read_cr3 = xen_read_cr3,
1886 	.write_cr3 = xen_write_cr3,
1887 
1888 	.flush_tlb_user = xen_flush_tlb,
1889 	.flush_tlb_kernel = xen_flush_tlb,
1890 	.flush_tlb_single = xen_flush_tlb_single,
1891 	.flush_tlb_others = xen_flush_tlb_others,
1892 
1893 	.pte_update = paravirt_nop,
1894 	.pte_update_defer = paravirt_nop,
1895 
1896 	.pgd_alloc = xen_pgd_alloc,
1897 	.pgd_free = xen_pgd_free,
1898 
1899 	.alloc_pte = xen_alloc_pte_init,
1900 	.release_pte = xen_release_pte_init,
1901 	.alloc_pmd = xen_alloc_pmd_init,
1902 	.alloc_pmd_clone = paravirt_nop,
1903 	.release_pmd = xen_release_pmd_init,
1904 
1905 #ifdef CONFIG_HIGHPTE
1906 	.kmap_atomic_pte = xen_kmap_atomic_pte,
1907 #endif
1908 
1909 #ifdef CONFIG_X86_64
1910 	.set_pte = xen_set_pte,
1911 #else
1912 	.set_pte = xen_set_pte_init,
1913 #endif
1914 	.set_pte_at = xen_set_pte_at,
1915 	.set_pmd = xen_set_pmd_hyper,
1916 
1917 	.ptep_modify_prot_start = __ptep_modify_prot_start,
1918 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
1919 
1920 	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
1921 	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
1922 
1923 	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
1924 	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
1925 
1926 #ifdef CONFIG_X86_PAE
1927 	.set_pte_atomic = xen_set_pte_atomic,
1928 	.pte_clear = xen_pte_clear,
1929 	.pmd_clear = xen_pmd_clear,
1930 #endif	/* CONFIG_X86_PAE */
1931 	.set_pud = xen_set_pud_hyper,
1932 
1933 	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
1934 	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
1935 
1936 #if PAGETABLE_LEVELS == 4
1937 	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
1938 	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
1939 	.set_pgd = xen_set_pgd_hyper,
1940 
1941 	.alloc_pud = xen_alloc_pmd_init,
1942 	.release_pud = xen_release_pmd_init,
1943 #endif	/* PAGETABLE_LEVELS == 4 */
1944 
1945 	.activate_mm = xen_activate_mm,
1946 	.dup_mmap = xen_dup_mmap,
1947 	.exit_mmap = xen_exit_mmap,
1948 
1949 	.lazy_mode = {
1950 		.enter = paravirt_enter_lazy_mmu,
1951 		.leave = xen_leave_lazy_mmu,
1952 	},
1953 
1954 	.set_fixmap = xen_set_fixmap,
1955 };
1956 
1957 
1958 #ifdef CONFIG_XEN_DEBUG_FS
1959 
1960 static struct dentry *d_mmu_debug;
1961 
1962 static int __init xen_mmu_debugfs(void)
1963 {
1964 	struct dentry *d_xen = xen_init_debugfs();
1965 
1966 	if (d_xen == NULL)
1967 		return -ENOMEM;
1968 
1969 	d_mmu_debug = debugfs_create_dir("mmu", d_xen);
1970 
1971 	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
1972 
1973 	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
1974 	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
1975 			   &mmu_stats.pgd_update_pinned);
1976 	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
1977 			   &mmu_stats.pgd_update_batched);
1978 
1979 	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
1980 	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
1981 			   &mmu_stats.pud_update_pinned);
1982 	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
1983 			   &mmu_stats.pud_update_batched);
1984 
1985 	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
1986 	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
1987 			   &mmu_stats.pmd_update_pinned);
1988 	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
1989 			   &mmu_stats.pmd_update_batched);
1990 
1991 	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
1992 //	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
1993 //			   &mmu_stats.pte_update_pinned);
1994 	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
1995 			   &mmu_stats.pte_update_batched);
1996 
1997 	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
1998 	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
1999 			   &mmu_stats.mmu_update_extended);
2000 	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
2001 				     mmu_stats.mmu_update_histo, 20);
2002 
2003 	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
2004 	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
2005 			   &mmu_stats.set_pte_at_batched);
2006 	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
2007 			   &mmu_stats.set_pte_at_current);
2008 	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
2009 			   &mmu_stats.set_pte_at_kernel);
2010 
2011 	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
2012 	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
2013 			   &mmu_stats.prot_commit_batched);
2014 
2015 	return 0;
2016 }
2017 fs_initcall(xen_mmu_debugfs);
2018 
2019 #endif	/* CONFIG_XEN_DEBUG_FS */
2020