/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled multiple times, once per guest
 * pte format (64-bit, 32-bit, and EPT).
 */
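
/*
 * For reference, mmu.c instantiates this template roughly as follows
 * (a sketch of the include pattern, not copied verbatim from mmu.c):
 *
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */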

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
	#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
	#define PT_HAVE_ACCESSED_DIRTY(mmu) true
	#define CMPXCHG cmpxchg
#elif PTTYPE == PTTYPE_EPT
	#define pt_element_t u64
	#define guest_walker guest_walkerEPT
	#define FNAME(name) ept_##name
	#define PT_BASE_ADDR_MASK GUEST_PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#define PT_GUEST_DIRTY_SHIFT 9
	#define PT_GUEST_ACCESSED_SHIFT 8
	#define PT_HAVE_ACCESSED_DIRTY(mmu) ((mmu)->ept_ad)
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
	#error Invalid PTTYPE value
#endif

#define PT_GUEST_DIRTY_MASK    (1 << PT_GUEST_DIRTY_SHIFT)
#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)
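
/*
 * Worked example (assuming the standard x86 PTE layout, where
 * PT_DIRTY_SHIFT == 6 and PT_ACCESSED_SHIFT == 5): for PTTYPE == 64 the
 * masks above evaluate to 0x40 and 0x20.  For PTTYPE_EPT, which keeps its
 * A/D bits higher up, they evaluate to 1 << 9 == 0x200 and 1 << 8 == 0x100.
 */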

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
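/*
 * Note on indexing: the per-level arrays below are indexed by (level - 1),
 * so ptes[0] is the PG_LEVEL_4K leaf and ptes[max_level - 1] is the root
 * (see e.g. walker->ptes[walker->level - 1] in the walker code below).
 */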
struct guest_walker {
	int level;
	unsigned max_level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	bool pte_writable[PT_MAX_FULL_LEVELS];
	unsigned int pt_access[PT_MAX_FULL_LEVELS];
	unsigned int pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
					     unsigned gpte)
{
	unsigned mask;

	/* dirty bit is not supported, so no need to track it */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return;

	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	*access &= mask;
}
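
/*
 * Worked example of the shift trick above (assuming PTTYPE == 64, where
 * PT_GUEST_DIRTY_SHIFT == 6 and PT_WRITABLE_SHIFT == 1): gpte >> 5 moves
 * the dirty bit into the writable position, so a clean gpte yields
 * mask == ~ACC_WRITE_MASK (write access stripped) while a dirty gpte
 * yields mask == ~0 (write access preserved).
 */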

static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
	return pte & PT_PRESENT_MASK;
#else
	return pte & 7;
#endif
}

static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
{
#if PTTYPE != PTTYPE_EPT
	return false;
#else
	return __is_bad_mt_xwr(rsvd_check, gpte);
#endif
}

static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{
	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
	       FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
}

static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
	if (likely(npages == 1)) {
		table = kmap_atomic(page);
		ret = CMPXCHG(&table[index], orig_pte, new_pte);
		kunmap_atomic(table);

		kvm_release_page_dirty(page);
	} else {
		struct vm_area_struct *vma;
		unsigned long vaddr = (unsigned long)ptep_user & PAGE_MASK;
		unsigned long pfn;
		unsigned long paddr;

		mmap_read_lock(current->mm);
		vma = find_vma_intersection(current->mm, vaddr, vaddr + PAGE_SIZE);
		if (!vma || !(vma->vm_flags & VM_PFNMAP)) {
			mmap_read_unlock(current->mm);
			return -EFAULT;
		}
		pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		paddr = pfn << PAGE_SHIFT;
		table = memremap(paddr, PAGE_SIZE, MEMREMAP_WB);
		if (!table) {
			mmap_read_unlock(current->mm);
			return -EFAULT;
		}
		ret = CMPXCHG(&table[index], orig_pte, new_pte);
		memunmap(table);
		mmap_read_unlock(current->mm);
	}

	return (ret != orig_pte);
}
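
/*
 * Note on the return value: cmpxchg_gpte() returns 0 when the gpte was
 * atomically updated, 1 when the gpte changed under us (the walker must
 * retry), and -EFAULT when the guest page table could not be mapped.
 */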

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *spte,
				  u64 gpte)
{
	if (!FNAME(is_present_gpte)(gpte))
		goto no_present;

	/* Prefetch a non-accessed gpte only if the accessed bit is unsupported. */
	if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
	    !(gpte & PT_GUEST_ACCESSED_MASK))
		goto no_present;

	if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}

/*
 * For PTTYPE_EPT, a page table can be executable but not readable on
 * supported processors.  Therefore, set_spte does not automatically set
 * bit 0 if execute-only is supported.  Here, we repurpose ACC_USER_MASK
 * to signify readability, since it isn't used in the EPT case.
 */
static inline unsigned FNAME(gpte_access)(u64 gpte)
{
	unsigned access;
#if PTTYPE == PTTYPE_EPT
	access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
		((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
		((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
	BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
	BUILD_BUG_ON(ACC_EXEC_MASK != 1);
	access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
	/* Combine NX with P (which is set here) to get ACC_EXEC_MASK.  */
	access ^= (gpte >> PT64_NX_SHIFT);
#endif

	return access;
}
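
/*
 * Worked example of the NX trick above (assuming the usual PT64_NX_SHIFT
 * of 63): for a present gpte, bit 0 of access starts out as P == 1, and
 * XORing in the shifted NX bit clears it when NX == 1.  So a present,
 * no-execute gpte yields ACC_EXEC_MASK == 0, while a present, executable
 * gpte keeps bit 0 set, matching BUILD_BUG_ON(ACC_EXEC_MASK != 1) above.
 */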

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     gpa_t addr, int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	/* dirty/accessed bits are not supported, so no need to update them */
	if (!PT_HAVE_ACCESSED_DIRTY(mmu))
		return 0;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_GUEST_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_GUEST_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault &&
				!(pte & PT_GUEST_DIRTY_MASK)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
			if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
				return -EINVAL;
#endif
			pte |= PT_GUEST_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		/*
		 * If the slot is read-only, simply do not process the accessed
		 * and dirty bits.  This is the correct thing to do if the slot
		 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
		 * are only supported if the accessed and dirty bits are already
		 * set in the ROM (so that MMIO writes are never needed).
		 *
		 * Note that NPT does not allow this at all and faults, since
		 * it always wants nested page table entries for the guest
		 * page tables to be writable.  And EPT works but will simply
		 * overwrite the read-only memory to set the accessed and dirty
		 * bits.
		 */
		if (unlikely(!walker->pte_writable[level - 1]))
			continue;

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		if (ret)
			return ret;

		kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}
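
/*
 * Note on the return value: 0 means the walk can proceed, a positive
 * value means a marked gpte changed under us (the caller retries the
 * whole walk), and a negative value is a fatal error (see the
 * update_accessed_dirty_bits() call site in walk_addr_generic() below).
 */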

static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned pkeys = 0;
#if PTTYPE == 64
	pte_t pte = {.pte = gpte};

	pkeys = pte_flags_pkey(pte_flags(pte));
#endif
	return pkeys;
}

static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
				       unsigned int level, unsigned int gpte)
{
	/*
	 * For EPT and PAE paging (both variants), bit 7 is either reserved
	 * at all levels or indicates a huge page (ignoring CR3/EPTP).  In
	 * either case, bit 7 being set terminates the walk.
	 */
#if PTTYPE == 32
	/*
	 * 32-bit paging requires special handling because bit 7 is ignored if
	 * CR4.PSE=0, not reserved.  Clear bit 7 in the gpte if the level is
	 * greater than the last level for which bit 7 is the PAGE_SIZE bit.
	 *
	 * The RHS has bit 7 set iff level < (2 + PSE).  If it is clear, bit 7
	 * is not reserved and does not indicate a large page at this level,
	 * so clear PT_PAGE_SIZE_MASK in gpte if that is the case.
	 */
	gpte &= level - (PT32_ROOT_LEVEL + mmu->mmu_role.ext.cr4_pse);
#endif
	/*
	 * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
	 * iff level <= PG_LEVEL_4K, which for our purpose means
	 * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
	 */
	gpte |= level - PG_LEVEL_4K - 1;

	return gpte & PT_PAGE_SIZE_MASK;
}
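
/*
 * Worked example of the arithmetic above (a sketch, assuming
 * PG_LEVEL_4K == 1 and PT32_ROOT_LEVEL == 2): at level 1,
 * level - PG_LEVEL_4K - 1 underflows to ~0u, so bit 7 is forced on and
 * the walk terminates.  At level 2 with CR4.PSE=1, level - (2 + 1) also
 * underflows to ~0u, leaving the guest's PSE bit intact; with CR4.PSE=0
 * the subtraction yields 0 and the ignored bit 7 is masked away.
 */
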
/*
 * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gpa_t addr, u32 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	u64 pt_access, pte_access;
	unsigned index, accessed_dirty, pte_pkey;
	unsigned nested_access;
	gpa_t pte_gpa;
	bool have_ad;
	int offset;
	u64 walk_nx_mask = 0;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte           = mmu->get_guest_pgd(vcpu);
	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
	walk_nx_mask = 1ULL << PT64_NX_SHIFT;
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!FNAME(is_present_gpte)(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

	/*
	 * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
	 * by the MOV to CR instruction are treated as reads and do not cause the
	 * processor to set the dirty flag in any EPT paging-structure entry.
	 */
	nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;

	pte_access = ~0;
	++walker->level;

	do {
		unsigned long host_addr;

		pt_access = pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);
		table_gfn = gpte_to_gfn(pte);
		offset    = index * sizeof(pt_element_t);
		pte_gpa   = gfn_to_gpa(table_gfn) + offset;

		BUG_ON(walker->level < 1);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      nested_access,
					      &walker->fault);

		/*
		 * FIXME: This can happen if emulation (e.g. of an INS/OUTS
		 * instruction) triggers a nested page fault.  The exit
		 * qualification / exit info field will incorrectly have
		 * "guest page access" as the nested page fault's cause,
		 * instead of "guest page structure access".  To fix this,
		 * the x86_exception struct should be augmented with enough
		 * information to fix the exit_qualification or exit_info_1
		 * fields.
		 */
		if (unlikely(real_gpa == UNMAPPED_GVA))
			return 0;

		host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
					    &walker->pte_writable[walker->level - 1]);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__get_user(pte, ptep_user)))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		/*
		 * Inverting the NX bit lets us AND it like the other
		 * permission bits.
		 */
		pte_access = pt_access & (pte ^ walk_nx_mask);

		if (unlikely(!FNAME(is_present_gpte)(pte)))
			goto error;

		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
			errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		walker->ptes[walker->level - 1] = pte;

		/* Convert to ACC_*_MASK flags for struct guest_walker.  */
		walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
	} while (!FNAME(is_last_gpte)(mmu, walker->level, pte));

	pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
	accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

	/* Convert to ACC_*_MASK flags for struct guest_walker.  */
	walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
	errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
	if (unlikely(errcode))
		goto error;

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty.
		 * For modes without A/D bits support, accessed_dirty will
		 * always be clear.
		 */
		accessed_dirty &= pte >>
			(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
							addr, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, walker->pte_access,
		 walker->pt_access[walker->level - 1]);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
	/*
	 * Use PFERR_RSVD_MASK in error_code to tell if an EPT
	 * misconfiguration needs to be injected.  The detection is
	 * done by is_rsvd_bits_set() above.
	 *
	 * We set up the value of exit_qualification to inject:
	 * [2:0] - Derive from the access bits.  The exit_qualification might
	 *         be out of date if it is serving an EPT misconfiguration.
	 * [5:3] - Calculated by the page walk of the guest EPT page tables
	 * [8:7] - Derived from bits [8:7] of the real exit_qualification
	 *
	 * The other bits are set to 0.
	 */
	if (!(errcode & PFERR_RSVD_MASK)) {
		vcpu->arch.exit_qualification &= 0x180;
		if (write_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
		if (user_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
		if (fetch_fault)
			vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;
		vcpu->arch.exit_qualification |= (pte_access & 0x7) << 3;
	}
#endif
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
	walker->fault.async_page_fault = false;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gpa_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
					access);
}

#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
#endif

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
	struct kvm_memory_slot *slot;
	unsigned pte_access;
	gfn_t gfn;
	kvm_pfn_t pfn;

	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & FNAME(gpte_access)(gpte);
	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (!slot)
		return false;

	pfn = gfn_to_pfn_memslot_atomic(slot, gfn);
	if (is_error_pfn(pfn))
		return false;

	mmu_set_spte(vcpu, slot, spte, pte_access, gfn, pfn, NULL);
	kvm_release_pfn_clean(pfn);
	return true;
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PG_LEVEL_4K) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}
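
/*
 * Worked example of the prefetch-group alignment above (assuming
 * PTE_PREFETCH_NUM == 8 and 8-byte ptes): mask == 63, so base_gpa is
 * pte_gpa rounded down to a 64-byte boundary and one atomic read fills
 * the whole prefetch_ptes[] window around the gpte being checked.
 */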

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = sptep_to_sp(sptep);

	if (sp->role.level > PG_LEVEL_4K)
		return;

	/*
	 * If addresses are being invalidated, skip prefetching to avoid
	 * accidentally prefetching those addresses.
	 */
	if (unlikely(vcpu->kvm->mmu_notifier_count))
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}
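
/*
 * Worked example of the index masking above (again assuming
 * PTE_PREFETCH_NUM == 8): a sptep at slot 13 of sp->spt gives
 * i == (13 & ~7) == 8, so slots 8..15 are considered and gptep[i]
 * lines up with the guest ptes read into prefetch_ptes[] by
 * gpte_changed().
 */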

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; RET_PF_EMULATE is returned to indicate
 * this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
			 struct guest_walker *gw)
{
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned int direct_access, access;
	int top_level, ret;
	gfn_t base_gfn = fault->gfn;

	WARN_ON_ONCE(gw->gfn != base_gfn);
	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu->root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, fault->addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			access = gw->pt_access[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, fault->addr,
					      it.level-1, false, access);
			/*
			 * We must synchronize the pagetable before linking it
			 * because the guest doesn't need to flush the TLB when
			 * a gpte is changed from non-present to present.
			 * Otherwise, the guest may use the wrong mapping.
			 *
			 * For PG_LEVEL_4K, kvm_mmu_get_page() has already
			 * synchronized it transiently via kvm_sync_page().
			 *
			 * For higher-level page tables, we synchronize via
			 * the slower mmu_sync_children().  If it needs to
			 * break out, some progress has been made; return
			 * RET_PF_RETRY and retry on the next #PF.
			 * KVM_REQ_MMU_SYNC is not necessary but it
			 * expedites the process.
			 */
			if (sp->unsync_children &&
			    mmu_sync_children(vcpu, sp, false))
				return RET_PF_RETRY;
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(vcpu, it.sptep, sp);
	}

	kvm_mmu_hugepage_adjust(vcpu, fault);

	trace_kvm_mmu_spte_requested(fault);

	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
		clear_sp_write_flooding_count(it.sptep);

		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		if (fault->nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(fault, *it.sptep, it.level);

		base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == fault->goal_level)
			break;

		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (!is_shadow_present_pte(*it.sptep)) {
			sp = kvm_mmu_get_page(vcpu, base_gfn, fault->addr,
					      it.level - 1, true, direct_access);
			link_shadow_page(vcpu, it.sptep, sp);
			if (fault->huge_page_disallowed &&
			    fault->req_level >= it.level)
				account_huge_nx_page(vcpu->kvm, sp);
		}
	}

	if (WARN_ON_ONCE(it.level != fault->goal_level))
		return -EFAULT;

	ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access,
			   base_gfn, fault->pfn, fault);
	if (ret == RET_PF_SPURIOUS)
		return ret;

	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;

out_gpte_changed:
	return RET_PF_RETRY;
}

/*
 * Check whether the mapped gfn can write its own page table through the
 * current mapping.
 *
 * This is a helper for FNAME(page_fault).  When the guest uses a large
 * page to map a writable gfn that is itself used as a page table, we
 * should force KVM to map it with a small page, because a new shadow
 * page will be created when KVM shadows that page table, which stops
 * KVM from using the large page anyway.  Doing this early avoids
 * unnecessary #PFs and emulation.
 *
 * @write_fault_to_shadow_pgtable will be set to true if the faulting
 * gfn is currently used as its own page table.
 *
 * Note: the PDPT page table is not checked for 32-bit PAE guests.  That
 * is fine, because the PDPT is always shadowed; this means we cannot use
 * a large page to map a gfn that is used as the PDPT.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, bool user_fault,
			      bool *write_fault_to_shadow_pgtable)
{
	int level;
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	    (!is_cr0_wp(vcpu->arch.mmu) && !user_fault)))
		return false;

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
	}

	return self_changed;
}
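
/*
 * Worked example of the mask logic above (a sketch, assuming a 2M guest
 * mapping, i.e. KVM_PAGES_PER_HPAGE(2) == 512): mask clears the low 9
 * gfn bits, so (walker->gfn ^ table_gfn) & mask == 0 exactly when a
 * guest page table lives inside the huge page being mapped, and a zero
 * XOR result (gfn == table_gfn) flags a write to the gfn's own pte.
 */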

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: an RET_PF_* status (e.g. RET_PF_EMULATE if we need to emulate
 *           the instruction, RET_PF_RETRY to retry the fault), or a
 *           negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct guest_walker walker;
	int r;
	unsigned long mmu_seq;
	bool is_self_change_mapping;

	pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code);
	WARN_ON_ONCE(fault->is_tdp);

	/*
	 * Look up the guest pte for the faulting address.
	 * If PFEC.RSVD is set, this is a shadow page fault.
	 * The bit needs to be cleared before walking guest page tables.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, fault->addr,
			     fault->error_code & ~PFERR_RSVD_MASK);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!fault->prefetch)
			kvm_inject_emulated_page_fault(vcpu, &walker.fault);

		return RET_PF_RETRY;
	}

	fault->gfn = walker.gfn;
	fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);

	if (page_fault_handle_page_track(vcpu, fault)) {
		shadow_page_table_clear_flood(vcpu, fault->addr);
		return RET_PF_EMULATE;
	}

	r = mmu_topup_memory_caches(vcpu, true);
	if (r)
		return r;

	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, fault->user, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (is_self_change_mapping)
		fault->max_level = PG_LEVEL_4K;
	else
		fault->max_level = walker.level;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (kvm_faultin_pfn(vcpu, fault, &r))
		return r;

	if (handle_abnormal_pfn(vcpu, fault, walker.pte_access, &r))
		return r;

	/*
	 * Do not change pte_access if the pfn is an mmio page, otherwise
	 * we will cache the incorrect access into the mmio spte.
	 */
	if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) &&
	    !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page so that the
		 * kernel can write to it when cr0.wp=0, then we should
		 * prevent the kernel from executing it if SMEP is enabled.
		 */
		if (is_cr4_smep(vcpu->arch.mmu))
			walker.pte_access &= ~ACC_EXEC_MASK;
	}

	r = RET_PF_RETRY;
	write_lock(&vcpu->kvm->mmu_lock);
	if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	r = make_mmu_pages_available(vcpu);
	if (r)
		goto out_unlock;
	r = FNAME(fetch)(vcpu, fault, &walker);
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);

out_unlock:
	write_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(fault->pfn);
	return r;
}

static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PG_LEVEL_4K);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
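
/*
 * Worked example of the quadrant offset above (a sketch, assuming
 * PT64_LEVEL_BITS == 9): a 32-bit guest page table holds 1024 4-byte
 * ptes, but a shadow page holds only 512 sptes, so each guest table is
 * shadowed by two pages; quadrant 1 shifts the gpa by 512 entries
 * (512 * sizeof(pt_element_t) bytes) into the guest page.
 */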

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	u64 old_spte;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * There is no need to check the return value here;
	 * rmap_can_add() lets us skip the pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu, true);

	if (!VALID_PAGE(root_hpa)) {
		WARN_ON(1);
		return;
	}

	write_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = sptep_to_sp(sptep);
		old_spte = *sptep;
		if (is_last_spte(old_spte, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			mmu_page_zap_pte(vcpu->kvm, sp, sptep, NULL);
			if (is_shadow_present_pte(old_spte))
				kvm_flush_remote_tlbs_with_address(vcpu->kvm,
					sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
						       sizeof(pt_element_t)))
				break;

			FNAME(prefetch_gpte)(vcpu, sp, sptep, gpte, false);
		}

		if (!sp->unsync_children)
			break;
	}
	write_unlock(&vcpu->kvm->mmu_lock);
}

/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, addr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= addr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

#if PTTYPE != PTTYPE_EPT
/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

#ifndef CONFIG_X86_64
	/* A 64-bit GVA should be impossible on 32-bit KVM. */
	WARN_ON_ONCE(vaddr >> 32);
#endif

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
#endif

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Returns
 * < 0: the sp should be zapped
 *   0: the sp is synced and no tlb flushing is required
 * > 0: the sp is synced and tlb flushing is required
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base;
	int i;
	bool host_writable;
	gpa_t first_pte_gpa;
	bool flush = false;

	/*
	 * Ignore various flags when verifying that it's safe to sync a shadow
	 * page using the current MMU context.
	 *
	 *  - level: not part of the overall MMU role and will never match as the MMU's
	 *           level tracks the root level
	 *  - access: updated based on the new guest PTE
	 *  - quadrant: not part of the overall MMU role (similar to level)
	 */
	const union kvm_mmu_page_role sync_role_ign = {
		.level = 0xf,
		.access = 0x7,
		.quadrant = 0x3,
	};

	/*
	 * Direct pages can never be unsync, and KVM should never attempt to
	 * sync a shadow page for a different MMU context, e.g. if the role
	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
	 * reserved bits checks will be wrong, etc...
	 */
	if (WARN_ON_ONCE(sp->role.direct ||
			 (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word))
		return -1;

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		u64 *sptep, spte;
		struct kvm_memory_slot *slot;
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
					       sizeof(pt_element_t)))
			return -1;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			flush = true;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= FNAME(gpte_access)(gpte);
		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			flush = true;
			continue;
		}

		sptep = &sp->spt[i];
		spte = *sptep;
		host_writable = spte & shadow_host_writable_mask;
		slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
		make_spte(vcpu, sp, slot, pte_access, gfn,
			  spte_to_pfn(spte), spte, true, false,
			  host_writable, &spte);

		flush |= mmu_spte_update(sptep, spte);
	}

	return flush;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY
1159