/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

/*
 * The MMU needs to be able to access/walk 32-bit and 64-bit guest page tables,
 * as well as guest EPT tables, so the code in this file is compiled thrice,
 * once per guest PTE type.  The per-type defines are #undef'd at the end.
 */

#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
#define PT_LEVEL_BITS 9
#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
#define PT_HAVE_ACCESSED_DIRTY(mmu) true
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
#define PT_MAX_FULL_LEVELS 2
#endif
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
#define FNAME(name) paging##32_##name
#define PT_LEVEL_BITS 10
#define PT_MAX_FULL_LEVELS 2
#define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
#define PT_HAVE_ACCESSED_DIRTY(mmu) true

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
#elif PTTYPE == PTTYPE_EPT
#define pt_element_t u64
#define guest_walker guest_walkerEPT
#define FNAME(name) ept_##name
#define PT_LEVEL_BITS 9
#define PT_GUEST_DIRTY_SHIFT 9
#define PT_GUEST_ACCESSED_SHIFT 8
#define PT_HAVE_ACCESSED_DIRTY(mmu) (!(mmu)->cpu_role.base.ad_disabled)
#define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#else
#error Invalid PTTYPE value
#endif
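
/*
 * Illustration (not part of this file's logic): mmu.c instantiates this
 * template once per PTE type by defining PTTYPE before each inclusion,
 * roughly as follows (PTTYPE_EPT is an arbitrary tag defined in mmu.c):
 *
 *	#define PTTYPE_EPT 18
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */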

/* Common logic, but per-type values.  These also need to be undefined. */
#define PT_BASE_ADDR_MASK       ((pt_element_t)(((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
#define PT_LVL_ADDR_MASK(lvl)   __PT_LVL_ADDR_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
#define PT_LVL_OFFSET_MASK(lvl) __PT_LVL_OFFSET_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS)
#define PT_INDEX(addr, lvl)     __PT_INDEX(addr, lvl, PT_LEVEL_BITS)

#define PT_GUEST_DIRTY_MASK    (1 << PT_GUEST_DIRTY_SHIFT)
#define PT_GUEST_ACCESSED_MASK (1 << PT_GUEST_ACCESSED_SHIFT)

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PG_LEVEL_4K)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;                      /* level at which the walk ended */
        unsigned max_level;             /* level at which the walk started */
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];          /* gfn of the table at each level */
        pt_element_t ptes[PT_MAX_FULL_LEVELS];        /* gPTE read at each level */
        pt_element_t prefetch_ptes[PTE_PREFETCH_NUM]; /* neighboring gPTEs, for prefetch */
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];            /* GPA of the gPTE at each level */
        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS]; /* host VA of each gPTE */
        bool pte_writable[PT_MAX_FULL_LEVELS];  /* host mapping of gPTE is writable */
        unsigned int pt_access[PT_MAX_FULL_LEVELS];   /* ACC_* flags at each level */
        unsigned int pte_access;        /* effective ACC_* flags of the translation */
        gfn_t gfn;                      /* translated gfn */
        struct x86_exception fault;     /* fault info, valid if the walk failed */
};

#if PTTYPE == 32
static inline gfn_t pse36_gfn_delta(u32 gpte)
{
        int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

        return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
#endif
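
/*
 * PSE-36 background for the helper above: on a 4 MiB PDE, gpte bits 16:13
 * supply bits 35:32 of the physical address (the four extra bits of a 36-bit
 * physical address space).  pse36_gfn_delta() moves those bits into gfn
 * position, e.g. gpte bits 16:13 == 0x5 yields a gfn delta of
 * 0x5 << (32 - PAGE_SHIFT), i.e. physical address bits 35:32 == 0x5.
 */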

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
                                             unsigned gpte)
{
        unsigned mask;

        /* dirty bit is not supported, so no need to track it */
        if (!PT_HAVE_ACCESSED_DIRTY(mmu))
                return;

        BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

        mask = (unsigned)~ACC_WRITE_MASK;
        /* Allow write access to dirty gptes */
        mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
                PT_WRITABLE_MASK;
        *access &= mask;
}
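
/*
 * Worked example for FNAME(protect_clean_gpte)() above (assuming the x86
 * layout where PT_WRITABLE_SHIFT is 1): for a clean gpte (D=0) the shifted
 * term is 0, so mask == ~ACC_WRITE_MASK and the walker's write permission is
 * stripped; the resulting write-protected spte guarantees a write fault,
 * which sets D before write access is granted.  For a dirty gpte (D=1) the
 * shift lands the dirty bit on ACC_WRITE_MASK and writes remain allowed.
 */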

static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
        return pte & PT_PRESENT_MASK;
#else
        /* An EPT entry is present if any of the R/W/X bits (2:0) is set. */
        return pte & 7;
#endif
}

static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
{
#if PTTYPE != PTTYPE_EPT
        return false;
#else
        return __is_bad_mt_xwr(rsvd_check, gpte);
#endif
}

static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{
        return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||
               FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
}

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
                                         struct kvm_mmu_page *sp, u64 *spte,
                                         u64 gpte)
{
        if (!FNAME(is_present_gpte)(gpte))
                goto no_present;

        /* Prefetch only accessed entries (unless A/D bits are disabled). */
        if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
            !(gpte & PT_GUEST_ACCESSED_MASK))
                goto no_present;

        if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
                goto no_present;

        return false;

no_present:
        drop_spte(vcpu->kvm, spte);
        return true;
}

/*
 * For PTTYPE_EPT, a page table can be executable but not readable
 * on supported processors.  Therefore, set_spte does not automatically
 * set bit 0 if execute only is supported.  Here, we repurpose ACC_USER_MASK
 * to signify readability, since it isn't used in the EPT case.
 */
static inline unsigned FNAME(gpte_access)(u64 gpte)
{
        unsigned access;
#if PTTYPE == PTTYPE_EPT
        access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
                ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
                ((gpte & VMX_EPT_READABLE_MASK) ? ACC_USER_MASK : 0);
#else
        BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
        BUILD_BUG_ON(ACC_EXEC_MASK != 1);
        access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
        /* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */
        access ^= (gpte >> PT64_NX_SHIFT);
#endif

        return access;
}
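
/*
 * For the non-EPT case above, exec permission works out to P & !NX: bit 0 of
 * 'access' starts as the present bit, and XORing in (gpte >> PT64_NX_SHIFT)
 * clears it when NX is set (P=1, NX=1 -> 0) and leaves it set otherwise
 * (P=1, NX=0 -> 1), which is exactly ACC_EXEC_MASK per the BUILD_BUG_ON()s.
 */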

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                             struct kvm_mmu *mmu,
                                             struct guest_walker *walker,
                                             gpa_t addr, int write_fault)
{
        unsigned level, index;
        pt_element_t pte, orig_pte;
        pt_element_t __user *ptep_user;
        gfn_t table_gfn;
        int ret;

        /* dirty/accessed bits are not supported, so no need to update them */
        if (!PT_HAVE_ACCESSED_DIRTY(mmu))
                return 0;

        for (level = walker->max_level; level >= walker->level; --level) {
                pte = orig_pte = walker->ptes[level - 1];
                table_gfn = walker->table_gfn[level - 1];
                ptep_user = walker->ptep_user[level - 1];
                index = offset_in_page(ptep_user) / sizeof(pt_element_t);
                if (!(pte & PT_GUEST_ACCESSED_MASK)) {
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
                        pte |= PT_GUEST_ACCESSED_MASK;
                }
                if (level == walker->level && write_fault &&
                    !(pte & PT_GUEST_DIRTY_MASK)) {
                        trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
#if PTTYPE == PTTYPE_EPT
                        if (kvm_x86_ops.nested_ops->write_log_dirty(vcpu, addr))
                                return -EINVAL;
#endif
                        pte |= PT_GUEST_DIRTY_MASK;
                }
                if (pte == orig_pte)
                        continue;

                /*
                 * If the slot is read-only, simply do not process the accessed
                 * and dirty bits.  This is the correct thing to do if the slot
                 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
                 * are only supported if the accessed and dirty bits are already
                 * set in the ROM (so that MMIO writes are never needed).
                 *
                 * Note that NPT does not allow this at all and faults, since
                 * it always wants nested page table entries for the guest
                 * page tables to be writable.  And EPT works but will simply
                 * overwrite the read-only memory to set the accessed and dirty
                 * bits.
                 */
                if (unlikely(!walker->pte_writable[level - 1]))
                        continue;

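                /*
                 * __try_cmpxchg_user() returns 0 on success, 1 if the compare
                 * failed (i.e. the gPTE changed under us, in which case the
                 * caller retries the walk), and -EFAULT if the user access
                 * faulted.
                 */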
                ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);
                if (ret)
                        return ret;

                kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
                walker->ptes[level - 1] = pte;
        }
        return 0;
}

static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
{
        unsigned pkeys = 0;
#if PTTYPE == 64
        pte_t pte = {.pte = gpte};

        pkeys = pte_flags_pkey(pte_flags(pte));
#endif
        return pkeys;
}

static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
                                       unsigned int level, unsigned int gpte)
{
        /*
         * For EPT and PAE paging (both variants), bit 7 is either reserved at
         * all levels or indicates a huge page (ignoring CR3/EPTP).  In either
         * case, bit 7 being set terminates the walk.
         */
#if PTTYPE == 32
        /*
         * 32-bit paging requires special handling because bit 7 is ignored if
         * CR4.PSE=0, not reserved.  Clear bit 7 in the gpte if the level is
         * greater than the last level for which bit 7 is the PAGE_SIZE bit.
         *
         * The RHS has bit 7 set iff level < (2 + PSE).  If it is clear, bit 7
         * is not reserved and does not indicate a large page at this level,
         * so clear PT_PAGE_SIZE_MASK in gpte if that is the case.
         */
        gpte &= level - (PT32_ROOT_LEVEL + mmu->cpu_role.ext.cr4_pse);
#endif
        /*
         * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
         * iff level <= PG_LEVEL_4K, which for our purpose means
         * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
         * E.g. level == PG_LEVEL_4K gives level - PG_LEVEL_4K - 1 == -1,
         * i.e. all ones, so bit 7 is forced on.
         */
        gpte |= level - PG_LEVEL_4K - 1;

        return gpte & PT_PAGE_SIZE_MASK;
}
/*
 * Fetch a guest pte for a guest virtual address, or for an L2's GPA.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                    gpa_t addr, u64 access)
{
        int ret;
        pt_element_t pte;
        pt_element_t __user *ptep_user;
        gfn_t table_gfn;
        u64 pt_access, pte_access;
        unsigned index, accessed_dirty, pte_pkey;
        u64 nested_access;
        gpa_t pte_gpa;
        bool have_ad;
        int offset;
        u64 walk_nx_mask = 0;
        const int write_fault = access & PFERR_WRITE_MASK;
        const int user_fault  = access & PFERR_USER_MASK;
        const int fetch_fault = access & PFERR_FETCH_MASK;
        u16 errcode = 0;
        gpa_t real_gpa;
        gfn_t gfn;

        trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
        walker->level = mmu->cpu_role.base.level;
        pte           = kvm_mmu_get_guest_pgd(vcpu, mmu);
        have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);

#if PTTYPE == 64
        walk_nx_mask = 1ULL << PT64_NX_SHIFT;
        if (walker->level == PT32E_ROOT_LEVEL) {
                pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!FNAME(is_present_gpte)(pte))
                        goto error;
                --walker->level;
        }
#endif
        walker->max_level = walker->level;

        /*
         * FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
         * by the MOV to CR instruction are treated as reads and do not cause the
         * processor to set the dirty flag in any EPT paging-structure entry.
         */
        nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;

        pte_access = ~0;

        /*
         * Queue a page fault for injection if this assertion fails, as callers
         * assume that walker.fault contains sane info on a walk failure.  I.e.
         * avoid making the situation worse by inducing even worse badness
         * between when the assertion fails and when KVM kicks the vCPU out to
         * userspace (because the VM is bugged).
         */
        if (KVM_BUG_ON(is_long_mode(vcpu) && !is_pae(vcpu), vcpu->kvm))
                goto error;

        ++walker->level;

        do {
                struct kvm_memory_slot *slot;
                unsigned long host_addr;

                pt_access = pte_access;
                --walker->level;

                index = PT_INDEX(addr, walker->level);
                table_gfn = gpte_to_gfn(pte);
                offset = index * sizeof(pt_element_t);
                pte_gpa = gfn_to_gpa(table_gfn) + offset;

                BUG_ON(walker->level < 1);
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;

                real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(table_gfn),
                                             nested_access, &walker->fault);

                /*
                 * FIXME: This can happen if emulation (of an INS/OUTS
                 * instruction) triggers a nested page fault.  The exit
                 * qualification / exit info field will incorrectly have
                 * "guest page access" as the nested page fault's cause,
                 * instead of "guest page structure access".  To fix this,
                 * the x86_exception struct should be augmented with enough
                 * information to fix the exit_qualification or exit_info_1
                 * fields.
                 */
                if (unlikely(real_gpa == INVALID_GPA))
                        return 0;

                slot = kvm_vcpu_gfn_to_memslot(vcpu, gpa_to_gfn(real_gpa));
                if (!kvm_is_visible_memslot(slot))
                        goto error;

                host_addr = gfn_to_hva_memslot_prot(slot, gpa_to_gfn(real_gpa),
                                                    &walker->pte_writable[walker->level - 1]);
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;

                ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
                if (unlikely(__get_user(pte, ptep_user)))
                        goto error;
                walker->ptep_user[walker->level - 1] = ptep_user;

                trace_kvm_mmu_paging_element(pte, walker->level);

                /*
                 * Inverting the NX bit lets us AND it like the other
                 * permission bits.
                 */
                pte_access = pt_access & (pte ^ walk_nx_mask);

                if (unlikely(!FNAME(is_present_gpte)(pte)))
                        goto error;

                if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
                        errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
                        goto error;
                }

                walker->ptes[walker->level - 1] = pte;

                /* Convert to ACC_*_MASK flags for struct guest_walker.  */
                walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
        } while (!FNAME(is_last_gpte)(mmu, walker->level, pte));

        pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
        accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;

        /* Convert to ACC_*_MASK flags for struct guest_walker.  */
        walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
        errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
        if (unlikely(errcode))
                goto error;

        gfn = gpte_to_gfn_lvl(pte, walker->level);
        gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

#if PTTYPE == 32
        if (walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
                gfn += pse36_gfn_delta(pte);
#endif

        real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
        if (real_gpa == INVALID_GPA)
                return 0;

        walker->gfn = real_gpa >> PAGE_SHIFT;

        if (!write_fault)
                FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
        else
                /*
                 * On a write fault, fold the dirty bit into accessed_dirty by
                 * shifting it from the dirty bit's position down into the
                 * accessed bit's position.  For modes without A/D bit
                 * support, accessed_dirty is always clear.
                 */
                accessed_dirty &= pte >>
                        (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);

        if (unlikely(!accessed_dirty)) {
                ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
                                                        addr, write_fault);
                if (unlikely(ret < 0))
                        goto error;
                else if (ret)
                        goto retry_walk;
        }

        return 1;

error:
        errcode |= write_fault | user_fault;
        if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
                errcode |= PFERR_FETCH_MASK;

        walker->fault.vector = PF_VECTOR;
        walker->fault.error_code_valid = true;
        walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
        /*
         * Use PFERR_RSVD_MASK in error_code to tell if an EPT
         * misconfiguration needs to be injected.  The detection is
         * done by is_rsvd_bits_set() above.
         *
         * We set up the value of exit_qualification to inject:
         * [2:0] - Derive from the access bits.  The exit_qualification might
         *         be out of date if it is serving an EPT misconfiguration.
         * [5:3] - Calculated by the page walk of the guest EPT page tables
         * [8:7] - Derived from [8:7] of the real exit_qualification
         *
         * The other bits are set to 0.
         */
        if (!(errcode & PFERR_RSVD_MASK)) {
                vcpu->arch.exit_qualification &= (EPT_VIOLATION_GVA_IS_VALID |
                                                  EPT_VIOLATION_GVA_TRANSLATED);
                if (write_fault)
                        vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_WRITE;
                if (user_fault)
                        vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_READ;
                if (fetch_fault)
                        vcpu->arch.exit_qualification |= EPT_VIOLATION_ACC_INSTR;

                /*
                 * Note, pte_access holds the raw RWX bits from the EPTE, not
                 * ACC_*_MASK flags!
                 */
                vcpu->arch.exit_qualification |= (pte_access & VMX_EPT_RWX_MASK) <<
                                                 EPT_VIOLATION_RWX_SHIFT;
        }
#endif
        walker->fault.address = addr;
        walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
        walker->fault.async_page_fault = false;

        trace_kvm_mmu_walker_error(walker->fault.error_code);
        return 0;
}

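/* Walk the guest page tables using the vCPU's current (root) MMU context. */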
static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gpa_t addr, u64 access)
{
        return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
                                        access);
}

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                     u64 *spte, pt_element_t gpte)
{
        struct kvm_memory_slot *slot;
        unsigned pte_access;
        gfn_t gfn;
        kvm_pfn_t pfn;

        if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
                return false;

        gfn = gpte_to_gfn(gpte);
        pte_access = sp->role.access & FNAME(gpte_access)(gpte);
        FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

        slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, pte_access & ACC_WRITE_MASK);
        if (!slot)
                return false;

        pfn = gfn_to_pfn_memslot_atomic(slot, gfn);
        if (is_error_pfn(pfn))
                return false;

        mmu_set_spte(vcpu, slot, spte, pte_access, gfn, pfn, NULL);
        kvm_release_pfn_clean(pfn);
        return true;
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
                                struct guest_walker *gw, int level)
{
        pt_element_t curr_pte;
        gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
        u64 mask;
        int r, index;

        if (level == PG_LEVEL_4K) {
                mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
                base_gpa = pte_gpa & ~mask;
                index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

                r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
                                gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
                curr_pte = gw->prefetch_ptes[index];
        } else
                r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
                                  &curr_pte, sizeof(curr_pte));

        return r || curr_pte != gw->ptes[level - 1];
}

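/*
 * Opportunistically map the gPTEs neighboring the faulting one, reusing the
 * naturally aligned batch that FNAME(gpte_changed)() read into
 * gw->prefetch_ptes for the PG_LEVEL_4K check.
 */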
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                                u64 *sptep)
{
        struct kvm_mmu_page *sp;
        pt_element_t *gptep = gw->prefetch_ptes;
        u64 *spte;
        int i;

        sp = sptep_to_sp(sptep);

        if (sp->role.level > PG_LEVEL_4K)
                return;

        /*
         * If addresses are being invalidated, skip prefetching to avoid
         * accidentally prefetching those addresses.
         */
        if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
                return;

        if (sp->role.direct)
                return __direct_pte_prefetch(vcpu, sp, sptep);

        i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
        spte = sp->spt + i;

        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
                if (spte == sptep)
                        continue;

                if (is_shadow_present_pte(*spte))
                        continue;

                if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i]))
                        break;
        }
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation, return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                        struct guest_walker *gw)
{
        struct kvm_mmu_page *sp = NULL;
        struct kvm_shadow_walk_iterator it;
        unsigned int direct_access, access;
        int top_level, ret;
        gfn_t base_gfn = fault->gfn;

        WARN_ON_ONCE(gw->gfn != base_gfn);
        direct_access = gw->pte_access;

        top_level = vcpu->arch.mmu->cpu_role.base.level;
        if (top_level == PT32E_ROOT_LEVEL)
                top_level = PT32_ROOT_LEVEL;
        /*
         * Verify that the top-level gpte is still there.  Since the page
         * is a root page, it is either write protected (and cannot be
         * changed from now on) or it is invalid (in which case, we don't
         * really care if it changes underneath us after this point).
         */
        if (FNAME(gpte_changed)(vcpu, gw, top_level))
                goto out_gpte_changed;

        if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
                goto out_gpte_changed;

        /*
         * Load a new root and retry the faulting instruction in the extremely
         * unlikely scenario that the guest root gfn became visible between
         * loading a dummy root and handling the resulting page fault, e.g. if
         * userspace creates a memslot in the interim.
         */
        if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
                kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);
                goto out_gpte_changed;
        }

        for_each_shadow_entry(vcpu, fault->addr, it) {
                gfn_t table_gfn;

                clear_sp_write_flooding_count(it.sptep);
                if (it.level == gw->level)
                        break;

                table_gfn = gw->table_gfn[it.level - 2];
                access = gw->pt_access[it.level - 2];
                sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
                                          false, access);

                if (sp != ERR_PTR(-EEXIST)) {
                        /*
                         * We must synchronize the pagetable before linking it
                         * because the guest doesn't need to flush tlb when
                         * the gpte is changed from non-present to present.
                         * Otherwise, the guest may use the wrong mapping.
                         *
                         * For PG_LEVEL_4K, kvm_mmu_get_page() has already
                         * synchronized it transiently via kvm_sync_page().
                         *
                         * For higher level pagetable, we synchronize it via
                         * the slower mmu_sync_children().  If it needs to
                         * break, some progress has been made; return
                         * RET_PF_RETRY and retry on the next #PF.
                         * KVM_REQ_MMU_SYNC is not necessary but it
                         * expedites the process.
                         */
                        if (sp->unsync_children &&
                            mmu_sync_children(vcpu, sp, false))
                                return RET_PF_RETRY;
                }

                /*
                 * Verify that the gpte in the page we've just write
                 * protected is still there.
                 */
                if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
                        goto out_gpte_changed;

                if (sp != ERR_PTR(-EEXIST))
                        link_shadow_page(vcpu, it.sptep, sp);

                if (fault->write && table_gfn == fault->gfn)
                        fault->write_fault_to_shadow_pgtable = true;
        }

        /*
         * Adjust the hugepage size _after_ resolving indirect shadow pages.
         * KVM doesn't support mapping hugepages into the guest for gfns that
         * are being shadowed by KVM, i.e. allocating a new shadow page may
         * affect the allowed hugepage size.
         */
        kvm_mmu_hugepage_adjust(vcpu, fault);

        trace_kvm_mmu_spte_requested(fault);

        for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
                /*
                 * We cannot overwrite existing page tables with an NX
                 * large page, as the leaf could be executable.
                 */
                if (fault->nx_huge_page_workaround_enabled)
                        disallowed_hugepage_adjust(fault, *it.sptep, it.level);

                base_gfn = gfn_round_for_level(fault->gfn, it.level);
                if (it.level == fault->goal_level)
                        break;

                validate_direct_spte(vcpu, it.sptep, direct_access);

                sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,
                                          true, direct_access);
                if (sp == ERR_PTR(-EEXIST))
                        continue;

                link_shadow_page(vcpu, it.sptep, sp);
                if (fault->huge_page_disallowed)
                        account_nx_huge_page(vcpu->kvm, sp,
                                             fault->req_level >= it.level);
        }

        if (WARN_ON_ONCE(it.level != fault->goal_level))
                return -EFAULT;

        ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access,
                           base_gfn, fault->pfn, fault);
        if (ret == RET_PF_SPURIOUS)
                return ret;

        FNAME(pte_prefetch)(vcpu, gw, it.sptep);
        return ret;

out_gpte_changed:
        return RET_PF_RETRY;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *         - there is no shadow pte for the guest pte
 *         - write access through a shadow pte marked read only so that we can
 *           set the dirty bit
 *         - write access to a shadow pte marked read only so we can update the
 *           page dirty bitmap, when userspace requests it
 *         - mmio access; in this case we will never install a present shadow pte
 *         - normal guest page fault due to the guest pte marked not present, not
 *           writable, or not executable
 *
 * Returns: one of the RET_PF_* values (e.g. RET_PF_RETRY, or RET_PF_EMULATE
 *          if the instruction needs to be emulated), or a negative value on
 *          error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
        struct guest_walker walker;
        kvm_pfn_t orig_pfn;
        int r;

        WARN_ON_ONCE(fault->is_tdp);

        /*
         * Look up the guest pte for the faulting address.
         * If PFEC.RSVD is set, this is a shadow page fault.
         * The bit needs to be cleared before walking guest page tables.
         */
        r = FNAME(walk_addr)(&walker, vcpu, fault->addr,
                             fault->error_code & ~PFERR_RSVD_MASK);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
                if (!fault->prefetch)
                        kvm_inject_emulated_page_fault(vcpu, &walker.fault);

                return RET_PF_RETRY;
        }

        fault->gfn = walker.gfn;
        fault->max_level = walker.level;
        fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);

        if (page_fault_handle_page_track(vcpu, fault)) {
                shadow_page_table_clear_flood(vcpu, fault->addr);
                return RET_PF_EMULATE;
        }

        r = mmu_topup_memory_caches(vcpu, true);
        if (r)
                return r;

        r = kvm_faultin_pfn(vcpu, fault, walker.pte_access);
        if (r != RET_PF_CONTINUE)
                return r;

        /*
         * Do not change pte_access if the pfn is a mmio page, otherwise
         * we will cache the incorrect access into a mmio spte.
         */
        if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) &&
            !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) {
                walker.pte_access |= ACC_WRITE_MASK;
                walker.pte_access &= ~ACC_USER_MASK;

                /*
                 * If we converted a user page to a kernel page so that the
                 * kernel can write to it when cr0.wp=0, then we should
                 * prevent the kernel from executing it if SMEP is enabled.
                 */
                if (is_cr4_smep(vcpu->arch.mmu))
                        walker.pte_access &= ~ACC_EXEC_MASK;
        }

        orig_pfn = fault->pfn;

        r = RET_PF_RETRY;
        write_lock(&vcpu->kvm->mmu_lock);

        if (is_page_fault_stale(vcpu, fault))
                goto out_unlock;

        r = make_mmu_pages_available(vcpu);
        if (r)
                goto out_unlock;
        r = FNAME(fetch)(vcpu, fault, &walker);

out_unlock:
        write_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(orig_pfn);
        return r;
}

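/*
 * Return the GPA of the first gPTE shadowed by a level-1 shadow page.  For
 * 32-bit guests, a 4K guest page table holds 1024 gPTEs but a shadow page
 * holds only 512 sptes, so each shadow page covers half of the guest table;
 * role.quadrant selects which half this page shadows.
 */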
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
        int offset = 0;

        WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);

        if (PTTYPE == 32)
                offset = sp->role.quadrant << SPTE_LEVEL_BITS;

        return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}

/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               gpa_t addr, u64 access,
                               struct x86_exception *exception)
{
        struct guest_walker walker;
        gpa_t gpa = INVALID_GPA;
        int r;

#ifndef CONFIG_X86_64
        /* A 64-bit GVA should be impossible on 32-bit KVM. */
        WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu);
#endif

        r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= addr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;

        return gpa;
}

895c50d8ae3SPaolo Bonzini /*
8966a97575dSDavid Matlack * Using the information in sp->shadowed_translation (kvm_mmu_page_get_gfn()) is
8976a97575dSDavid Matlack * safe because:
898c50d8ae3SPaolo Bonzini * - The spte has a reference to the struct page, so the pfn for a given gfn
899c50d8ae3SPaolo Bonzini * can't change unless all sptes pointing to it are nuked first.
900c3e5e415SLai Jiangshan *
901c3e5e415SLai Jiangshan * Returns
902c3c6c9fcSLai Jiangshan * < 0: failed to sync spte
903c3c6c9fcSLai Jiangshan * 0: the spte is synced and no tlb flushing is required
904c3c6c9fcSLai Jiangshan * > 0: the spte is synced and tlb flushing is required
905c50d8ae3SPaolo Bonzini */
FNAME(sync_spte)906c3c6c9fcSLai Jiangshan static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
907c50d8ae3SPaolo Bonzini {
908c50d8ae3SPaolo Bonzini bool host_writable;
909c50d8ae3SPaolo Bonzini gpa_t first_pte_gpa;
9104758d47eSPaolo Bonzini u64 *sptep, spte;
91153597858SDavid Matlack struct kvm_memory_slot *slot;
912c50d8ae3SPaolo Bonzini unsigned pte_access;
913c50d8ae3SPaolo Bonzini pt_element_t gpte;
914c50d8ae3SPaolo Bonzini gpa_t pte_gpa;
915c50d8ae3SPaolo Bonzini gfn_t gfn;
916c50d8ae3SPaolo Bonzini
91719ace7d6SLai Jiangshan if (WARN_ON_ONCE(!sp->spt[i]))
918c3c6c9fcSLai Jiangshan return 0;
919c50d8ae3SPaolo Bonzini
920c3c6c9fcSLai Jiangshan first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
921c50d8ae3SPaolo Bonzini pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
922c50d8ae3SPaolo Bonzini
923c50d8ae3SPaolo Bonzini if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
924c50d8ae3SPaolo Bonzini sizeof(pt_element_t)))
925c3e5e415SLai Jiangshan return -1;
926c50d8ae3SPaolo Bonzini
927c3c6c9fcSLai Jiangshan if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte))
928c3c6c9fcSLai Jiangshan return 1;
929c50d8ae3SPaolo Bonzini
930c50d8ae3SPaolo Bonzini gfn = gpte_to_gfn(gpte);
931c50d8ae3SPaolo Bonzini pte_access = sp->role.access;
932c50d8ae3SPaolo Bonzini pte_access &= FNAME(gpte_access)(gpte);
933c50d8ae3SPaolo Bonzini FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
934c50d8ae3SPaolo Bonzini
935c3e5e415SLai Jiangshan if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
936c3c6c9fcSLai Jiangshan return 0;
937c50d8ae3SPaolo Bonzini
9389fb35657SSean Christopherson /*
9399fb35657SSean Christopherson * Drop the SPTE if the new protections would result in a RWX=0
9409fb35657SSean Christopherson * SPTE or if the gfn is changing. The RWX=0 case only affects
9419fb35657SSean Christopherson * EPT with execute-only support, i.e. EPT without an effective
9429fb35657SSean Christopherson * "present" bit, as all other paging modes will create a
9439fb35657SSean Christopherson * read-only SPTE if pte_access is zero.
9449fb35657SSean Christopherson */
9456a97575dSDavid Matlack if ((!pte_access && !shadow_present_mask) ||
9466a97575dSDavid Matlack gfn != kvm_mmu_page_get_gfn(sp, i)) {
947c50d8ae3SPaolo Bonzini drop_spte(vcpu->kvm, &sp->spt[i]);
948c3c6c9fcSLai Jiangshan return 1;
949c50d8ae3SPaolo Bonzini }
        /*
         * Do nothing if the permissions are unchanged.  The existing SPTE is
         * still valid, and prefetch_invalid_gpte() has verified that the A/D
         * bits are set in the "new" gPTE, i.e. there is no danger of missing
         * an A/D update due to A/D bits being set in the SPTE but not the
         * gPTE.
         */
        if (kvm_mmu_page_get_access(sp, i) == pte_access)
                return 0;

        /* Update the shadowed access bits in case they changed. */
        kvm_mmu_page_set_access(sp, i, pte_access);

        sptep = &sp->spt[i];
        spte = *sptep;
        host_writable = spte & shadow_host_writable_mask;
        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        make_spte(vcpu, sp, slot, pte_access, gfn,
                  spte_to_pfn(spte), spte, true, false,
                  host_writable, &spte);

        return mmu_spte_update(sptep, spte);
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
#undef PT_HAVE_ACCESSED_DIRTY