mmu.h: 4b42fafc1cdc4aba38d4f147cb2f3f1a32cd4a15 -> 7a98205deebfff9fc96f90d9e7b1a334b0bd3e2b
 #ifndef __KVM_X86_MMU_H
 #define __KVM_X86_MMU_H

 #include <linux/kvm_host.h>
 #include "kvm_cache_regs.h"

 #define PT64_PT_BITS 9
 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)

--- 159 unchanged lines hidden (view full) ---

 	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
 	 * but it will be one in index if SMAP checks are being overridden.
 	 * It is important to keep this branchless.
 	 */
 	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
 	int index = (pfec >> 1) +
 		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
 	bool fault = (mmu->permissions[index] >> pte_access) & 1;
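To see why this stays branchless, here is a minimal standalone sketch using the architectural constant values (EFLAGS.AC is bit 18, PFEC.RSVD is bit 3); the helper name perm_index is illustrative, not kernel code. (cpl - 3) is zero at CPL 3 and sign-extends to all-ones below it, so smap keeps EFLAGS.AC only for supervisor accesses, and the shift drops that bit into the slot PFEC.RSVD occupies once the ">> 1" has discarded PFEC.P:

    #include <stdio.h>

    #define X86_EFLAGS_AC_BIT 18
    #define X86_EFLAGS_AC     (1UL << X86_EFLAGS_AC_BIT)
    #define PFERR_RSVD_BIT    3

    static int perm_index(int cpl, unsigned long rflags, unsigned int pfec)
    {
            /* 0 for CPL 3; sign-extends to all-ones (bit 18 included) for CPL 0-2 */
            unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);

            /* bit 18 >> 16 lands on bit 2, the shifted PFEC.RSVD slot */
            return (pfec >> 1) + (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
    }

    int main(void)
    {
            /* supervisor write with EFLAGS.AC set: SMAP-override slot */
            printf("%d\n", perm_index(0, X86_EFLAGS_AC, 1 << 1));  /* 5 */
            /* user write, AC irrelevant: plain write slot */
            printf("%d\n", perm_index(3, X86_EFLAGS_AC, 1 << 1));  /* 1 */
            return 0;
    }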
+	u32 errcode = PFERR_PRESENT_MASK;

 	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
-	pfec |= PFERR_PRESENT_MASK;
-
 	if (unlikely(mmu->pkru_mask)) {
 		u32 pkru_bits, offset;

 		/*
 		 * PKRU defines 32 bits, there are 16 domains and 2
 		 * attribute bits per domain in pkru. pte_pkey is the
 		 * index of the protection domain, so pte_pkey * 2
 		 * is the index of the first bit for the domain.
 		 */
 		pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;

 		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
-		offset = pfec - 1 +
+		offset = (pfec & ~1) +
 			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

 		pkru_bits &= mmu->pkru_mask >> offset;
-		pfec |= -pkru_bits & PFERR_PK_MASK;
+		errcode |= -pkru_bits & PFERR_PK_MASK;
 		fault |= (pkru_bits != 0);
 	}

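To make the PKRU layout described in the comment concrete: each of the 16 protection keys owns two bits in PKRU, AD (access disable) at bit 2*key and WD (write disable) at bit 2*key + 1, which is exactly what the "& 3" extracts. A self-contained sketch (the helper name is hypothetical, not a kernel API):

    #include <stdint.h>

    /* AD/WD pair for a protection key: bit 0 = AD, bit 1 = WD */
    static unsigned int pkru_bits_for_key(uint32_t pkru, unsigned int pkey)
    {
            return (pkru >> (pkey * 2)) & 3;
    }

    /* e.g. pkru = 0xc disables key 1 completely:
     * pkru_bits_for_key(0xc, 1) == 3 (AD | WD); every other key reads 0 */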
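The offset change above is the core of this fix. The old code had already ORed PFERR_PRESENT_MASK into pfec, so bit 0 was guaranteed set and subtracting 1 simply cleared it. The new code leaves pfec untouched (the present bit now lives in errcode), so bit 0 may be clear, and "pfec - 1" would borrow into the real fault bits: for a write fault with pfec == 0x2, "pfec - 1" yields 0x1 (the write bit lost, present bit set), while "pfec & ~1" yields the intended 0x2.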
-	return -(uint32_t)fault & pfec;
+	return -(u32)fault & errcode;
 }

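The rewritten return keeps the exit branchless as well: fault is a bool, so -(u32)fault is either 0 or all-ones, and the AND yields the full error code on fault and 0 on success. A tiny sketch of the idiom (plain C, illustrative name):

    #include <stdbool.h>
    #include <stdint.h>

    /* value when cond is true, 0 when false, with no branch */
    static uint32_t mask_if(bool cond, uint32_t value)
    {
            /* -(uint32_t)cond is 0xffffffff for true, 0 for false */
            return -(uint32_t)cond & value;
    }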
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 				    struct kvm_memory_slot *slot, u64 gfn);
 #endif