mmu.h (3eb9992caff10b62cf0ed0bcb1667a58b13188fa) → mmu.h (198c74f43f0f5473f99967aead30ddc622804bc1)

The only change between the two revisions is the comment block added above is_writable_pte(); the listing below shows the newer revision.
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)

--- 90 unchanged lines hidden ---

	return kvm_mmu_load(vcpu);
}

static inline int is_present_gpte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

/*
 * Currently, we have two sorts of write-protection: a) the first sort
 * write-protects guest pages to sync guest modifications, b) the second
 * sort is used to sync the dirty bitmap for KVM_GET_DIRTY_LOG.  The
 * differences between the two sorts are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid
 *    corrupting the shadow page table between vcpus, so it must run under
 *    the protection of mmu-lock.  The second case does not need to flush
 *    the TLB until the dirty bitmap is returned to userspace, since it
 *    only write-protects pages logged in the bitmap; no page in the dirty
 *    bitmap is missed, so it can flush the TLB outside of mmu-lock.
 *
 * So there is a problem: the first case can encounter a stale TLB entry
 * left by the second case, which write-protects pages without flushing
 * the TLB immediately.  To make the first case aware of this problem, we
 * make it flush the TLB whenever it tries to write-protect an spte whose
 * SPTE_MMU_WRITEABLE bit is set.  This works because the second case
 * never touches the SPTE_MMU_WRITEABLE bit.
 *
 * Anyway, whenever an spte is updated (only permission and status bits
 * are changed), we need to check whether an spte with SPTE_MMU_WRITEABLE
 * has become read-only; if so, we need to flush the TLB.  Fortunately,
 * mmu_spte_update() already handles this perfectly.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether the spte may have a writable TLB entry, or whether
 *   the spte can be made writable in the mmu mapping (the common case),
 *   check SPTE_MMU_WRITEABLE; otherwise,
 * - when fixing a page fault on the spte, or when doing write-protection
 *   for dirty logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);

--- 34 unchanged lines hidden ---
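
To illustrate the rule documented in the new comment, here is a minimal, hypothetical C sketch of how a write-protect helper could implement the two cases.  It is not the kernel's actual code: spte_write_protect_sketch() and the for_dirty_log parameter are made-up names, and it assumes the SPTE_MMU_WRITEABLE bit definition from mmu.c alongside is_writable_pte() and PT_WRITABLE_MASK from this header.

/*
 * Hypothetical sketch, not the kernel's implementation.  Case a) is the
 * mmu-lock path that syncs guest modifications; case b) is the dirty-log
 * path used by KVM_GET_DIRTY_LOG.  Assumes SPTE_MMU_WRITEABLE (defined
 * in mmu.c) and the helpers in this header.
 */
static bool spte_write_protect_sketch(u64 *sptep, bool for_dirty_log)
{
	u64 spte = *sptep;
	bool flush = false;

	if (!is_writable_pte(spte))
		return false;

	if (for_dirty_log) {
		/*
		 * Case b): clear only PT_WRITABLE_MASK and leave
		 * SPTE_MMU_WRITEABLE set; the TLB flush is deferred until
		 * the dirty bitmap is returned to userspace.
		 */
		spte &= ~PT_WRITABLE_MASK;
	} else {
		/*
		 * Case a): a writable TLB entry may still exist whenever
		 * SPTE_MMU_WRITEABLE is set, so the caller must flush the
		 * TLB immediately, under the protection of mmu-lock.
		 */
		if (spte & SPTE_MMU_WRITEABLE)
			flush = true;
		spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
	}

	*sptep = spte;
	return flush;	/* true: caller flushes the TLB under mmu-lock */
}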