/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

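/*
 * KVM's view of supported CPUID-based features, one u32 feature word per
 * reverse-CPUID leaf.  Populated once by kvm_set_cpu_caps() and treated as
 * read-only thereafter (hence __read_mostly).
 */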
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

void __init kvm_init_xstate_sizes(void);
u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

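/*
 * A GPA is "legal" if it sets no bits that are reserved from the guest's
 * point of view, i.e. no bits at or above the guest's effective MAXPHYADDR;
 * vcpu->arch.reserved_gpa_bits caches that mask.
 */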
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

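/*
 * Overwrite one register of a guest CPUID entry with KVM's supported
 * feature word for @leaf, e.g. to strip features KVM doesn't support.
 * @leaf must be a compile-time constant for the BUILD_BUG_ON() to work.
 */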
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

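/*
 * Retrieve the feature word in the guest's CPUID entries that contains
 * @x86_feature, using the reverse-CPUID tables to map the feature to its
 * CPUID function/index/register.  Returns NULL if the guest doesn't have
 * the relevant CPUID entry.  A typical (hypothetical) use of the helpers
 * built on top of this:
 *
 *	if (guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
 *		... emulate XSAVE-dependent behavior ...
 */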
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

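/*
 * Detect the guest's vendor from the vendor string that userspace stuffed
 * into CPUID function 0 (EBX/ECX/EDX).
 */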
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.is_amd_compatible;
}

static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
	return !guest_cpuid_is_amd_compatible(vcpu);
}

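/*
 * Extract the guest's family/model/stepping from CPUID.0x1.EAX, returning
 * -1 if userspace didn't define leaf 0x1.
 */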
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

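/*
 * The guest has MSR_IA32_SPEC_CTRL (and MSR_IA32_PRED_CMD below) if any
 * CPUID feature that enumerates the MSR is exposed; AMD enumerates the
 * individual SPEC_CTRL capabilities via discrete feature flags.
 */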
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
	       MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

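/*
 * Mutators/accessors for kvm_cpu_caps.  These must be __always_inline so
 * that reverse_cpuid_check() sees a compile-time constant leaf and its
 * BUILD_BUG_ON() sanity checks can fire at build time.
 */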
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

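/*
 * Check a KVM paravirt feature (KVM_FEATURE_*).  Unless userspace enabled
 * enforcement via KVM_CAP_ENFORCE_PV_FEATURE_CPUID, all PV features are
 * reported as available regardless of guest CPUID.
 */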
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

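/*
 * "Governed" features (see governed_features.h): a set bit in the per-vCPU
 * bitmap means the feature is both supported by KVM and enabled in guest
 * CPUID, which makes the check cheap enough for hot paths.
 */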
enum kvm_governed_features {
#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
#include "governed_features.h"
	KVM_NR_GOVERNED_FEATURES
};

static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
	switch (x86_feature) {
#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
#include "governed_features.h"
	default:
		return -1;
	}
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
	return kvm_governed_feature_index(x86_feature) >= 0;
}

static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	__set_bit(kvm_governed_feature_index(x86_feature),
		  vcpu->arch.governed_features.enabled);
}

static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
							       unsigned int x86_feature)
{
	if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
		kvm_governed_feature_set(vcpu, x86_feature);
}

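/*
 * Fast lookup of a governed feature.  The bitmap is populated by
 * kvm_governed_feature_check_and_set() when guest CPUID is updated; a
 * (hypothetical) caller might gate nested-virtualization handling on
 * guest_can_use(vcpu, X86_FEATURE_VMX).
 */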
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
					  unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	return test_bit(kvm_governed_feature_index(x86_feature),
			vcpu->arch.governed_features.enabled);
}

#endif