/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

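/*
 * Return the guest's effective MAXPHYADDR, cached when userspace last set
 * the vCPU's CPUID entries (see cpuid_query_maxphyaddr() for the raw
 * lookup).
 */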
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

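/*
 * A GPA is legal if and only if it doesn't set any of the guest's reserved
 * physical address bits, i.e. bits at or above the guest's MAXPHYADDR.
 */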
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

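/*
 * Stuff a KVM capability word into the given CPUID entry.  The target
 * register is located via the reverse-CPUID table; "leaf * 32" converts the
 * capability word index into the feature bit number of that word's first
 * feature, which is what cpuid_entry_get_reg() expects.
 */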
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

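/*
 * Locate the guest CPUID register that holds @x86_feature, or return NULL if
 * the relevant leaf doesn't exist in the vCPU's CPUID entries.  Declared
 * __always_inline so the reverse-CPUID lookup resolves at compile time.
 */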
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

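/*
 * CPUID leaf 0 holds the vendor string in EBX/EDX/ECX ("GenuineIntel",
 * "AuthenticAMD", etc.), which is what the is_guest_vendor_*() helpers
 * match against.
 */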
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0);
	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

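/*
 * Unlike the lookups above, the AMD/Intel compatibility checks use a flag
 * that is cached when userspace sets the vCPU's CPUID entries, avoiding the
 * entry walk and thus making them cheap enough for hot paths.
 */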
static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.is_amd_compatible;
}

static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
{
	return !guest_cpuid_is_amd_compatible(vcpu);
}

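/*
 * Family/model/stepping are decoded from CPUID.0x1 EAX; each helper returns
 * -1 if the guest has no leaf 0x1.
 */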
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

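/*
 * True if the guest's CPUID model matches the host boot CPU's model, e.g.
 * for gating features whose behavior is model-specific.
 */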
static inline bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
{
	return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

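/*
 * Architecturally, advertising any one of several speculation control
 * features implies the IA32_SPEC_CTRL (or IA32_PRED_CMD) MSR exists, hence
 * the ORs below.
 */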
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

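/*
 * CPUID faulting: supports_cpuid_fault() reflects whether the capability is
 * advertised to the guest via MSR_PLATFORM_INFO, while cpuid_fault_enabled()
 * reflects whether the guest has actually turned it on, i.e. whether CPUID
 * executed at CPL > 0 should #GP.
 */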
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

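/*
 * Accessors for kvm_cpu_caps, KVM's own view of which CPUID features it can
 * expose to guests.  These are __always_inline so that __feature_leaf() is a
 * compile-time constant and reverse_cpuid_check() can BUILD_BUG_ON() an
 * out-of-range word.
 */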
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

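/* Set a KVM capability if and only if the host boot CPU has the feature. */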
static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

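/*
 * Paravirt features are assumed present unless userspace opted in to strict
 * enforcement (KVM_CAP_ENFORCE_PV_FEATURE_CPUID), which preserves backwards
 * compatibility with VMMs that don't fill in the KVM PV leaves.
 */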
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

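/*
 * "Governed" features are features whose enablement KVM tracks per-vCPU in a
 * dedicated bitmap, so that hot paths don't have to redo the combined
 * kvm_cpu_cap_has() + guest_cpuid_has() checks on every use.  The enum is
 * generated from governed_features.h via an x-macro.
 */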
enum kvm_governed_features {
#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
#include "governed_features.h"
	KVM_NR_GOVERNED_FEATURES
};

static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
{
	switch (x86_feature) {
#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
#include "governed_features.h"
	default:
		return -1;
	}
}

static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
{
	return kvm_governed_feature_index(x86_feature) >= 0;
}

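/*
 * Illustrative usage sketch (hypothetical call site; X86_FEATURE_XSAVES is
 * assumed to be listed in governed_features.h): vendor code seeds the cache
 * once when the vCPU's CPUID changes, and hot paths then test the cached
 * bit:
 *
 *	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
 *	...
 *	if (guest_can_use(vcpu, X86_FEATURE_XSAVES))
 *		<fast path that relies on XSAVES>
 */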
static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	__set_bit(kvm_governed_feature_index(x86_feature),
		  vcpu->arch.governed_features.enabled);
}

static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
							       unsigned int x86_feature)
{
	if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
		kvm_governed_feature_set(vcpu, x86_feature);
}

static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
					  unsigned int x86_feature)
{
	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));

	return test_bit(kvm_governed_feature_index(x86_feature),
			vcpu->arch.governed_features.enabled);
}

#endif