/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

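/* A GPA is illegal if any bits above the guest's MAXPHYADDR are set. */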
static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return (gpa >= BIT_ULL(cpuid_maxphyaddr(vcpu)));
}

struct cpuid_reg {
	u32 function;
	u32 index;
	int reg;
};

static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
	[CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
	[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
	[CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
	[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
	[CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
	[CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
	[CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
	[CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
};

/*
 * Reverse CPUID and its derivatives can only be used for hardware-defined
 * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
 * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
 * is nonsensical as the bit number/mask is an arbitrary software-defined value
 * and can't be used by KVM to query/control guest capabilities.  And obviously
 * the leaf being queried must have an entry in the lookup table.
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
	BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
	BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
	BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}

/*
 * Retrieve the bit mask from an X86_FEATURE_* definition.  Features contain
 * the hardware defined bit number (stored in bits 4:0) and a software defined
 * "word" (stored in bits 31:5).  The word is used to index into arrays of
 * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
 */
static __always_inline u32 __feature_bit(int x86_feature)
{
	reverse_cpuid_check(x86_feature / 32);
	return 1 << (x86_feature & 31);
}

#define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
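/*
 * Example: a feature defined as word 1, bit 27 (the encoding used by e.g.
 * X86_FEATURE_RDTSCP) yields a mask of BIT(27) from __feature_bit() and
 * indexes reverse_cpuid[1], i.e. CPUID.0x80000001:EDX.
 */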

static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return reverse_cpuid[x86_leaf];
}

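/*
 * Map a CPUID_E?X register identifier to the corresponding output register of
 * a CPUID entry.  BUILD_BUG() in the default case requires @reg to be a
 * compile-time constant, hence the __always_inline.
 */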
static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						  u32 reg)
{
	switch (reg) {
	case CPUID_EAX:
		return &entry->eax;
	case CPUID_EBX:
		return &entry->ebx;
	case CPUID_ECX:
		return &entry->ecx;
	case CPUID_EDX:
		return &entry->edx;
	default:
		BUILD_BUG();
		return NULL;
	}
}

static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
						unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
					   unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	return *reg & __feature_bit(x86_feature);
}

static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	return cpuid_entry_get(entry, x86_feature);
}

static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
					      unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
					    unsigned int x86_feature)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	*reg |= __feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
					       unsigned int x86_feature,
					       bool set)
{
	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);

	/*
	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
	 * compiler into using CMOV instead of Jcc when possible.
	 */
	if (set)
		*reg |= __feature_bit(x86_feature);
	else
		*reg &= ~__feature_bit(x86_feature);
}

static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 enum cpuid_leafs leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

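/*
 * Return a pointer to the register of the guest's CPUID entry that holds
 * @x86_feature, or NULL if the corresponding leaf isn't enumerated in the
 * guest's CPUID.
 */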
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

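/* Check the vendor string reported in the guest's CPUID leaf 0. */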
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

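/*
 * Family/model/stepping are extracted from the guest's CPUID.0x1:EAX;
 * -1 is returned if leaf 0x1 isn't present in the guest's CPUID.
 */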
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

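/*
 * CPUID faulting: supports_cpuid_fault() reports whether the capability is
 * advertised to the guest via MSR_PLATFORM_INFO, cpuid_fault_enabled()
 * whether the guest has turned it on via MSR_MISC_FEATURES_ENABLES.
 */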
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

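/*
 * The kvm_cpu_cap_*() helpers operate on kvm_cpu_caps, KVM's masks of the
 * features it supports exposing to guests, indexed by feature word.
 */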
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = x86_feature / 32;

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

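/* A valid page address is page aligned and within the guest's MAXPHYADDR. */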
static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

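/*
 * Check a KVM paravirt feature.  If userspace hasn't enabled PV feature
 * enforcement, all PV features are treated as available to the guest.
 */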
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

#endif