/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>

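/*
 * KVM's capability mask: one u32 of feature bits per reverse-CPUID leaf,
 * filled in by kvm_set_cpu_caps() during setup and consulted when deciding
 * which CPUID features KVM supports/advertises.
 */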
extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only);

u32 xstate_required_size(u64 xstate_bv, bool compacted);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);

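/* Guest MAXPHYADDR, cached in vcpu->arch when guest CPUID is updated. */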
static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

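/* A GPA is legal if it hits none of the vCPU's reserved physical address bits. */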
static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !(gpa & vcpu->arch.reserved_gpa_bits);
}

static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return !kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

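/* Check alignment and legality of a GPA in one shot. */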
static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t alignment)
{
	return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
}

static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}

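/*
 * Overwrite the register of @entry that corresponds to @leaf with KVM's
 * computed capabilities for that leaf ("leaf * 32" is the leaf's first
 * feature bit, which identifies the output register).
 */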
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
						 unsigned int leaf)
{
	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
	*reg = kvm_cpu_caps[leaf];
}

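/*
 * Translate an X86_FEATURE_* bit into the guest's CPUID entry via the
 * reverse-CPUID table and return a pointer to the 32-bit output register
 * holding that feature, or NULL if the guest's CPUID lacks the leaf.
 */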
static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
						     unsigned int x86_feature)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
	struct kvm_cpuid_entry2 *entry;

	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
	if (!entry)
		return NULL;

	return __cpuid_entry_get_reg(entry, cpuid.reg);
}

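/*
 * True if the feature is enumerated in the guest's CPUID.  Illustrative
 * (hypothetical) caller, e.g. when deciding whether to allow an MSR access:
 *
 *	if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
 *		return 1;
 */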
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
					    unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (!reg)
		return false;

	return *reg & __feature_bit(x86_feature);
}

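/* Clear the feature's bit in the guest's CPUID, if the relevant leaf exists. */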
static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
					      unsigned int x86_feature)
{
	u32 *reg;

	reg = guest_cpuid_get_register(vcpu, x86_feature);
	if (reg)
		*reg &= ~__feature_bit(x86_feature);
}

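/* Vendor checks key off the vendor string reported in guest CPUID leaf 0. */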
static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best &&
	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}

static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}

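/*
 * Family/model/stepping are decoded from leaf 0x1 EAX; each helper returns
 * -1 if the guest's CPUID doesn't provide leaf 0x1.
 */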
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
	if (!best)
		return -1;

	return x86_stepping(best->eax);
}

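/*
 * Any of these CPUID bits implies the guest can access MSR_IA32_SPEC_CTRL;
 * Intel defines a single SPEC_CTRL bit, AMD splits it across IBRS/STIBP/SSBD.
 */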
static inline bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
}

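/* SPEC_CTRL or AMD's IBPB bit implies the IBPB command MSR (MSR_IA32_PRED_CMD). */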
static inline bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
{
	return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
		guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB));
}

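/*
 * CPUID faulting: supported if MSR_PLATFORM_INFO advertises it, enabled
 * when the guest sets the corresponding bit in MSR_MISC_FEATURES_ENABLES.
 */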
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.msr_misc_features_enables &
		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

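/*
 * kvm_cpu_cap_{clear,set,get,has}() manipulate KVM's view of supported
 * CPUID features; reverse_cpuid_check() trips at build time if the
 * feature's leaf isn't covered by the reverse-CPUID table.
 */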
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
}

static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
}

static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
	unsigned int x86_leaf = __feature_leaf(x86_feature);

	reverse_cpuid_check(x86_leaf);
	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
}

static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
{
	return !!kvm_cpu_cap_get(x86_feature);
}

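/*
 * Advertise a feature to guests only if the host CPU actually has it,
 * e.g. (illustrative) kvm_cpu_cap_check_and_set(X86_FEATURE_XSAVES).
 */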
static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
{
	if (boot_cpu_has(x86_feature))
		kvm_cpu_cap_set(x86_feature);
}

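/*
 * KVM PV features are reported as available unless userspace opted in to
 * enforcement, in which case only the features exposed in the guest's
 * KVM CPUID leaf are honored.
 */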
static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
					 unsigned int kvm_feature)
{
	if (!vcpu->arch.pv_cpuid.enforce)
		return true;

	return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}

#endif