/*
 * QEMU KVM support -- ARM specific functions.
 *
 * Copyright (c) 2012 Linaro Limited
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_KVM_ARM_H
#define QEMU_KVM_ARM_H

#include "sysemu/kvm.h"

#define KVM_ARM_VGIC_V2 (1 << 0)
#define KVM_ARM_VGIC_V3 (1 << 1)

/**
 * kvm_arm_register_device:
 * @mr: memory region for this device
 * @devid: the KVM device ID
 * @group: device control API group for setting addresses
 * @attr: device control API address type
 * @dev_fd: device control device file descriptor
 * @addr_ormask: value to be OR'ed with resolved address
 *
 * Remember the memory region @mr, and when it is mapped by the machine
 * model, tell the kernel that base address using the device control API.
 * @devid should be the ID of the device as defined by the arm-vgic device
 * in the device control API. The machine model may map and unmap the device
 * multiple times; the kernel will only be told the final address at the
 * point where machine init is complete.
 */
void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
                             uint64_t attr, int dev_fd, uint64_t addr_ormask);
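
/*
 * Illustrative sketch (not part of the original header): an in-kernel
 * GICv2 device model might register its distributor region roughly like
 * this, so the kernel learns the final base address once the machine
 * model has mapped it. The constants come from the Linux UAPI headers;
 * "s" stands for the device's state structure and the exact call used by
 * QEMU's GIC code may differ.
 *
 *     kvm_arm_register_device(&s->iomem,
 *                             (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
 *                             | KVM_VGIC_V2_ADDR_TYPE_DIST,
 *                             KVM_DEV_ARM_VGIC_GRP_ADDR,
 *                             KVM_VGIC_V2_ADDR_TYPE_DIST,
 *                             s->dev_fd, 0);
 */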

/**
 * write_list_to_kvmstate:
 * @cpu: ARMCPU
 * @level: the state level to sync
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the cpreg_values list into the kernel (via ioctl).
 * This updates KVM's working data structures from TCG data or
 * from incoming migration state.
 *
 * Returns: true if all register values were updated correctly,
 * false if some register was unknown to the kernel or could not
 * be written (eg constant register with the wrong value).
 * Note that we do not stop early on failure -- we will attempt
 * writing all registers in the list.
 */
bool write_list_to_kvmstate(ARMCPU *cpu, int level);

/**
 * write_kvmstate_to_list:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the kernel into the cpreg_values list. This is used to
 * copy info from KVM's working data structures into TCG or
 * for outbound migration.
 *
 * Returns: true if all register values were read correctly,
 * false if some register was unknown or could not be read.
 * Note that we do not stop early on failure -- we will attempt
 * reading all registers in the list.
 */
bool write_kvmstate_to_list(ARMCPU *cpu);
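
/*
 * Illustrative sketch (not part of the original header) of how the two
 * cpreg-list helpers pair up around a register sync or migration; the
 * error handling is simplified and the level constant
 * (KVM_PUT_FULL_STATE, from sysemu/kvm.h) is just one possible choice.
 *
 *     // Outbound: pull the kernel's view into cpreg_values.
 *     if (!write_kvmstate_to_list(cpu)) {
 *         // at least one register could not be read
 *     }
 *
 *     // Inbound: push cpreg_values (e.g. from migration) back into KVM.
 *     if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
 *         // at least one register was rejected by the kernel
 *     }
 */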

/**
 * kvm_arm_cpu_pre_save:
 * @cpu: ARMCPU
 *
 * Called after write_kvmstate_to_list() from cpu_pre_save() to update
 * the cpreg list with KVM CPU state.
 */
void kvm_arm_cpu_pre_save(ARMCPU *cpu);

/**
 * kvm_arm_cpu_post_load:
 * @cpu: ARMCPU
 *
 * Called from cpu_post_load() to update KVM CPU state from the cpreg list.
 */
void kvm_arm_cpu_post_load(ARMCPU *cpu);

/**
 * kvm_arm_reset_vcpu:
 * @cpu: ARMCPU
 *
 * Called at reset time to reset kernel registers to their initial values.
 */
void kvm_arm_reset_vcpu(ARMCPU *cpu);

#ifdef CONFIG_KVM
/**
 * kvm_arm_create_scratch_host_vcpu:
 * @cpus_to_try: array of QEMU_KVM_ARM_TARGET_* values (terminated with
 * QEMU_KVM_ARM_TARGET_NONE) to try as fallback if the kernel does not
 * know the PREFERRED_TARGET ioctl. Passing NULL is the same as passing
 * an empty array.
 * @fdarray: filled in with kvmfd, vmfd, cpufd file descriptors in that order
 * @init: filled in with the necessary values for creating a host
 * vcpu. If NULL is provided, will not init the vCPU (though the cpufd
 * will still be set up).
 *
 * Create a scratch vcpu in its own VM of the type preferred by the host
 * kernel (as would be used for '-cpu host'), for purposes of probing it
 * for capabilities.
 *
 * Returns: true on success (and fdarray and init are filled in),
 * false on failure (and fdarray and init are not valid).
 */
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init);

/**
 * kvm_arm_destroy_scratch_host_vcpu:
 * @fdarray: array of fds as set up by kvm_arm_create_scratch_host_vcpu
 *
 * Tear down the scratch vcpu created by kvm_arm_create_scratch_host_vcpu.
 */
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray);
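
/*
 * Illustrative sketch (not part of the original header): a typical
 * probing sequence creates the scratch vcpu, queries it (for example
 * with KVM_GET_ONE_REG against the vcpu fd, fdarray[2]), and then tears
 * everything down again. Passing NULL for @cpus_to_try relies on the
 * kernel's PREFERRED_TARGET ioctl.
 *
 *     int fdarray[3];
 *     struct kvm_vcpu_init init;
 *
 *     if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
 *         return false;   // host cannot provide a usable scratch vcpu
 *     }
 *     // ... probe via ioctl() on fdarray[2] here ...
 *     kvm_arm_destroy_scratch_host_vcpu(fdarray);
 */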

/**
 * kvm_arm_sve_get_vls:
 * @cpu: ARMCPU
 *
 * Get all the SVE vector lengths supported by the KVM host, setting
 * the bits corresponding to their length in quadwords minus one
 * (vq - 1) up to ARM_MAX_VQ. Return the resulting map.
 */
uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu);
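
/*
 * Illustrative sketch (not part of the original header): decoding the
 * returned map. Bit (vq - 1) being set means the host supports vectors
 * of vq quadwords, i.e. vq * 128 bits.
 *
 *     uint32_t vls = kvm_arm_sve_get_vls(cpu);
 *
 *     for (uint32_t vq = 1; vq <= ARM_MAX_VQ; vq++) {
 *         if (vls & (1 << (vq - 1))) {
 *             // host supports an SVE vector length of vq * 128 bits
 *         }
 *     }
 */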

/**
 * kvm_arm_set_cpu_features_from_host:
 * @cpu: ARMCPU to set the features for
 *
 * Set up the ARMCPU struct fields to match the information probed
 * from the host CPU.
 */
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu);

/**
 * kvm_arm_add_vcpu_properties:
 * @cpu: The CPU object to add the properties to
 *
 * Add all KVM specific CPU properties to the CPU object. These
 * are the CPU properties with "kvm-" prefixed names.
 */
void kvm_arm_add_vcpu_properties(ARMCPU *cpu);

/**
 * kvm_arm_steal_time_finalize:
 * @cpu: ARMCPU for which to finalize kvm-steal-time
 * @errp: Pointer to Error* for error propagation
 *
 * Validate the kvm-steal-time property selection and set its default
 * based on KVM support and guest configuration.
 */
void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp);

/**
 * kvm_arm_aarch32_supported:
 *
 * Returns: true if KVM can enable AArch32 mode
 * and false otherwise.
 */
bool kvm_arm_aarch32_supported(void);

/**
 * kvm_arm_pmu_supported:
 *
 * Returns: true if KVM can enable the PMU
 * and false otherwise.
 */
bool kvm_arm_pmu_supported(void);

/**
 * kvm_arm_sve_supported:
 *
 * Returns: true if KVM can enable SVE and false otherwise.
 */
bool kvm_arm_sve_supported(void);

/**
 * kvm_arm_mte_supported:
 *
 * Returns: true if KVM can enable MTE, and false otherwise.
 */
bool kvm_arm_mte_supported(void);

/**
 * kvm_arm_get_max_vm_ipa_size:
 * @ms: Machine state handle
 * @fixed_ipa: True when the IPA limit is fixed at 40 bits. This is the
 * case for legacy KVM.
 *
 * Returns: the number of bits in the IPA address space supported by KVM
 */
int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa);
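
/*
 * Illustrative sketch (not part of the original header): a machine model
 * can use the probed IPA limit to decide whether its memory map fits in
 * the guest physical address space; required_addr_bits is a hypothetical
 * value computed by the board from RAM size plus MMIO layout.
 *
 *     bool fixed_ipa;
 *     int ipa_bits = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa);
 *
 *     if (required_addr_bits > ipa_bits) {
 *         // the requested memory map does not fit in the guest
 *         // physical address space KVM can provide
 *     }
 */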

int kvm_arm_vgic_probe(void);

void kvm_arm_pmu_init(ARMCPU *cpu);
void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq);

/**
 * kvm_arm_pvtime_init:
 * @cpu: ARMCPU
 * @ipa: Per-vcpu guest physical base address of the pvtime structures
 *
 * Initializes PVTIME for the VCPU, setting the PVTIME IPA to @ipa.
 */
void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa);
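
/*
 * Illustrative sketch (not part of the original header): each vcpu needs
 * its own stolen-time structure in guest memory, so a board model would
 * typically lay them out at a fixed stride from a base address it has
 * reserved. Here pvtime_base and the 64-byte stride are assumptions, not
 * values defined by this header.
 *
 *     uint64_t ipa = pvtime_base + CPU(cpu)->cpu_index * 64;
 *
 *     kvm_arm_pvtime_init(cpu, ipa);
 */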

int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
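
/*
 * Illustrative sketch (not part of the original header): a GIC model can
 * route a board interrupt into the in-kernel GIC by raising and later
 * lowering the line. KVM_ARM_IRQ_TYPE_SPI comes from the Linux UAPI
 * headers; "n" stands for whatever interrupt number the caller uses.
 *
 *     kvm_arm_set_irq(0, KVM_ARM_IRQ_TYPE_SPI, n, 1);   // assert line n
 *     kvm_arm_set_irq(0, KVM_ARM_IRQ_TYPE_SPI, n, 0);   // deassert line n
 */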

void kvm_arm_enable_mte(Object *cpuobj, Error **errp);

#else

/*
 * It's safe to call these functions without KVM support.
 * They should either do nothing or return "not supported".
 */
static inline bool kvm_arm_aarch32_supported(void)
{
    return false;
}

static inline bool kvm_arm_pmu_supported(void)
{
    return false;
}

static inline bool kvm_arm_sve_supported(void)
{
    return false;
}

static inline bool kvm_arm_mte_supported(void)
{
    return false;
}

/*
 * These functions should never actually be called without KVM support.
 */
static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    g_assert_not_reached();
}

static inline void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
{
    g_assert_not_reached();
}

static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
{
    g_assert_not_reached();
}

static inline int kvm_arm_vgic_probe(void)
{
    g_assert_not_reached();
}

static inline void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
{
    g_assert_not_reached();
}

static inline void kvm_arm_pmu_init(ARMCPU *cpu)
{
    g_assert_not_reached();
}

static inline void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
{
    g_assert_not_reached();
}

static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
{
    g_assert_not_reached();
}

static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
{
    g_assert_not_reached();
}

static inline void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
{
    g_assert_not_reached();
}

#endif

#endif