/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/* header to be included in non-KVM-specific code */

#ifndef QEMU_KVM_H
#define QEMU_KVM_H

#include "exec/memattrs.h"
#include "qemu/accel.h"
#include "qom/object.h"

#ifdef COMPILING_PER_TARGET
# ifdef CONFIG_KVM
#  include <linux/kvm.h>
#  define CONFIG_KVM_IS_POSSIBLE
# endif
#else
# define CONFIG_KVM_IS_POSSIBLE
#endif

#ifdef CONFIG_KVM_IS_POSSIBLE

extern bool kvm_allowed;
extern bool kvm_kernel_irqchip;
extern bool kvm_split_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_halt_in_kernel_allowed;
extern bool kvm_resamplefds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_gsi_direct_mapping;
extern bool kvm_readonly_mem_allowed;
extern bool kvm_msi_use_devid;

#define kvm_enabled()           (kvm_allowed)
/**
 * kvm_irqchip_in_kernel:
 *
 * Returns: true if an in-kernel irqchip was created.
 * What this actually means is architecture and machine model
 * specific: on PC, for instance, it means that the LAPIC
 * is in the kernel.  This function should never be used from generic
 * target-independent code: use one of the following functions or
 * some other specific check instead.
 */
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)

/**
 * kvm_irqchip_is_split:
 *
 * Returns: true if the irqchip implementation is split between
 * user and kernel space.  The details are architecture and
 * machine specific.  On PC, it means that the PIC, IOAPIC, and
 * PIT are in user space while the LAPIC is in the kernel.
 */
#define kvm_irqchip_is_split() (kvm_split_irqchip)

/**
 * kvm_async_interrupts_enabled:
 *
 * Returns: true if we can deliver interrupts to KVM
 * asynchronously (ie by ioctl from any thread at any time)
 * rather than having to do interrupt delivery synchronously
 * (where the vcpu must be stopped at a suitable point first).
 */
#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)

/**
 * kvm_halt_in_kernel:
 *
 * Returns: true if halted cpus should still get a KVM_RUN ioctl to run
 * inside of kernel space. This only works if MP state is implemented.
 */
#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)

/**
 * kvm_irqfds_enabled:
 *
 * Returns: true if we can use irqfds to inject interrupts into
 * a KVM CPU (ie the kernel supports irqfds and we are running
 * with a configuration where it is meaningful to use them).
 *
 * Always available if running with in-kernel irqchip.
 */
#define kvm_irqfds_enabled() kvm_irqchip_in_kernel()

/**
 * kvm_resamplefds_enabled:
 *
 * Returns: true if we can use resamplefds to inject interrupts into
 * a KVM CPU (ie the kernel supports resamplefds and we are running
 * with a configuration where it is meaningful to use them).
 */
#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed)

/**
 * kvm_msi_via_irqfd_enabled:
 *
 * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
 * to a KVM CPU via an irqfd. This requires that the kernel supports
 * this and that we're running in a configuration that permits it.
 */
#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)

/**
 * kvm_gsi_routing_enabled:
 *
 * Returns: true if GSI routing is enabled (ie the kernel supports
 * it and we're running in a configuration that permits it).
 */
#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)

/**
 * kvm_gsi_direct_mapping:
 *
 * Returns: true if GSI direct mapping is enabled.
 */
#define kvm_gsi_direct_mapping() (kvm_gsi_direct_mapping)

/**
 * kvm_readonly_mem_enabled:
 *
 * Returns: true if KVM readonly memory is enabled (ie the kernel
 * supports it and we're running in a configuration that permits it).
 */
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)

/**
 * kvm_msi_devid_required:
 *
 * Returns: true if KVM requires a device id to be provided while
 * defining an MSI routing entry.
 */
#define kvm_msi_devid_required() (kvm_msi_use_devid)

#else

#define kvm_enabled()           (0)
#define kvm_irqchip_in_kernel() (false)
#define kvm_irqchip_is_split() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_halt_in_kernel() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_resamplefds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
#define kvm_gsi_routing_enabled() (false)
#define kvm_gsi_direct_mapping() (false)
#define kvm_readonly_mem_enabled() (false)
#define kvm_msi_devid_required() (false)

#endif  /* CONFIG_KVM_IS_POSSIBLE */

struct kvm_run;
struct kvm_irq_routing_entry;

typedef struct KVMCapabilityInfo {
    const char *name;
    int value;
} KVMCapabilityInfo;

#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
#define KVM_CAP_LAST_INFO { NULL, 0 }
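
/*
 * Illustrative sketch (not part of this header): each target provides
 * kvm_arch_required_capabilities[] (declared below) as a
 * KVM_CAP_LAST_INFO-terminated list; generic KVM init checks every entry
 * and refuses to start if one is missing.  The capability named here is
 * only an example.
 *
 *   const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 *       KVM_CAP_INFO(SET_TSS_ADDR),
 *       KVM_CAP_LAST_INFO
 *   };
 */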

struct KVMState;

#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
typedef struct KVMState KVMState;
DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE,
                         TYPE_KVM_ACCEL)

extern KVMState *kvm_state;
typedef struct Notifier Notifier;

typedef struct KVMRouteChange {
    KVMState *s;
    int changes;
} KVMRouteChange;

/* external API */

unsigned int kvm_get_max_memslots(void);
unsigned int kvm_get_free_memslots(void);
bool kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
int kvm_max_nested_state_length(void);
int kvm_has_gsi_routing(void);

/**
 * kvm_arm_supports_user_irq:
 *
 * Not all KVM implementations support notifications for kernel-generated
 * interrupt events to user space.  This function indicates whether the
 * current KVM implementation supports them.
 *
 * Returns: true if KVM supports using kernel-generated IRQs from user space
 */
bool kvm_arm_supports_user_irq(void);

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
int kvm_on_sigbus(int code, void *addr);

#ifdef COMPILING_PER_TARGET
#include "cpu.h"

void kvm_flush_coalesced_mmio_buffer(void);

/**
 * kvm_update_guest_debug(): ensure KVM debug structures updated
 * @cpu: the CPUState for this cpu
 * @reinject_trap: KVM trap injection control
 *
 * There are usually per-arch specifics which will be handled by
 * calling down to kvm_arch_update_guest_debug after the generic
 * fields have been set.
 */
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
#else
static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    return -EINVAL;
}
#endif

/* internal API */

int kvm_ioctl(KVMState *s, int type, ...);

int kvm_vm_ioctl(KVMState *s, int type, ...);

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);

/**
 * kvm_device_ioctl - call an ioctl on a kvm device
 * @fd: The KVM device file descriptor as returned from KVM_CREATE_DEVICE
 * @type: The device-ctrl ioctl number
 *
 * Returns: -errno on error, nonnegative on success
 */
int kvm_device_ioctl(int fd, int type, ...);

/**
 * kvm_vm_check_attr - check for existence of a specific vm attribute
 * @s: The KVMState pointer
 * @group: the group
 * @attr: the attribute of that group to query for
 *
 * Returns: 1 if the attribute exists
 *          0 if the attribute either does not exist or if the vm device
 *            interface is unavailable
 */
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr);

/**
 * kvm_device_check_attr - check for existence of a specific device attribute
 * @fd: The device file descriptor
 * @group: the group
 * @attr: the attribute of that group to query for
 *
 * Returns: 1 if the attribute exists
 *          0 if the attribute either does not exist or if the device
 *            interface is unavailable
 */
int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr);

/**
 * kvm_device_access - set or get the value of a specific device attribute
 * @fd: The device file descriptor
 * @group: the group
 * @attr: the attribute of that group to set or get
 * @val: pointer to a storage area for the value
 * @write: true for a set and false for a get operation
 * @errp: error object handle
 *
 * Returns: 0 on success
 *          < 0 on error
 *
 * Use kvm_device_check_attr() to check for the availability of optional
 * attributes.
 */
int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp);
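
/*
 * Illustrative sketch (not part of this header): query an attribute before
 * reading it.  devfd, group and attr are placeholders for a real device fd
 * and one of its attribute groups.
 *
 *   uint64_t val;
 *
 *   if (kvm_device_check_attr(devfd, group, attr)) {
 *       kvm_device_access(devfd, group, attr, &val, false, &error_abort);
 *   }
 */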

/**
 * kvm_create_device - create a KVM device for the device control API
 * @s: The KVMState pointer
 * @type: The KVM device type (see Documentation/virtual/kvm/devices in the
 *        kernel source)
 * @test: If true, only test whether the device can be created; don't actually
 *        create it.
 *
 * Returns: -errno on error, nonnegative on success: @test ? 0 : device fd
 */
int kvm_create_device(KVMState *s, uint64_t type, bool test);

/**
 * kvm_device_supported - probe whether KVM supports a specific device type
 *
 * @vmfd: The VM file descriptor
 * @type: The device type
 *
 * Returns: true if supported, false otherwise.
 */
bool kvm_device_supported(int vmfd, uint64_t type);
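
/*
 * Illustrative sketch (not part of this header): probe for a device type and
 * then create it.  vmfd is a placeholder for the VM file descriptor; the
 * device type is only an example.
 *
 *   if (kvm_device_supported(vmfd, KVM_DEV_TYPE_VFIO)) {
 *       int devfd = kvm_create_device(kvm_state, KVM_DEV_TYPE_VFIO, false);
 *       if (devfd < 0) {
 *           error_report("KVM_CREATE_DEVICE failed: %s", strerror(-devfd));
 *       }
 *   }
 */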

/**
 * kvm_create_vcpu - Get a parked KVM vCPU or create a new KVM vCPU
 * @cpu: QOM CPUState object for which a KVM vCPU is to be fetched or created.
 *
 * Returns: 0 on success, negative errno on failure.
 */
int kvm_create_vcpu(CPUState *cpu);

/**
 * kvm_park_vcpu - Park the QEMU KVM vCPU context
 * @cpu: QOM CPUState object whose QEMU KVM vCPU context is to be parked.
 *
 * Returns: none
 */
void kvm_park_vcpu(CPUState *cpu);

/**
 * kvm_unpark_vcpu - Unpark a QEMU KVM vCPU context
 * @s: KVM State
 * @vcpu_id: Architecture vCPU ID of the parked vCPU
 *
 * Returns: the KVM fd of the parked vCPU
 */
int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id);
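
/*
 * Illustrative sketch (not part of this header) of the parking flow used
 * around vCPU hot-(un)plug: on unplug the accelerator parks the kernel vCPU
 * fd instead of destroying it, and a later kvm_create_vcpu() for the same
 * vcpu_id reuses the parked fd instead of issuing KVM_CREATE_VCPU again.
 *
 *   kvm_park_vcpu(cpu);              unplug path: keep the fd for reuse
 *
 *   if (kvm_create_vcpu(cpu) < 0) {  re-plug path: unparks or creates
 *       error_report("kvm_create_vcpu failed");
 *   }
 */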

/* Arch specific hooks */

extern const KVMCapabilityInfo kvm_arch_required_capabilities[];

void kvm_arch_accel_class_init(ObjectClass *oc);

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);

int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);

int kvm_arch_process_async_events(CPUState *cpu);

int kvm_arch_get_registers(CPUState *cpu);

/* state subset only touched by the VCPU itself during runtime */
#define KVM_PUT_RUNTIME_STATE   1
/* state subset modified during VCPU reset */
#define KVM_PUT_RESET_STATE     2
/* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE      3

int kvm_arch_put_registers(CPUState *cpu, int level);
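
/*
 * Illustrative sketch (not part of this header): callers pass the smallest
 * level that covers what changed, e.g. after a guest reset only the reset
 * subset needs to be written back to KVM.
 *
 *   ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
 */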
364 
365 int kvm_arch_get_default_type(MachineState *ms);
366 
367 int kvm_arch_init(MachineState *ms, KVMState *s);
368 
369 int kvm_arch_init_vcpu(CPUState *cpu);
370 int kvm_arch_destroy_vcpu(CPUState *cpu);
371 
372 bool kvm_vcpu_id_is_valid(int vcpu_id);
373 
374 /* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */
375 unsigned long kvm_arch_vcpu_id(CPUState *cpu);
376 
377 #ifdef KVM_HAVE_MCE_INJECTION
378 void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
379 #endif
380 
381 void kvm_arch_init_irq_routing(KVMState *s);
382 
383 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
384                              uint64_t address, uint32_t data, PCIDevice *dev);
385 
386 /* Notify arch about newly added MSI routes */
387 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
388                                 int vector, PCIDevice *dev);
389 /* Notify arch about released MSI routes */
390 int kvm_arch_release_virq_post(int virq);
391 
392 int kvm_arch_msi_data_to_gsi(uint32_t data);
393 
394 int kvm_set_irq(KVMState *s, int irq, int level);
395 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);
396 
397 void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);
398 
399 void kvm_irqchip_add_change_notifier(Notifier *n);
400 void kvm_irqchip_remove_change_notifier(Notifier *n);
401 void kvm_irqchip_change_notify(void);
402 
403 struct kvm_guest_debug;
404 struct kvm_debug_exit_arch;
405 
406 struct kvm_sw_breakpoint {
407     vaddr pc;
408     vaddr saved_insn;
409     int use_count;
410     QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
411 };
412 
413 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
414                                                  vaddr pc);
415 
416 int kvm_sw_breakpoints_active(CPUState *cpu);
417 
418 int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
419                                   struct kvm_sw_breakpoint *bp);
420 int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
421                                   struct kvm_sw_breakpoint *bp);
422 int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
423 int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
424 void kvm_arch_remove_all_hw_breakpoints(void);
425 
426 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);
427 
428 bool kvm_arch_stop_on_emulation_error(CPUState *cpu);
429 
430 int kvm_check_extension(KVMState *s, unsigned int extension);
431 
432 int kvm_vm_check_extension(KVMState *s, unsigned int extension);

#define kvm_vm_enable_cap(s, capability, cap_flags, ...)             \
    ({                                                               \
        struct kvm_enable_cap cap = {                                \
            .cap = capability,                                       \
            .flags = cap_flags,                                      \
        };                                                           \
        uint64_t args_tmp[] = { __VA_ARGS__ };                       \
        size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args));  \
        memcpy(cap.args, args_tmp, n * sizeof(cap.args[0]));         \
        kvm_vm_ioctl(s, KVM_ENABLE_CAP, &cap);                       \
    })

#define kvm_vcpu_enable_cap(cpu, capability, cap_flags, ...)         \
    ({                                                               \
        struct kvm_enable_cap cap = {                                \
            .cap = capability,                                       \
            .flags = cap_flags,                                      \
        };                                                           \
        uint64_t args_tmp[] = { __VA_ARGS__ };                       \
        size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args));  \
        memcpy(cap.args, args_tmp, n * sizeof(cap.args[0]));         \
        kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap);                   \
    })
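
/*
 * Illustrative sketch (not part of this header): trailing arguments fill
 * cap.args[] in order.  The capability and argument value are placeholders.
 *
 *   int ret = kvm_vm_enable_cap(s, KVM_CAP_FOO, 0, 1);
 *   if (ret < 0) {
 *       error_report("enabling capability failed: %s", strerror(-ret));
 *   }
 */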

void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
                                       hwaddr *phys_addr);

#endif /* COMPILING_PER_TARGET */

void kvm_cpu_synchronize_state(CPUState *cpu);

void kvm_init_cpu_signals(CPUState *cpu);

/**
 * kvm_irqchip_add_msi_route - Add MSI route for a specific vector
 * @c:      KVMRouteChange instance.
 * @vector: which vector to add. This can be either an MSI or MSI-X
 *          vector. The function automatically detects whether MSI or
 *          MSI-X is enabled and fetches the corresponding MSI message.
 * @dev:    Owner PCI device of the route. If @dev is NULL, an empty
 *          MSI message is initialized.
 *
 * Returns: virq (>= 0) on success, negative errno on failure.
 */
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev);
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev);
void kvm_irqchip_commit_routes(KVMState *s);

static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s)
{
    return (KVMRouteChange) { .s = s, .changes = 0 };
}

static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c)
{
    if (c->changes) {
        kvm_irqchip_commit_routes(c->s);
        c->changes = 0;
    }
}
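
/*
 * Illustrative sketch (not part of this header): add an MSI route inside a
 * route-change transaction and commit it once.  vector and pci_dev are
 * placeholders.
 *
 *   KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
 *   int virq = kvm_irqchip_add_msi_route(&c, vector, pci_dev);
 *
 *   if (virq >= 0) {
 *       kvm_irqchip_commit_route_changes(&c);
 *   }
 */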

int kvm_irqchip_get_virq(KVMState *s);
void kvm_irqchip_release_virq(KVMState *s, int virq);

void kvm_add_routing_entry(KVMState *s,
                           struct kvm_irq_routing_entry *entry);

int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq);
int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq);
int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq);
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq);
void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi);
void kvm_init_irq_routing(KVMState *s);
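
/*
 * Illustrative sketch (not part of this header): once a virq has been
 * allocated and committed, an EventNotifier can be wired to it so that
 * signalling the notifier injects the interrupt from the kernel side.
 * notifier and virq are placeholders.
 *
 *   if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &notifier, NULL,
 *                                          virq) < 0) {
 *       ... tear down the route and fall back to userspace injection ...
 *   }
 */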

bool kvm_kernel_irqchip_allowed(void);
bool kvm_kernel_irqchip_required(void);
bool kvm_kernel_irqchip_split(void);

/**
 * kvm_arch_irqchip_create:
 * @s: The KVMState pointer
 *
 * Allow architectures to create an in-kernel irq chip themselves.
 *
 * Returns: < 0: error
 *            0: irq chip was not created
 *          > 0: irq chip was created
 */
int kvm_arch_irqchip_create(KVMState *s);

/**
 * kvm_set_one_reg - set a register value in KVM via KVM_SET_ONE_REG ioctl
 * @id: The register ID
 * @source: The pointer to the value to be set. It must point to a variable
 *          of the correct type/size for the register being accessed.
 *
 * Returns: 0 on success, or a negative errno on failure.
 */
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);

/**
 * kvm_get_one_reg - get a register value from KVM via KVM_GET_ONE_REG ioctl
 * @id: The register ID
 * @target: The pointer where the value is to be stored. It must point to a
 *          variable of the correct type/size for the register being accessed.
 *
 * Returns: 0 on success, or a negative errno on failure.
 */
int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);
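
/*
 * Illustrative sketch (not part of this header): the register ID encodes
 * architecture, size and register, and the buffer must match the encoded
 * size.  reg_id and SOME_FLAG are placeholders.
 *
 *   uint64_t val;
 *
 *   if (kvm_get_one_reg(cs, reg_id, &val) == 0) {
 *       val |= SOME_FLAG;
 *       kvm_set_one_reg(cs, reg_id, &val);
 *   }
 */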

/* Notify resamplefd for EOI of specific interrupts. */
void kvm_resample_fd_notify(int gsi);

bool kvm_dirty_ring_enabled(void);

uint32_t kvm_dirty_ring_size(void);

void kvm_mark_guest_state_protected(void);

/**
 * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page
 * reported for the VM.
 */
bool kvm_hwpoisoned_mem(void);

int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp);

int kvm_set_memory_attributes_private(hwaddr start, uint64_t size);
int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size);

int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private);

#endif