/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/* header to be included in non-KVM-specific code */

#ifndef QEMU_KVM_H
#define QEMU_KVM_H

#include "exec/memattrs.h"
#include "qemu/accel.h"
#include "qom/object.h"

#ifdef COMPILING_PER_TARGET
# ifdef CONFIG_KVM
#  include <linux/kvm.h>
#  define CONFIG_KVM_IS_POSSIBLE
# endif
#else
# define CONFIG_KVM_IS_POSSIBLE
#endif

#ifdef CONFIG_KVM_IS_POSSIBLE

extern bool kvm_allowed;
extern bool kvm_kernel_irqchip;
extern bool kvm_split_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_halt_in_kernel_allowed;
extern bool kvm_resamplefds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_gsi_direct_mapping;
extern bool kvm_readonly_mem_allowed;
extern bool kvm_msi_use_devid;

#define kvm_enabled() (kvm_allowed)
/**
 * kvm_irqchip_in_kernel:
 *
 * Returns: true if an in-kernel irqchip was created.
 * What this actually means is architecture and machine model
 * specific: on PC, for instance, it means that the LAPIC
 * is in kernel. This function should never be used from generic
 * target-independent code: use one of the following functions or
 * some other specific check instead.
 */
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)

/**
 * kvm_irqchip_is_split:
 *
 * Returns: true if the irqchip implementation is split between
 * user and kernel space. The details are architecture and
 * machine specific. On PC, it means that the PIC, IOAPIC, and
 * PIT are in user space while the LAPIC is in the kernel.
 */
#define kvm_irqchip_is_split() (kvm_split_irqchip)

/**
 * kvm_async_interrupts_enabled:
 *
 * Returns: true if we can deliver interrupts to KVM
 * asynchronously (ie by ioctl from any thread at any time)
 * rather than having to do interrupt delivery synchronously
 * (where the vcpu must be stopped at a suitable point first).
 */
#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)

/**
 * kvm_halt_in_kernel:
 *
 * Returns: true if halted cpus should still get a KVM_RUN ioctl to run
 * inside of kernel space. This only works if MP state is implemented.
 */
#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)

/**
 * kvm_irqfds_enabled:
 *
 * Returns: true if we can use irqfds to inject interrupts into
 * a KVM CPU (ie the kernel supports irqfds and we are running
 * with a configuration where it is meaningful to use them).
 *
 * Always available if running with in-kernel irqchip.
 */
#define kvm_irqfds_enabled() kvm_irqchip_in_kernel()

/**
 * kvm_resamplefds_enabled:
 *
 * Returns: true if we can use resamplefds to inject interrupts into
 * a KVM CPU (ie the kernel supports resamplefds and we are running
 * with a configuration where it is meaningful to use them).
 */
#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed)

/**
 * kvm_msi_via_irqfd_enabled:
 *
 * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
 * to a KVM CPU via an irqfd. This requires that the kernel supports
 * this and that we're running in a configuration that permits it.
 */
#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)

/**
 * kvm_gsi_routing_enabled:
 *
 * Returns: true if GSI routing is enabled (ie the kernel supports
 * it and we're running in a configuration that permits it).
 */
#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)

/**
 * kvm_gsi_direct_mapping:
 *
 * Returns: true if GSI direct mapping is enabled.
 */
#define kvm_gsi_direct_mapping() (kvm_gsi_direct_mapping)

/**
 * kvm_readonly_mem_enabled:
 *
 * Returns: true if KVM readonly memory is enabled (ie the kernel
 * supports it and we're running in a configuration that permits it).
 */
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)

/**
 * kvm_msi_devid_required:
 *
 * Returns: true if KVM requires a device id to be provided while
 * defining an MSI routing entry.
 */
#define kvm_msi_devid_required() (kvm_msi_use_devid)

#else

#define kvm_enabled() (0)
#define kvm_irqchip_in_kernel() (false)
#define kvm_irqchip_is_split() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_halt_in_kernel() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_resamplefds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
#define kvm_gsi_routing_enabled() (false)
#define kvm_gsi_direct_mapping() (false)
#define kvm_readonly_mem_enabled() (false)
#define kvm_msi_devid_required() (false)

#endif /* CONFIG_KVM_IS_POSSIBLE */

struct kvm_run;
struct kvm_irq_routing_entry;

typedef struct KVMCapabilityInfo {
    const char *name;
    int value;
} KVMCapabilityInfo;

#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
#define KVM_CAP_LAST_INFO { NULL, 0 }
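
/*
 * Illustrative sketch (not part of the original header): an architecture
 * backend builds its kvm_arch_required_capabilities[] table (declared
 * further down in this header) from these macros and terminates it with the
 * KVM_CAP_LAST_INFO sentinel. KVM_CAP_INFO(USER_MEMORY) expands to
 * { "KVM_CAP_USER_MEMORY", KVM_CAP_USER_MEMORY }, and KVM_CAP_LAST_INFO
 * supplies the { NULL, 0 } terminator. The capability chosen here is only
 * an example:
 *
 *     const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
 *         KVM_CAP_INFO(USER_MEMORY),
 *         KVM_CAP_LAST_INFO
 *     };
 */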

struct KVMState;

#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
typedef struct KVMState KVMState;
DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE,
                         TYPE_KVM_ACCEL)

extern KVMState *kvm_state;
typedef struct Notifier Notifier;

typedef struct KVMRouteChange {
    KVMState *s;
    int changes;
} KVMRouteChange;

/* external API */

unsigned int kvm_get_max_memslots(void);
unsigned int kvm_get_free_memslots(void);
bool kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
int kvm_max_nested_state_length(void);
int kvm_has_gsi_routing(void);

/**
 * kvm_arm_supports_user_irq
 *
 * Not all KVM implementations support notifications for kernel generated
 * interrupt events to user space. This function indicates whether the current
 * KVM implementation does support them.
 *
 * Returns: true if KVM supports using kernel generated IRQs from user space
 */
bool kvm_arm_supports_user_irq(void);

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
int kvm_on_sigbus(int code, void *addr);

#ifdef COMPILING_PER_TARGET
#include "cpu.h"

void kvm_flush_coalesced_mmio_buffer(void);

/**
 * kvm_update_guest_debug(): ensure KVM debug structures updated
 * @cpu: the CPUState for this cpu
 * @reinject_trap: KVM trap injection control
 *
 * There are usually per-arch specifics which will be handled by
 * calling down to kvm_arch_update_guest_debug after the generic
 * fields have been set.
 */
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
#else
static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    return -EINVAL;
}
#endif

/* internal API */

int kvm_ioctl(KVMState *s, unsigned long type, ...);

int kvm_vm_ioctl(KVMState *s, unsigned long type, ...);

int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...);

/**
 * kvm_device_ioctl - call an ioctl on a kvm device
 * @fd: The KVM device file descriptor as returned from KVM_CREATE_DEVICE
 * @type: The device-ctrl ioctl number
 *
 * Returns: -errno on error, nonnegative on success
 */
int kvm_device_ioctl(int fd, unsigned long type, ...);

/**
 * kvm_vm_check_attr - check for existence of a specific vm attribute
 * @s: The KVMState pointer
 * @group: the group
 * @attr: the attribute of that group to query for
 *
 * Returns: 1 if the attribute exists
 *          0 if the attribute either does not exist or if the vm device
 *            interface is unavailable
 */
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr);

/**
 * kvm_device_check_attr - check for existence of a specific device attribute
 * @fd: The device file descriptor
 * @group: the group
 * @attr: the attribute of that group to query for
 *
 * Returns: 1 if the attribute exists
 *          0 if the attribute either does not exist or if the vm device
 *            interface is unavailable
 */
int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr);

/**
 * kvm_device_access - set or get value of a specific device attribute
 * @fd: The device file descriptor
 * @group: the group
 * @attr: the attribute of that group to set or get
 * @val: pointer to a storage area for the value
 * @write: true for set and false for get operation
 * @errp: error object handle
 *
 * Returns: 0 on success
 *          < 0 on error
 * Use kvm_device_check_attr() in order to check for the availability
 * of optional attributes.
 */
int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp);

/**
 * kvm_create_device - create a KVM device for the device control API
 * @s: The KVMState pointer
 * @type: The KVM device type (see Documentation/virtual/kvm/devices in the
 *        kernel source)
 * @test: If true, only test if device can be created, but don't actually
 *        create the device.
 *
 * Returns: -errno on error, nonnegative on success: @test ? 0 : device fd;
 */
int kvm_create_device(KVMState *s, uint64_t type, bool test);
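
/*
 * Illustrative sketch (assumed usage, not part of the original header): a
 * typical device control API sequence creates the device and then sets
 * attributes on the returned fd. The device type and attribute group below
 * come from the kernel's Arm VGIC device API and are used here only as an
 * example; the IRQ count is a hypothetical value.
 *
 *     int fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V3, false);
 *     if (fd >= 0) {
 *         uint32_t num_irq = 256;
 *         kvm_device_access(fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0,
 *                           &num_irq, true, &error_abort);
 *     }
 */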

/**
 * kvm_device_supported - probe whether KVM supports specific device
 *
 * @vmfd: The VM file descriptor
 * @type: type of device
 *
 * @return: true if supported, otherwise false.
 */
bool kvm_device_supported(int vmfd, uint64_t type);

/**
 * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU
 * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created.
 *
 * @returns: 0 on success, negative errno on failure.
 */
int kvm_create_vcpu(CPUState *cpu);

/**
 * kvm_park_vcpu - Park QEMU KVM vCPU context
 * @cpu: QOM CPUState object for which QEMU KVM vCPU context has to be parked.
 *
 * @returns: none
 */
void kvm_park_vcpu(CPUState *cpu);

/**
 * kvm_unpark_vcpu - unpark QEMU KVM vCPU context
 * @s: KVM State
 * @vcpu_id: Architecture vCPU ID of the parked vCPU
 *
 * @returns: KVM fd
 */
int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id);

/**
 * kvm_create_and_park_vcpu - Create and park a KVM vCPU
 * @cpu: QOM CPUState object for which KVM vCPU has to be created and parked.
 *
 * @returns: 0 on success, negative errno on failure.
 */
int kvm_create_and_park_vcpu(CPUState *cpu);
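
/*
 * Illustrative lifecycle sketch (assumed usage, not part of the original
 * header): a machine that pre-creates hot-pluggable vCPUs can park their
 * kernel fds at startup and reuse them when the vCPU is actually brought up;
 * kvm_create_vcpu() picks up a parked fd with a matching vCPU id before
 * asking the kernel for a new one.
 *
 *     kvm_create_and_park_vcpu(cs);
 *     ...
 *     int kvm_fd = kvm_unpark_vcpu(kvm_state, kvm_arch_vcpu_id(cs));
 */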

/* Arch specific hooks */

extern const KVMCapabilityInfo kvm_arch_required_capabilities[];

void kvm_arch_accel_class_init(ObjectClass *oc);

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);

int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);

int kvm_arch_process_async_events(CPUState *cpu);

int kvm_arch_get_registers(CPUState *cpu, Error **errp);

/* state subset only touched by the VCPU itself during runtime */
#define KVM_PUT_RUNTIME_STATE 1
/* state subset modified during VCPU reset */
#define KVM_PUT_RESET_STATE 2
/* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE 3

int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp);
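
/*
 * Illustrative sketch (assumed usage, not part of the original header): the
 * level argument selects how much state is written back to KVM, matching the
 * KVM_PUT_* definitions above:
 *
 *     kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE, &error_fatal);
 *     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE, &error_fatal);
 *     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE, &error_fatal);
 */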

int kvm_arch_get_default_type(MachineState *ms);

int kvm_arch_init(MachineState *ms, KVMState *s);

int kvm_arch_init_vcpu(CPUState *cpu);
int kvm_arch_destroy_vcpu(CPUState *cpu);

bool kvm_vcpu_id_is_valid(int vcpu_id);

/* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */
unsigned long kvm_arch_vcpu_id(CPUState *cpu);

#ifdef KVM_HAVE_MCE_INJECTION
void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
#endif

void kvm_arch_init_irq_routing(KVMState *s);

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev);

/* Notify arch about newly added MSI routes */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev);
/* Notify arch about released MSI routes */
int kvm_arch_release_virq_post(int virq);

int kvm_arch_msi_data_to_gsi(uint32_t data);

int kvm_set_irq(KVMState *s, int irq, int level);
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);

void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);

void kvm_irqchip_add_change_notifier(Notifier *n);
void kvm_irqchip_remove_change_notifier(Notifier *n);
void kvm_irqchip_change_notify(void);

struct kvm_guest_debug;
struct kvm_debug_exit_arch;

struct kvm_sw_breakpoint {
    vaddr pc;
    vaddr saved_insn;
    int use_count;
    QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
};

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 vaddr pc);

int kvm_sw_breakpoints_active(CPUState *cpu);

int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);

bool kvm_arch_stop_on_emulation_error(CPUState *cpu);

int kvm_check_extension(KVMState *s, unsigned int extension);

int kvm_vm_check_extension(KVMState *s, unsigned int extension);

#define kvm_vm_enable_cap(s, capability, cap_flags, ...)             \
    ({                                                               \
        struct kvm_enable_cap cap = {                                \
            .cap = capability,                                       \
            .flags = cap_flags,                                      \
        };                                                           \
        uint64_t args_tmp[] = { __VA_ARGS__ };                       \
        size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args));  \
        memcpy(cap.args, args_tmp, n * sizeof(cap.args[0]));         \
        kvm_vm_ioctl(s, KVM_ENABLE_CAP, &cap);                       \
    })

#define kvm_vcpu_enable_cap(cpu, capability, cap_flags, ...)         \
    ({                                                               \
        struct kvm_enable_cap cap = {                                \
            .cap = capability,                                       \
            .flags = cap_flags,                                      \
        };                                                           \
        uint64_t args_tmp[] = { __VA_ARGS__ };                       \
        size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args));  \
        memcpy(cap.args, args_tmp, n * sizeof(cap.args[0]));         \
        kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap);                   \
    })
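
/*
 * Illustrative sketch (assumed usage, not part of the original header): any
 * arguments after the flags are copied into cap.args[]. For example, x86
 * enables the split irqchip with the number of IOAPIC routes as the single
 * argument (the value 24 is only an example):
 *
 *     int ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
 *     if (ret < 0) {
 *         error_report("Could not enable split irqchip mode: %s",
 *                      strerror(-ret));
 *     }
 */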

void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
                                       hwaddr *phys_addr);

#endif /* COMPILING_PER_TARGET */

void kvm_cpu_synchronize_state(CPUState *cpu);

void kvm_init_cpu_signals(CPUState *cpu);

/**
 * kvm_irqchip_add_msi_route - Add MSI route for specific vector
 * @c: KVMRouteChange instance.
 * @vector: which vector to add. This can be either an MSI or MSI-X
 *          vector. The function will automatically detect whether
 *          MSI or MSI-X is enabled and fetch the corresponding MSI
 *          message.
 * @dev: Owner PCI device to add the route. If @dev is specified
 *       as @NULL, an empty MSI message will be initialized.
 * @return: virq (>= 0) on success, negative errno on failure.
 */
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev);
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev);
void kvm_irqchip_commit_routes(KVMState *s);

static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s)
{
    return (KVMRouteChange) { .s = s, .changes = 0 };
}

static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c)
{
    if (c->changes) {
        kvm_irqchip_commit_routes(c->s);
        c->changes = 0;
    }
}
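
/*
 * Illustrative sketch (assumed usage, not part of the original header): MSI
 * routes are added inside a begin/commit pair so that several route changes
 * are flushed to the kernel in one go. @pci_dev and @notifier below are
 * hypothetical caller-owned objects:
 *
 *     KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
 *     int virq = kvm_irqchip_add_msi_route(&c, vector, pci_dev);
 *     kvm_irqchip_commit_route_changes(&c);
 *     if (virq >= 0) {
 *         kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &notifier, NULL, virq);
 *     }
 */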

int kvm_irqchip_get_virq(KVMState *s);
void kvm_irqchip_release_virq(KVMState *s, int virq);

void kvm_add_routing_entry(KVMState *s,
                           struct kvm_irq_routing_entry *entry);

int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq);
int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq);
int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq);
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq);
void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi);
void kvm_init_irq_routing(KVMState *s);

bool kvm_kernel_irqchip_allowed(void);
bool kvm_kernel_irqchip_required(void);
bool kvm_kernel_irqchip_split(void);

/**
 * kvm_arch_irqchip_create:
 * @s: The KVMState pointer
 *
 * Allow architectures to create an in-kernel irq chip themselves.
 *
 * Returns: < 0: error
 *            0: irq chip was not created
 *          > 0: irq chip was created
 */
int kvm_arch_irqchip_create(KVMState *s);

/**
 * kvm_set_one_reg - set a register value in KVM via KVM_SET_ONE_REG ioctl
 * @id: The register ID
 * @source: The pointer to the value to be set. It must point to a variable
 *          of the correct type/size for the register being accessed.
 *
 * Returns: 0 on success, or a negative errno on failure.
 */
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);

/**
 * kvm_get_one_reg - get a register value from KVM via KVM_GET_ONE_REG ioctl
 * @id: The register ID
 * @target: The pointer where the value is to be stored. It must point to a
 *          variable of the correct type/size for the register being accessed.
 *
 * Returns: 0 on success, or a negative errno on failure.
 */
int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);
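
/*
 * Illustrative sketch (assumed usage, not part of the original header): the
 * caller provides storage matching the size encoded in the register ID;
 * @some_u64_reg_id below stands in for a hypothetical 64-bit register ID:
 *
 *     uint64_t val;
 *     if (kvm_get_one_reg(cs, some_u64_reg_id, &val) == 0) {
 *         val |= 1;
 *         kvm_set_one_reg(cs, some_u64_reg_id, &val);
 *     }
 */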

/* Notify resamplefd for EOI of specific interrupts. */
void kvm_resample_fd_notify(int gsi);

bool kvm_dirty_ring_enabled(void);

uint32_t kvm_dirty_ring_size(void);

void kvm_mark_guest_state_protected(void);

/**
 * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page
 * reported for the VM.
 */
bool kvm_hwpoisoned_mem(void);

int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp);

int kvm_set_memory_attributes_private(hwaddr start, uint64_t size);
int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size);

int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private);

#endif