/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/* header to be included in non-KVM-specific code */

#ifndef QEMU_KVM_H
#define QEMU_KVM_H

#include "exec/memattrs.h"
#include "qemu/accel.h"
#include "qom/object.h"

#ifdef COMPILING_PER_TARGET
# ifdef CONFIG_KVM
#  include <linux/kvm.h>
#  define CONFIG_KVM_IS_POSSIBLE
# endif
#else
# define CONFIG_KVM_IS_POSSIBLE
#endif

#ifdef CONFIG_KVM_IS_POSSIBLE

extern bool kvm_allowed;
extern bool kvm_kernel_irqchip;
extern bool kvm_split_irqchip;
extern bool kvm_async_interrupts_allowed;
extern bool kvm_halt_in_kernel_allowed;
extern bool kvm_resamplefds_allowed;
extern bool kvm_msi_via_irqfd_allowed;
extern bool kvm_gsi_routing_allowed;
extern bool kvm_gsi_direct_mapping;
extern bool kvm_readonly_mem_allowed;
extern bool kvm_msi_use_devid;

#define kvm_enabled() (kvm_allowed)

/**
 * kvm_irqchip_in_kernel:
 *
 * Returns: true if an in-kernel irqchip was created.
 * What this actually means is architecture and machine model
 * specific: on PC, for instance, it means that the LAPIC
 * is in the kernel. This function should never be used from generic
 * target-independent code: use one of the following functions or
 * some other specific check instead.
 */
#define kvm_irqchip_in_kernel() (kvm_kernel_irqchip)

/**
 * kvm_irqchip_is_split:
 *
 * Returns: true if the irqchip implementation is split between
 * user and kernel space. The details are architecture and
 * machine specific. On PC, it means that the PIC, IOAPIC, and
 * PIT are in user space while the LAPIC is in the kernel.
 */
#define kvm_irqchip_is_split() (kvm_split_irqchip)

/**
 * kvm_async_interrupts_enabled:
 *
 * Returns: true if we can deliver interrupts to KVM
 * asynchronously (ie by ioctl from any thread at any time)
 * rather than having to do interrupt delivery synchronously
 * (where the vcpu must be stopped at a suitable point first).
 */
#define kvm_async_interrupts_enabled() (kvm_async_interrupts_allowed)

/**
 * kvm_halt_in_kernel:
 *
 * Returns: true if halted CPUs should still get a KVM_RUN ioctl to run
 * inside kernel space. This only works if MP state is implemented.
 */
#define kvm_halt_in_kernel() (kvm_halt_in_kernel_allowed)

/**
 * kvm_irqfds_enabled:
 *
 * Returns: true if we can use irqfds to inject interrupts into
 * a KVM CPU (ie the kernel supports irqfds and we are running
 * with a configuration where it is meaningful to use them).
 *
 * Always available if running with in-kernel irqchip.
 */
#define kvm_irqfds_enabled() kvm_irqchip_in_kernel()

/**
 * kvm_resamplefds_enabled:
 *
 * Returns: true if we can use resamplefds to inject interrupts into
 * a KVM CPU (ie the kernel supports resamplefds and we are running
 * with a configuration where it is meaningful to use them).
 */
#define kvm_resamplefds_enabled() (kvm_resamplefds_allowed)

/**
 * kvm_msi_via_irqfd_enabled:
 *
 * Returns: true if we can route a PCI MSI (Message Signaled Interrupt)
 * to a KVM CPU via an irqfd. This requires that the kernel supports
 * this and that we're running in a configuration that permits it.
 */
#define kvm_msi_via_irqfd_enabled() (kvm_msi_via_irqfd_allowed)
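/*
 * Illustrative usage sketch: generic device code normally guards in-kernel
 * irqchip fast paths with these run-time predicates rather than with
 * CONFIG_KVM at build time, so the same object file works with and without
 * KVM.  "irq_num" and "userspace_irq" below are hypothetical placeholders.
 *
 *     if (kvm_enabled() && kvm_irqchip_in_kernel()) {
 *         kvm_set_irq(kvm_state, irq_num, 1);   // let KVM raise the line
 *     } else {
 *         qemu_set_irq(userspace_irq, 1);       // userspace irqchip model
 *     }
 */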

/**
 * kvm_gsi_routing_enabled:
 *
 * Returns: true if GSI routing is enabled (ie the kernel supports
 * it and we're running in a configuration that permits it).
 */
#define kvm_gsi_routing_enabled() (kvm_gsi_routing_allowed)

/**
 * kvm_gsi_direct_mapping:
 *
 * Returns: true if GSI direct mapping is enabled.
 */
#define kvm_gsi_direct_mapping() (kvm_gsi_direct_mapping)

/**
 * kvm_readonly_mem_enabled:
 *
 * Returns: true if KVM readonly memory is enabled (ie the kernel
 * supports it and we're running in a configuration that permits it).
 */
#define kvm_readonly_mem_enabled() (kvm_readonly_mem_allowed)

/**
 * kvm_msi_devid_required:
 *
 * Returns: true if KVM requires a device id to be provided while
 * defining an MSI routing entry.
 */
#define kvm_msi_devid_required() (kvm_msi_use_devid)

#else

#define kvm_enabled() (0)
#define kvm_irqchip_in_kernel() (false)
#define kvm_irqchip_is_split() (false)
#define kvm_async_interrupts_enabled() (false)
#define kvm_halt_in_kernel() (false)
#define kvm_irqfds_enabled() (false)
#define kvm_resamplefds_enabled() (false)
#define kvm_msi_via_irqfd_enabled() (false)
#define kvm_gsi_routing_enabled() (false)
#define kvm_gsi_direct_mapping() (false)
#define kvm_readonly_mem_enabled() (false)
#define kvm_msi_devid_required() (false)

#endif /* CONFIG_KVM_IS_POSSIBLE */

struct kvm_run;
struct kvm_irq_routing_entry;

typedef struct KVMCapabilityInfo {
    const char *name;
    int value;
} KVMCapabilityInfo;

#define KVM_CAP_INFO(CAP) { "KVM_CAP_" stringify(CAP), KVM_CAP_##CAP }
#define KVM_CAP_LAST_INFO { NULL, 0 }

struct KVMState;

#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
typedef struct KVMState KVMState;
DECLARE_INSTANCE_CHECKER(KVMState, KVM_STATE,
                         TYPE_KVM_ACCEL)

extern KVMState *kvm_state;
typedef struct Notifier Notifier;

typedef struct KVMRouteChange {
    KVMState *s;
    int changes;
} KVMRouteChange;

/* external API */

unsigned int kvm_get_max_memslots(void);
unsigned int kvm_get_free_memslots(void);
bool kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
int kvm_max_nested_state_length(void);
int kvm_has_gsi_routing(void);

/**
 * kvm_arm_supports_user_irq
 *
 * Not all KVM implementations support notifications for kernel generated
 * interrupt events to user space. This function indicates whether the current
 * KVM implementation does support them.
 *
 * Returns: true if KVM supports using kernel generated IRQs from user space
 */
bool kvm_arm_supports_user_irq(void);

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
int kvm_on_sigbus(int code, void *addr);

#ifdef COMPILING_PER_TARGET
#include "cpu.h"

void kvm_flush_coalesced_mmio_buffer(void);

/**
 * kvm_update_guest_debug(): ensure KVM debug structures updated
 * @cs: the CPUState for this cpu
 * @reinject_trap: KVM trap injection control
 *
 * There are usually per-arch specifics which will be handled by
 * calling down to kvm_arch_update_guest_debug after the generic
 * fields have been set.
 */
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap);
#else
static inline int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    return -EINVAL;
}
#endif
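/*
 * Illustrative usage sketch (assumes the generic CPU_FOREACH() iterator and
 * ignores errors for brevity): after the breakpoint list changes, debug
 * state is typically re-synchronized on every vCPU.
 *
 *     CPUState *cs;
 *     CPU_FOREACH(cs) {
 *         kvm_update_guest_debug(cs, 0);
 *     }
 */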

/* internal API */

int kvm_ioctl(KVMState *s, int type, ...);

int kvm_vm_ioctl(KVMState *s, int type, ...);

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...);

/**
 * kvm_device_ioctl - call an ioctl on a kvm device
 * @fd: The KVM device file descriptor as returned from KVM_CREATE_DEVICE
 * @type: The device-ctrl ioctl number
 *
 * Returns: -errno on error, nonnegative on success
 */
int kvm_device_ioctl(int fd, int type, ...);

/**
 * kvm_vm_check_attr - check for existence of a specific vm attribute
 * @s: The KVMState pointer
 * @group: the group
 * @attr: the attribute of that group to query for
 *
 * Returns: 1 if the attribute exists
 *          0 if the attribute either does not exist or if the vm device
 *            interface is unavailable
 */
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr);

/**
 * kvm_device_check_attr - check for existence of a specific device attribute
 * @fd: The device file descriptor
 * @group: the group
 * @attr: the attribute of that group to query for
 *
 * Returns: 1 if the attribute exists
 *          0 if the attribute either does not exist or if the device
 *            interface is unavailable
 */
int kvm_device_check_attr(int fd, uint32_t group, uint64_t attr);

/**
 * kvm_device_access - set or get value of a specific device attribute
 * @fd: The device file descriptor
 * @group: the group
 * @attr: the attribute of that group to set or get
 * @val: pointer to a storage area for the value
 * @write: true for set and false for get operation
 * @errp: error object handle
 *
 * Returns: 0 on success
 *          < 0 on error
 *
 * Use kvm_device_check_attr() in order to check for the availability
 * of optional attributes.
 */
int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp);

/**
 * kvm_create_device - create a KVM device for the device control API
 * @s: The KVMState pointer
 * @type: The KVM device type (see Documentation/virtual/kvm/devices in the
 *        kernel source)
 * @test: If true, only test if the device can be created, but don't actually
 *        create the device.
 *
 * Returns: -errno on error, nonnegative on success:
 *          0 if @test is true, otherwise the new device fd.
 */
int kvm_create_device(KVMState *s, uint64_t type, bool test);

/**
 * kvm_device_supported - probe whether KVM supports a specific device
 * @vmfd: The fd handle for the VM
 * @type: type of device
 *
 * Returns: true if supported, otherwise false.
 */
bool kvm_device_supported(int vmfd, uint64_t type);
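/*
 * Illustrative sketch of the device control API flow.  KVM_DEV_TYPE_VFIO is
 * a real device type from <linux/kvm.h>; SOME_GROUP and SOME_ATTR are
 * made-up placeholders, and error handling is reduced to a minimum.
 *
 *     int fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_VFIO, false);
 *     if (fd >= 0 && kvm_device_check_attr(fd, SOME_GROUP, SOME_ATTR)) {
 *         uint64_t val = 1;
 *         kvm_device_access(fd, SOME_GROUP, SOME_ATTR, &val, true,
 *                           &error_abort);
 *     }
 */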

/* Arch specific hooks */

extern const KVMCapabilityInfo kvm_arch_required_capabilities[];

void kvm_arch_accel_class_init(ObjectClass *oc);

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run);
MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run);

int kvm_arch_handle_exit(CPUState *cpu, struct kvm_run *run);

int kvm_arch_process_async_events(CPUState *cpu);

int kvm_arch_get_registers(CPUState *cpu);

/* state subset only touched by the VCPU itself during runtime */
#define KVM_PUT_RUNTIME_STATE 1
/* state subset modified during VCPU reset */
#define KVM_PUT_RESET_STATE 2
/* full state set, modified during initialization or on vmload */
#define KVM_PUT_FULL_STATE 3

int kvm_arch_put_registers(CPUState *cpu, int level);

int kvm_arch_get_default_type(MachineState *ms);

int kvm_arch_init(MachineState *ms, KVMState *s);

int kvm_arch_init_vcpu(CPUState *cpu);
int kvm_arch_destroy_vcpu(CPUState *cpu);

bool kvm_vcpu_id_is_valid(int vcpu_id);

/* Returns VCPU ID to be used on KVM_CREATE_VCPU ioctl() */
unsigned long kvm_arch_vcpu_id(CPUState *cpu);

#ifdef KVM_HAVE_MCE_INJECTION
void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
#endif

void kvm_arch_init_irq_routing(KVMState *s);

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev);

/* Notify arch about newly added MSI routes */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev);
/* Notify arch about released MSI routes */
int kvm_arch_release_virq_post(int virq);

int kvm_arch_msi_data_to_gsi(uint32_t data);

int kvm_set_irq(KVMState *s, int irq, int level);
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg);

void kvm_irqchip_add_irq_route(KVMState *s, int gsi, int irqchip, int pin);

void kvm_irqchip_add_change_notifier(Notifier *n);
void kvm_irqchip_remove_change_notifier(Notifier *n);
void kvm_irqchip_change_notify(void);

struct kvm_guest_debug;
struct kvm_debug_exit_arch;

struct kvm_sw_breakpoint {
    vaddr pc;
    vaddr saved_insn;
    int use_count;
    QTAILQ_ENTRY(kvm_sw_breakpoint) entry;
};

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 vaddr pc);

int kvm_sw_breakpoints_active(CPUState *cpu);

int kvm_arch_insert_sw_breakpoint(CPUState *cpu,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_remove_sw_breakpoint(CPUState *cpu,
                                  struct kvm_sw_breakpoint *bp);
int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type);
int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type);
void kvm_arch_remove_all_hw_breakpoints(void);

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg);

bool kvm_arch_stop_on_emulation_error(CPUState *cpu);

int kvm_check_extension(KVMState *s, unsigned int extension);

int kvm_vm_check_extension(KVMState *s, unsigned int extension);
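/*
 * Illustrative usage sketch: optional features are usually gated on an
 * extension probe and then turned on with kvm_vm_enable_cap() (defined
 * below).  KVM_CAP_* constants come from <linux/kvm.h>; the capability
 * chosen here is only an example.
 *
 *     if (kvm_check_extension(s, KVM_CAP_HALT_POLL) > 0) {
 *         kvm_vm_enable_cap(s, KVM_CAP_HALT_POLL, 0, 200000);
 *     }
 */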
#define kvm_vm_enable_cap(s, capability, cap_flags, ...)            \
    ({                                                              \
        struct kvm_enable_cap cap = {                               \
            .cap = capability,                                      \
            .flags = cap_flags,                                     \
        };                                                          \
        uint64_t args_tmp[] = { __VA_ARGS__ };                      \
        size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \
        memcpy(cap.args, args_tmp, n * sizeof(cap.args[0]));        \
        kvm_vm_ioctl(s, KVM_ENABLE_CAP, &cap);                      \
    })

#define kvm_vcpu_enable_cap(cpu, capability, cap_flags, ...)        \
    ({                                                              \
        struct kvm_enable_cap cap = {                               \
            .cap = capability,                                      \
            .flags = cap_flags,                                     \
        };                                                          \
        uint64_t args_tmp[] = { __VA_ARGS__ };                      \
        size_t n = MIN(ARRAY_SIZE(args_tmp), ARRAY_SIZE(cap.args)); \
        memcpy(cap.args, args_tmp, n * sizeof(cap.args[0]));        \
        kvm_vcpu_ioctl(cpu, KVM_ENABLE_CAP, &cap);                  \
    })

void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
                                       hwaddr *phys_addr);

#endif /* COMPILING_PER_TARGET */

void kvm_cpu_synchronize_state(CPUState *cpu);

void kvm_init_cpu_signals(CPUState *cpu);

/**
 * kvm_irqchip_add_msi_route - Add an MSI route for a specific vector
 * @c: KVMRouteChange instance.
 * @vector: which vector to add. This can be either an MSI or MSI-X
 *          vector. The function will automatically detect whether
 *          MSI or MSI-X is enabled and fetch the corresponding MSI
 *          message.
 * @dev: Owner PCI device to add the route for. If @dev is specified
 *       as @NULL, an empty MSI message will be initialized.
 *
 * Returns: virq (>=0) on success, a negative errno on failure.
 */
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev);
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev);
void kvm_irqchip_commit_routes(KVMState *s);

static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s)
{
    return (KVMRouteChange) { .s = s, .changes = 0 };
}

static inline void kvm_irqchip_commit_route_changes(KVMRouteChange *c)
{
    if (c->changes) {
        kvm_irqchip_commit_routes(c->s);
        c->changes = 0;
    }
}
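/*
 * Illustrative usage sketch: MSI routes are added inside a route-change
 * transaction so the routing table is pushed to the kernel only once.
 * "vector" and "pdev" are hypothetical caller-provided values, and the
 * fallback path is left as a comment.
 *
 *     KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state);
 *     int virq = kvm_irqchip_add_msi_route(&c, vector, pdev);
 *     kvm_irqchip_commit_route_changes(&c);
 *     if (virq < 0) {
 *         // fall back to non-irqfd MSI injection
 *     }
 */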

void kvm_irqchip_release_virq(KVMState *s, int virq);

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter);
int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint);

int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq);
int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq);
int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq);
int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq);
void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi);
void kvm_init_irq_routing(KVMState *s);

bool kvm_kernel_irqchip_allowed(void);
bool kvm_kernel_irqchip_required(void);
bool kvm_kernel_irqchip_split(void);

/**
 * kvm_arch_irqchip_create:
 * @s: The KVMState pointer
 *
 * Allow architectures to create an in-kernel irq chip themselves.
 *
 * Returns: < 0: error
 *            0: irq chip was not created
 *          > 0: irq chip was created
 */
int kvm_arch_irqchip_create(KVMState *s);

/**
 * kvm_set_one_reg - set a register value in KVM via KVM_SET_ONE_REG ioctl
 * @cs: The CPUState of the vCPU
 * @id: The register ID
 * @source: The pointer to the value to be set. It must point to a variable
 *          of the correct type/size for the register being accessed.
 *
 * Returns: 0 on success, or a negative errno on failure.
 */
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source);

/**
 * kvm_get_one_reg - get a register value from KVM via KVM_GET_ONE_REG ioctl
 * @cs: The CPUState of the vCPU
 * @id: The register ID
 * @target: The pointer where the value is to be stored. It must point to a
 *          variable of the correct type/size for the register being accessed.
 *
 * Returns: 0 on success, or a negative errno on failure.
 */
int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target);

/* Notify resamplefd for EOI of specific interrupts. */
void kvm_resample_fd_notify(int gsi);

bool kvm_dirty_ring_enabled(void);

uint32_t kvm_dirty_ring_size(void);

void kvm_mark_guest_state_protected(void);

/**
 * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page
 * reported for the VM.
 */
bool kvm_hwpoisoned_mem(void);

int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp);

int kvm_set_memory_attributes_private(hwaddr start, uint64_t size);
int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size);

int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private);

#endif /* QEMU_KVM_H */