/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <poll.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "gdbstub/enums.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/accel-blocker.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"
#include "qemu/guest-random.h"
#include "sysemu/hw_accel.h"
#include "kvm-cpus.h"
#include "sysemu/dirtylimit.h"
#include "qemu/range.h"

#include "hw/boards.h"
#include "sysemu/stats.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#ifdef PAGE_SIZE
#undef PAGE_SIZE
#endif
#define PAGE_SIZE qemu_real_host_page_size()

#ifndef KVM_GUESTDBG_BLOCKIRQ
#define KVM_GUESTDBG_BLOCKIRQ 0
#endif

struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static uint64_t kvm_supported_memory_attributes;
static bool kvm_guest_memfd_supported;
static hwaddr kvm_max_slot_size = ~0;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_INFO(INTERNAL_ERROR_DATA),
    KVM_CAP_INFO(IOEVENTFD),
    KVM_CAP_INFO(IOEVENTFD_ANY_LENGTH),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);
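/*
 * The global kml_slots_lock below serializes every access to the
 * KVMSlot arrays across all KVMMemoryListeners; helpers that say
 * "Called with KVMMemoryListener.slots_lock held" rely on it.
 * A minimal usage sketch (illustrative only):
 *
 *     kvm_slots_lock();
 *     KVMSlot *mem = kvm_lookup_matching_slot(kml, start, size);
 *     ...
 *     kvm_slots_unlock();
 */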
static QemuMutex kml_slots_lock;

#define kvm_slots_lock()    qemu_mutex_lock(&kml_slots_lock)
#define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)

static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);

static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}

unsigned int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots;
}

unsigned int kvm_get_free_memslots(void)
{
    unsigned int used_slots = 0;
    KVMState *s = kvm_state;
    int i;

    kvm_slots_lock();
    for (i = 0; i < s->nr_as; i++) {
        if (!s->as[i].ml) {
            continue;
        }
        used_slots = MAX(used_slots, s->as[i].ml->nr_used_slots);
    }
    kvm_slots_unlock();

    return s->nr_slots - used_slots;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to next and truncate size to previous page boundary. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size());
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask();
}
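/*
 * Worked example for kvm_align_section() (illustrative numbers,
 * assuming a 4 KiB host page size): a section at offset 0x1234 with
 * size 0x3000 is padded up to *start = 0x2000 (delta = 0xdcc), and the
 * returned size is (0x3000 - 0xdcc) & ~0xfff = 0x2000, i.e. only the
 * fully-covered host pages get registered with KVM.
 */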
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock();

    return ret;
}

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region2 mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    mem.guest_memfd = slot->guest_memfd;
    mem.guest_memfd_offset = slot->guest_memfd_offset;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;

        if (kvm_guest_memfd_supported) {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
        } else {
            ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        }
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    if (kvm_guest_memfd_supported) {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION2, &mem);
    } else {
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot >> 16, (uint16_t)mem.slot, mem.flags,
                              mem.guest_phys_addr, mem.memory_size,
                              mem.userspace_addr, mem.guest_memfd,
                              mem.guest_memfd_offset, ret);
    if (ret < 0) {
        if (kvm_guest_memfd_supported) {
            error_report("%s: KVM_SET_USER_MEMORY_REGION2 failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ","
                         " flags=0x%" PRIx32 ", guest_memfd=%" PRId32 ","
                         " guest_memfd_offset=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, mem.flags,
                         mem.guest_memfd, (uint64_t)mem.guest_memfd_offset,
                         strerror(errno));
        } else {
            error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                         " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                         __func__, mem.slot, slot->start_addr,
                         (uint64_t)mem.memory_size, strerror(errno));
        }
    }
    return ret;
}

void kvm_park_vcpu(CPUState *cpu)
{
    struct KVMParkedVcpu *vcpu;

    trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
}
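/*
 * Parked vCPU lifecycle: KVM has no ioctl to destroy a vCPU fd while
 * the VM exists, so on unplug the fd is "parked" on kvm_parked_vcpus
 * and, if a vCPU with the same id is plugged again, kvm_unpark_vcpu()
 * below hands the old fd back instead of issuing a new
 * KVM_CREATE_VCPU.
 */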
"unparked" : "!found parked"); 370 371 return kvm_fd; 372 } 373 374 int kvm_create_vcpu(CPUState *cpu) 375 { 376 unsigned long vcpu_id = kvm_arch_vcpu_id(cpu); 377 KVMState *s = kvm_state; 378 int kvm_fd; 379 380 /* check if the KVM vCPU already exist but is parked */ 381 kvm_fd = kvm_unpark_vcpu(s, vcpu_id); 382 if (kvm_fd < 0) { 383 /* vCPU not parked: create a new KVM vCPU */ 384 kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id); 385 if (kvm_fd < 0) { 386 error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id); 387 return kvm_fd; 388 } 389 } 390 391 cpu->kvm_fd = kvm_fd; 392 cpu->kvm_state = s; 393 cpu->vcpu_dirty = true; 394 cpu->dirty_pages = 0; 395 cpu->throttle_us_per_full = 0; 396 397 trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd); 398 399 return 0; 400 } 401 402 int kvm_create_and_park_vcpu(CPUState *cpu) 403 { 404 int ret = 0; 405 406 ret = kvm_create_vcpu(cpu); 407 if (!ret) { 408 kvm_park_vcpu(cpu); 409 } 410 411 return ret; 412 } 413 414 static int do_kvm_destroy_vcpu(CPUState *cpu) 415 { 416 KVMState *s = kvm_state; 417 int mmap_size; 418 int ret = 0; 419 420 trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); 421 422 ret = kvm_arch_destroy_vcpu(cpu); 423 if (ret < 0) { 424 goto err; 425 } 426 427 mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); 428 if (mmap_size < 0) { 429 ret = mmap_size; 430 trace_kvm_failed_get_vcpu_mmap_size(); 431 goto err; 432 } 433 434 ret = munmap(cpu->kvm_run, mmap_size); 435 if (ret < 0) { 436 goto err; 437 } 438 439 if (cpu->kvm_dirty_gfns) { 440 ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes); 441 if (ret < 0) { 442 goto err; 443 } 444 } 445 446 kvm_park_vcpu(cpu); 447 err: 448 return ret; 449 } 450 451 void kvm_destroy_vcpu(CPUState *cpu) 452 { 453 if (do_kvm_destroy_vcpu(cpu) < 0) { 454 error_report("kvm_destroy_vcpu failed"); 455 exit(EXIT_FAILURE); 456 } 457 } 458 459 int kvm_init_vcpu(CPUState *cpu, Error **errp) 460 { 461 KVMState *s = kvm_state; 462 int mmap_size; 463 int ret; 464 465 trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); 466 467 ret = kvm_create_vcpu(cpu); 468 if (ret < 0) { 469 error_setg_errno(errp, -ret, 470 "kvm_init_vcpu: kvm_create_vcpu failed (%lu)", 471 kvm_arch_vcpu_id(cpu)); 472 goto err; 473 } 474 475 mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); 476 if (mmap_size < 0) { 477 ret = mmap_size; 478 error_setg_errno(errp, -mmap_size, 479 "kvm_init_vcpu: KVM_GET_VCPU_MMAP_SIZE failed"); 480 goto err; 481 } 482 483 cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, 484 cpu->kvm_fd, 0); 485 if (cpu->kvm_run == MAP_FAILED) { 486 ret = -errno; 487 error_setg_errno(errp, ret, 488 "kvm_init_vcpu: mmap'ing vcpu state failed (%lu)", 489 kvm_arch_vcpu_id(cpu)); 490 goto err; 491 } 492 493 if (s->coalesced_mmio && !s->coalesced_mmio_ring) { 494 s->coalesced_mmio_ring = 495 (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE; 496 } 497 498 if (s->kvm_dirty_ring_size) { 499 /* Use MAP_SHARED to share pages with the kernel */ 500 cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes, 501 PROT_READ | PROT_WRITE, MAP_SHARED, 502 cpu->kvm_fd, 503 PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET); 504 if (cpu->kvm_dirty_gfns == MAP_FAILED) { 505 ret = -errno; 506 goto err; 507 } 508 } 509 510 ret = kvm_arch_init_vcpu(cpu); 511 if (ret < 0) { 512 error_setg_errno(errp, -ret, 513 "kvm_init_vcpu: kvm_arch_init_vcpu failed (%lu)", 514 kvm_arch_vcpu_id(cpu)); 515 } 516 cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL); 517 518 err: 519 return ret; 
/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    if (memory_region_has_guest_memfd(mr)) {
        assert(kvm_guest_memfd_supported);
        flags |= KVM_MEM_GUEST_MEMFD;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    kvm_slot_init_dirty_bitmap(mem);
    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock();

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock();
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
{
    ram_addr_t start = slot->ram_start_offset;
    ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();

    cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}

static void kvm_slot_reset_dirty_pages(KVMSlot *slot)
{
    memset(slot->dirty_bmap, 0, slot->dirty_bmap_size);
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
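/*
 * ALIGN() rounds up to the next multiple of a power-of-two, e.g.
 * ALIGN(10, 8) == 16 and ALIGN(16, 8) == 16; it is used below to pad
 * the dirty bitmap length to a 64-bit boundary.
 */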
/* Allocate the dirty bitmap for a slot */
static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
    if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
        return;
    }

    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, kernel allocates array of size aligned to
     * bits-per-long.  But for case when the kernel is 64bits and
     * the userspace is 32bits, userspace can't align to the same
     * bits-per-long, since sizeof(long) is different between kernel
     * and user space.  This way, userspace will provide buffer which
     * may be 4 bytes less than the kernel will use, resulting in
     * userspace memory corruption (which is not detectable by valgrind
     * too, in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * a hope that sizeof(long) won't become >8 any time soon.
     *
     * Note: the granule of kvm dirty log is qemu_real_host_page_size.
     * And mem->memory_size is aligned to it (otherwise this mem can't
     * be registered to KVM).
     */
    hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;
}

/*
 * Sync dirty bitmap from kernel to KVMSlot.dirty_bmap, return true if
 * succeeded, false otherwise
 */
static bool kvm_slot_get_dirty_log(KVMState *s, KVMSlot *slot)
{
    struct kvm_dirty_log d = {};
    int ret;

    d.dirty_bitmap = slot->dirty_bmap;
    d.slot = slot->slot | (slot->as_id << 16);
    ret = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);

    if (ret == -ENOENT) {
        /* kernel does not have dirty bitmap in this slot */
        ret = 0;
    }
    if (ret) {
        error_report_once("%s: KVM_GET_DIRTY_LOG failed with %d",
                          __func__, ret);
    }
    return ret == 0;
}

/* Should be with all slots_lock held for the address spaces. */
static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id,
                                     uint32_t slot_id, uint64_t offset)
{
    KVMMemoryListener *kml;
    KVMSlot *mem;

    if (as_id >= s->nr_as) {
        return;
    }

    kml = s->as[as_id].ml;
    mem = &kml->slots[slot_id];

    if (!mem->memory_size || offset >=
        (mem->memory_size / qemu_real_host_page_size())) {
        return;
    }

    set_bit(offset, mem->dirty_bmap);
}

static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
    /*
     * Read the flags before the value.  Pairs with barrier in
     * KVM's kvm_dirty_ring_push() function.
     */
    return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
}

static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
    /*
     * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS
     * sees the full content of the ring:
     *
     * CPU0                     CPU1                         CPU2
     * ------------------------------------------------------------------------------
     *                                                       fill gfn0
     *                                                       store-rel flags for gfn0
     * load-acq flags for gfn0
     * store-rel RESET for gfn0
     *                          ioctl(RESET_RINGS)
     *                          load-acq flags for gfn0
     *                          check if flags have RESET
     *
     * The synchronization goes from CPU2 to CPU0 to CPU1.
     */
    qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
}
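/*
 * Each ring entry thus moves through a simple lifecycle: the kernel
 * publishes it as DIRTY, the reaper below observes the flag
 * (load-acquire), records the page in the slot's dirty bitmap, marks
 * the entry RESET (store-release), and KVM_RESET_DIRTY_RINGS lets the
 * kernel reclaim the entry and re-protect the page.
 */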
/*
 * Should be with all slots_lock held for the address spaces.  It returns the
 * dirty page we've collected on this dirty ring.
 */
static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu)
{
    struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur;
    uint32_t ring_size = s->kvm_dirty_ring_size;
    uint32_t count = 0, fetch = cpu->kvm_fetch_index;

    /*
     * It's possible that we race with vcpu creation code where the vcpu is
     * put onto the vcpus list but has not yet initialized the dirty ring
     * structures.  If so, skip it.
     */
    if (!cpu->created) {
        return 0;
    }

    assert(dirty_gfns && ring_size);
    trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index);

    while (true) {
        cur = &dirty_gfns[fetch % ring_size];
        if (!dirty_gfn_is_dirtied(cur)) {
            break;
        }
        kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff,
                                 cur->offset);
        dirty_gfn_set_collected(cur);
        trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset);
        fetch++;
        count++;
    }
    cpu->kvm_fetch_index = fetch;
    cpu->dirty_pages += count;

    return count;
}

/* Must be with slots_lock held */
static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState *cpu)
{
    int ret;
    uint64_t total = 0;
    int64_t stamp;

    stamp = get_clock();

    if (cpu) {
        total = kvm_dirty_ring_reap_one(s, cpu);
    } else {
        CPU_FOREACH(cpu) {
            total += kvm_dirty_ring_reap_one(s, cpu);
        }
    }

    if (total) {
        ret = kvm_vm_ioctl(s, KVM_RESET_DIRTY_RINGS);
        assert(ret == total);
    }

    stamp = get_clock() - stamp;

    if (total) {
        trace_kvm_dirty_ring_reap(total, stamp / 1000);
    }

    return total;
}

/*
 * Currently for simplicity, we must hold BQL before calling this.  We can
 * consider dropping the BQL if we're clear with all the race conditions.
 */
static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu)
{
    uint64_t total;

    /*
     * We need to lock all kvm slots for all address spaces here,
     * because:
     *
     * (1) We need to mark dirty for dirty bitmaps in multiple slots
     *     and for tons of pages, so it's better to take the lock here
     *     once rather than once per page.  And more importantly,
     *
     * (2) We must _NOT_ publish dirty bits to the other threads
     *     (e.g., the migration thread) via the kvm memory slot dirty
     *     bitmaps before correctly re-protecting those dirtied pages.
     *     Otherwise we run the risk of data corruption if the page
     *     data is read in the other thread before we do the reset
     *     below.
     */
    kvm_slots_lock();
    total = kvm_dirty_ring_reap_locked(s, cpu);
    kvm_slots_unlock();

    return total;
}

static void do_kvm_cpu_synchronize_kick(CPUState *cpu, run_on_cpu_data arg)
{
    /* No need to do anything */
}

/*
 * Kick all vcpus out in a synchronized way.  When returned, we
 * guarantee that every vcpu has been kicked and at least returned to
 * userspace once.
 */
static void kvm_cpu_synchronize_kick_all(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_kick, RUN_ON_CPU_NULL);
    }
}
/*
 * Flush all the existing dirty pages to the KVM slot buffers.  When
 * this call returns, we guarantee that all the touched dirty pages
 * before calling this function have been put into the per-kvmslot
 * dirty bitmap.
 *
 * This function must be called with BQL held.
 */
static void kvm_dirty_ring_flush(void)
{
    trace_kvm_dirty_ring_flush(0);
    /*
     * The function needs to be serialized.  Since this function
     * should always be with BQL held, serialization is guaranteed.
     * However, let's be sure of it.
     */
    assert(bql_locked());
    /*
     * First make sure to flush the hardware buffers by kicking all
     * vcpus out in a synchronous way.
     */
    kvm_cpu_synchronize_kick_all();
    kvm_dirty_ring_reap(kvm_state, NULL);
    trace_kvm_dirty_ring_flush(1);
}

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function will first try to fetch dirty bitmap from the kernel,
 * and then updates qemu's dirty bitmap.
 *
 * NOTE: caller must be with kml->slots_lock held.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static void kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return;
        }
        if (kvm_slot_get_dirty_log(s, mem)) {
            kvm_slot_sync_dirty_pages(mem);
        }
        start_addr += slot_size;
        size -= slot_size;
    }
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
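/*
 * With a 4 KiB host page size (illustrative), KVM_CLEAR_LOG_ALIGN is
 * 4096 << 6 = 256 KiB, i.e. the 64-page granule that
 * KVM_CLEAR_DIRTY_LOG requires, and KVM_CLEAR_LOG_MASK rounds an
 * address down to that granule.
 */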
static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size();
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                             of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */

    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta || bmap_npages - size / psize) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * 64 pages alignment
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /*
         * Fast path - both start and size align well with BITS_PER_LONG
         * (or the end of memory slot)
         */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    ret = kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d);
    if (ret < 0 && ret != -ENOENT) {
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
    } else {
        ret = 0;
        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
    }

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}
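/*
 * Worked example (illustrative, 4 KiB pages): clearing start=0x5000,
 * size=0x3000 gives bmap_start = 0 (0x5000 rounded down to the 256 KiB
 * granule), start_delta = 5 pages and bmap_npages = 64, so the slow
 * path copies the relevant bits into a temporary bitmap and zeroes the
 * five leading pages that the caller never asked to clear.
 */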
/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml:     the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock();

    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock();

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}
/*
 * We track the poisoned pages to be able to:
 * - replace them on VM reset
 * - block a migration for a VM with a poisoned page
 */
typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

bool kvm_hwpoisoned_mem(void)
{
    return !QLIST_EMPTY(&hwpoison_page_list);
}

static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
    /* The kernel expects ioeventfd values in HOST_BIG_ENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}

static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}
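/*
 * An ioeventfd binds an eventfd to a guest MMIO address (above) or a
 * PIO address (below) so that a guest write to that address signals
 * the eventfd directly in the kernel, with no exit to userspace;
 * virtio queue notification is the classic user of this fast path.
 */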
static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;
    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}


static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size()) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}

static int kvm_set_memory_attributes(hwaddr start, uint64_t size, uint64_t attr)
{
    struct kvm_memory_attributes attrs;
    int r;

    assert((attr & kvm_supported_memory_attributes) == attr);
    attrs.attributes = attr;
    attrs.address = start;
    attrs.size = size;
    attrs.flags = 0;

    r = kvm_vm_ioctl(kvm_state, KVM_SET_MEMORY_ATTRIBUTES, &attrs);
    if (r) {
        error_report("failed to set memory (0x%" HWADDR_PRIx "+0x%" PRIx64 ") "
                     "with attr 0x%" PRIx64 " error '%s'",
                     start, size, attr, strerror(errno));
    }
    return r;
}

int kvm_set_memory_attributes_private(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

int kvm_set_memory_attributes_shared(hwaddr start, uint64_t size)
{
    return kvm_set_memory_attributes(start, size, 0);
}
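/*
 * For confidential guests backed by guest_memfd, pages flip between
 * the private and shared states at runtime: marking a range private
 * sets KVM_MEMORY_ATTRIBUTE_PRIVATE, while marking it shared simply
 * clears all attributes again (attr == 0 above).
 */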
/* Called with KVMMemoryListener.slots_lock held */
static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size, mr_offset;
    ram_addr_t ram_start_offset;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* The offset of the kvmslot within the memory region */
    mr_offset = section->offset_within_region + start_addr -
        section->offset_within_address_space;

    /* use aligned delta to align the ram address and offset */
    ram = memory_region_get_ram_ptr(mr) + mr_offset;
    ram_start_offset = memory_region_get_ram_addr(mr) + mr_offset;

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                return;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                /*
                 * NOTE: We should be aware of the fact that here we're only
                 * doing a best effort to sync dirty bits.  No matter whether
                 * we're using dirty log or dirty ring, we ignored two facts:
                 *
                 * (1) dirty bits can reside in hardware buffers (PML)
                 *
                 * (2) after we collected dirty bits here, pages can be dirtied
                 * again before we do the final KVM_SET_USER_MEMORY_REGION to
                 * remove the slot.
                 *
                 * Not easy.  Let's cross the fingers until it's fixed.
                 */
                if (kvm_state->kvm_dirty_ring_size) {
                    kvm_dirty_ring_reap_locked(kvm_state, NULL);
                    if (kvm_state->kvm_dirty_ring_with_bitmap) {
                        kvm_slot_sync_dirty_pages(mem);
                        kvm_slot_get_dirty_log(kvm_state, mem);
                    }
                } else {
                    kvm_slot_get_dirty_log(kvm_state, mem);
                }
                kvm_slot_sync_dirty_pages(mem);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
            kml->nr_used_slots--;
        } while (size);
        return;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->as_id = kml->as_id;
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);
        mem->guest_memfd = mr->ram_block->guest_memfd;
        mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host;

        kvm_slot_init_dirty_bitmap(mem);
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }

        if (memory_region_has_guest_memfd(mr)) {
            err = kvm_set_memory_attributes_private(start_addr, slot_size);
            if (err) {
                error_report("%s: failed to set memory attribute private: %s",
                             __func__, strerror(-err));
                exit(1);
            }
        }

        start_addr += slot_size;
        ram_start_offset += slot_size;
        ram += slot_size;
        size -= slot_size;
        kml->nr_used_slots++;
    } while (size);
}
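/*
 * The reaper thread below periodically harvests the per-vcpu dirty
 * rings so they do not fill up; a full ring forces the vcpu back to
 * userspace with KVM_EXIT_DIRTY_RING_FULL until the ring has been
 * collected and reset.
 */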
static void *kvm_dirty_ring_reaper_thread(void *data)
{
    KVMState *s = data;
    struct KVMDirtyRingReaper *r = &s->reaper;

    rcu_register_thread();

    trace_kvm_dirty_ring_reaper("init");

    while (true) {
        r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT;
        trace_kvm_dirty_ring_reaper("wait");
        /*
         * TODO: provide a smarter timeout rather than a constant?
         */
        sleep(1);

        /* keep sleeping so that dirtylimit is not interfered with by the reaper */
        if (dirtylimit_in_service()) {
            continue;
        }

        trace_kvm_dirty_ring_reaper("wakeup");
        r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

        bql_lock();
        kvm_dirty_ring_reap(s, NULL);
        bql_unlock();

        r->reaper_iteration++;
    }

    g_assert_not_reached();
}

static void kvm_dirty_ring_reaper_init(KVMState *s)
{
    struct KVMDirtyRingReaper *r = &s->reaper;

    qemu_thread_create(&r->reaper_thr, "kvm-reaper",
                       kvm_dirty_ring_reaper_thread,
                       s, QEMU_THREAD_JOINABLE);
}
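/*
 * Sizing note (assuming the 16-byte struct kvm_dirty_gfn from the
 * kernel ABI): a ring of 4096 entries takes 4096 * 16 = 64 KiB per
 * vcpu, which is what ring_bytes computes below before the capability
 * is enabled.
 */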
", strerror(-ret)); 1591 return -EIO; 1592 } 1593 1594 s->kvm_dirty_ring_with_bitmap = true; 1595 } 1596 1597 s->kvm_dirty_ring_size = ring_size; 1598 s->kvm_dirty_ring_bytes = ring_bytes; 1599 1600 return 0; 1601 } 1602 1603 static void kvm_region_add(MemoryListener *listener, 1604 MemoryRegionSection *section) 1605 { 1606 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); 1607 KVMMemoryUpdate *update; 1608 1609 update = g_new0(KVMMemoryUpdate, 1); 1610 update->section = *section; 1611 1612 QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next); 1613 } 1614 1615 static void kvm_region_del(MemoryListener *listener, 1616 MemoryRegionSection *section) 1617 { 1618 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener); 1619 KVMMemoryUpdate *update; 1620 1621 update = g_new0(KVMMemoryUpdate, 1); 1622 update->section = *section; 1623 1624 QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next); 1625 } 1626 1627 static void kvm_region_commit(MemoryListener *listener) 1628 { 1629 KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, 1630 listener); 1631 KVMMemoryUpdate *u1, *u2; 1632 bool need_inhibit = false; 1633 1634 if (QSIMPLEQ_EMPTY(&kml->transaction_add) && 1635 QSIMPLEQ_EMPTY(&kml->transaction_del)) { 1636 return; 1637 } 1638 1639 /* 1640 * We have to be careful when regions to add overlap with ranges to remove. 1641 * We have to simulate atomic KVM memslot updates by making sure no ioctl() 1642 * is currently active. 1643 * 1644 * The lists are order by addresses, so it's easy to find overlaps. 1645 */ 1646 u1 = QSIMPLEQ_FIRST(&kml->transaction_del); 1647 u2 = QSIMPLEQ_FIRST(&kml->transaction_add); 1648 while (u1 && u2) { 1649 Range r1, r2; 1650 1651 range_init_nofail(&r1, u1->section.offset_within_address_space, 1652 int128_get64(u1->section.size)); 1653 range_init_nofail(&r2, u2->section.offset_within_address_space, 1654 int128_get64(u2->section.size)); 1655 1656 if (range_overlaps_range(&r1, &r2)) { 1657 need_inhibit = true; 1658 break; 1659 } 1660 if (range_lob(&r1) < range_lob(&r2)) { 1661 u1 = QSIMPLEQ_NEXT(u1, next); 1662 } else { 1663 u2 = QSIMPLEQ_NEXT(u2, next); 1664 } 1665 } 1666 1667 kvm_slots_lock(); 1668 if (need_inhibit) { 1669 accel_ioctl_inhibit_begin(); 1670 } 1671 1672 /* Remove all memslots before adding the new ones. 
static void kvm_region_commit(MemoryListener *listener)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener,
                                          listener);
    KVMMemoryUpdate *u1, *u2;
    bool need_inhibit = false;

    if (QSIMPLEQ_EMPTY(&kml->transaction_add) &&
        QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        return;
    }

    /*
     * We have to be careful when regions to add overlap with ranges to remove.
     * We have to simulate atomic KVM memslot updates by making sure no ioctl()
     * is currently active.
     *
     * The lists are ordered by addresses, so it's easy to find overlaps.
     */
    u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
    u2 = QSIMPLEQ_FIRST(&kml->transaction_add);
    while (u1 && u2) {
        Range r1, r2;

        range_init_nofail(&r1, u1->section.offset_within_address_space,
                          int128_get64(u1->section.size));
        range_init_nofail(&r2, u2->section.offset_within_address_space,
                          int128_get64(u2->section.size));

        if (range_overlaps_range(&r1, &r2)) {
            need_inhibit = true;
            break;
        }
        if (range_lob(&r1) < range_lob(&r2)) {
            u1 = QSIMPLEQ_NEXT(u1, next);
        } else {
            u2 = QSIMPLEQ_NEXT(u2, next);
        }
    }

    kvm_slots_lock();
    if (need_inhibit) {
        accel_ioctl_inhibit_begin();
    }

    /* Remove all memslots before adding the new ones. */
    while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_del);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next);

        kvm_set_phys_mem(kml, &u1->section, false);
        memory_region_unref(u1->section.mr);

        g_free(u1);
    }
    while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) {
        u1 = QSIMPLEQ_FIRST(&kml->transaction_add);
        QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next);

        memory_region_ref(u1->section.mr);
        kvm_set_phys_mem(kml, &u1->section, true);

        g_free(u1);
    }

    if (need_inhibit) {
        accel_ioctl_inhibit_end();
    }
    kvm_slots_unlock();
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_slots_lock();
    kvm_physical_sync_dirty_bitmap(kml, section);
    kvm_slots_unlock();
}
static void kvm_log_sync_global(MemoryListener *l, bool last_stage)
{
    KVMMemoryListener *kml = container_of(l, KVMMemoryListener, listener);
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i;

    /* Flush all kernel dirty addresses into KVMSlot dirty bitmap */
    kvm_dirty_ring_flush();

    /*
     * TODO: make this faster when nr_slots is big while there are
     * only a few used slots (small VMs).
     */
    kvm_slots_lock();
    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_slot_sync_dirty_pages(mem);

            if (s->kvm_dirty_ring_with_bitmap && last_stage &&
                kvm_slot_get_dirty_log(s, mem)) {
                kvm_slot_sync_dirty_pages(mem);
            }

            /*
             * This is not needed by KVM_GET_DIRTY_LOG because the
             * ioctl will unconditionally overwrite the whole region.
             * However kvm dirty ring has no such side effect.
             */
            kvm_slot_reset_dirty_pages(mem);
        }
    }
    kvm_slots_unlock();
}

static void kvm_log_clear(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_log_clear(kml, section);
    if (r < 0) {
        error_report_once("%s: kvm log clear failed: mr=%s "
                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
                          section->mr->name, section->offset_within_region,
                          int128_get64(section->size));
        abort();
    }
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)

{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}
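/*
 * Note on the registration below: with the dirty ring enabled the
 * listener wires up log_sync_global (the ring is harvested per-vcpu,
 * not per-section), whereas the bitmap method uses the per-section
 * log_sync/log_clear pair.
 */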
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id, const char *name)
{
    int i;

    kml->slots = g_new0(KVMSlot, s->nr_slots);
    kml->as_id = as_id;

    for (i = 0; i < s->nr_slots; i++) {
        kml->slots[i].slot = i;
    }

    QSIMPLEQ_INIT(&kml->transaction_add);
    QSIMPLEQ_INIT(&kml->transaction_del);

    kml->listener.region_add = kvm_region_add;
    kml->listener.region_del = kvm_region_del;
    kml->listener.commit = kvm_region_commit;
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL;
    kml->listener.name = name;

    if (s->kvm_dirty_ring_size) {
        kml->listener.log_sync_global = kvm_log_sync_global;
    } else {
        kml->listener.log_sync = kvm_log_sync;
        kml->listener.log_clear = kvm_log_clear;
    }

    memory_listener_register(&kml->listener, as);

    for (i = 0; i < s->nr_as; ++i) {
        if (!s->as[i].as) {
            s->as[i].as = as;
            s->as[i].ml = kml;
            break;
        }
    }
}

static MemoryListener kvm_io_listener = {
    .name = "kvm-io",
    .coalesced_io_add = kvm_coalesce_pio_add,
    .coalesced_io_del = kvm_coalesce_pio_del,
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND,
};

int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    set_bit(gsi, s->used_gsi_bitmap);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    clear_bit(gsi, s->used_gsi_bitmap);
}

void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        /* Round up so we can search ints using ffs */
        s->used_gsi_bitmap = bitmap_new(gsi_count);
        s->gsi_count = gsi_count;
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    kvm_arch_init_irq_routing(s);
}

void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    if (!kvm_gsi_routing_enabled()) {
        return;
    }

    s->irq_routes->flags = 0;
    trace_kvm_irqchip_commit_routes();
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

void kvm_add_routing_entry(KVMState *s,
                           struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        return 0;
    }

    return -ESRCH;
}
void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
    kvm_arch_release_virq_post(virq);
    trace_kvm_irqchip_release_virq(virq);
}

void kvm_irqchip_add_change_notifier(Notifier *n)
{
    notifier_list_add(&kvm_irqchip_change_notifiers, n);
}

void kvm_irqchip_remove_change_notifier(Notifier *n)
{
    notifier_remove(n);
}

void kvm_irqchip_change_notify(void)
{
    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
}

int kvm_irqchip_get_virq(KVMState *s)
{
    int next_virq;

    /* Return the lowest unused GSI in the bitmap */
    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
    if (next_virq >= s->gsi_count) {
        return -ENOSPC;
    } else {
        return next_virq;
    }
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;

    msi.address_lo = (uint32_t)msg.address;
    msi.address_hi = msg.address >> 32;
    msi.data = le32_to_cpu(msg.data);
    msi.flags = 0;
    memset(msi.pad, 0, sizeof(msi.pad));

    return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
}
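/*
 * Typical MSI route setup flow built from the helpers below
 * (illustrative sketch, not a complete caller):
 *
 *     KVMRouteChange c = kvm_irqchip_begin_route_changes(s);
 *     int virq = kvm_irqchip_add_msi_route(&c, vector, dev);
 *     kvm_irqchip_commit_route_changes(&c);
 *     ... then bind an irqfd to virq, e.g. via
 *     kvm_irqchip_add_irqfd_notifier_gsi() ...
 *
 * kvm_irqchip_begin_route_changes()/commit_route_changes() are assumed
 * here from the KVMRouteChange API declared in sysemu/kvm.h; only a
 * nonzero c.changes triggers a KVM_SET_GSI_ROUTING update.
 */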
dev->name : (char *)"N/A", 2117 vector, virq); 2118 2119 kvm_add_routing_entry(s, &kroute); 2120 kvm_arch_add_msi_route_post(&kroute, vector, dev); 2121 c->changes++; 2122 } else { 2123 kvm_irqchip_release_virq(s, virq); 2124 return -ENOSPC; 2125 } 2126 2127 return virq; 2128 } 2129 2130 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, 2131 PCIDevice *dev) 2132 { 2133 struct kvm_irq_routing_entry kroute = {}; 2134 2135 if (kvm_gsi_direct_mapping()) { 2136 return 0; 2137 } 2138 2139 if (!kvm_irqchip_in_kernel()) { 2140 return -ENOSYS; 2141 } 2142 2143 kroute.gsi = virq; 2144 kroute.type = KVM_IRQ_ROUTING_MSI; 2145 kroute.flags = 0; 2146 kroute.u.msi.address_lo = (uint32_t)msg.address; 2147 kroute.u.msi.address_hi = msg.address >> 32; 2148 kroute.u.msi.data = le32_to_cpu(msg.data); 2149 if (pci_available && kvm_msi_devid_required()) { 2150 kroute.flags = KVM_MSI_VALID_DEVID; 2151 kroute.u.msi.devid = pci_requester_id(dev); 2152 } 2153 if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) { 2154 return -EINVAL; 2155 } 2156 2157 trace_kvm_irqchip_update_msi_route(virq); 2158 2159 return kvm_update_routing_entry(s, &kroute); 2160 } 2161 2162 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event, 2163 EventNotifier *resample, int virq, 2164 bool assign) 2165 { 2166 int fd = event_notifier_get_fd(event); 2167 int rfd = resample ? event_notifier_get_fd(resample) : -1; 2168 2169 struct kvm_irqfd irqfd = { 2170 .fd = fd, 2171 .gsi = virq, 2172 .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN, 2173 }; 2174 2175 if (rfd != -1) { 2176 assert(assign); 2177 if (kvm_irqchip_is_split()) { 2178 /* 2179 * When the slow irqchip (e.g. the IOAPIC) is in 2180 * userspace, the in-kernel resamplefd will not work because 2181 * the EOI of the interrupt is delivered to userspace 2182 * instead, so the kernel's resamplefd kick is 2183 * skipped. Userspace here mimics what the kernel 2184 * provides with resamplefd: remember the resamplefd and 2185 * kick it when we receive the EOI of this IRQ. 2186 * 2187 * This is hackery because the IOAPIC is mostly bypassed 2188 * (except for EOI broadcasts) when irqfd is used. However 2189 * it brings much performance back for split irqchip 2190 * with INTx IRQs (for VFIO, this gives 93% of the full 2191 * fast path's performance, a 46% boost compared to 2192 * the INTx slow path).
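 * (The EOI side of this lives in kvm_resample_fd_notify(): when
 * userspace sees the EOI of a level-triggered, irqfd-driven IRQ, it
 * looks up the resamplefd remembered for the GSI and sets it,
 * mimicking the in-kernel resample kick.)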
2193 */ 2194 kvm_resample_fd_insert(virq, resample); 2195 } else { 2196 irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE; 2197 irqfd.resamplefd = rfd; 2198 } 2199 } else if (!assign) { 2200 if (kvm_irqchip_is_split()) { 2201 kvm_resample_fd_remove(virq); 2202 } 2203 } 2204 2205 return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd); 2206 } 2207 2208 #else /* !KVM_CAP_IRQ_ROUTING */ 2209 2210 void kvm_init_irq_routing(KVMState *s) 2211 { 2212 } 2213 2214 void kvm_irqchip_release_virq(KVMState *s, int virq) 2215 { 2216 } 2217 2218 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) 2219 { 2220 abort(); 2221 } 2222 2223 int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) 2224 { 2225 return -ENOSYS; 2226 } 2227 2228 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter) 2229 { 2230 return -ENOSYS; 2231 } 2232 2233 int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint) 2234 { 2235 return -ENOSYS; 2236 } 2237 2238 static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event, 2239 EventNotifier *resample, int virq, 2240 bool assign) 2241 { 2242 abort(); 2243 } 2244 2245 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, PCIDevice *dev) 2246 { 2247 return -ENOSYS; 2248 } 2249 #endif /* !KVM_CAP_IRQ_ROUTING */ 2250 2251 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, 2252 EventNotifier *rn, int virq) 2253 { 2254 return kvm_irqchip_assign_irqfd(s, n, rn, virq, true); 2255 } 2256 2257 int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, 2258 int virq) 2259 { 2260 return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false); 2261 } 2262 2263 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, 2264 EventNotifier *rn, qemu_irq irq) 2265 { 2266 gpointer key, gsi; 2267 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); 2268 2269 if (!found) { 2270 return -ENXIO; 2271 } 2272 return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi)); 2273 } 2274 2275 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, 2276 qemu_irq irq) 2277 { 2278 gpointer key, gsi; 2279 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); 2280 2281 if (!found) { 2282 return -ENXIO; 2283 } 2284 return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi)); 2285 } 2286 2287 void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi) 2288 { 2289 g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi)); 2290 } 2291 2292 static void kvm_irqchip_create(KVMState *s) 2293 { 2294 int ret; 2295 2296 assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO); 2297 if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) { 2298 ; 2299 } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) { 2300 ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0); 2301 if (ret < 0) { 2302 fprintf(stderr, "Enabling kernel irqchip failed: %s\n", strerror(-ret)); 2303 exit(1); 2304 } 2305 } else { 2306 return; 2307 } 2308 2309 if (kvm_check_extension(s, KVM_CAP_IRQFD) <= 0) { 2310 fprintf(stderr, "kvm: irqfd not implemented\n"); 2311 exit(1); 2312 } 2313 2314 /* First probe and see if there's an arch-specific hook to create the 2315 * in-kernel irqchip for us */ 2316 ret = kvm_arch_irqchip_create(s); 2317 if (ret == 0) { 2318 if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) { 2319 error_report("Split IRQ chip mode not supported."); 2320 exit(1); 2321 } else { 2322 ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP); 2323 } 2324 } 2325 if (ret < 0) { 2326 fprintf(stderr, "Create kernel irqchip 
failed: %s\n", strerror(-ret)); 2327 exit(1); 2328 } 2329 2330 kvm_kernel_irqchip = true; 2331 /* If we have an in-kernel IRQ chip then we must have asynchronous 2332 * interrupt delivery (though the reverse is not necessarily true) 2333 */ 2334 kvm_async_interrupts_allowed = true; 2335 kvm_halt_in_kernel_allowed = true; 2336 2337 kvm_init_irq_routing(s); 2338 2339 s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal); 2340 } 2341 2342 /* Find number of supported CPUs using the recommended 2343 * procedure from the kernel API documentation to cope with 2344 * older kernels that may be missing capabilities. 2345 */ 2346 static int kvm_recommended_vcpus(KVMState *s) 2347 { 2348 int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS); 2349 return (ret) ? ret : 4; 2350 } 2351 2352 static int kvm_max_vcpus(KVMState *s) 2353 { 2354 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS); 2355 return (ret) ? ret : kvm_recommended_vcpus(s); 2356 } 2357 2358 static int kvm_max_vcpu_id(KVMState *s) 2359 { 2360 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID); 2361 return (ret) ? ret : kvm_max_vcpus(s); 2362 } 2363 2364 bool kvm_vcpu_id_is_valid(int vcpu_id) 2365 { 2366 KVMState *s = KVM_STATE(current_accel()); 2367 return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s); 2368 } 2369 2370 bool kvm_dirty_ring_enabled(void) 2371 { 2372 return kvm_state && kvm_state->kvm_dirty_ring_size; 2373 } 2374 2375 static void query_stats_cb(StatsResultList **result, StatsTarget target, 2376 strList *names, strList *targets, Error **errp); 2377 static void query_stats_schemas_cb(StatsSchemaList **result, Error **errp); 2378 2379 uint32_t kvm_dirty_ring_size(void) 2380 { 2381 return kvm_state->kvm_dirty_ring_size; 2382 } 2383 2384 static int do_kvm_create_vm(MachineState *ms, int type) 2385 { 2386 KVMState *s; 2387 int ret; 2388 2389 s = KVM_STATE(ms->accelerator); 2390 2391 do { 2392 ret = kvm_ioctl(s, KVM_CREATE_VM, type); 2393 } while (ret == -EINTR); 2394 2395 if (ret < 0) { 2396 error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret)); 2397 2398 #ifdef TARGET_S390X 2399 if (ret == -EINVAL) { 2400 error_printf("Host kernel setup problem detected." 2401 " Please verify:\n"); 2402 error_printf("- for kernels supporting the" 2403 " switch_amode or user_mode parameters, whether"); 2404 error_printf(" user space is running in primary address space\n"); 2405 error_printf("- for kernels supporting the vm.allocate_pgste" 2406 " sysctl, whether it is enabled\n"); 2407 } 2408 #elif defined(TARGET_PPC) 2409 if (ret == -EINVAL) { 2410 error_printf("PPC KVM module is not loaded. Try modprobe kvm_%s.\n", 2411 (type == 2) ? 
"pr" : "hv"); 2412 } 2413 #endif 2414 } 2415 2416 return ret; 2417 } 2418 2419 static int find_kvm_machine_type(MachineState *ms) 2420 { 2421 MachineClass *mc = MACHINE_GET_CLASS(ms); 2422 int type; 2423 2424 if (object_property_find(OBJECT(current_machine), "kvm-type")) { 2425 g_autofree char *kvm_type; 2426 kvm_type = object_property_get_str(OBJECT(current_machine), 2427 "kvm-type", 2428 &error_abort); 2429 type = mc->kvm_type(ms, kvm_type); 2430 } else if (mc->kvm_type) { 2431 type = mc->kvm_type(ms, NULL); 2432 } else { 2433 type = kvm_arch_get_default_type(ms); 2434 } 2435 return type; 2436 } 2437 2438 static int kvm_setup_dirty_ring(KVMState *s) 2439 { 2440 uint64_t dirty_log_manual_caps; 2441 int ret; 2442 2443 /* 2444 * Enable KVM dirty ring if supported, otherwise fall back to 2445 * dirty logging mode 2446 */ 2447 ret = kvm_dirty_ring_init(s); 2448 if (ret < 0) { 2449 return ret; 2450 } 2451 2452 /* 2453 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is not needed when dirty ring is 2454 * enabled. More importantly, KVM_DIRTY_LOG_INITIALLY_SET will assume no 2455 * page is wr-protected initially, which is against how kvm dirty ring is 2456 * usage - kvm dirty ring requires all pages are wr-protected at the very 2457 * beginning. Enabling this feature for dirty ring causes data corruption. 2458 * 2459 * TODO: Without KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and kvm clear dirty log, 2460 * we may expect a higher stall time when starting the migration. In the 2461 * future we can enable KVM_CLEAR_DIRTY_LOG to work with dirty ring too: 2462 * instead of clearing dirty bit, it can be a way to explicitly wr-protect 2463 * guest pages. 2464 */ 2465 if (!s->kvm_dirty_ring_size) { 2466 dirty_log_manual_caps = 2467 kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2); 2468 dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | 2469 KVM_DIRTY_LOG_INITIALLY_SET); 2470 s->manual_dirty_log_protect = dirty_log_manual_caps; 2471 if (dirty_log_manual_caps) { 2472 ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 2473 dirty_log_manual_caps); 2474 if (ret) { 2475 warn_report("Trying to enable capability %"PRIu64" of " 2476 "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. " 2477 "Falling back to the legacy mode. ", 2478 dirty_log_manual_caps); 2479 s->manual_dirty_log_protect = 0; 2480 } 2481 } 2482 } 2483 2484 return 0; 2485 } 2486 2487 static int kvm_init(MachineState *ms) 2488 { 2489 MachineClass *mc = MACHINE_GET_CLASS(ms); 2490 static const char upgrade_note[] = 2491 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" 2492 "(see http://sourceforge.net/projects/kvm).\n"; 2493 const struct { 2494 const char *name; 2495 int num; 2496 } num_cpus[] = { 2497 { "SMP", ms->smp.cpus }, 2498 { "hotpluggable", ms->smp.max_cpus }, 2499 { /* end of list */ } 2500 }, *nc = num_cpus; 2501 int soft_vcpus_limit, hard_vcpus_limit; 2502 KVMState *s; 2503 const KVMCapabilityInfo *missing_cap; 2504 int ret; 2505 int type; 2506 2507 qemu_mutex_init(&kml_slots_lock); 2508 2509 s = KVM_STATE(ms->accelerator); 2510 2511 /* 2512 * On systems where the kernel can support different base page 2513 * sizes, host page size may be different from TARGET_PAGE_SIZE, 2514 * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum 2515 * page size for the system though. 
2516 */ 2517 assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size()); 2518 2519 s->sigmask_len = 8; 2520 accel_blocker_init(); 2521 2522 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG 2523 QTAILQ_INIT(&s->kvm_sw_breakpoints); 2524 #endif 2525 QLIST_INIT(&s->kvm_parked_vcpus); 2526 s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR); 2527 if (s->fd == -1) { 2528 error_report("Could not access KVM kernel module: %m"); 2529 ret = -errno; 2530 goto err; 2531 } 2532 2533 ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0); 2534 if (ret < KVM_API_VERSION) { 2535 if (ret >= 0) { 2536 ret = -EINVAL; 2537 } 2538 error_report("kvm version too old"); 2539 goto err; 2540 } 2541 2542 if (ret > KVM_API_VERSION) { 2543 ret = -EINVAL; 2544 error_report("kvm version not supported"); 2545 goto err; 2546 } 2547 2548 kvm_supported_memory_attributes = kvm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES); 2549 kvm_guest_memfd_supported = 2550 kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) && 2551 kvm_check_extension(s, KVM_CAP_USER_MEMORY2) && 2552 (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE); 2553 2554 kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT); 2555 s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); 2556 2557 /* If unspecified, use the default value */ 2558 if (!s->nr_slots) { 2559 s->nr_slots = 32; 2560 } 2561 2562 s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE); 2563 if (s->nr_as <= 1) { 2564 s->nr_as = 1; 2565 } 2566 s->as = g_new0(struct KVMAs, s->nr_as); 2567 2568 type = find_kvm_machine_type(ms); 2569 if (type < 0) { 2570 ret = -EINVAL; 2571 goto err; 2572 } 2573 2574 ret = do_kvm_create_vm(ms, type); 2575 if (ret < 0) { 2576 goto err; 2577 } 2578 2579 s->vmfd = ret; 2580 2581 /* check the vcpu limits */ 2582 soft_vcpus_limit = kvm_recommended_vcpus(s); 2583 hard_vcpus_limit = kvm_max_vcpus(s); 2584 2585 while (nc->name) { 2586 if (nc->num > soft_vcpus_limit) { 2587 warn_report("Number of %s cpus requested (%d) exceeds " 2588 "the recommended cpus supported by KVM (%d)", 2589 nc->name, nc->num, soft_vcpus_limit); 2590 2591 if (nc->num > hard_vcpus_limit) { 2592 error_report("Number of %s cpus requested (%d) exceeds " 2593 "the maximum cpus supported by KVM (%d)", 2594 nc->name, nc->num, hard_vcpus_limit); 2595 exit(1); 2596 } 2597 } 2598 nc++; 2599 } 2600 2601 missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); 2602 if (!missing_cap) { 2603 missing_cap = 2604 kvm_check_extension_list(s, kvm_arch_required_capabilities); 2605 } 2606 if (missing_cap) { 2607 ret = -EINVAL; 2608 error_report("kvm does not support %s", missing_cap->name); 2609 error_printf("%s", upgrade_note); 2610 goto err; 2611 } 2612 2613 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); 2614 s->coalesced_pio = s->coalesced_mmio && 2615 kvm_check_extension(s, KVM_CAP_COALESCED_PIO); 2616 2617 ret = kvm_setup_dirty_ring(s); 2618 if (ret < 0) { 2619 goto err; 2620 } 2621 2622 #ifdef KVM_CAP_VCPU_EVENTS 2623 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); 2624 #endif 2625 s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); 2626 2627 s->irq_set_ioctl = KVM_IRQ_LINE; 2628 if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) { 2629 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; 2630 } 2631 2632 kvm_readonly_mem_allowed = 2633 (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0); 2634 2635 kvm_resamplefds_allowed = 2636 (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0); 2637 2638 kvm_vm_attributes_allowed = 2639 (kvm_check_extension(s, 
KVM_CAP_VM_ATTRIBUTES) > 0); 2640 2641 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG 2642 kvm_has_guest_debug = 2643 (kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG) > 0); 2644 #endif 2645 2646 kvm_sstep_flags = 0; 2647 if (kvm_has_guest_debug) { 2648 kvm_sstep_flags = SSTEP_ENABLE; 2649 2650 #if defined TARGET_KVM_HAVE_GUEST_DEBUG 2651 int guest_debug_flags = 2652 kvm_check_extension(s, KVM_CAP_SET_GUEST_DEBUG2); 2653 2654 if (guest_debug_flags & KVM_GUESTDBG_BLOCKIRQ) { 2655 kvm_sstep_flags |= SSTEP_NOIRQ; 2656 } 2657 #endif 2658 } 2659 2660 kvm_state = s; 2661 2662 ret = kvm_arch_init(ms, s); 2663 if (ret < 0) { 2664 goto err; 2665 } 2666 2667 if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { 2668 s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; 2669 } 2670 2671 qemu_register_reset(kvm_unpoison_all, NULL); 2672 2673 if (s->kernel_irqchip_allowed) { 2674 kvm_irqchip_create(s); 2675 } 2676 2677 s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; 2678 s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; 2679 s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; 2680 s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; 2681 2682 kvm_memory_listener_register(s, &s->memory_listener, 2683 &address_space_memory, 0, "kvm-memory"); 2684 memory_listener_register(&kvm_io_listener, 2685 &address_space_io); 2686 2687 s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); 2688 if (!s->sync_mmu) { 2689 ret = ram_block_discard_disable(true); 2690 assert(!ret); 2691 } 2692 2693 if (s->kvm_dirty_ring_size) { 2694 kvm_dirty_ring_reaper_init(s); 2695 } 2696 2697 if (kvm_check_extension(kvm_state, KVM_CAP_BINARY_STATS_FD)) { 2698 add_stats_callbacks(STATS_PROVIDER_KVM, query_stats_cb, 2699 query_stats_schemas_cb); 2700 } 2701 2702 return 0; 2703 2704 err: 2705 assert(ret < 0); 2706 if (s->vmfd >= 0) { 2707 close(s->vmfd); 2708 } 2709 if (s->fd != -1) { 2710 close(s->fd); 2711 } 2712 g_free(s->as); 2713 g_free(s->memory_listener.slots); 2714 2715 return ret; 2716 } 2717 2718 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len) 2719 { 2720 s->sigmask_len = sigmask_len; 2721 } 2722 2723 static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction, 2724 int size, uint32_t count) 2725 { 2726 int i; 2727 uint8_t *ptr = data; 2728 2729 for (i = 0; i < count; i++) { 2730 address_space_rw(&address_space_io, port, attrs, 2731 ptr, size, 2732 direction == KVM_EXIT_IO_OUT); 2733 ptr += size; 2734 } 2735 } 2736 2737 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run) 2738 { 2739 int i; 2740 2741 fprintf(stderr, "KVM internal error. Suberror: %d\n", 2742 run->internal.suberror); 2743 2744 for (i = 0; i < run->internal.ndata; ++i) { 2745 fprintf(stderr, "extra data[%d]: 0x%016"PRIx64"\n", 2746 i, (uint64_t)run->internal.data[i]); 2747 } 2748 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) { 2749 fprintf(stderr, "emulation failure\n"); 2750 if (!kvm_arch_stop_on_emulation_error(cpu)) { 2751 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); 2752 return EXCP_INTERRUPT; 2753 } 2754 } 2755 /* FIXME: Should trigger a qmp message to let management know 2756 * something went wrong. 
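 * (As it stands, kvm_cpu_exec() merely dumps the CPU state and stops
 * the VM in RUN_STATE_INTERNAL_ERROR, so management only observes the
 * generic stop event.)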
2757 */ 2758 return -1; 2759 } 2760 2761 void kvm_flush_coalesced_mmio_buffer(void) 2762 { 2763 KVMState *s = kvm_state; 2764 2765 if (!s || s->coalesced_flush_in_progress) { 2766 return; 2767 } 2768 2769 s->coalesced_flush_in_progress = true; 2770 2771 if (s->coalesced_mmio_ring) { 2772 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring; 2773 while (ring->first != ring->last) { 2774 struct kvm_coalesced_mmio *ent; 2775 2776 ent = &ring->coalesced_mmio[ring->first]; 2777 2778 if (ent->pio == 1) { 2779 address_space_write(&address_space_io, ent->phys_addr, 2780 MEMTXATTRS_UNSPECIFIED, ent->data, 2781 ent->len); 2782 } else { 2783 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len); 2784 } 2785 smp_wmb(); 2786 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX; 2787 } 2788 } 2789 2790 s->coalesced_flush_in_progress = false; 2791 } 2792 2793 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg) 2794 { 2795 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) { 2796 Error *err = NULL; 2797 int ret = kvm_arch_get_registers(cpu, &err); 2798 if (ret) { 2799 if (err) { 2800 error_reportf_err(err, "Failed to synchronize CPU state: "); 2801 } else { 2802 error_report("Failed to get registers: %s", strerror(-ret)); 2803 } 2804 2805 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); 2806 vm_stop(RUN_STATE_INTERNAL_ERROR); 2807 } 2808 2809 cpu->vcpu_dirty = true; 2810 } 2811 } 2812 2813 void kvm_cpu_synchronize_state(CPUState *cpu) 2814 { 2815 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) { 2816 run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL); 2817 } 2818 } 2819 2820 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) 2821 { 2822 Error *err = NULL; 2823 int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE, &err); 2824 if (ret) { 2825 if (err) { 2826 error_reportf_err(err, "Restoring registers after reset: "); 2827 } else { 2828 error_report("Failed to put registers after reset: %s", 2829 strerror(-ret)); 2830 } 2831 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); 2832 vm_stop(RUN_STATE_INTERNAL_ERROR); 2833 } 2834 2835 cpu->vcpu_dirty = false; 2836 } 2837 2838 void kvm_cpu_synchronize_post_reset(CPUState *cpu) 2839 { 2840 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL); 2841 } 2842 2843 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) 2844 { 2845 Error *err = NULL; 2846 int ret = kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE, &err); 2847 if (ret) { 2848 if (err) { 2849 error_reportf_err(err, "Putting registers after init: "); 2850 } else { 2851 error_report("Failed to put registers after init: %s", 2852 strerror(-ret)); 2853 } 2854 exit(1); 2855 } 2856 2857 cpu->vcpu_dirty = false; 2858 } 2859 2860 void kvm_cpu_synchronize_post_init(CPUState *cpu) 2861 { 2862 if (!kvm_state->guest_state_protected) { 2863 /* 2864 * This runs before the machine_init_done notifiers, and is the last 2865 * opportunity to synchronize the state of confidential guests. 
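 * (Once kvm_mark_guest_state_protected() has been called, both this
 * hook and kvm_cpu_synchronize_state() skip the register transfer
 * entirely.)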
2866 */ 2867 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL); 2868 } 2869 } 2870 2871 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg) 2872 { 2873 cpu->vcpu_dirty = true; 2874 } 2875 2876 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu) 2877 { 2878 run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL); 2879 } 2880 2881 #ifdef KVM_HAVE_MCE_INJECTION 2882 static __thread void *pending_sigbus_addr; 2883 static __thread int pending_sigbus_code; 2884 static __thread bool have_sigbus_pending; 2885 #endif 2886 2887 static void kvm_cpu_kick(CPUState *cpu) 2888 { 2889 qatomic_set(&cpu->kvm_run->immediate_exit, 1); 2890 } 2891 2892 static void kvm_cpu_kick_self(void) 2893 { 2894 if (kvm_immediate_exit) { 2895 kvm_cpu_kick(current_cpu); 2896 } else { 2897 qemu_cpu_kick_self(); 2898 } 2899 } 2900 2901 static void kvm_eat_signals(CPUState *cpu) 2902 { 2903 struct timespec ts = { 0, 0 }; 2904 siginfo_t siginfo; 2905 sigset_t waitset; 2906 sigset_t chkset; 2907 int r; 2908 2909 if (kvm_immediate_exit) { 2910 qatomic_set(&cpu->kvm_run->immediate_exit, 0); 2911 /* Write kvm_run->immediate_exit before the cpu->exit_request 2912 * write in kvm_cpu_exec. 2913 */ 2914 smp_wmb(); 2915 return; 2916 } 2917 2918 sigemptyset(&waitset); 2919 sigaddset(&waitset, SIG_IPI); 2920 2921 do { 2922 r = sigtimedwait(&waitset, &siginfo, &ts); 2923 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) { 2924 perror("sigtimedwait"); 2925 exit(1); 2926 } 2927 2928 r = sigpending(&chkset); 2929 if (r == -1) { 2930 perror("sigpending"); 2931 exit(1); 2932 } 2933 } while (sigismember(&chkset, SIG_IPI)); 2934 } 2935 2936 int kvm_convert_memory(hwaddr start, hwaddr size, bool to_private) 2937 { 2938 MemoryRegionSection section; 2939 ram_addr_t offset; 2940 MemoryRegion *mr; 2941 RAMBlock *rb; 2942 void *addr; 2943 int ret = -1; 2944 2945 trace_kvm_convert_memory(start, size, to_private ? "shared_to_private" : "private_to_shared"); 2946 2947 if (!QEMU_PTR_IS_ALIGNED(start, qemu_real_host_page_size()) || 2948 !QEMU_PTR_IS_ALIGNED(size, qemu_real_host_page_size())) { 2949 return -1; 2950 } 2951 2952 if (!size) { 2953 return -1; 2954 } 2955 2956 section = memory_region_find(get_system_memory(), start, size); 2957 mr = section.mr; 2958 if (!mr) { 2959 /* 2960 * Ignore converting a non-assigned region to shared. 2961 * 2962 * TDX requires the vMMIO region to be shared in order to inject #VE into the guest. 2963 * OVMF conservatively issues MapGPA(shared) on the 32bit PCI MMIO region 2964 * and the vIO-APIC 0xFEC00000 4K page. 2965 * OVMF assigns the 32bit PCI MMIO region to 2966 * [top of low memory: typically 2GB=0xC000000, 0xFC00000) 2967 */ 2968 if (!to_private) { 2969 return 0; 2970 } 2971 return -1; 2972 } 2973 2974 if (!memory_region_has_guest_memfd(mr)) { 2975 /* 2976 * Because the vMMIO region must be shared, a guest TD may explicitly 2977 * convert the vMMIO region to shared. Don't complain in that case. See 2978 * memory_region_type() for how to check whether a region is an MMIO region. 2979 */ 2980 if (!to_private && 2981 !memory_region_is_ram(mr) && 2982 !memory_region_is_ram_device(mr) && 2983 !memory_region_is_rom(mr) && 2984 !memory_region_is_romd(mr)) { 2985 ret = 0; 2986 } else { 2987 error_report("Cannot convert non-guest_memfd-backed memory region " 2988 "(0x%"HWADDR_PRIx", +0x%"HWADDR_PRIx") to %s", 2989 start, size, to_private ? 
"private" : "shared"); 2990 } 2991 goto out_unref; 2992 } 2993 2994 if (to_private) { 2995 ret = kvm_set_memory_attributes_private(start, size); 2996 } else { 2997 ret = kvm_set_memory_attributes_shared(start, size); 2998 } 2999 if (ret) { 3000 goto out_unref; 3001 } 3002 3003 addr = memory_region_get_ram_ptr(mr) + section.offset_within_region; 3004 rb = qemu_ram_block_from_host(addr, false, &offset); 3005 3006 if (to_private) { 3007 if (rb->page_size != qemu_real_host_page_size()) { 3008 /* 3009 * shared memory is backed by hugetlb, which is supposed to be 3010 * pre-allocated and doesn't need to be discarded 3011 */ 3012 goto out_unref; 3013 } 3014 ret = ram_block_discard_range(rb, offset, size); 3015 } else { 3016 ret = ram_block_discard_guest_memfd_range(rb, offset, size); 3017 } 3018 3019 out_unref: 3020 memory_region_unref(mr); 3021 return ret; 3022 } 3023 3024 int kvm_cpu_exec(CPUState *cpu) 3025 { 3026 struct kvm_run *run = cpu->kvm_run; 3027 int ret, run_ret; 3028 3029 trace_kvm_cpu_exec(); 3030 3031 if (kvm_arch_process_async_events(cpu)) { 3032 qatomic_set(&cpu->exit_request, 0); 3033 return EXCP_HLT; 3034 } 3035 3036 bql_unlock(); 3037 cpu_exec_start(cpu); 3038 3039 do { 3040 MemTxAttrs attrs; 3041 3042 if (cpu->vcpu_dirty) { 3043 Error *err = NULL; 3044 ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE, &err); 3045 if (ret) { 3046 if (err) { 3047 error_reportf_err(err, "Putting registers after init: "); 3048 } else { 3049 error_report("Failed to put registers after init: %s", 3050 strerror(-ret)); 3051 } 3052 ret = -1; 3053 break; 3054 } 3055 3056 cpu->vcpu_dirty = false; 3057 } 3058 3059 kvm_arch_pre_run(cpu, run); 3060 if (qatomic_read(&cpu->exit_request)) { 3061 trace_kvm_interrupt_exit_request(); 3062 /* 3063 * KVM requires us to reenter the kernel after IO exits to complete 3064 * instruction emulation. This self-signal will ensure that we 3065 * leave ASAP again. 3066 */ 3067 kvm_cpu_kick_self(); 3068 } 3069 3070 /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit. 3071 * Matching barrier in kvm_eat_signals. 
3072 */ 3073 smp_rmb(); 3074 3075 run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0); 3076 3077 attrs = kvm_arch_post_run(cpu, run); 3078 3079 #ifdef KVM_HAVE_MCE_INJECTION 3080 if (unlikely(have_sigbus_pending)) { 3081 bql_lock(); 3082 kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code, 3083 pending_sigbus_addr); 3084 have_sigbus_pending = false; 3085 bql_unlock(); 3086 } 3087 #endif 3088 3089 if (run_ret < 0) { 3090 if (run_ret == -EINTR || run_ret == -EAGAIN) { 3091 trace_kvm_io_window_exit(); 3092 kvm_eat_signals(cpu); 3093 ret = EXCP_INTERRUPT; 3094 break; 3095 } 3096 if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) { 3097 fprintf(stderr, "error: kvm run failed %s\n", 3098 strerror(-run_ret)); 3099 #ifdef TARGET_PPC 3100 if (run_ret == -EBUSY) { 3101 fprintf(stderr, 3102 "This is probably because your SMT is enabled.\n" 3103 "VCPUs can only run on primary threads with all " 3104 "secondary threads offline.\n"); 3105 } 3106 #endif 3107 ret = -1; 3108 break; 3109 } 3110 } 3111 3112 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason); 3113 switch (run->exit_reason) { 3114 case KVM_EXIT_IO: 3115 /* Called outside BQL */ 3116 kvm_handle_io(run->io.port, attrs, 3117 (uint8_t *)run + run->io.data_offset, 3118 run->io.direction, 3119 run->io.size, 3120 run->io.count); 3121 ret = 0; 3122 break; 3123 case KVM_EXIT_MMIO: 3124 /* Called outside BQL */ 3125 address_space_rw(&address_space_memory, 3126 run->mmio.phys_addr, attrs, 3127 run->mmio.data, 3128 run->mmio.len, 3129 run->mmio.is_write); 3130 ret = 0; 3131 break; 3132 case KVM_EXIT_IRQ_WINDOW_OPEN: 3133 ret = EXCP_INTERRUPT; 3134 break; 3135 case KVM_EXIT_SHUTDOWN: 3136 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); 3137 ret = EXCP_INTERRUPT; 3138 break; 3139 case KVM_EXIT_UNKNOWN: 3140 fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n", 3141 (uint64_t)run->hw.hardware_exit_reason); 3142 ret = -1; 3143 break; 3144 case KVM_EXIT_INTERNAL_ERROR: 3145 ret = kvm_handle_internal_error(cpu, run); 3146 break; 3147 case KVM_EXIT_DIRTY_RING_FULL: 3148 /* 3149 * We shouldn't continue while the dirty ring of this vcpu is 3150 * still full; it is drained and reset via KVM_RESET_DIRTY_RINGS below. 3151 */ 3152 trace_kvm_dirty_ring_full(cpu->cpu_index); 3153 bql_lock(); 3154 /* 3155 * We throttle the vCPU by making it sleep once it exits the kernel 3156 * due to a full dirty ring. In the dirtylimit scenario, reaping 3157 * all vCPUs after a single vCPU's dirty ring gets full would cause 3158 * that sleep to be missed, so just reap the vCPU whose ring is full. 
3159 */ 3160 if (dirtylimit_in_service()) { 3161 kvm_dirty_ring_reap(kvm_state, cpu); 3162 } else { 3163 kvm_dirty_ring_reap(kvm_state, NULL); 3164 } 3165 bql_unlock(); 3166 dirtylimit_vcpu_execute(cpu); 3167 ret = 0; 3168 break; 3169 case KVM_EXIT_SYSTEM_EVENT: 3170 trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type); 3171 switch (run->system_event.type) { 3172 case KVM_SYSTEM_EVENT_SHUTDOWN: 3173 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); 3174 ret = EXCP_INTERRUPT; 3175 break; 3176 case KVM_SYSTEM_EVENT_RESET: 3177 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); 3178 ret = EXCP_INTERRUPT; 3179 break; 3180 case KVM_SYSTEM_EVENT_CRASH: 3181 kvm_cpu_synchronize_state(cpu); 3182 bql_lock(); 3183 qemu_system_guest_panicked(cpu_get_crash_info(cpu)); 3184 bql_unlock(); 3185 ret = 0; 3186 break; 3187 default: 3188 ret = kvm_arch_handle_exit(cpu, run); 3189 break; 3190 } 3191 break; 3192 case KVM_EXIT_MEMORY_FAULT: 3193 trace_kvm_memory_fault(run->memory_fault.gpa, 3194 run->memory_fault.size, 3195 run->memory_fault.flags); 3196 if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) { 3197 error_report("KVM_EXIT_MEMORY_FAULT: Unknown flag 0x%" PRIx64, 3198 (uint64_t)run->memory_fault.flags); 3199 ret = -1; 3200 break; 3201 } 3202 ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size, 3203 run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE); 3204 break; 3205 default: 3206 ret = kvm_arch_handle_exit(cpu, run); 3207 break; 3208 } 3209 } while (ret == 0); 3210 3211 cpu_exec_end(cpu); 3212 bql_lock(); 3213 3214 if (ret < 0) { 3215 cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); 3216 vm_stop(RUN_STATE_INTERNAL_ERROR); 3217 } 3218 3219 qatomic_set(&cpu->exit_request, 0); 3220 return ret; 3221 } 3222 3223 int kvm_ioctl(KVMState *s, unsigned long type, ...) 3224 { 3225 int ret; 3226 void *arg; 3227 va_list ap; 3228 3229 va_start(ap, type); 3230 arg = va_arg(ap, void *); 3231 va_end(ap); 3232 3233 trace_kvm_ioctl(type, arg); 3234 ret = ioctl(s->fd, type, arg); 3235 if (ret == -1) { 3236 ret = -errno; 3237 } 3238 return ret; 3239 } 3240 3241 int kvm_vm_ioctl(KVMState *s, unsigned long type, ...) 3242 { 3243 int ret; 3244 void *arg; 3245 va_list ap; 3246 3247 va_start(ap, type); 3248 arg = va_arg(ap, void *); 3249 va_end(ap); 3250 3251 trace_kvm_vm_ioctl(type, arg); 3252 accel_ioctl_begin(); 3253 ret = ioctl(s->vmfd, type, arg); 3254 accel_ioctl_end(); 3255 if (ret == -1) { 3256 ret = -errno; 3257 } 3258 return ret; 3259 } 3260 3261 int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...) 3262 { 3263 int ret; 3264 void *arg; 3265 va_list ap; 3266 3267 va_start(ap, type); 3268 arg = va_arg(ap, void *); 3269 va_end(ap); 3270 3271 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg); 3272 accel_cpu_ioctl_begin(cpu); 3273 ret = ioctl(cpu->kvm_fd, type, arg); 3274 accel_cpu_ioctl_end(cpu); 3275 if (ret == -1) { 3276 ret = -errno; 3277 } 3278 return ret; 3279 } 3280 3281 int kvm_device_ioctl(int fd, unsigned long type, ...) 
3282 { 3283 int ret; 3284 void *arg; 3285 va_list ap; 3286 3287 va_start(ap, type); 3288 arg = va_arg(ap, void *); 3289 va_end(ap); 3290 3291 trace_kvm_device_ioctl(fd, type, arg); 3292 accel_ioctl_begin(); 3293 ret = ioctl(fd, type, arg); 3294 accel_ioctl_end(); 3295 if (ret == -1) { 3296 ret = -errno; 3297 } 3298 return ret; 3299 } 3300 3301 int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr) 3302 { 3303 int ret; 3304 struct kvm_device_attr attribute = { 3305 .group = group, 3306 .attr = attr, 3307 }; 3308 3309 if (!kvm_vm_attributes_allowed) { 3310 return 0; 3311 } 3312 3313 ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute); 3314 /* kvm returns 0 on success for HAS_DEVICE_ATTR */ 3315 return ret ? 0 : 1; 3316 } 3317 3318 int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr) 3319 { 3320 struct kvm_device_attr attribute = { 3321 .group = group, 3322 .attr = attr, 3323 .flags = 0, 3324 }; 3325 3326 return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1; 3327 } 3328 3329 int kvm_device_access(int fd, int group, uint64_t attr, 3330 void *val, bool write, Error **errp) 3331 { 3332 struct kvm_device_attr kvmattr; 3333 int err; 3334 3335 kvmattr.flags = 0; 3336 kvmattr.group = group; 3337 kvmattr.attr = attr; 3338 kvmattr.addr = (uintptr_t)val; 3339 3340 err = kvm_device_ioctl(fd, 3341 write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR, 3342 &kvmattr); 3343 if (err < 0) { 3344 error_setg_errno(errp, -err, 3345 "KVM_%s_DEVICE_ATTR failed: Group %d " 3346 "attr 0x%016" PRIx64, 3347 write ? "SET" : "GET", group, attr); 3348 } 3349 return err; 3350 } 3351 3352 bool kvm_has_sync_mmu(void) 3353 { 3354 return kvm_state->sync_mmu; 3355 } 3356 3357 int kvm_has_vcpu_events(void) 3358 { 3359 return kvm_state->vcpu_events; 3360 } 3361 3362 int kvm_max_nested_state_length(void) 3363 { 3364 return kvm_state->max_nested_state_len; 3365 } 3366 3367 int kvm_has_gsi_routing(void) 3368 { 3369 #ifdef KVM_CAP_IRQ_ROUTING 3370 return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING); 3371 #else 3372 return false; 3373 #endif 3374 } 3375 3376 bool kvm_arm_supports_user_irq(void) 3377 { 3378 return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ); 3379 } 3380 3381 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG 3382 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu, vaddr pc) 3383 { 3384 struct kvm_sw_breakpoint *bp; 3385 3386 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) { 3387 if (bp->pc == pc) { 3388 return bp; 3389 } 3390 } 3391 return NULL; 3392 } 3393 3394 int kvm_sw_breakpoints_active(CPUState *cpu) 3395 { 3396 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints); 3397 } 3398 3399 struct kvm_set_guest_debug_data { 3400 struct kvm_guest_debug dbg; 3401 int err; 3402 }; 3403 3404 static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data) 3405 { 3406 struct kvm_set_guest_debug_data *dbg_data = 3407 (struct kvm_set_guest_debug_data *) data.host_ptr; 3408 3409 dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG, 3410 &dbg_data->dbg); 3411 } 3412 3413 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) 3414 { 3415 struct kvm_set_guest_debug_data data; 3416 3417 data.dbg.control = reinject_trap; 3418 3419 if (cpu->singlestep_enabled) { 3420 data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP; 3421 3422 if (cpu->singlestep_enabled & SSTEP_NOIRQ) { 3423 data.dbg.control |= KVM_GUESTDBG_BLOCKIRQ; 3424 } 3425 } 3426 kvm_arch_update_guest_debug(cpu, &data.dbg); 3427 3428 run_on_cpu(cpu, 
kvm_invoke_set_guest_debug, 3429 RUN_ON_CPU_HOST_PTR(&data)); 3430 return data.err; 3431 } 3432 3433 bool kvm_supports_guest_debug(void) 3434 { 3435 /* probed during kvm_init() */ 3436 return kvm_has_guest_debug; 3437 } 3438 3439 int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len) 3440 { 3441 struct kvm_sw_breakpoint *bp; 3442 int err; 3443 3444 if (type == GDB_BREAKPOINT_SW) { 3445 bp = kvm_find_sw_breakpoint(cpu, addr); 3446 if (bp) { 3447 bp->use_count++; 3448 return 0; 3449 } 3450 3451 bp = g_new(struct kvm_sw_breakpoint, 1); 3452 bp->pc = addr; 3453 bp->use_count = 1; 3454 err = kvm_arch_insert_sw_breakpoint(cpu, bp); 3455 if (err) { 3456 g_free(bp); 3457 return err; 3458 } 3459 3460 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); 3461 } else { 3462 err = kvm_arch_insert_hw_breakpoint(addr, len, type); 3463 if (err) { 3464 return err; 3465 } 3466 } 3467 3468 CPU_FOREACH(cpu) { 3469 err = kvm_update_guest_debug(cpu, 0); 3470 if (err) { 3471 return err; 3472 } 3473 } 3474 return 0; 3475 } 3476 3477 int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len) 3478 { 3479 struct kvm_sw_breakpoint *bp; 3480 int err; 3481 3482 if (type == GDB_BREAKPOINT_SW) { 3483 bp = kvm_find_sw_breakpoint(cpu, addr); 3484 if (!bp) { 3485 return -ENOENT; 3486 } 3487 3488 if (bp->use_count > 1) { 3489 bp->use_count--; 3490 return 0; 3491 } 3492 3493 err = kvm_arch_remove_sw_breakpoint(cpu, bp); 3494 if (err) { 3495 return err; 3496 } 3497 3498 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); 3499 g_free(bp); 3500 } else { 3501 err = kvm_arch_remove_hw_breakpoint(addr, len, type); 3502 if (err) { 3503 return err; 3504 } 3505 } 3506 3507 CPU_FOREACH(cpu) { 3508 err = kvm_update_guest_debug(cpu, 0); 3509 if (err) { 3510 return err; 3511 } 3512 } 3513 return 0; 3514 } 3515 3516 void kvm_remove_all_breakpoints(CPUState *cpu) 3517 { 3518 struct kvm_sw_breakpoint *bp, *next; 3519 KVMState *s = cpu->kvm_state; 3520 CPUState *tmpcpu; 3521 3522 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) { 3523 if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) { 3524 /* Try harder to find a CPU that currently sees the breakpoint. 
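 * A software breakpoint is removed by patching guest memory through a
 * particular vCPU; if removal via the given vCPU fails, retry with
 * every other vCPU before dropping the bookkeeping entry.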
*/ 3525 CPU_FOREACH(tmpcpu) { 3526 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) { 3527 break; 3528 } 3529 } 3530 } 3531 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry); 3532 g_free(bp); 3533 } 3534 kvm_arch_remove_all_hw_breakpoints(); 3535 3536 CPU_FOREACH(cpu) { 3537 kvm_update_guest_debug(cpu, 0); 3538 } 3539 } 3540 3541 #endif /* TARGET_KVM_HAVE_GUEST_DEBUG */ 3542 3543 static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset) 3544 { 3545 KVMState *s = kvm_state; 3546 struct kvm_signal_mask *sigmask; 3547 int r; 3548 3549 sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset)); 3550 3551 sigmask->len = s->sigmask_len; 3552 memcpy(sigmask->sigset, sigset, sizeof(*sigset)); 3553 r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask); 3554 g_free(sigmask); 3555 3556 return r; 3557 } 3558 3559 static void kvm_ipi_signal(int sig) 3560 { 3561 if (current_cpu) { 3562 assert(kvm_immediate_exit); 3563 kvm_cpu_kick(current_cpu); 3564 } 3565 } 3566 3567 void kvm_init_cpu_signals(CPUState *cpu) 3568 { 3569 int r; 3570 sigset_t set; 3571 struct sigaction sigact; 3572 3573 memset(&sigact, 0, sizeof(sigact)); 3574 sigact.sa_handler = kvm_ipi_signal; 3575 sigaction(SIG_IPI, &sigact, NULL); 3576 3577 pthread_sigmask(SIG_BLOCK, NULL, &set); 3578 #if defined KVM_HAVE_MCE_INJECTION 3579 sigdelset(&set, SIGBUS); 3580 pthread_sigmask(SIG_SETMASK, &set, NULL); 3581 #endif 3582 sigdelset(&set, SIG_IPI); 3583 if (kvm_immediate_exit) { 3584 r = pthread_sigmask(SIG_SETMASK, &set, NULL); 3585 } else { 3586 r = kvm_set_signal_mask(cpu, &set); 3587 } 3588 if (r) { 3589 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r)); 3590 exit(1); 3591 } 3592 } 3593 3594 /* Called asynchronously in VCPU thread. */ 3595 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) 3596 { 3597 #ifdef KVM_HAVE_MCE_INJECTION 3598 if (have_sigbus_pending) { 3599 return 1; 3600 } 3601 have_sigbus_pending = true; 3602 pending_sigbus_addr = addr; 3603 pending_sigbus_code = code; 3604 qatomic_set(&cpu->exit_request, 1); 3605 return 0; 3606 #else 3607 return 1; 3608 #endif 3609 } 3610 3611 /* Called synchronously (via signalfd) in main thread. */ 3612 int kvm_on_sigbus(int code, void *addr) 3613 { 3614 #ifdef KVM_HAVE_MCE_INJECTION 3615 /* An action-required MCE kills the process if SIGBUS is blocked. Because 3616 * that's what happens in the I/O thread, where we handle MCE via signalfd, 3617 * we can only get action optional here. 3618 */ 3619 assert(code != BUS_MCEERR_AR); 3620 kvm_arch_on_sigbus_vcpu(first_cpu, code, addr); 3621 return 0; 3622 #else 3623 return 1; 3624 #endif 3625 } 3626 3627 int kvm_create_device(KVMState *s, uint64_t type, bool test) 3628 { 3629 int ret; 3630 struct kvm_create_device create_dev; 3631 3632 create_dev.type = type; 3633 create_dev.fd = -1; 3634 create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0; 3635 3636 if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) { 3637 return -ENOTSUP; 3638 } 3639 3640 ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev); 3641 if (ret) { 3642 return ret; 3643 } 3644 3645 return test ? 
0 : create_dev.fd; 3646 } 3647 3648 bool kvm_device_supported(int vmfd, uint64_t type) 3649 { 3650 struct kvm_create_device create_dev = { 3651 .type = type, 3652 .fd = -1, 3653 .flags = KVM_CREATE_DEVICE_TEST, 3654 }; 3655 3656 if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) { 3657 return false; 3658 } 3659 3660 return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0); 3661 } 3662 3663 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source) 3664 { 3665 struct kvm_one_reg reg; 3666 int r; 3667 3668 reg.id = id; 3669 reg.addr = (uintptr_t) source; 3670 r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg); 3671 if (r) { 3672 trace_kvm_failed_reg_set(id, strerror(-r)); 3673 } 3674 return r; 3675 } 3676 3677 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target) 3678 { 3679 struct kvm_one_reg reg; 3680 int r; 3681 3682 reg.id = id; 3683 reg.addr = (uintptr_t) target; 3684 r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); 3685 if (r) { 3686 trace_kvm_failed_reg_get(id, strerror(-r)); 3687 } 3688 return r; 3689 } 3690 3691 static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as, 3692 hwaddr start_addr, hwaddr size) 3693 { 3694 KVMState *kvm = KVM_STATE(ms->accelerator); 3695 int i; 3696 3697 for (i = 0; i < kvm->nr_as; ++i) { 3698 if (kvm->as[i].as == as && kvm->as[i].ml) { 3699 size = MIN(kvm_max_slot_size, size); 3700 return NULL != kvm_lookup_matching_slot(kvm->as[i].ml, 3701 start_addr, size); 3702 } 3703 } 3704 3705 return false; 3706 } 3707 3708 static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v, 3709 const char *name, void *opaque, 3710 Error **errp) 3711 { 3712 KVMState *s = KVM_STATE(obj); 3713 int64_t value = s->kvm_shadow_mem; 3714 3715 visit_type_int(v, name, &value, errp); 3716 } 3717 3718 static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v, 3719 const char *name, void *opaque, 3720 Error **errp) 3721 { 3722 KVMState *s = KVM_STATE(obj); 3723 int64_t value; 3724 3725 if (s->fd != -1) { 3726 error_setg(errp, "Cannot set properties after the accelerator has been initialized"); 3727 return; 3728 } 3729 3730 if (!visit_type_int(v, name, &value, errp)) { 3731 return; 3732 } 3733 3734 s->kvm_shadow_mem = value; 3735 } 3736 3737 static void kvm_set_kernel_irqchip(Object *obj, Visitor *v, 3738 const char *name, void *opaque, 3739 Error **errp) 3740 { 3741 KVMState *s = KVM_STATE(obj); 3742 OnOffSplit mode; 3743 3744 if (s->fd != -1) { 3745 error_setg(errp, "Cannot set properties after the accelerator has been initialized"); 3746 return; 3747 } 3748 3749 if (!visit_type_OnOffSplit(v, name, &mode, errp)) { 3750 return; 3751 } 3752 switch (mode) { 3753 case ON_OFF_SPLIT_ON: 3754 s->kernel_irqchip_allowed = true; 3755 s->kernel_irqchip_required = true; 3756 s->kernel_irqchip_split = ON_OFF_AUTO_OFF; 3757 break; 3758 case ON_OFF_SPLIT_OFF: 3759 s->kernel_irqchip_allowed = false; 3760 s->kernel_irqchip_required = false; 3761 s->kernel_irqchip_split = ON_OFF_AUTO_OFF; 3762 break; 3763 case ON_OFF_SPLIT_SPLIT: 3764 s->kernel_irqchip_allowed = true; 3765 s->kernel_irqchip_required = true; 3766 s->kernel_irqchip_split = ON_OFF_AUTO_ON; 3767 break; 3768 default: 3769 /* The value was checked in visit_type_OnOffSplit() above. If 3770 * we get here, then something is wrong in QEMU. 
3771 */ 3772 abort(); 3773 } 3774 } 3775 3776 bool kvm_kernel_irqchip_allowed(void) 3777 { 3778 return kvm_state->kernel_irqchip_allowed; 3779 } 3780 3781 bool kvm_kernel_irqchip_required(void) 3782 { 3783 return kvm_state->kernel_irqchip_required; 3784 } 3785 3786 bool kvm_kernel_irqchip_split(void) 3787 { 3788 return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON; 3789 } 3790 3791 static void kvm_get_dirty_ring_size(Object *obj, Visitor *v, 3792 const char *name, void *opaque, 3793 Error **errp) 3794 { 3795 KVMState *s = KVM_STATE(obj); 3796 uint32_t value = s->kvm_dirty_ring_size; 3797 3798 visit_type_uint32(v, name, &value, errp); 3799 } 3800 3801 static void kvm_set_dirty_ring_size(Object *obj, Visitor *v, 3802 const char *name, void *opaque, 3803 Error **errp) 3804 { 3805 KVMState *s = KVM_STATE(obj); 3806 uint32_t value; 3807 3808 if (s->fd != -1) { 3809 error_setg(errp, "Cannot set properties after the accelerator has been initialized"); 3810 return; 3811 } 3812 3813 if (!visit_type_uint32(v, name, &value, errp)) { 3814 return; 3815 } 3816 if (value & (value - 1)) { 3817 error_setg(errp, "dirty-ring-size must be a power of two."); 3818 return; 3819 } 3820 3821 s->kvm_dirty_ring_size = value; 3822 } 3823 3824 static char *kvm_get_device(Object *obj, 3825 Error **errp G_GNUC_UNUSED) 3826 { 3827 KVMState *s = KVM_STATE(obj); 3828 3829 return g_strdup(s->device); 3830 } 3831 3832 static void kvm_set_device(Object *obj, 3833 const char *value, 3834 Error **errp G_GNUC_UNUSED) 3835 { 3836 KVMState *s = KVM_STATE(obj); 3837 3838 g_free(s->device); 3839 s->device = g_strdup(value); 3840 } 3841 3842 static void kvm_set_kvm_rapl(Object *obj, bool value, Error **errp) 3843 { 3844 KVMState *s = KVM_STATE(obj); 3845 s->msr_energy.enable = value; 3846 } 3847 3848 static void kvm_set_kvm_rapl_socket_path(Object *obj, 3849 const char *str, 3850 Error **errp) 3851 { 3852 KVMState *s = KVM_STATE(obj); 3853 g_free(s->msr_energy.socket_path); 3854 s->msr_energy.socket_path = g_strdup(str); 3855 } 3856 3857 static void kvm_accel_instance_init(Object *obj) 3858 { 3859 KVMState *s = KVM_STATE(obj); 3860 3861 s->fd = -1; 3862 s->vmfd = -1; 3863 s->kvm_shadow_mem = -1; 3864 s->kernel_irqchip_allowed = true; 3865 s->kernel_irqchip_split = ON_OFF_AUTO_AUTO; 3866 /* KVM dirty ring is by default off */ 3867 s->kvm_dirty_ring_size = 0; 3868 s->kvm_dirty_ring_with_bitmap = false; 3869 s->kvm_eager_split_size = 0; 3870 s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN; 3871 s->notify_window = 0; 3872 s->xen_version = 0; 3873 s->xen_gnttab_max_frames = 64; 3874 s->xen_evtchn_max_pirq = 256; 3875 s->device = NULL; 3876 s->msr_energy.enable = false; 3877 } 3878 3879 /** 3880 * kvm_gdbstub_sstep_flags(): 3881 * 3882 * Returns: SSTEP_* flags that KVM supports for guest debug. 
The 3883 * support is probed during kvm_init() 3884 */ 3885 static int kvm_gdbstub_sstep_flags(void) 3886 { 3887 return kvm_sstep_flags; 3888 } 3889 3890 static void kvm_accel_class_init(ObjectClass *oc, void *data) 3891 { 3892 AccelClass *ac = ACCEL_CLASS(oc); 3893 ac->name = "KVM"; 3894 ac->init_machine = kvm_init; 3895 ac->has_memory = kvm_accel_has_memory; 3896 ac->allowed = &kvm_allowed; 3897 ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags; 3898 3899 object_class_property_add(oc, "kernel-irqchip", "on|off|split", 3900 NULL, kvm_set_kernel_irqchip, 3901 NULL, NULL); 3902 object_class_property_set_description(oc, "kernel-irqchip", 3903 "Configure KVM in-kernel irqchip"); 3904 3905 object_class_property_add(oc, "kvm-shadow-mem", "int", 3906 kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem, 3907 NULL, NULL); 3908 object_class_property_set_description(oc, "kvm-shadow-mem", 3909 "KVM shadow MMU size"); 3910 3911 object_class_property_add(oc, "dirty-ring-size", "uint32", 3912 kvm_get_dirty_ring_size, kvm_set_dirty_ring_size, 3913 NULL, NULL); 3914 object_class_property_set_description(oc, "dirty-ring-size", 3915 "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)"); 3916 3917 object_class_property_add_str(oc, "device", kvm_get_device, kvm_set_device); 3918 object_class_property_set_description(oc, "device", 3919 "Path to the device node to use (default: /dev/kvm)"); 3920 3921 object_class_property_add_bool(oc, "rapl", 3922 NULL, 3923 kvm_set_kvm_rapl); 3924 object_class_property_set_description(oc, "rapl", 3925 "Allow energy-related MSRs for the RAPL interface in the guest"); 3926 3927 object_class_property_add_str(oc, "rapl-helper-socket", NULL, 3928 kvm_set_kvm_rapl_socket_path); 3929 object_class_property_set_description(oc, "rapl-helper-socket", 3930 "Socket path for communicating with the Virtual MSR helper daemon"); 3931 3932 kvm_arch_accel_class_init(oc); 3933 } 3934 3935 static const TypeInfo kvm_accel_type = { 3936 .name = TYPE_KVM_ACCEL, 3937 .parent = TYPE_ACCEL, 3938 .instance_init = kvm_accel_instance_init, 3939 .class_init = kvm_accel_class_init, 3940 .instance_size = sizeof(KVMState), 3941 }; 3942 3943 static void kvm_type_init(void) 3944 { 3945 type_register_static(&kvm_accel_type); 3946 } 3947 3948 type_init(kvm_type_init); 3949 3950 typedef struct StatsArgs { 3951 union StatsResultsType { 3952 StatsResultList **stats; 3953 StatsSchemaList **schema; 3954 } result; 3955 strList *names; 3956 Error **errp; 3957 } StatsArgs; 3958 3959 static StatsList *add_kvmstat_entry(struct kvm_stats_desc *pdesc, 3960 uint64_t *stats_data, 3961 StatsList *stats_list, 3962 Error **errp) 3963 { 3964 3965 Stats *stats; 3966 uint64List *val_list = NULL; 3967 3968 /* Only add stats that we understand. 
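 * A newer kernel can expose descriptor types, units or bases unknown
 * to this binary; the default cases below skip such entries silently
 * instead of failing the whole query.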
*/ 3969 switch (pdesc->flags & KVM_STATS_TYPE_MASK) { 3970 case KVM_STATS_TYPE_CUMULATIVE: 3971 case KVM_STATS_TYPE_INSTANT: 3972 case KVM_STATS_TYPE_PEAK: 3973 case KVM_STATS_TYPE_LINEAR_HIST: 3974 case KVM_STATS_TYPE_LOG_HIST: 3975 break; 3976 default: 3977 return stats_list; 3978 } 3979 3980 switch (pdesc->flags & KVM_STATS_UNIT_MASK) { 3981 case KVM_STATS_UNIT_NONE: 3982 case KVM_STATS_UNIT_BYTES: 3983 case KVM_STATS_UNIT_CYCLES: 3984 case KVM_STATS_UNIT_SECONDS: 3985 case KVM_STATS_UNIT_BOOLEAN: 3986 break; 3987 default: 3988 return stats_list; 3989 } 3990 3991 switch (pdesc->flags & KVM_STATS_BASE_MASK) { 3992 case KVM_STATS_BASE_POW10: 3993 case KVM_STATS_BASE_POW2: 3994 break; 3995 default: 3996 return stats_list; 3997 } 3998 3999 /* Alloc and populate data list */ 4000 stats = g_new0(Stats, 1); 4001 stats->name = g_strdup(pdesc->name); 4002 stats->value = g_new0(StatsValue, 1); 4003 4004 if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) { 4005 stats->value->u.boolean = *stats_data; 4006 stats->value->type = QTYPE_QBOOL; 4007 } else if (pdesc->size == 1) { 4008 stats->value->u.scalar = *stats_data; 4009 stats->value->type = QTYPE_QNUM; 4010 } else { 4011 int i; 4012 for (i = 0; i < pdesc->size; i++) { 4013 QAPI_LIST_PREPEND(val_list, stats_data[i]); 4014 } 4015 stats->value->u.list = val_list; 4016 stats->value->type = QTYPE_QLIST; 4017 } 4018 4019 QAPI_LIST_PREPEND(stats_list, stats); 4020 return stats_list; 4021 } 4022 4023 static StatsSchemaValueList *add_kvmschema_entry(struct kvm_stats_desc *pdesc, 4024 StatsSchemaValueList *list, 4025 Error **errp) 4026 { 4027 StatsSchemaValueList *schema_entry = g_new0(StatsSchemaValueList, 1); 4028 schema_entry->value = g_new0(StatsSchemaValue, 1); 4029 4030 switch (pdesc->flags & KVM_STATS_TYPE_MASK) { 4031 case KVM_STATS_TYPE_CUMULATIVE: 4032 schema_entry->value->type = STATS_TYPE_CUMULATIVE; 4033 break; 4034 case KVM_STATS_TYPE_INSTANT: 4035 schema_entry->value->type = STATS_TYPE_INSTANT; 4036 break; 4037 case KVM_STATS_TYPE_PEAK: 4038 schema_entry->value->type = STATS_TYPE_PEAK; 4039 break; 4040 case KVM_STATS_TYPE_LINEAR_HIST: 4041 schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM; 4042 schema_entry->value->bucket_size = pdesc->bucket_size; 4043 schema_entry->value->has_bucket_size = true; 4044 break; 4045 case KVM_STATS_TYPE_LOG_HIST: 4046 schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM; 4047 break; 4048 default: 4049 goto exit; 4050 } 4051 4052 switch (pdesc->flags & KVM_STATS_UNIT_MASK) { 4053 case KVM_STATS_UNIT_NONE: 4054 break; 4055 case KVM_STATS_UNIT_BOOLEAN: 4056 schema_entry->value->has_unit = true; 4057 schema_entry->value->unit = STATS_UNIT_BOOLEAN; 4058 break; 4059 case KVM_STATS_UNIT_BYTES: 4060 schema_entry->value->has_unit = true; 4061 schema_entry->value->unit = STATS_UNIT_BYTES; 4062 break; 4063 case KVM_STATS_UNIT_CYCLES: 4064 schema_entry->value->has_unit = true; 4065 schema_entry->value->unit = STATS_UNIT_CYCLES; 4066 break; 4067 case KVM_STATS_UNIT_SECONDS: 4068 schema_entry->value->has_unit = true; 4069 schema_entry->value->unit = STATS_UNIT_SECONDS; 4070 break; 4071 default: 4072 goto exit; 4073 } 4074 4075 schema_entry->value->exponent = pdesc->exponent; 4076 if (pdesc->exponent) { 4077 switch (pdesc->flags & KVM_STATS_BASE_MASK) { 4078 case KVM_STATS_BASE_POW10: 4079 schema_entry->value->has_base = true; 4080 schema_entry->value->base = 10; 4081 break; 4082 case KVM_STATS_BASE_POW2: 4083 schema_entry->value->has_base = true; 4084 schema_entry->value->base = 2; 4085 break; 4086 
default: 4087 goto exit; 4088 } 4089 } 4090 4091 schema_entry->value->name = g_strdup(pdesc->name); 4092 schema_entry->next = list; 4093 return schema_entry; 4094 exit: 4095 g_free(schema_entry->value); 4096 g_free(schema_entry); 4097 return list; 4098 } 4099 4100 /* Cached stats descriptors */ 4101 typedef struct StatsDescriptors { 4102 const char *ident; /* cache key, currently the StatsTarget */ 4103 struct kvm_stats_desc *kvm_stats_desc; 4104 struct kvm_stats_header kvm_stats_header; 4105 QTAILQ_ENTRY(StatsDescriptors) next; 4106 } StatsDescriptors; 4107 4108 static QTAILQ_HEAD(, StatsDescriptors) stats_descriptors = 4109 QTAILQ_HEAD_INITIALIZER(stats_descriptors); 4110 4111 /* 4112 * Return the descriptors for 'target', which have either already been 4113 * read or are now retrieved from 'stats_fd'. 4114 */ 4115 static StatsDescriptors *find_stats_descriptors(StatsTarget target, int stats_fd, 4116 Error **errp) 4117 { 4118 StatsDescriptors *descriptors; 4119 const char *ident; 4120 struct kvm_stats_desc *kvm_stats_desc; 4121 struct kvm_stats_header *kvm_stats_header; 4122 size_t size_desc; 4123 ssize_t ret; 4124 4125 ident = StatsTarget_str(target); 4126 QTAILQ_FOREACH(descriptors, &stats_descriptors, next) { 4127 if (g_str_equal(descriptors->ident, ident)) { 4128 return descriptors; 4129 } 4130 } 4131 4132 descriptors = g_new0(StatsDescriptors, 1); 4133 4134 /* Read stats header */ 4135 kvm_stats_header = &descriptors->kvm_stats_header; 4136 ret = pread(stats_fd, kvm_stats_header, sizeof(*kvm_stats_header), 0); 4137 if (ret != sizeof(*kvm_stats_header)) { 4138 error_setg(errp, "KVM stats: failed to read stats header: " 4139 "expected %zu actual %zu", 4140 sizeof(*kvm_stats_header), ret); 4141 g_free(descriptors); 4142 return NULL; 4143 } 4144 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; 4145 4146 /* Read stats descriptors */ 4147 kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc); 4148 ret = pread(stats_fd, kvm_stats_desc, 4149 size_desc * kvm_stats_header->num_desc, 4150 kvm_stats_header->desc_offset); 4151 4152 if (ret != size_desc * kvm_stats_header->num_desc) { 4153 error_setg(errp, "KVM stats: failed to read stats descriptors: " 4154 "expected %zu actual %zu", 4155 size_desc * kvm_stats_header->num_desc, ret); 4156 g_free(descriptors); 4157 g_free(kvm_stats_desc); 4158 return NULL; 4159 } 4160 descriptors->kvm_stats_desc = kvm_stats_desc; 4161 descriptors->ident = ident; 4162 QTAILQ_INSERT_TAIL(&stats_descriptors, descriptors, next); 4163 return descriptors; 4164 } 4165 4166 static void query_stats(StatsResultList **result, StatsTarget target, 4167 strList *names, int stats_fd, CPUState *cpu, 4168 Error **errp) 4169 { 4170 struct kvm_stats_desc *kvm_stats_desc; 4171 struct kvm_stats_header *kvm_stats_header; 4172 StatsDescriptors *descriptors; 4173 g_autofree uint64_t *stats_data = NULL; 4174 struct kvm_stats_desc *pdesc; 4175 StatsList *stats_list = NULL; 4176 size_t size_desc, size_data = 0; 4177 ssize_t ret; 4178 int i; 4179 4180 descriptors = find_stats_descriptors(target, stats_fd, errp); 4181 if (!descriptors) { 4182 return; 4183 } 4184 4185 kvm_stats_header = &descriptors->kvm_stats_header; 4186 kvm_stats_desc = descriptors->kvm_stats_desc; 4187 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; 4188 4189 /* Tally the total data size */ 4190 for (i = 0; i < kvm_stats_header->num_desc; ++i) { 4191 pdesc = (void *)kvm_stats_desc + i * size_desc; 4192 size_data += pdesc->size * sizeof(*stats_data); 4193 } 
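/*
 * The binary stats file is laid out as header, descriptor array, then
 * one flat data block; pdesc->offset locates each stat's values inside
 * that block, so a single pread() of size_data below fetches them all.
 */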

static void query_stats(StatsResultList **result, StatsTarget target,
                        strList *names, int stats_fd, CPUState *cpu,
                        Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    g_autofree uint64_t *stats_data = NULL;
    struct kvm_stats_desc *pdesc;
    StatsList *stats_list = NULL;
    size_t size_desc, size_data = 0;
    ssize_t ret;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Tally the total data size */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        size_data += pdesc->size * sizeof(*stats_data);
    }

    stats_data = g_malloc0(size_data);
    ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset);

    if (ret != size_data) {
        error_setg(errp, "KVM stats: failed to read data: "
                   "expected %zu actual %zd", size_data, ret);
        return;
    }

    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        uint64_t *stats;

        pdesc = (void *)kvm_stats_desc + i * size_desc;

        /* Add entry to the list */
        stats = (void *)stats_data + pdesc->offset;
        if (!apply_str_list_filter(pdesc->name, names)) {
            continue;
        }
        stats_list = add_kvmstat_entry(pdesc, stats, stats_list, errp);
    }

    if (!stats_list) {
        return;
    }

    switch (target) {
    case STATS_TARGET_VM:
        add_stats_entry(result, STATS_PROVIDER_KVM, NULL, stats_list);
        break;
    case STATS_TARGET_VCPU:
        add_stats_entry(result, STATS_PROVIDER_KVM,
                        cpu->parent_obj.canonical_path,
                        stats_list);
        break;
    default:
        g_assert_not_reached();
    }
}

static void query_stats_schema(StatsSchemaList **result, StatsTarget target,
                               int stats_fd, Error **errp)
{
    struct kvm_stats_desc *kvm_stats_desc;
    struct kvm_stats_header *kvm_stats_header;
    StatsDescriptors *descriptors;
    struct kvm_stats_desc *pdesc;
    StatsSchemaValueList *stats_list = NULL;
    size_t size_desc;
    int i;

    descriptors = find_stats_descriptors(target, stats_fd, errp);
    if (!descriptors) {
        return;
    }

    kvm_stats_header = &descriptors->kvm_stats_header;
    kvm_stats_desc = descriptors->kvm_stats_desc;
    size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size;

    /* Build a schema entry for each descriptor */
    for (i = 0; i < kvm_stats_header->num_desc; ++i) {
        pdesc = (void *)kvm_stats_desc + i * size_desc;
        stats_list = add_kvmschema_entry(pdesc, stats_list, errp);
    }

    add_stats_schema(result, STATS_PROVIDER_KVM, target, stats_list);
}

static void query_stats_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU,
                kvm_stats_args->names, stats_fd, cpu,
                kvm_stats_args->errp);
}

static void query_stats_schema_vcpu(CPUState *cpu, StatsArgs *kvm_stats_args)
{
    int stats_fd = cpu->kvm_vcpu_stats_fd;
    Error *local_err = NULL;

    if (stats_fd == -1) {
        error_setg_errno(&local_err, errno, "KVM stats: ioctl failed");
        error_propagate(kvm_stats_args->errp, local_err);
        return;
    }
    query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU,
                       stats_fd, kvm_stats_args->errp);
}
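
/*
 * query_stats_cb() and query_stats_schemas_cb() below back the QMP
 * query-stats and query-stats-schemas commands.  An illustrative
 * request (the vCPU canonical path is hypothetical) that reaches here
 * with target == STATS_TARGET_VCPU:
 *
 *     { "execute": "query-stats",
 *       "arguments": { "target": "vcpu",
 *                      "vcpus": [ "/machine/unattached/device[0]" ] } }
 */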

static void query_stats_cb(StatsResultList **result, StatsTarget target,
                           strList *names, strList *targets, Error **errp)
{
    KVMState *s = kvm_state;
    CPUState *cpu;
    int stats_fd;

    switch (target) {
    case STATS_TARGET_VM:
    {
        stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
        if (stats_fd < 0) {
            error_setg_errno(errp, errno, "KVM stats: ioctl failed");
            return;
        }
        query_stats(result, target, names, stats_fd, NULL, errp);
        close(stats_fd);
        break;
    }
    case STATS_TARGET_VCPU:
    {
        StatsArgs stats_args;

        stats_args.result.stats = result;
        stats_args.names = names;
        stats_args.errp = errp;
        CPU_FOREACH(cpu) {
            if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) {
                continue;
            }
            query_stats_vcpu(cpu, &stats_args);
        }
        break;
    }
    default:
        break;
    }
}

void query_stats_schemas_cb(StatsSchemaList **result, Error **errp)
{
    StatsArgs stats_args;
    KVMState *s = kvm_state;
    int stats_fd;

    stats_fd = kvm_vm_ioctl(s, KVM_GET_STATS_FD, NULL);
    if (stats_fd < 0) {
        error_setg_errno(errp, errno, "KVM stats: ioctl failed");
        return;
    }
    query_stats_schema(result, STATS_TARGET_VM, stats_fd, errp);
    close(stats_fd);

    if (first_cpu) {
        stats_args.result.schema = result;
        stats_args.errp = errp;
        query_stats_schema_vcpu(first_cpu, &stats_args);
    }
}

void kvm_mark_guest_state_protected(void)
{
    kvm_state->guest_state_protected = true;
}

int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
{
    int fd;
    struct kvm_create_guest_memfd guest_memfd = {
        .size = size,
        .flags = flags,
    };

    if (!kvm_guest_memfd_supported) {
        error_setg(errp, "KVM does not support guest_memfd");
        return -1;
    }

    fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
    if (fd < 0) {
        error_setg_errno(errp, errno, "Error creating KVM guest_memfd");
        return -1;
    }

    return fd;
}
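
/*
 * Usage sketch for kvm_create_guest_memfd() (illustrative; error
 * handling abbreviated).  The returned fd is meant to be registered
 * with KVM as the private backing of a memory slot, i.e. via the
 * guest_memfd/guest_memfd_offset fields of struct
 * kvm_userspace_memory_region2:
 *
 *     Error *err = NULL;
 *     int fd = kvm_create_guest_memfd(64 * MiB, 0, &err);
 *
 *     if (fd < 0) {
 *         error_report_err(err);
 *     }
 */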