Lines matching the tokenized query min - wakeup - pin - assert - time - ms (the search term "min-wakeup-pin-assert-time-ms" split into its tokens) in QEMU's KVM accelerator core, accel/kvm/kvm-all.c. Each entry shows the source line number, the matching line and, where applicable, the enclosing function.

12  * See the COPYING file in the top-level directory.
24 #include "qemu/config-file.h"
25 #include "qemu/error-report.h"
34 #include "system/accel-blocker.h"
39 #include "qemu/main-loop.h"
43 #include "qapi/qapi-types-common.h"
44 #include "qapi/qapi-visit-common.h"
46 #include "qemu/guest-random.h"
48 #include "kvm-cpus.h"
55 /* This check must be after config-host.h is included */
142 if (rfd->gsi == gsi) { in kvm_resample_fd_remove()
154 rfd->gsi = gsi; in kvm_resample_fd_insert()
155 rfd->resample_event = event; in kvm_resample_fd_insert()
165 if (rfd->gsi == gsi) { in kvm_resample_fd_notify()
166 event_notifier_set(rfd->resample_event); in kvm_resample_fd_notify()
183 unsigned int i, cur = kml->nr_slots_allocated; in kvm_slots_grow()
186 if (nr_slots_new > kvm_state->nr_slots_max) { in kvm_slots_grow()
187 nr_slots_new = kvm_state->nr_slots_max; in kvm_slots_grow()
198 assert(kml->slots); in kvm_slots_grow()
199 slots = g_renew(KVMSlot, kml->slots, nr_slots_new); in kvm_slots_grow()
202 * memslots require fields to be zero-initialized. E.g. pointers, in kvm_slots_grow()
205 memset(&slots[cur], 0x0, sizeof(slots[0]) * (nr_slots_new - cur)); in kvm_slots_grow()
212 kml->slots = slots; in kvm_slots_grow()
213 kml->nr_slots_allocated = nr_slots_new; in kvm_slots_grow()
221 return kvm_slots_grow(kml, kml->nr_slots_allocated * 2); in kvm_slots_double()
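
The fragments above come from the on-demand growth of the per-listener memslot array. Below is a minimal sketch of that logic, with names taken from the fragments and QEMU's internal types and glib helpers assumed; the real function additionally handles the very first allocation, slot numbering and tracing.

    static bool kvm_slots_grow(KVMMemoryListener *kml, unsigned int nr_slots_new)
    {
        unsigned int cur = kml->nr_slots_allocated;
        KVMSlot *slots;

        /* Never allocate more slots than the VM can actually use. */
        if (nr_slots_new > kvm_state->nr_slots_max) {
            nr_slots_new = kvm_state->nr_slots_max;
        }
        if (cur >= nr_slots_new) {
            return false;               /* already big enough, or at the cap */
        }

        assert(kml->slots);
        slots = g_renew(KVMSlot, kml->slots, nr_slots_new);
        /* g_renew() leaves the new tail uninitialized, but memslots rely on
         * zero-initialized fields (pointers, memory_size, flags). */
        memset(&slots[cur], 0, sizeof(slots[0]) * (nr_slots_new - cur));

        kml->slots = slots;
        kml->nr_slots_allocated = nr_slots_new;
        return true;
    }

    /* Doubling policy used when no free slot is found: */
    static bool kvm_slots_double(KVMMemoryListener *kml)
    {
        return kvm_slots_grow(kml, kml->nr_slots_allocated * 2);
    }
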
228 return s->nr_slots_max; in kvm_get_max_memslots()
238 for (i = 0; i < s->nr_as; i++) { in kvm_get_free_memslots()
239 if (!s->as[i].ml) { in kvm_get_free_memslots()
242 used_slots = MAX(used_slots, s->as[i].ml->nr_slots_used); in kvm_get_free_memslots()
246 return s->nr_slots_max - used_slots; in kvm_get_free_memslots()
255 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_get_free_slot()
256 if (kml->slots[i].memory_size == 0) { in kvm_get_free_slot()
257 return &kml->slots[i]; in kvm_get_free_slot()
267 n = kml->nr_slots_allocated; in kvm_get_free_slot()
269 return &kml->slots[n]; in kvm_get_free_slot()
294 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_lookup_matching_slot()
295 KVMSlot *mem = &kml->slots[i]; in kvm_lookup_matching_slot()
297 if (start_addr == mem->start_addr && size == mem->memory_size) { in kvm_lookup_matching_slot()
312 hwaddr size = int128_get64(section->size); in kvm_align_section()
316 with sub-page size and unaligned start address. Pad the start in kvm_align_section()
318 aligned = ROUND_UP(section->offset_within_address_space, in kvm_align_section()
320 delta = aligned - section->offset_within_address_space; in kvm_align_section()
326 return (size - delta) & qemu_real_host_page_mask(); in kvm_align_section()
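
The kvm_align_section() fragments above trim a memory section to host-page granularity: the start is padded up to the next host page and any sub-page tail is dropped. A sketch of the whole helper under those fragments' names (QEMU's MemoryRegionSection and page-size helpers assumed):

    static hwaddr kvm_align_section(MemoryRegionSection *section, hwaddr *start)
    {
        hwaddr size = int128_get64(section->size);
        hwaddr delta, aligned;

        /* Pad the start address up to the next host page... */
        aligned = ROUND_UP(section->offset_within_address_space,
                           qemu_real_host_page_size());
        delta = aligned - section->offset_within_address_space;
        *start = aligned;
        if (delta > size) {
            return 0;                   /* section smaller than the padding */
        }

        /* ...and truncate the size to the previous page boundary. */
        return (size - delta) & qemu_real_host_page_mask();
    }
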
332 KVMMemoryListener *kml = &s->memory_listener; in kvm_physical_memory_addr_from_host()
336 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_physical_memory_addr_from_host()
337 KVMSlot *mem = &kml->slots[i]; in kvm_physical_memory_addr_from_host()
339 if (ram >= mem->ram && ram < mem->ram + mem->memory_size) { in kvm_physical_memory_addr_from_host()
340 *phys_addr = mem->start_addr + (ram - mem->ram); in kvm_physical_memory_addr_from_host()
356 mem.slot = slot->slot | (kml->as_id << 16); in kvm_set_user_memory_region()
357 mem.guest_phys_addr = slot->start_addr; in kvm_set_user_memory_region()
358 mem.userspace_addr = (unsigned long)slot->ram; in kvm_set_user_memory_region()
359 mem.flags = slot->flags; in kvm_set_user_memory_region()
360 mem.guest_memfd = slot->guest_memfd; in kvm_set_user_memory_region()
361 mem.guest_memfd_offset = slot->guest_memfd_offset; in kvm_set_user_memory_region()
363 if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) { in kvm_set_user_memory_region()
377 mem.memory_size = slot->memory_size; in kvm_set_user_memory_region()
383 slot->old_flags = mem.flags; in kvm_set_user_memory_region()
395 __func__, mem.slot, slot->start_addr, in kvm_set_user_memory_region()
402 __func__, mem.slot, slot->start_addr, in kvm_set_user_memory_region()
413 trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); in kvm_park_vcpu()
416 vcpu->vcpu_id = kvm_arch_vcpu_id(cpu); in kvm_park_vcpu()
417 vcpu->kvm_fd = cpu->kvm_fd; in kvm_park_vcpu()
418 QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node); in kvm_park_vcpu()
424 int kvm_fd = -ENOENT; in kvm_unpark_vcpu()
426 QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) { in kvm_unpark_vcpu()
427 if (cpu->vcpu_id == vcpu_id) { in kvm_unpark_vcpu()
429 kvm_fd = cpu->kvm_fd; in kvm_unpark_vcpu()
444 QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) { in kvm_reset_parked_vcpus()
445 kvm_arch_reset_parked_vcpu(cpu->vcpu_id, cpu->kvm_fd); in kvm_reset_parked_vcpus()
466 cpu->kvm_fd = kvm_fd; in kvm_create_vcpu()
467 cpu->kvm_state = s; in kvm_create_vcpu()
468 cpu->vcpu_dirty = true; in kvm_create_vcpu()
469 cpu->dirty_pages = 0; in kvm_create_vcpu()
470 cpu->throttle_us_per_full = 0; in kvm_create_vcpu()
472 trace_kvm_create_vcpu(cpu->cpu_index, vcpu_id, kvm_fd); in kvm_create_vcpu()
495 trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); in do_kvm_destroy_vcpu()
509 ret = munmap(cpu->kvm_run, mmap_size); in do_kvm_destroy_vcpu()
514 if (cpu->kvm_dirty_gfns) { in do_kvm_destroy_vcpu()
515 ret = munmap(cpu->kvm_dirty_gfns, s->kvm_dirty_ring_bytes); in do_kvm_destroy_vcpu()
540 trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu)); in kvm_init_vcpu()
544 error_setg_errno(errp, -ret, in kvm_init_vcpu()
553 error_setg_errno(errp, -mmap_size, in kvm_init_vcpu()
558 cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, in kvm_init_vcpu()
559 cpu->kvm_fd, 0); in kvm_init_vcpu()
560 if (cpu->kvm_run == MAP_FAILED) { in kvm_init_vcpu()
561 ret = -errno; in kvm_init_vcpu()
568 if (s->coalesced_mmio && !s->coalesced_mmio_ring) { in kvm_init_vcpu()
569 s->coalesced_mmio_ring = in kvm_init_vcpu()
570 (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE; in kvm_init_vcpu()
573 if (s->kvm_dirty_ring_size) { in kvm_init_vcpu()
575 cpu->kvm_dirty_gfns = mmap(NULL, s->kvm_dirty_ring_bytes, in kvm_init_vcpu()
577 cpu->kvm_fd, in kvm_init_vcpu()
579 if (cpu->kvm_dirty_gfns == MAP_FAILED) { in kvm_init_vcpu()
580 ret = -errno; in kvm_init_vcpu()
587 error_setg_errno(errp, -ret, in kvm_init_vcpu()
591 cpu->kvm_vcpu_stats_fd = kvm_vcpu_ioctl(cpu, KVM_GET_STATS_FD, NULL); in kvm_init_vcpu()
603 bool readonly = mr->readonly || memory_region_is_romd(mr); in kvm_mem_flags()
613 assert(kvm_guest_memfd_supported); in kvm_mem_flags()
623 mem->flags = kvm_mem_flags(mr); in kvm_slot_update_flags()
626 if (mem->flags == mem->old_flags) { in kvm_slot_update_flags()
649 slot_size = MIN(kvm_max_slot_size, size); in kvm_section_update_flags()
656 ret = kvm_slot_update_flags(kml, mem, section->mr); in kvm_section_update_flags()
658 size -= slot_size; in kvm_section_update_flags()
703 ram_addr_t start = slot->ram_start_offset; in kvm_slot_sync_dirty_pages()
704 ram_addr_t pages = slot->memory_size / qemu_real_host_page_size(); in kvm_slot_sync_dirty_pages()
706 cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages); in kvm_slot_sync_dirty_pages()
711 memset(slot->dirty_bmap, 0, slot->dirty_bmap_size); in kvm_slot_reset_dirty_pages()
714 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
719 if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) { in kvm_slot_init_dirty_bitmap()
726 * bits-per-long. But for case when the kernel is 64bits and in kvm_slot_init_dirty_bitmap()
728 * bits-per-long, since sizeof(long) is different between kernel in kvm_slot_init_dirty_bitmap()
734 * a hope that sizeof(long) won't become >8 any time soon. in kvm_slot_init_dirty_bitmap()
737 * And mem->memory_size is aligned to it (otherwise this mem can't in kvm_slot_init_dirty_bitmap()
740 hwaddr bitmap_size = ALIGN(mem->memory_size / qemu_real_host_page_size(), in kvm_slot_init_dirty_bitmap()
742 mem->dirty_bmap = g_malloc0(bitmap_size); in kvm_slot_init_dirty_bitmap()
743 mem->dirty_bmap_size = bitmap_size; in kvm_slot_init_dirty_bitmap()
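
The ALIGN macro and the kvm_slot_init_dirty_bitmap() fragments above size the per-slot dirty bitmap. A worked sketch of that arithmetic, where 64 stands in for the kernel's bits-per-long on a 64-bit host as the comments explain:

    /* Slot size in host pages, rounded up to a multiple of 64 bits,
     * then converted from bits to bytes. */
    hwaddr pages       = mem->memory_size / qemu_real_host_page_size();
    hwaddr bitmap_size = ALIGN(pages, 64) / 8;

    mem->dirty_bmap      = g_malloc0(bitmap_size);
    mem->dirty_bmap_size = bitmap_size;

    /* Example: a 1 GiB slot with 4 KiB host pages gives
     *   pages = 262144, ALIGN(262144, 64) = 262144 bits, bitmap_size = 32 KiB */
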
755 d.dirty_bitmap = slot->dirty_bmap; in kvm_slot_get_dirty_log()
756 d.slot = slot->slot | (slot->as_id << 16); in kvm_slot_get_dirty_log()
759 if (ret == -ENOENT) { in kvm_slot_get_dirty_log()
777 if (as_id >= s->nr_as) { in kvm_dirty_ring_mark_page()
781 kml = s->as[as_id].ml; in kvm_dirty_ring_mark_page()
782 mem = &kml->slots[slot_id]; in kvm_dirty_ring_mark_page()
784 if (!mem->memory_size || offset >= in kvm_dirty_ring_mark_page()
785 (mem->memory_size / qemu_real_host_page_size())) { in kvm_dirty_ring_mark_page()
789 set_bit(offset, mem->dirty_bmap); in kvm_dirty_ring_mark_page()
798 return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY; in dirty_gfn_is_dirtied()
804 * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS in dirty_gfn_set_collected()
808 * ------------------------------------------------------------------------------ in dirty_gfn_set_collected()
810 * store-rel flags for gfn0 in dirty_gfn_set_collected()
811 * load-acq flags for gfn0 in dirty_gfn_set_collected()
812 * store-rel RESET for gfn0 in dirty_gfn_set_collected()
814 * load-acq flags for gfn0 in dirty_gfn_set_collected()
819 qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET); in dirty_gfn_set_collected()
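
The two helpers the fragments above belong to form the userspace half of the dirty-ring handshake described in the comment: the harvester's load-acquire pairs with KVM's store-release when an entry is published, and the harvester's store-release pairs with the kernel's load-acquire during KVM_RESET_DIRTY_RINGS. A sketch of the pair, assuming the kvm_dirty_gfn layout from the kernel headers:

    /* Reader side: only after the acquire are the entry's slot/offset
     * fields guaranteed to belong to this published entry. */
    static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
    {
        return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY;
    }

    /* Writer side: everything done while harvesting the entry must be
     * visible before the kernel recycles it. */
    static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
    {
        qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET);
    }
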
828 struct kvm_dirty_gfn *dirty_gfns = cpu->kvm_dirty_gfns, *cur; in kvm_dirty_ring_reap_one()
829 uint32_t ring_size = s->kvm_dirty_ring_size; in kvm_dirty_ring_reap_one()
830 uint32_t count = 0, fetch = cpu->kvm_fetch_index; in kvm_dirty_ring_reap_one()
837 if (!cpu->created) { in kvm_dirty_ring_reap_one()
841 assert(dirty_gfns && ring_size); in kvm_dirty_ring_reap_one()
842 trace_kvm_dirty_ring_reap_vcpu(cpu->cpu_index); in kvm_dirty_ring_reap_one()
849 kvm_dirty_ring_mark_page(s, cur->slot >> 16, cur->slot & 0xffff, in kvm_dirty_ring_reap_one()
850 cur->offset); in kvm_dirty_ring_reap_one()
852 trace_kvm_dirty_ring_page(cpu->cpu_index, fetch, cur->offset); in kvm_dirty_ring_reap_one()
856 cpu->kvm_fetch_index = fetch; in kvm_dirty_ring_reap_one()
857 cpu->dirty_pages += count; in kvm_dirty_ring_reap_one()
881 assert(ret == total); in kvm_dirty_ring_reap_locked()
884 stamp = get_clock() - stamp; in kvm_dirty_ring_reap_locked()
911 * bitmaps before correctly re-protect those dirtied pages. in kvm_dirty_ring_reap()
945 * before calling this function have been put into the per-kvmslot
958 assert(bql_locked()); in kvm_dirty_ring_flush()
969 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
974 * NOTE: caller must be with kml->slots_lock held.
989 slot_size = MIN(kvm_max_slot_size, size); in kvm_physical_sync_dirty_bitmap()
999 size -= slot_size; in kvm_physical_sync_dirty_bitmap()
1003 /* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
1006 #define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
1023 start_delta = start - bmap_start; in kvm_log_clear_one_slot()
1034 end = mem->memory_size / psize; in kvm_log_clear_one_slot()
1035 if (bmap_npages > end - bmap_start) { in kvm_log_clear_one_slot()
1036 bmap_npages = end - bmap_start; in kvm_log_clear_one_slot()
1050 * |<-------- bmap_npages -----------..>| in kvm_log_clear_one_slot()
1053 * |----------------|-------------|------------------|------------| in kvm_log_clear_one_slot()
1062 assert(bmap_start % BITS_PER_LONG == 0); in kvm_log_clear_one_slot()
1064 assert(mem->dirty_bmap); in kvm_log_clear_one_slot()
1065 if (start_delta || bmap_npages - size / psize) { in kvm_log_clear_one_slot()
1066 /* Slow path - we need to manipulate a temp bitmap */ in kvm_log_clear_one_slot()
1068 bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap, in kvm_log_clear_one_slot()
1079 * Fast path - both start and size align well with BITS_PER_LONG in kvm_log_clear_one_slot()
1082 d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start); in kvm_log_clear_one_slot()
1087 assert(bmap_npages <= UINT32_MAX); in kvm_log_clear_one_slot()
1089 d.slot = mem->slot | (as_id << 16); in kvm_log_clear_one_slot()
1092 if (ret < 0 && ret != -ENOENT) { in kvm_log_clear_one_slot()
1108 bitmap_clear(mem->dirty_bmap, bmap_start + start_delta, in kvm_log_clear_one_slot()
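
The KVM_CLEAR_LOG_* defines and the kvm_log_clear_one_slot() fragments above widen a clear request to the 64-page granularity that KVM_CLEAR_DIRTY_LOG accepts. A sketch of that widening, assuming a shift of 6 (64 pages) and byte offsets relative to the slot:

    #define KVM_CLEAR_LOG_SHIFT  6
    #define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size() << KVM_CLEAR_LOG_SHIFT)
    #define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)

    uint64_t psize       = qemu_real_host_page_size();
    uint64_t bmap_start  = start & KVM_CLEAR_LOG_MASK;   /* round start down to 64 pages */
    uint64_t start_delta = start - bmap_start;           /* bytes pulled in before 'start' */
    uint64_t bmap_npages, end;

    bmap_start /= psize;                                  /* bytes -> page index */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
                  << KVM_CLEAR_LOG_SHIFT;                 /* length rounded up to 64 pages */

    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;                   /* or run to the end of the slot */
    }

    /* The slow path then copies the affected chunk of mem->dirty_bmap with
     * bitmap_copy_with_src_offset() before issuing KVM_CLEAR_DIRTY_LOG, as
     * the fragments show; the fast path passes the bitmap words directly. */
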
1117 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
1119 * NOTE: this will be a no-op if we haven't enabled manual dirty log
1134 if (!s->manual_dirty_log_protect) { in kvm_physical_log_clear()
1139 start = section->offset_within_address_space; in kvm_physical_log_clear()
1140 size = int128_get64(section->size); in kvm_physical_log_clear()
1149 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_physical_log_clear()
1150 mem = &kml->slots[i]; in kvm_physical_log_clear()
1152 if (!mem->memory_size || in kvm_physical_log_clear()
1153 mem->start_addr > start + size - 1 || in kvm_physical_log_clear()
1154 start > mem->start_addr + mem->memory_size - 1) { in kvm_physical_log_clear()
1158 if (start >= mem->start_addr) { in kvm_physical_log_clear()
1160 offset = start - mem->start_addr; in kvm_physical_log_clear()
1161 count = MIN(mem->memory_size - offset, size); in kvm_physical_log_clear()
1165 count = MIN(mem->memory_size, size - (mem->start_addr - start)); in kvm_physical_log_clear()
1167 ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count); in kvm_physical_log_clear()
1184 if (s->coalesced_mmio) { in kvm_coalesce_mmio_region()
1201 if (s->coalesced_mmio) { in kvm_uncoalesce_mmio_region()
1218 if (s->coalesced_pio) { in kvm_coalesce_pio_add()
1235 if (s->coalesced_pio) { in kvm_coalesce_pio_del()
1273 * - replace them on VM reset
1274 * - block a migration for a VM with a poisoned page
1290 qemu_ram_remap(page->ram_addr); in kvm_unpoison_all()
1300 if (page->ram_addr == ram_addr) { in kvm_hwpoison_page_add()
1305 page->ram_addr = ram_addr; in kvm_hwpoison_page_add()
1319 * For example, PPC is always treated as big-endian even if running in adjust_ioeventfd_endianness()
1349 return -ENOSYS; in kvm_set_ioeventfd_mmio()
1362 return -errno; in kvm_set_ioeventfd_mmio()
1381 return -ENOSYS; in kvm_set_ioeventfd_pio()
1400 while (list->name) { in kvm_check_extension_list()
1401 if (!kvm_check_extension(s, list->value)) { in kvm_check_extension_list()
1422 assert((attr & kvm_supported_memory_attributes) == attr); in kvm_set_memory_attributes()
1453 MemoryRegion *mr = section->mr; in kvm_set_phys_mem()
1454 bool writable = !mr->readonly && !mr->rom_device; in kvm_set_phys_mem()
1462 } else if (!mr->romd_mode) { in kvm_set_phys_mem()
1475 mr_offset = section->offset_within_region + start_addr - in kvm_set_phys_mem()
1476 section->offset_within_address_space; in kvm_set_phys_mem()
1484 slot_size = MIN(kvm_max_slot_size, size); in kvm_set_phys_mem()
1489 if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_set_phys_mem()
1503 if (kvm_state->kvm_dirty_ring_size) { in kvm_set_phys_mem()
1505 if (kvm_state->kvm_dirty_ring_with_bitmap) { in kvm_set_phys_mem()
1516 g_free(mem->dirty_bmap); in kvm_set_phys_mem()
1517 mem->dirty_bmap = NULL; in kvm_set_phys_mem()
1518 mem->memory_size = 0; in kvm_set_phys_mem()
1519 mem->flags = 0; in kvm_set_phys_mem()
1523 __func__, strerror(-err)); in kvm_set_phys_mem()
1527 size -= slot_size; in kvm_set_phys_mem()
1528 kml->nr_slots_used--; in kvm_set_phys_mem()
1535 slot_size = MIN(kvm_max_slot_size, size); in kvm_set_phys_mem()
1537 mem->as_id = kml->as_id; in kvm_set_phys_mem()
1538 mem->memory_size = slot_size; in kvm_set_phys_mem()
1539 mem->start_addr = start_addr; in kvm_set_phys_mem()
1540 mem->ram_start_offset = ram_start_offset; in kvm_set_phys_mem()
1541 mem->ram = ram; in kvm_set_phys_mem()
1542 mem->flags = kvm_mem_flags(mr); in kvm_set_phys_mem()
1543 mem->guest_memfd = mr->ram_block->guest_memfd; in kvm_set_phys_mem()
1544 mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host; in kvm_set_phys_mem()
1550 strerror(-err)); in kvm_set_phys_mem()
1558 __func__, strerror(-err)); in kvm_set_phys_mem()
1566 size -= slot_size; in kvm_set_phys_mem()
1567 kml->nr_slots_used++; in kvm_set_phys_mem()
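
The registration half of kvm_set_phys_mem() shown above carves a large section into memslots of at most kvm_max_slot_size each. A condensed sketch of that loop, with error handling and the guest_memfd/dirty-ring details left out:

    do {
        hwaddr slot_size = MIN(kvm_max_slot_size, size);
        KVMSlot *mem = kvm_get_free_slot(kml);     /* grows the slot array if needed */

        mem->as_id            = kml->as_id;
        mem->memory_size      = slot_size;
        mem->start_addr       = start_addr;
        mem->ram_start_offset = ram_start_offset;
        mem->ram              = ram;
        mem->flags            = kvm_mem_flags(mr);

        if (kvm_set_user_memory_region(kml, mem, true) < 0) {
            abort();                                /* the real code reports the error first */
        }

        start_addr       += slot_size;
        ram_start_offset += slot_size;
        ram              += slot_size;
        size             -= slot_size;
        kml->nr_slots_used++;
    } while (size);
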
1574 struct KVMDirtyRingReaper *r = &s->reaper; in kvm_dirty_ring_reaper_thread()
1581 r->reaper_state = KVM_DIRTY_RING_REAPER_WAIT; in kvm_dirty_ring_reaper_thread()
1593 trace_kvm_dirty_ring_reaper("wakeup"); in kvm_dirty_ring_reaper_thread()
1594 r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING; in kvm_dirty_ring_reaper_thread()
1600 r->reaper_iteration++; in kvm_dirty_ring_reaper_thread()
1608 struct KVMDirtyRingReaper *r = &s->reaper; in kvm_dirty_ring_reaper_init()
1610 qemu_thread_create(&r->reaper_thr, "kvm-reaper", in kvm_dirty_ring_reaper_init()
1617 uint32_t ring_size = s->kvm_dirty_ring_size; in kvm_dirty_ring_init()
1622 s->kvm_dirty_ring_size = 0; in kvm_dirty_ring_init()
1623 s->kvm_dirty_ring_bytes = 0; in kvm_dirty_ring_init()
1649 return -EINVAL; in kvm_dirty_ring_init()
1655 "Suggested minimum value is 1024.", strerror(-ret)); in kvm_dirty_ring_init()
1656 return -EIO; in kvm_dirty_ring_init()
1665 "%s. ", strerror(-ret)); in kvm_dirty_ring_init()
1666 return -EIO; in kvm_dirty_ring_init()
1669 s->kvm_dirty_ring_with_bitmap = true; in kvm_dirty_ring_init()
1672 s->kvm_dirty_ring_size = ring_size; in kvm_dirty_ring_init()
1673 s->kvm_dirty_ring_bytes = ring_bytes; in kvm_dirty_ring_init()
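
The ring_size/ring_bytes pair stored above determines how much each vCPU mmaps for its dirty ring (see the kvm_dirty_gfns mmap in the kvm_init_vcpu() fragments earlier). A sketch of the relation, assuming the kernel's 16-byte struct kvm_dirty_gfn (u32 flags, u32 slot, u64 offset):

    /* One kvm_dirty_gfn entry per ring slot; the per-vCPU mmap covers the
     * whole ring.  With 4096 entries that is 4096 * 16 = 64 KiB per vCPU. */
    uint64_t ring_bytes = ring_size * sizeof(struct kvm_dirty_gfn);
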
1685 update->section = *section; in kvm_region_add()
1687 QSIMPLEQ_INSERT_TAIL(&kml->transaction_add, update, next); in kvm_region_add()
1697 update->section = *section; in kvm_region_del()
1699 QSIMPLEQ_INSERT_TAIL(&kml->transaction_del, update, next); in kvm_region_del()
1709 if (QSIMPLEQ_EMPTY(&kml->transaction_add) && in kvm_region_commit()
1710 QSIMPLEQ_EMPTY(&kml->transaction_del)) { in kvm_region_commit()
1721 u1 = QSIMPLEQ_FIRST(&kml->transaction_del); in kvm_region_commit()
1722 u2 = QSIMPLEQ_FIRST(&kml->transaction_add); in kvm_region_commit()
1726 range_init_nofail(&r1, u1->section.offset_within_address_space, in kvm_region_commit()
1727 int128_get64(u1->section.size)); in kvm_region_commit()
1728 range_init_nofail(&r2, u2->section.offset_within_address_space, in kvm_region_commit()
1729 int128_get64(u2->section.size)); in kvm_region_commit()
1748 while (!QSIMPLEQ_EMPTY(&kml->transaction_del)) { in kvm_region_commit()
1749 u1 = QSIMPLEQ_FIRST(&kml->transaction_del); in kvm_region_commit()
1750 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_del, next); in kvm_region_commit()
1752 kvm_set_phys_mem(kml, &u1->section, false); in kvm_region_commit()
1753 memory_region_unref(u1->section.mr); in kvm_region_commit()
1757 while (!QSIMPLEQ_EMPTY(&kml->transaction_add)) { in kvm_region_commit()
1758 u1 = QSIMPLEQ_FIRST(&kml->transaction_add); in kvm_region_commit()
1759 QSIMPLEQ_REMOVE_HEAD(&kml->transaction_add, next); in kvm_region_commit()
1761 memory_region_ref(u1->section.mr); in kvm_region_commit()
1762 kvm_set_phys_mem(kml, &u1->section, true); in kvm_region_commit()
1794 for (i = 0; i < kml->nr_slots_allocated; i++) { in kvm_log_sync_global()
1795 mem = &kml->slots[i]; in kvm_log_sync_global()
1796 if (mem->memory_size && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_log_sync_global()
1799 if (s->kvm_dirty_ring_with_bitmap && last_stage && in kvm_log_sync_global()
1825 section->mr->name, section->offset_within_region, in kvm_log_clear()
1826 int128_get64(section->size)); in kvm_log_clear()
1839 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space, in kvm_mem_ioeventfd_add()
1840 data, true, int128_get64(section->size), in kvm_mem_ioeventfd_add()
1844 __func__, strerror(-r), -r); in kvm_mem_ioeventfd_add()
1857 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space, in kvm_mem_ioeventfd_del()
1858 data, false, int128_get64(section->size), in kvm_mem_ioeventfd_del()
1862 __func__, strerror(-r), -r); in kvm_mem_ioeventfd_del()
1875 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space, in kvm_io_ioeventfd_add()
1876 data, true, int128_get64(section->size), in kvm_io_ioeventfd_add()
1880 __func__, strerror(-r), -r); in kvm_io_ioeventfd_add()
1894 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space, in kvm_io_ioeventfd_del()
1895 data, false, int128_get64(section->size), in kvm_io_ioeventfd_del()
1899 __func__, strerror(-r), -r); in kvm_io_ioeventfd_del()
1909 kml->as_id = as_id; in kvm_memory_listener_register()
1913 QSIMPLEQ_INIT(&kml->transaction_add); in kvm_memory_listener_register()
1914 QSIMPLEQ_INIT(&kml->transaction_del); in kvm_memory_listener_register()
1916 kml->listener.region_add = kvm_region_add; in kvm_memory_listener_register()
1917 kml->listener.region_del = kvm_region_del; in kvm_memory_listener_register()
1918 kml->listener.commit = kvm_region_commit; in kvm_memory_listener_register()
1919 kml->listener.log_start = kvm_log_start; in kvm_memory_listener_register()
1920 kml->listener.log_stop = kvm_log_stop; in kvm_memory_listener_register()
1921 kml->listener.priority = MEMORY_LISTENER_PRIORITY_ACCEL; in kvm_memory_listener_register()
1922 kml->listener.name = name; in kvm_memory_listener_register()
1924 if (s->kvm_dirty_ring_size) { in kvm_memory_listener_register()
1925 kml->listener.log_sync_global = kvm_log_sync_global; in kvm_memory_listener_register()
1927 kml->listener.log_sync = kvm_log_sync; in kvm_memory_listener_register()
1928 kml->listener.log_clear = kvm_log_clear; in kvm_memory_listener_register()
1931 memory_listener_register(&kml->listener, as); in kvm_memory_listener_register()
1933 for (i = 0; i < s->nr_as; ++i) { in kvm_memory_listener_register()
1934 if (!s->as[i].as) { in kvm_memory_listener_register()
1935 s->as[i].as = as; in kvm_memory_listener_register()
1936 s->as[i].ml = kml; in kvm_memory_listener_register()
1943 .name = "kvm-io",
1956 assert(kvm_async_interrupts_enabled()); in kvm_set_irq()
1960 ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event); in kvm_set_irq()
1966 return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status; in kvm_set_irq()
1977 set_bit(gsi, s->used_gsi_bitmap); in set_gsi()
1982 clear_bit(gsi, s->used_gsi_bitmap); in clear_gsi()
1989 gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1; in kvm_init_irq_routing()
1992 s->used_gsi_bitmap = bitmap_new(gsi_count); in kvm_init_irq_routing()
1993 s->gsi_count = gsi_count; in kvm_init_irq_routing()
1996 s->irq_routes = g_malloc0(sizeof(*s->irq_routes)); in kvm_init_irq_routing()
1997 s->nr_allocated_irq_routes = 0; in kvm_init_irq_routing()
2014 s->irq_routes->flags = 0; in kvm_irqchip_commit_routes()
2016 ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes); in kvm_irqchip_commit_routes()
2017 assert(ret == 0); in kvm_irqchip_commit_routes()
2026 if (s->irq_routes->nr == s->nr_allocated_irq_routes) { in kvm_add_routing_entry()
2027 n = s->nr_allocated_irq_routes * 2; in kvm_add_routing_entry()
2033 s->irq_routes = g_realloc(s->irq_routes, size); in kvm_add_routing_entry()
2034 s->nr_allocated_irq_routes = n; in kvm_add_routing_entry()
2036 n = s->irq_routes->nr++; in kvm_add_routing_entry()
2037 new = &s->irq_routes->entries[n]; in kvm_add_routing_entry()
2041 set_gsi(s, entry->gsi); in kvm_add_routing_entry()
2050 for (n = 0; n < s->irq_routes->nr; n++) { in kvm_update_routing_entry()
2051 entry = &s->irq_routes->entries[n]; in kvm_update_routing_entry()
2052 if (entry->gsi != new_entry->gsi) { in kvm_update_routing_entry()
2065 return -ESRCH; in kvm_update_routing_entry()
2068 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin) in kvm_irqchip_add_irq_route() argument
2072 assert(pin < s->gsi_count); in kvm_irqchip_add_irq_route()
2078 e.u.irqchip.pin = pin; in kvm_irqchip_add_irq_route()
2091 for (i = 0; i < s->irq_routes->nr; i++) { in kvm_irqchip_release_virq()
2092 e = &s->irq_routes->entries[i]; in kvm_irqchip_release_virq()
2093 if (e->gsi == virq) { in kvm_irqchip_release_virq()
2094 s->irq_routes->nr--; in kvm_irqchip_release_virq()
2095 *e = s->irq_routes->entries[s->irq_routes->nr]; in kvm_irqchip_release_virq()
2123 next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count); in kvm_irqchip_get_virq()
2124 if (next_virq >= s->gsi_count) { in kvm_irqchip_get_virq()
2125 return -ENOSPC; in kvm_irqchip_get_virq()
2148 KVMState *s = c->s; in kvm_irqchip_add_msi_route()
2160 return -ENOSYS; in kvm_irqchip_add_msi_route()
2180 return -EINVAL; in kvm_irqchip_add_msi_route()
2183 if (s->irq_routes->nr < s->gsi_count) { in kvm_irqchip_add_msi_route()
2184 trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A", in kvm_irqchip_add_msi_route()
2189 c->changes++; in kvm_irqchip_add_msi_route()
2192 return -ENOSPC; in kvm_irqchip_add_msi_route()
2208 return -ENOSYS; in kvm_irqchip_update_msi_route()
2222 return -EINVAL; in kvm_irqchip_update_msi_route()
2235 int rfd = resample ? event_notifier_get_fd(resample) : -1; in kvm_irqchip_assign_irqfd()
2243 if (rfd != -1) { in kvm_irqchip_assign_irqfd()
2244 assert(assign); in kvm_irqchip_assign_irqfd()
2293 return -ENOSYS; in kvm_irqchip_add_msi_route()
2298 return -ENOSYS; in kvm_irqchip_add_adapter_route()
2303 return -ENOSYS; in kvm_irqchip_add_hv_sint_route()
2315 return -ENOSYS; in kvm_irqchip_update_msi_route()
2335 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); in kvm_irqchip_add_irqfd_notifier()
2338 return -ENXIO; in kvm_irqchip_add_irqfd_notifier()
2347 gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi); in kvm_irqchip_remove_irqfd_notifier()
2350 return -ENXIO; in kvm_irqchip_remove_irqfd_notifier()
2357 g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi)); in kvm_irqchip_set_qemuirq_gsi()
2364 assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO); in kvm_irqchip_create()
2370 fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret)); in kvm_irqchip_create()
2382 /* First probe and see if there's a arch-specific hook to create the in kvm_irqchip_create()
2383 * in-kernel irqchip for us */ in kvm_irqchip_create()
2386 if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) { in kvm_irqchip_create()
2394 fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret)); in kvm_irqchip_create()
2399 /* If we have an in-kernel IRQ chip then we must have asynchronous in kvm_irqchip_create()
2407 s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal); in kvm_irqchip_create()
2440 return kvm_state && kvm_state->kvm_dirty_ring_size; in kvm_dirty_ring_enabled()
2449 return kvm_state->kvm_dirty_ring_size; in kvm_dirty_ring_size()
2452 static int do_kvm_create_vm(MachineState *ms, int type) in do_kvm_create_vm() argument
2457 s = KVM_STATE(ms->accelerator); in do_kvm_create_vm()
2461 } while (ret == -EINTR); in do_kvm_create_vm()
2464 error_report("ioctl(KVM_CREATE_VM) failed: %s", strerror(-ret)); in do_kvm_create_vm()
2467 if (ret == -EINVAL) { in do_kvm_create_vm()
2470 error_printf("- for kernels supporting the" in do_kvm_create_vm()
2473 error_printf("- for kernels supporting the vm.allocate_pgste" in do_kvm_create_vm()
2477 if (ret == -EINVAL) { in do_kvm_create_vm()
2487 static int find_kvm_machine_type(MachineState *ms) in find_kvm_machine_type() argument
2489 MachineClass *mc = MACHINE_GET_CLASS(ms); in find_kvm_machine_type()
2492 if (object_property_find(OBJECT(current_machine), "kvm-type")) { in find_kvm_machine_type()
2495 "kvm-type", in find_kvm_machine_type()
2497 type = mc->kvm_type(ms, kvm_type); in find_kvm_machine_type()
2498 } else if (mc->kvm_type) { in find_kvm_machine_type()
2499 type = mc->kvm_type(ms, NULL); in find_kvm_machine_type()
2501 type = kvm_arch_get_default_type(ms); in find_kvm_machine_type()
2523 * page is wr-protected initially, which is against how kvm dirty ring is in kvm_setup_dirty_ring()
2524 * usage - kvm dirty ring requires all pages are wr-protected at the very in kvm_setup_dirty_ring()
2528 * we may expect a higher stall time when starting the migration. In the in kvm_setup_dirty_ring()
2530 * instead of clearing dirty bit, it can be a way to explicitly wr-protect in kvm_setup_dirty_ring()
2533 if (!s->kvm_dirty_ring_size) { in kvm_setup_dirty_ring()
2538 s->manual_dirty_log_protect = dirty_log_manual_caps; in kvm_setup_dirty_ring()
2547 s->manual_dirty_log_protect = 0; in kvm_setup_dirty_ring()
2555 static int kvm_init(MachineState *ms) in kvm_init() argument
2557 MachineClass *mc = MACHINE_GET_CLASS(ms); in kvm_init()
2559 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" in kvm_init()
2565 { "SMP", ms->smp.cpus }, in kvm_init()
2566 { "hotpluggable", ms->smp.max_cpus }, in kvm_init()
2577 s = KVM_STATE(ms->accelerator); in kvm_init()
2585 assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size()); in kvm_init()
2587 s->sigmask_len = 8; in kvm_init()
2591 QTAILQ_INIT(&s->kvm_sw_breakpoints); in kvm_init()
2593 QLIST_INIT(&s->kvm_parked_vcpus); in kvm_init()
2594 s->fd = qemu_open_old(s->device ?: "/dev/kvm", O_RDWR); in kvm_init()
2595 if (s->fd == -1) { in kvm_init()
2597 ret = -errno; in kvm_init()
2604 ret = -EINVAL; in kvm_init()
2611 ret = -EINVAL; in kvm_init()
2617 s->nr_slots_max = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); in kvm_init()
2620 if (!s->nr_slots_max) { in kvm_init()
2621 s->nr_slots_max = KVM_MEMSLOTS_NR_MAX_DEFAULT; in kvm_init()
2624 type = find_kvm_machine_type(ms); in kvm_init()
2626 ret = -EINVAL; in kvm_init()
2630 ret = do_kvm_create_vm(ms, type); in kvm_init()
2635 s->vmfd = ret; in kvm_init()
2637 s->nr_as = kvm_vm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE); in kvm_init()
2638 if (s->nr_as <= 1) { in kvm_init()
2639 s->nr_as = 1; in kvm_init()
2641 s->as = g_new0(struct KVMAs, s->nr_as); in kvm_init()
2647 while (nc->name) { in kvm_init()
2648 if (nc->num > soft_vcpus_limit) { in kvm_init()
2651 nc->name, nc->num, soft_vcpus_limit); in kvm_init()
2653 if (nc->num > hard_vcpus_limit) { in kvm_init()
2656 nc->name, nc->num, hard_vcpus_limit); in kvm_init()
2669 ret = -EINVAL; in kvm_init()
2670 error_report("kvm does not support %s", missing_cap->name); in kvm_init()
2675 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO); in kvm_init()
2676 s->coalesced_pio = s->coalesced_mmio && in kvm_init()
2685 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); in kvm_init()
2687 s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); in kvm_init()
2689 s->irq_set_ioctl = KVM_IRQ_LINE; in kvm_init()
2691 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS; in kvm_init()
2724 ret = kvm_arch_init(ms, s); in kvm_init()
2735 if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { in kvm_init()
2736 … s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; in kvm_init()
2741 if (s->kernel_irqchip_allowed) { in kvm_init()
2745 s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add; in kvm_init()
2746 s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; in kvm_init()
2747 s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; in kvm_init()
2748 s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; in kvm_init()
2750 kvm_memory_listener_register(s, &s->memory_listener, in kvm_init()
2751 &address_space_memory, 0, "kvm-memory"); in kvm_init()
2755 s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU); in kvm_init()
2756 if (!s->sync_mmu) { in kvm_init()
2758 assert(!ret); in kvm_init()
2761 if (s->kvm_dirty_ring_size) { in kvm_init()
2773 assert(ret < 0); in kvm_init()
2774 if (s->vmfd >= 0) { in kvm_init()
2775 close(s->vmfd); in kvm_init()
2777 if (s->fd != -1) { in kvm_init()
2778 close(s->fd); in kvm_init()
2780 g_free(s->as); in kvm_init()
2781 g_free(s->memory_listener.slots); in kvm_init()
2788 s->sigmask_len = sigmask_len; in kvm_set_sigmask_len()
2810 run->internal.suberror); in kvm_handle_internal_error()
2812 for (i = 0; i < run->internal.ndata; ++i) { in kvm_handle_internal_error()
2814 i, (uint64_t)run->internal.data[i]); in kvm_handle_internal_error()
2816 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) { in kvm_handle_internal_error()
2826 return -1; in kvm_handle_internal_error()
2833 if (!s || s->coalesced_flush_in_progress) { in kvm_flush_coalesced_mmio_buffer()
2837 s->coalesced_flush_in_progress = true; in kvm_flush_coalesced_mmio_buffer()
2839 if (s->coalesced_mmio_ring) { in kvm_flush_coalesced_mmio_buffer()
2840 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring; in kvm_flush_coalesced_mmio_buffer()
2841 while (ring->first != ring->last) { in kvm_flush_coalesced_mmio_buffer()
2844 ent = &ring->coalesced_mmio[ring->first]; in kvm_flush_coalesced_mmio_buffer()
2846 if (ent->pio == 1) { in kvm_flush_coalesced_mmio_buffer()
2847 address_space_write(&address_space_io, ent->phys_addr, in kvm_flush_coalesced_mmio_buffer()
2848 MEMTXATTRS_UNSPECIFIED, ent->data, in kvm_flush_coalesced_mmio_buffer()
2849 ent->len); in kvm_flush_coalesced_mmio_buffer()
2851 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len); in kvm_flush_coalesced_mmio_buffer()
2854 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX; in kvm_flush_coalesced_mmio_buffer()
2858 s->coalesced_flush_in_progress = false; in kvm_flush_coalesced_mmio_buffer()
2863 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) { in do_kvm_cpu_synchronize_state()
2870 error_report("Failed to get registers: %s", strerror(-ret)); in do_kvm_cpu_synchronize_state()
2877 cpu->vcpu_dirty = true; in do_kvm_cpu_synchronize_state()
2883 if (!cpu->vcpu_dirty && !kvm_state->guest_state_protected) { in kvm_cpu_synchronize_state()
2897 strerror(-ret)); in do_kvm_cpu_synchronize_post_reset()
2903 cpu->vcpu_dirty = false; in do_kvm_cpu_synchronize_post_reset()
2924 strerror(-ret)); in do_kvm_cpu_synchronize_post_init()
2929 cpu->vcpu_dirty = false; in do_kvm_cpu_synchronize_post_init()
2934 if (!kvm_state->guest_state_protected) { in kvm_cpu_synchronize_post_init()
2945 cpu->vcpu_dirty = true; in do_kvm_cpu_synchronize_pre_loadvm()
2961 qatomic_set(&cpu->kvm_run->immediate_exit, 1); in kvm_cpu_kick()
2982 qatomic_set(&cpu->kvm_run->immediate_exit, 0); in kvm_eat_signals()
2983 /* Write kvm_run->immediate_exit before the cpu->exit_request in kvm_eat_signals()
2995 if (r == -1 && !(errno == EAGAIN || errno == EINTR)) { in kvm_eat_signals()
3001 if (r == -1) { in kvm_eat_signals()
3015 int ret = -EINVAL; in kvm_convert_memory()
3032 * Ignore converting non-assigned region to shared. in kvm_convert_memory()
3036 * and vIO-APIC 0xFEC00000 4K page. in kvm_convert_memory()
3079 if (rb->page_size != qemu_real_host_page_size()) { in kvm_convert_memory()
3082 * pre-allocated and doesn't need to be discarded in kvm_convert_memory()
3098 struct kvm_run *run = cpu->kvm_run; in kvm_cpu_exec()
3104 qatomic_set(&cpu->exit_request, 0); in kvm_cpu_exec()
3114 if (cpu->vcpu_dirty) { in kvm_cpu_exec()
3122 strerror(-ret)); in kvm_cpu_exec()
3124 ret = -1; in kvm_cpu_exec()
3128 cpu->vcpu_dirty = false; in kvm_cpu_exec()
3132 if (qatomic_read(&cpu->exit_request)) { in kvm_cpu_exec()
3136 * instruction emulation. This self-signal will ensure that we in kvm_cpu_exec()
3142 /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit. in kvm_cpu_exec()
3162 if (run_ret == -EINTR || run_ret == -EAGAIN) { in kvm_cpu_exec()
3168 if (!(run_ret == -EFAULT && run->exit_reason == KVM_EXIT_MEMORY_FAULT)) { in kvm_cpu_exec()
3170 strerror(-run_ret)); in kvm_cpu_exec()
3172 if (run_ret == -EBUSY) { in kvm_cpu_exec()
3179 ret = -1; in kvm_cpu_exec()
3184 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason); in kvm_cpu_exec()
3185 switch (run->exit_reason) { in kvm_cpu_exec()
3188 kvm_handle_io(run->io.port, attrs, in kvm_cpu_exec()
3189 (uint8_t *)run + run->io.data_offset, in kvm_cpu_exec()
3190 run->io.direction, in kvm_cpu_exec()
3191 run->io.size, in kvm_cpu_exec()
3192 run->io.count); in kvm_cpu_exec()
3198 run->mmio.phys_addr, attrs, in kvm_cpu_exec()
3199 run->mmio.data, in kvm_cpu_exec()
3200 run->mmio.len, in kvm_cpu_exec()
3201 run->mmio.is_write); in kvm_cpu_exec()
3213 (uint64_t)run->hw.hardware_exit_reason); in kvm_cpu_exec()
3214 ret = -1; in kvm_cpu_exec()
3224 trace_kvm_dirty_ring_full(cpu->cpu_index); in kvm_cpu_exec()
3230 * the miss of sleep, so just reap the ring-fulled vCPU. in kvm_cpu_exec()
3242 trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type); in kvm_cpu_exec()
3243 switch (run->system_event.type) { in kvm_cpu_exec()
3265 trace_kvm_memory_fault(run->memory_fault.gpa, in kvm_cpu_exec()
3266 run->memory_fault.size, in kvm_cpu_exec()
3267 run->memory_fault.flags); in kvm_cpu_exec()
3268 if (run->memory_fault.flags & ~KVM_MEMORY_EXIT_FLAG_PRIVATE) { in kvm_cpu_exec()
3270 (uint64_t)run->memory_fault.flags); in kvm_cpu_exec()
3271 ret = -1; in kvm_cpu_exec()
3274 ret = kvm_convert_memory(run->memory_fault.gpa, run->memory_fault.size, in kvm_cpu_exec()
3275 run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE); in kvm_cpu_exec()
3291 qatomic_set(&cpu->exit_request, 0); in kvm_cpu_exec()
3306 ret = ioctl(s->fd, type, arg); in kvm_ioctl()
3307 if (ret == -1) { in kvm_ioctl()
3308 ret = -errno; in kvm_ioctl()
3325 ret = ioctl(s->vmfd, type, arg); in kvm_vm_ioctl()
3327 if (ret == -1) { in kvm_vm_ioctl()
3328 ret = -errno; in kvm_vm_ioctl()
3343 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg); in kvm_vcpu_ioctl()
3345 ret = ioctl(cpu->kvm_fd, type, arg); in kvm_vcpu_ioctl()
3347 if (ret == -1) { in kvm_vcpu_ioctl()
3348 ret = -errno; in kvm_vcpu_ioctl()
3367 if (ret == -1) { in kvm_device_ioctl()
3368 ret = -errno; in kvm_device_ioctl()
3416 error_setg_errno(errp, -err, in kvm_device_access()
3426 return kvm_state->sync_mmu; in kvm_has_sync_mmu()
3431 return kvm_state->vcpu_events; in kvm_has_vcpu_events()
3436 return kvm_state->max_nested_state_len; in kvm_max_nested_state_length()
3458 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) { in kvm_find_sw_breakpoint()
3459 if (bp->pc == pc) { in kvm_find_sw_breakpoint()
3468 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints); in kvm_sw_breakpoints_active()
3481 dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG, in kvm_invoke_set_guest_debug()
3482 &dbg_data->dbg); in kvm_invoke_set_guest_debug()
3491 if (cpu->singlestep_enabled) { in kvm_update_guest_debug()
3494 if (cpu->singlestep_enabled & SSTEP_NOIRQ) { in kvm_update_guest_debug()
3519 bp->use_count++; in kvm_insert_breakpoint()
3524 bp->pc = addr; in kvm_insert_breakpoint()
3525 bp->use_count = 1; in kvm_insert_breakpoint()
3532 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); in kvm_insert_breakpoint()
3557 return -ENOENT; in kvm_remove_breakpoint()
3560 if (bp->use_count > 1) { in kvm_remove_breakpoint()
3561 bp->use_count--; in kvm_remove_breakpoint()
3570 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); in kvm_remove_breakpoint()
3591 KVMState *s = cpu->kvm_state; in kvm_remove_all_breakpoints()
3594 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) { in kvm_remove_all_breakpoints()
3603 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry); in kvm_remove_all_breakpoints()
3623 sigmask->len = s->sigmask_len; in kvm_set_signal_mask()
3624 memcpy(sigmask->sigset, sigset, sizeof(*sigset)); in kvm_set_signal_mask()
3634 assert(kvm_immediate_exit); in kvm_ipi_signal()
3661 fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r)); in kvm_init_cpu_signals()
3676 qatomic_set(&cpu->exit_request, 1); in kvm_on_sigbus_vcpu()
3691 assert(code != BUS_MCEERR_AR); in kvm_on_sigbus()
3705 create_dev.fd = -1; in kvm_create_device()
3709 return -ENOTSUP; in kvm_create_device()
3724 .fd = -1, in kvm_device_supported()
3744 trace_kvm_failed_reg_set(id, strerror(-r)); in kvm_set_one_reg()
3758 trace_kvm_failed_reg_get(id, strerror(-r)); in kvm_get_one_reg()
3763 static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as, in kvm_accel_has_memory() argument
3766 KVMState *kvm = KVM_STATE(ms->accelerator); in kvm_accel_has_memory()
3769 for (i = 0; i < kvm->nr_as; ++i) { in kvm_accel_has_memory()
3770 if (kvm->as[i].as == as && kvm->as[i].ml) { in kvm_accel_has_memory()
3771 size = MIN(kvm_max_slot_size, size); in kvm_accel_has_memory()
3772 return NULL != kvm_lookup_matching_slot(kvm->as[i].ml, in kvm_accel_has_memory()
3785 int64_t value = s->kvm_shadow_mem; in kvm_get_kvm_shadow_mem()
3797 if (s->fd != -1) { in kvm_set_kvm_shadow_mem()
3806 s->kvm_shadow_mem = value; in kvm_set_kvm_shadow_mem()
3816 if (s->fd != -1) { in kvm_set_kernel_irqchip()
3826 s->kernel_irqchip_allowed = true; in kvm_set_kernel_irqchip()
3827 s->kernel_irqchip_required = true; in kvm_set_kernel_irqchip()
3828 s->kernel_irqchip_split = ON_OFF_AUTO_OFF; in kvm_set_kernel_irqchip()
3831 s->kernel_irqchip_allowed = false; in kvm_set_kernel_irqchip()
3832 s->kernel_irqchip_required = false; in kvm_set_kernel_irqchip()
3833 s->kernel_irqchip_split = ON_OFF_AUTO_OFF; in kvm_set_kernel_irqchip()
3836 s->kernel_irqchip_allowed = true; in kvm_set_kernel_irqchip()
3837 s->kernel_irqchip_required = true; in kvm_set_kernel_irqchip()
3838 s->kernel_irqchip_split = ON_OFF_AUTO_ON; in kvm_set_kernel_irqchip()
3850 return kvm_state->kernel_irqchip_allowed; in kvm_kernel_irqchip_allowed()
3855 return kvm_state->kernel_irqchip_required; in kvm_kernel_irqchip_required()
3860 return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON; in kvm_kernel_irqchip_split()
3868 uint32_t value = s->kvm_dirty_ring_size; in kvm_get_dirty_ring_size()
3880 if (s->fd != -1) { in kvm_set_dirty_ring_size()
3888 if (value & (value - 1)) { in kvm_set_dirty_ring_size()
3889 error_setg(errp, "dirty-ring-size must be a power of two."); in kvm_set_dirty_ring_size()
3893 s->kvm_dirty_ring_size = value; in kvm_set_dirty_ring_size()
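
The dirty-ring-size setter above uses the usual bit trick for its power-of-two check; for illustration:

    /* value & (value - 1) clears the lowest set bit, so it is zero only
     * when at most one bit is set:
     *   4096 & 4095 == 0          -> accepted
     *   4100 & 4099 == 4096 != 0  -> rejected ("must be a power of two") */
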
3901 return g_strdup(s->device); in kvm_get_device()
3910 g_free(s->device); in kvm_set_device()
3911 s->device = g_strdup(value); in kvm_set_device()
3917 s->msr_energy.enable = value; in kvm_set_kvm_rapl()
3925 g_free(s->msr_energy.socket_path); in kvm_set_kvm_rapl_socket_path()
3926 s->msr_energy.socket_path = g_strdup(str); in kvm_set_kvm_rapl_socket_path()
3933 s->fd = -1; in kvm_accel_instance_init()
3934 s->vmfd = -1; in kvm_accel_instance_init()
3935 s->kvm_shadow_mem = -1; in kvm_accel_instance_init()
3936 s->kernel_irqchip_allowed = true; in kvm_accel_instance_init()
3937 s->kernel_irqchip_split = ON_OFF_AUTO_AUTO; in kvm_accel_instance_init()
3939 s->kvm_dirty_ring_size = 0; in kvm_accel_instance_init()
3940 s->kvm_dirty_ring_with_bitmap = false; in kvm_accel_instance_init()
3941 s->kvm_eager_split_size = 0; in kvm_accel_instance_init()
3942 s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN; in kvm_accel_instance_init()
3943 s->notify_window = 0; in kvm_accel_instance_init()
3944 s->xen_version = 0; in kvm_accel_instance_init()
3945 s->xen_gnttab_max_frames = 64; in kvm_accel_instance_init()
3946 s->xen_evtchn_max_pirq = 256; in kvm_accel_instance_init()
3947 s->device = NULL; in kvm_accel_instance_init()
3948 s->msr_energy.enable = false; in kvm_accel_instance_init()
3965 ac->name = "KVM"; in kvm_accel_class_init()
3966 ac->init_machine = kvm_init; in kvm_accel_class_init()
3967 ac->has_memory = kvm_accel_has_memory; in kvm_accel_class_init()
3968 ac->allowed = &kvm_allowed; in kvm_accel_class_init()
3969 ac->gdbstub_supported_sstep_flags = kvm_gdbstub_sstep_flags; in kvm_accel_class_init()
3971 object_class_property_add(oc, "kernel-irqchip", "on|off|split", in kvm_accel_class_init()
3974 object_class_property_set_description(oc, "kernel-irqchip", in kvm_accel_class_init()
3975 "Configure KVM in-kernel irqchip"); in kvm_accel_class_init()
3977 object_class_property_add(oc, "kvm-shadow-mem", "int", in kvm_accel_class_init()
3980 object_class_property_set_description(oc, "kvm-shadow-mem", in kvm_accel_class_init()
3983 object_class_property_add(oc, "dirty-ring-size", "uint32", in kvm_accel_class_init()
3986 object_class_property_set_description(oc, "dirty-ring-size", in kvm_accel_class_init()
3999 object_class_property_add_str(oc, "rapl-helper-socket", NULL, in kvm_accel_class_init()
4001 object_class_property_set_description(oc, "rapl-helper-socket", in kvm_accel_class_init()
4041 switch (pdesc->flags & KVM_STATS_TYPE_MASK) { in add_kvmstat_entry()
4052 switch (pdesc->flags & KVM_STATS_UNIT_MASK) { in add_kvmstat_entry()
4063 switch (pdesc->flags & KVM_STATS_BASE_MASK) { in add_kvmstat_entry()
4073 stats->name = g_strdup(pdesc->name); in add_kvmstat_entry()
4074 stats->value = g_new0(StatsValue, 1); in add_kvmstat_entry()
4076 if ((pdesc->flags & KVM_STATS_UNIT_MASK) == KVM_STATS_UNIT_BOOLEAN) { in add_kvmstat_entry()
4077 stats->value->u.boolean = *stats_data; in add_kvmstat_entry()
4078 stats->value->type = QTYPE_QBOOL; in add_kvmstat_entry()
4079 } else if (pdesc->size == 1) { in add_kvmstat_entry()
4080 stats->value->u.scalar = *stats_data; in add_kvmstat_entry()
4081 stats->value->type = QTYPE_QNUM; in add_kvmstat_entry()
4084 for (i = 0; i < pdesc->size; i++) { in add_kvmstat_entry()
4087 stats->value->u.list = val_list; in add_kvmstat_entry()
4088 stats->value->type = QTYPE_QLIST; in add_kvmstat_entry()
4100 schema_entry->value = g_new0(StatsSchemaValue, 1); in add_kvmschema_entry()
4102 switch (pdesc->flags & KVM_STATS_TYPE_MASK) { in add_kvmschema_entry()
4104 schema_entry->value->type = STATS_TYPE_CUMULATIVE; in add_kvmschema_entry()
4107 schema_entry->value->type = STATS_TYPE_INSTANT; in add_kvmschema_entry()
4110 schema_entry->value->type = STATS_TYPE_PEAK; in add_kvmschema_entry()
4113 schema_entry->value->type = STATS_TYPE_LINEAR_HISTOGRAM; in add_kvmschema_entry()
4114 schema_entry->value->bucket_size = pdesc->bucket_size; in add_kvmschema_entry()
4115 schema_entry->value->has_bucket_size = true; in add_kvmschema_entry()
4118 schema_entry->value->type = STATS_TYPE_LOG2_HISTOGRAM; in add_kvmschema_entry()
4124 switch (pdesc->flags & KVM_STATS_UNIT_MASK) { in add_kvmschema_entry()
4128 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4129 schema_entry->value->unit = STATS_UNIT_BOOLEAN; in add_kvmschema_entry()
4132 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4133 schema_entry->value->unit = STATS_UNIT_BYTES; in add_kvmschema_entry()
4136 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4137 schema_entry->value->unit = STATS_UNIT_CYCLES; in add_kvmschema_entry()
4140 schema_entry->value->has_unit = true; in add_kvmschema_entry()
4141 schema_entry->value->unit = STATS_UNIT_SECONDS; in add_kvmschema_entry()
4147 schema_entry->value->exponent = pdesc->exponent; in add_kvmschema_entry()
4148 if (pdesc->exponent) { in add_kvmschema_entry()
4149 switch (pdesc->flags & KVM_STATS_BASE_MASK) { in add_kvmschema_entry()
4151 schema_entry->value->has_base = true; in add_kvmschema_entry()
4152 schema_entry->value->base = 10; in add_kvmschema_entry()
4155 schema_entry->value->has_base = true; in add_kvmschema_entry()
4156 schema_entry->value->base = 2; in add_kvmschema_entry()
4163 schema_entry->value->name = g_strdup(pdesc->name); in add_kvmschema_entry()
4164 schema_entry->next = list; in add_kvmschema_entry()
4167 g_free(schema_entry->value); in add_kvmschema_entry()
4199 if (g_str_equal(descriptors->ident, ident)) { in find_stats_descriptors()
4207 kvm_stats_header = &descriptors->kvm_stats_header; in find_stats_descriptors()
4216 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; in find_stats_descriptors()
4219 kvm_stats_desc = g_malloc0_n(kvm_stats_header->num_desc, size_desc); in find_stats_descriptors()
4221 size_desc * kvm_stats_header->num_desc, in find_stats_descriptors()
4222 kvm_stats_header->desc_offset); in find_stats_descriptors()
4224 if (ret != size_desc * kvm_stats_header->num_desc) { in find_stats_descriptors()
4227 size_desc * kvm_stats_header->num_desc, ret); in find_stats_descriptors()
4232 descriptors->kvm_stats_desc = kvm_stats_desc; in find_stats_descriptors()
4233 descriptors->ident = ident; in find_stats_descriptors()
4257 kvm_stats_header = &descriptors->kvm_stats_header; in query_stats()
4258 kvm_stats_desc = descriptors->kvm_stats_desc; in query_stats()
4259 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; in query_stats()
4262 for (i = 0; i < kvm_stats_header->num_desc; ++i) { in query_stats()
4264 size_data += pdesc->size * sizeof(*stats_data); in query_stats()
4268 ret = pread(stats_fd, stats_data, size_data, kvm_stats_header->data_offset); in query_stats()
4276 for (i = 0; i < kvm_stats_header->num_desc; ++i) { in query_stats()
4281 stats = (void *)stats_data + pdesc->offset; in query_stats()
4282 if (!apply_str_list_filter(pdesc->name, names)) { in query_stats()
4298 cpu->parent_obj.canonical_path, in query_stats()
4322 kvm_stats_header = &descriptors->kvm_stats_header; in query_stats_schema()
4323 kvm_stats_desc = descriptors->kvm_stats_desc; in query_stats_schema()
4324 size_desc = sizeof(*kvm_stats_desc) + kvm_stats_header->name_size; in query_stats_schema()
4327 for (i = 0; i < kvm_stats_header->num_desc; ++i) { in query_stats_schema()
4337 int stats_fd = cpu->kvm_vcpu_stats_fd; in query_stats_vcpu()
4340 if (stats_fd == -1) { in query_stats_vcpu()
4342 error_propagate(kvm_stats_args->errp, local_err); in query_stats_vcpu()
4345 query_stats(kvm_stats_args->result.stats, STATS_TARGET_VCPU, in query_stats_vcpu()
4346 kvm_stats_args->names, stats_fd, cpu, in query_stats_vcpu()
4347 kvm_stats_args->errp); in query_stats_vcpu()
4352 int stats_fd = cpu->kvm_vcpu_stats_fd; in query_stats_schema_vcpu()
4355 if (stats_fd == -1) { in query_stats_schema_vcpu()
4357 error_propagate(kvm_stats_args->errp, local_err); in query_stats_schema_vcpu()
4360 query_stats_schema(kvm_stats_args->result.schema, STATS_TARGET_VCPU, stats_fd, in query_stats_schema_vcpu()
4361 kvm_stats_args->errp); in query_stats_schema_vcpu()
4375 if (stats_fd == -1) { in query_stats_cb()
4390 if (!apply_str_list_filter(cpu->parent_obj.canonical_path, targets)) { in query_stats_cb()
4409 if (stats_fd == -1) { in query_stats_schemas_cb()
4425 kvm_state->guest_state_protected = true; in kvm_mark_guest_state_protected()
4438 return -1; in kvm_create_guest_memfd()
4444 return -1; in kvm_create_guest_memfd()